hexsha (stringlengths 40-40) | size (int64 4-1.05M) | content (stringlengths 4-1.05M) | avg_line_length (float64 1.33-100) | max_line_length (int64 1-1k) | alphanum_fraction (float64 0.25-1) |
---|---|---|---|---|---|
ff839aef53eccbd48281df951e6151d797cf0216 | 25,359 | // Copyright 2019-2021 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
#![allow(clippy::field_reassign_with_default)]
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use serde_with::skip_serializing_none;
use std::{collections::HashMap, path::PathBuf};
#[derive(Debug, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(untagged)]
pub enum BundleTarget {
All(Vec<String>),
One(String),
}
impl BundleTarget {
#[allow(dead_code)]
pub fn to_vec(&self) -> Vec<String> {
match self {
Self::All(list) => list.clone(),
Self::One(i) => vec![i.clone()],
}
}
}
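// A minimal sketch (not part of the original file) of how the untagged
// representation behaves: a bare JSON string deserializes to `One`, an array
// to `All`. Assumes only `serde_json`, which is already imported above.
#[cfg(test)]
mod bundle_target_tests {
    use super::BundleTarget;
    #[test]
    fn untagged_roundtrip() {
        let one: BundleTarget = serde_json::from_str("\"deb\"").unwrap();
        assert_eq!(one.to_vec(), vec!["deb".to_string()]);
        let all: BundleTarget = serde_json::from_str(r#"["deb", "msi"]"#).unwrap();
        assert_eq!(all.to_vec(), vec!["deb".to_string(), "msi".to_string()]);
    }
}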
#[skip_serializing_none]
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct DebConfig {
pub depends: Option<Vec<String>>,
#[serde(default)]
pub use_bootstrapper: bool,
#[serde(default)]
pub files: HashMap<PathBuf, PathBuf>,
}
#[skip_serializing_none]
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct MacConfig {
pub frameworks: Option<Vec<String>>,
pub minimum_system_version: Option<String>,
pub exception_domain: Option<String>,
pub license: Option<String>,
#[serde(default)]
pub use_bootstrapper: bool,
pub signing_identity: Option<String>,
pub entitlements: Option<String>,
}
fn default_language() -> String {
"en-US".into()
}
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct WixConfig {
/// App language. See https://docs.microsoft.com/en-us/windows/win32/msi/localizing-the-error-and-actiontext-tables.
#[serde(default = "default_language")]
pub language: String,
pub template: Option<PathBuf>,
#[serde(default)]
pub fragment_paths: Vec<PathBuf>,
#[serde(default)]
pub component_group_refs: Vec<String>,
#[serde(default)]
pub component_refs: Vec<String>,
#[serde(default)]
pub feature_group_refs: Vec<String>,
#[serde(default)]
pub feature_refs: Vec<String>,
#[serde(default)]
pub merge_refs: Vec<String>,
#[serde(default)]
pub skip_webview_install: bool,
/// Path to the license file.
pub license: Option<String>,
#[serde(default)]
pub enable_elevated_update_task: bool,
}
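// A minimal sketch (not part of the original file): deserializing an empty
// object exercises the serde field defaults above, including
// `default_language` for `language`.
#[cfg(test)]
mod wix_config_tests {
    use super::WixConfig;
    #[test]
    fn empty_object_uses_defaults() {
        let wix: WixConfig = serde_json::from_str("{}").unwrap();
        assert_eq!(wix.language, "en-US");
        assert!(wix.fragment_paths.is_empty());
        assert!(!wix.skip_webview_install);
        assert!(wix.license.is_none());
    }
}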
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct WindowsConfig {
pub digest_algorithm: Option<String>,
pub certificate_thumbprint: Option<String>,
pub timestamp_url: Option<String>,
pub wix: Option<WixConfig>,
}
#[skip_serializing_none]
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct PackageConfig {
/// App name. Automatically converted to kebab-case on Linux.
pub product_name: Option<String>,
/// App version.
pub version: Option<String>,
}
#[skip_serializing_none]
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct BundleConfig {
/// Whether we should build your app with tauri-bundler or plain `cargo build`
#[serde(default)]
pub active: bool,
/// The bundle targets, currently supports ["deb", "app", "msi", "appimage", "dmg"] or "all"
pub targets: Option<BundleTarget>,
/// The app's identifier
pub identifier: Option<String>,
/// The app's icons
pub icon: Option<Vec<String>>,
/// App resources to bundle.
/// Each resource is a path to a file or directory.
/// Glob patterns are supported.
pub resources: Option<Vec<String>>,
pub copyright: Option<String>,
pub category: Option<String>,
pub short_description: Option<String>,
pub long_description: Option<String>,
#[serde(default)]
pub deb: DebConfig,
#[serde(rename = "macOS", default)]
pub macos: MacConfig,
pub external_bin: Option<Vec<String>>,
#[serde(default)]
pub windows: WindowsConfig,
}
/// A CLI argument definition
#[skip_serializing_none]
#[derive(Debug, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct CliArg {
/// The short version of the argument, without the preceding `-`.
///
/// NOTE: Any leading `-` characters will be stripped, and only the first non-`-` character will be used as the short version.
pub short: Option<char>,
/// The unique argument name
pub name: String,
/// The argument description which will be shown on the help information.
/// Typically, this is a short (one line) description of the arg.
pub description: Option<String>,
/// The argument long description which will be shown on the help information.
/// Typically this a more detailed (multi-line) message that describes the argument.
pub long_description: Option<String>,
/// Specifies that the argument takes a value at run time.
///
/// NOTE: values for arguments may be specified in any of the following methods:
/// - Using a space, such as `-o value` or `--option value`
/// - Using an equals and no space, such as `-o=value` or `--option=value`
/// - Using a short flag and no space, such as `-ovalue`
pub takes_value: Option<bool>,
/// Specifies that the argument may appear more than once.
///
/// - For flags, this results in the number of occurrences of the flag being recorded.
/// For example -ddd or -d -d -d would count as three occurrences.
/// - For options there is a distinct difference in multiple occurrences vs multiple values.
/// For example, --opt val1 val2 is one occurrence, but two values. Whereas --opt val1 --opt val2 is two occurrences.
pub multiple: Option<bool>,
/// Specifies that the argument may appear more than once.
pub multiple_occurrences: Option<bool>,
/// The number of values the argument takes per occurrence.
pub number_of_values: Option<u64>,
/// Specifies a list of possible values for this argument.
/// At runtime, the CLI verifies that only one of the specified values was used, or fails with an error message.
pub possible_values: Option<Vec<String>>,
/// Specifies the minimum number of values for this argument.
/// For example, if you had a -f <file> argument where you wanted at least 2 'files',
/// you would set `minValues: 2`, and this argument would be satisfied if the user provided 2 or more values.
pub min_values: Option<u64>,
/// Specifies the maximum number of values for this argument.
/// For example, if you had a -f <file> argument where you wanted up to 3 'files',
/// you would set `maxValues: 3`, and this argument would be satisfied if the user provided 1, 2, or 3 values.
pub max_values: Option<u64>,
/// Sets whether or not the argument is required by default.
///
/// - Required by default means it is required when no other conflicting rules have been evaluated.
/// - Conflicting rules take precedence over being required.
pub required: Option<bool>,
/// Sets an arg that overrides this arg's required setting
/// i.e. this arg will be required unless this other argument is present.
pub required_unless_present: Option<String>,
/// Sets args that override this arg's required setting
/// i.e. this arg will be required unless all these other arguments are present.
pub required_unless_present_all: Option<Vec<String>>,
/// Sets args that override this arg's required setting
/// i.e. this arg will be required unless at least one of these other arguments is present.
pub required_unless_present_any: Option<Vec<String>>,
/// Sets a conflicting argument by name
/// i.e. when using this argument, the following argument can't be present and vice versa.
pub conflicts_with: Option<String>,
/// The same as conflictsWith but allows specifying multiple two-way conflicts per argument.
pub conflicts_with_all: Option<Vec<String>>,
/// Sets an argument by name that is required when this one is present
/// i.e. when using this argument, the following argument must be present.
pub requires: Option<String>,
/// Sets multiple arguments by names that are required when this one is present
/// i.e. when using this argument, the following arguments must be present.
pub requires_all: Option<Vec<String>>,
/// Allows a conditional requirement with the signature `[arg, value]`;
/// the requirement will only become valid if `arg`'s value equals `${value}`.
pub requires_if: Option<Vec<String>>,
/// Allows specifying that an argument is required conditionally with the signature `[arg, value]`;
/// the requirement will only become valid if the `arg`'s value equals `${value}`.
pub required_if_eq: Option<Vec<String>>,
/// Requires that options use the --option=val syntax
/// i.e. an equals between the option and associated value.
pub require_equals: Option<bool>,
/// The positional argument index, starting at 1.
///
/// The index refers to position according to other positional arguments.
/// It does not define position in the argument list as a whole. When utilized with multiple=true,
/// only the last positional argument may be defined as multiple (i.e. the one with the highest index).
pub index: Option<u64>,
}
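// A minimal sketch (not part of the original file): `rename_all = "camelCase"`
// means JSON keys such as `takesValue` map onto the snake_case fields, and
// `deny_unknown_fields` rejects anything else.
#[cfg(test)]
mod cli_arg_tests {
    use super::CliArg;
    #[test]
    fn camel_case_keys() {
        let json = r#"{ "name": "verbose", "short": "v", "takesValue": false, "multiple": true }"#;
        let arg: CliArg = serde_json::from_str(json).unwrap();
        assert_eq!(arg.name, "verbose");
        assert_eq!(arg.short, Some('v'));
        assert_eq!(arg.takes_value, Some(false));
        assert!(serde_json::from_str::<CliArg>(r#"{ "name": "x", "unknown": 1 }"#).is_err());
    }
}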
/// Describes a CLI configuration.
#[skip_serializing_none]
#[derive(Debug, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct CliConfig {
/// Command description which will be shown on the help information.
description: Option<String>,
/// Command long description which will be shown on the help information.
long_description: Option<String>,
/// Adds additional help information to be displayed in addition to auto-generated help.
/// This information is displayed before the auto-generated help information.
/// This is often used for header information.
before_help: Option<String>,
/// Adds additional help information to be displayed in addition to auto-generated help.
/// This information is displayed after the auto-generated help information.
/// This is often used to describe how to use the arguments, or caveats to be noted.
after_help: Option<String>,
/// List of args for the command.
args: Option<Vec<CliArg>>,
/// List of subcommands of this command.
///
/// Subcommands are effectively sub-apps, because they can contain their own arguments, subcommands, usage, etc.
/// They also function just like the app command, in that they get their own auto-generated help and usage.
subcommands: Option<HashMap<String, CliConfig>>,
}
#[derive(Debug, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(untagged)]
pub enum Port {
/// Port with a numeric value.
Value(u16),
/// Random port.
Random,
}
/// The window configuration object.
#[skip_serializing_none]
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct WindowConfig {
/// The window identifier.
pub label: Option<String>,
/// The window webview URL.
pub url: Option<String>,
/// Whether the file drop is enabled or not on the webview. By default it is enabled.
///
/// Disabling it is required to use drag and drop on the frontend on Windows.
#[serde(default = "default_file_drop_enabled")]
pub file_drop_enabled: bool,
/// Whether or not the window starts centered.
#[serde(default)]
pub center: bool,
/// The horizontal position of the window's top left corner
pub x: Option<f64>,
/// The vertical position of the window's top left corner
pub y: Option<f64>,
/// The window width.
pub width: Option<f64>,
/// The window height.
pub height: Option<f64>,
/// The min window width.
pub min_width: Option<f64>,
/// The min window height.
pub min_height: Option<f64>,
/// The max window width.
pub max_width: Option<f64>,
/// The max window height.
pub max_height: Option<f64>,
/// Whether the window is resizable or not.
#[serde(default)]
pub resizable: bool,
/// The window title.
pub title: Option<String>,
/// Whether the window starts as fullscreen or not.
#[serde(default)]
pub fullscreen: bool,
/// Whether the window will be initially focused or not.
#[serde(default = "default_focus")]
pub focus: bool,
/// Whether the window is transparent or not.
#[serde(default)]
pub transparent: bool,
/// Whether the window is maximized or not.
#[serde(default)]
pub maximized: bool,
/// Whether the window is visible or not.
#[serde(default = "default_visible")]
pub visible: bool,
/// Whether the window should have borders and bars.
#[serde(default = "default_decorations")]
pub decorations: bool,
/// Whether the window should always be on top of other windows.
#[serde(default)]
pub always_on_top: bool,
/// Whether or not the window icon should be added to the taskbar.
#[serde(default)]
pub skip_taskbar: bool,
}
fn default_focus() -> bool {
true
}
fn default_visible() -> bool {
true
}
fn default_decorations() -> bool {
true
}
fn default_file_drop_enabled() -> bool {
true
}
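// A minimal sketch (not part of the original file): the `default_*` functions
// above make `focus`, `visible`, `decorations`, and `fileDropEnabled` default
// to `true`, while plain `#[serde(default)]` fields default to `false`.
#[cfg(test)]
mod window_config_tests {
    use super::WindowConfig;
    #[test]
    fn empty_object_uses_defaults() {
        let window: WindowConfig = serde_json::from_str("{}").unwrap();
        assert!(window.focus && window.visible && window.decorations && window.file_drop_enabled);
        assert!(!window.resizable && !window.fullscreen && !window.always_on_top);
        assert!(window.width.is_none());
    }
}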
#[skip_serializing_none]
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct SecurityConfig {
pub csp: Option<String>,
}
pub trait Allowlist {
fn to_features(&self) -> Vec<&str>;
}
macro_rules! check_feature {
($self:ident, $features:ident, $flag:ident, $feature_name: expr) => {
if $self.$flag {
$features.push($feature_name)
}
};
}
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct FsAllowlistConfig {
#[serde(default)]
pub all: bool,
#[serde(default)]
pub read_text_file: bool,
#[serde(default)]
pub read_binary_file: bool,
#[serde(default)]
pub write_file: bool,
#[serde(default)]
pub write_binary_file: bool,
#[serde(default)]
pub read_dir: bool,
#[serde(default)]
pub copy_file: bool,
#[serde(default)]
pub create_dir: bool,
#[serde(default)]
pub remove_dir: bool,
#[serde(default)]
pub remove_file: bool,
#[serde(default)]
pub rename_file: bool,
}
impl Allowlist for FsAllowlistConfig {
fn to_features(&self) -> Vec<&str> {
if self.all {
vec!["fs-all"]
} else {
let mut features = Vec::new();
check_feature!(self, features, read_text_file, "fs-read-text-file");
check_feature!(self, features, read_binary_file, "fs-read-binary-file");
check_feature!(self, features, write_file, "fs-write-file");
check_feature!(self, features, write_binary_file, "fs-write-binary-file");
check_feature!(self, features, read_dir, "fs-read-dir");
check_feature!(self, features, copy_file, "fs-copy-file");
check_feature!(self, features, create_dir, "fs-create-dir");
check_feature!(self, features, remove_dir, "fs-remove-dir");
check_feature!(self, features, remove_file, "fs-remove-file");
check_feature!(self, features, rename_file, "fs-rename-file");
features
}
}
}
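// A minimal sketch (not part of the original file): `all` short-circuits to
// the umbrella feature, otherwise `check_feature!` pushes one Cargo feature
// per enabled flag, in declaration order.
#[cfg(test)]
mod fs_allowlist_tests {
    use super::{Allowlist, FsAllowlistConfig};
    #[test]
    fn feature_mapping() {
        let all = FsAllowlistConfig { all: true, ..Default::default() };
        assert_eq!(all.to_features(), vec!["fs-all"]);
        let some = FsAllowlistConfig { read_dir: true, copy_file: true, ..Default::default() };
        assert_eq!(some.to_features(), vec!["fs-read-dir", "fs-copy-file"]);
    }
}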
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct WindowAllowlistConfig {
#[serde(default)]
pub all: bool,
#[serde(default)]
pub create: bool,
}
impl Allowlist for WindowAllowlistConfig {
fn to_features(&self) -> Vec<&str> {
if self.all {
vec!["window-all"]
} else {
let mut features = Vec::new();
check_feature!(self, features, create, "window-create");
features
}
}
}
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct ShellAllowlistConfig {
#[serde(default)]
pub all: bool,
#[serde(default)]
pub execute: bool,
#[serde(default)]
pub open: bool,
}
impl Allowlist for ShellAllowlistConfig {
fn to_features(&self) -> Vec<&str> {
if self.all {
vec!["shell-all"]
} else {
let mut features = Vec::new();
check_feature!(self, features, execute, "shell-execute");
check_feature!(self, features, open, "shell-open");
features
}
}
}
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct DialogAllowlistConfig {
#[serde(default)]
pub all: bool,
#[serde(default)]
pub open: bool,
#[serde(default)]
pub save: bool,
}
impl Allowlist for DialogAllowlistConfig {
fn to_features(&self) -> Vec<&str> {
if self.all {
vec!["dialog-all"]
} else {
let mut features = Vec::new();
check_feature!(self, features, open, "dialog-open");
check_feature!(self, features, save, "dialog-save");
features
}
}
}
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct HttpAllowlistConfig {
#[serde(default)]
pub all: bool,
#[serde(default)]
pub request: bool,
}
impl Allowlist for HttpAllowlistConfig {
fn to_features(&self) -> Vec<&str> {
if self.all {
vec!["http-all"]
} else {
let mut features = Vec::new();
check_feature!(self, features, request, "http-request");
features
}
}
}
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct NotificationAllowlistConfig {
#[serde(default)]
pub all: bool,
}
impl Allowlist for NotificationAllowlistConfig {
fn to_features(&self) -> Vec<&str> {
if self.all {
vec!["notification-all"]
} else {
vec![]
}
}
}
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct GlobalShortcutAllowlistConfig {
#[serde(default)]
pub all: bool,
}
impl Allowlist for GlobalShortcutAllowlistConfig {
fn to_features(&self) -> Vec<&str> {
if self.all {
vec!["global-shortcut-all"]
} else {
vec![]
}
}
}
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct OsAllowlistConfig {
#[serde(default)]
pub all: bool,
}
impl Allowlist for OsAllowlistConfig {
fn to_features(&self) -> Vec<&str> {
if self.all {
vec!["os-all"]
} else {
vec![]
}
}
}
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct PathAllowlistConfig {
#[serde(default)]
pub all: bool,
}
impl Allowlist for PathAllowlistConfig {
fn to_features(&self) -> Vec<&str> {
if self.all {
vec!["path-all"]
} else {
vec![]
}
}
}
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct AllowlistConfig {
#[serde(default)]
pub all: bool,
#[serde(default)]
pub fs: FsAllowlistConfig,
#[serde(default)]
pub window: WindowAllowlistConfig,
#[serde(default)]
pub shell: ShellAllowlistConfig,
#[serde(default)]
pub dialog: DialogAllowlistConfig,
#[serde(default)]
pub http: HttpAllowlistConfig,
#[serde(default)]
pub notification: NotificationAllowlistConfig,
#[serde(default)]
pub global_shortcut: GlobalShortcutAllowlistConfig,
#[serde(default)]
pub os: OsAllowlistConfig,
#[serde(default)]
pub path: PathAllowlistConfig,
}
impl Allowlist for AllowlistConfig {
fn to_features(&self) -> Vec<&str> {
if self.all {
vec!["api-all"]
} else {
let mut features = Vec::new();
features.extend(self.fs.to_features());
features.extend(self.window.to_features());
features.extend(self.shell.to_features());
features.extend(self.dialog.to_features());
features.extend(self.http.to_features());
features.extend(self.notification.to_features());
features.extend(self.global_shortcut.to_features());
features.extend(self.os.to_features());
features.extend(self.path.to_features());
features
}
}
}
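// A minimal sketch (not part of the original file): the top-level allowlist
// either collapses to "api-all" or concatenates the per-module feature lists
// in the order the modules are extended above.
#[cfg(test)]
mod allowlist_tests {
    use super::{Allowlist, AllowlistConfig};
    #[test]
    fn aggregation() {
        let all = AllowlistConfig { all: true, ..Default::default() };
        assert_eq!(all.to_features(), vec!["api-all"]);
        let mut config = AllowlistConfig::default();
        config.shell.open = true;
        config.dialog.all = true;
        assert_eq!(config.to_features(), vec!["shell-open", "dialog-all"]);
    }
}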
/// The Tauri configuration object.
#[skip_serializing_none]
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct TauriConfig {
/// The windows configuration.
#[serde(default)]
pub windows: Vec<WindowConfig>,
/// The CLI configuration.
pub cli: Option<CliConfig>,
/// The bundler configuration.
#[serde(default)]
pub bundle: BundleConfig,
/// The allowlist configuration.
#[serde(default)]
allowlist: AllowlistConfig,
/// The security configuration.
pub security: Option<SecurityConfig>,
/// The updater configuration.
#[serde(default = "default_updater")]
pub updater: UpdaterConfig,
/// Configuration for app system tray.
pub system_tray: Option<SystemTrayConfig>,
}
impl TauriConfig {
#[allow(dead_code)]
pub fn features(&self) -> Vec<&str> {
self.allowlist.to_features()
}
}
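// A minimal sketch (not part of the original file): with no allowlist flags
// set, `features()` yields no Cargo features at all.
#[cfg(test)]
mod tauri_config_tests {
    use super::TauriConfig;
    #[test]
    fn default_config_enables_nothing() {
        assert!(TauriConfig::default().features().is_empty());
    }
}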
#[skip_serializing_none]
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct UpdaterConfig {
/// Whether the updater is active or not.
#[serde(default)]
pub active: bool,
/// Display built-in dialog or use event system if disabled.
#[serde(default = "default_dialog")]
pub dialog: Option<bool>,
/// The updater endpoints.
pub endpoints: Option<Vec<String>>,
/// Optional pubkey.
pub pubkey: Option<String>,
}
#[skip_serializing_none]
#[derive(Debug, Default, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct SystemTrayConfig {
/// Path to the icon to use on the system tray.
///
/// It is forced to be a `.png` file on Linux and macOS, and a `.ico` file on Windows.
pub icon_path: PathBuf,
/// A Boolean value that determines whether the image represents a [template](https://developer.apple.com/documentation/appkit/nsimage/1520017-template?language=objc) image on macOS.
#[serde(default)]
pub icon_as_template: bool,
}
// We allow `unnecessary_wraps` because `dialog` needs to be an `Option`;
// otherwise the CLI schema would mark `dialog` as a required field,
// which it is not, since we default it to `true`.
#[allow(clippy::unnecessary_wraps)]
fn default_dialog() -> Option<bool> {
Some(true)
}
/// The `dev_path` and `dist_dir` options.
#[derive(Debug, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(untagged, deny_unknown_fields)]
pub enum AppUrl {
/// The app's external URL, or the path to the directory containing the app assets.
Url(String),
/// An array of files to embed in the app.
Files(Vec<PathBuf>),
}
impl std::fmt::Display for AppUrl {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Url(url) => write!(f, "{}", url),
Self::Files(files) => write!(f, "{}", serde_json::to_string(files).unwrap()),
}
}
}
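// A minimal sketch (not part of the original file): `Url` displays verbatim,
// while `Files` falls back to its JSON array representation.
#[cfg(test)]
mod app_url_tests {
    use super::AppUrl;
    use std::path::PathBuf;
    #[test]
    fn display() {
        assert_eq!(AppUrl::Url("http://localhost:8080".into()).to_string(), "http://localhost:8080");
        let files = AppUrl::Files(vec![PathBuf::from("index.html")]);
        assert_eq!(files.to_string(), r#"["index.html"]"#);
    }
}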
/// The Build configuration object.
#[skip_serializing_none]
#[derive(Debug, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct BuildConfig {
/// The binary used to build and run the application.
pub runner: Option<String>,
/// The path or URL to use on development.
#[serde(default = "default_dev_path")]
pub dev_path: AppUrl,
/// The path to the app's dist dir. This path must contain your `index.html` file.
#[serde(default = "default_dist_dir")]
pub dist_dir: AppUrl,
/// A shell command to run before `tauri dev` kicks in.
pub before_dev_command: Option<String>,
/// A shell command to run before `tauri build` kicks in.
pub before_build_command: Option<String>,
/// Features passed to `cargo` commands.
pub features: Option<Vec<String>>,
/// Whether we should inject the Tauri API on `window.__TAURI__` or not.
#[serde(default)]
pub with_global_tauri: bool,
}
fn default_dev_path() -> AppUrl {
AppUrl::Url("".to_string())
}
fn default_dist_dir() -> AppUrl {
AppUrl::Url("../dist".to_string())
}
type JsonObject = HashMap<String, JsonValue>;
/// The tauri.conf.json mapper.
#[skip_serializing_none]
#[derive(Debug, PartialEq, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct Config {
/// Package settings.
#[serde(default)]
pub package: PackageConfig,
/// The Tauri configuration.
#[serde(default)]
pub tauri: TauriConfig,
/// The build configuration.
#[serde(default = "default_build")]
pub build: BuildConfig,
/// The plugins config.
#[serde(default)]
pub plugins: HashMap<String, JsonObject>,
}
fn default_build() -> BuildConfig {
BuildConfig {
runner: None,
dev_path: default_dev_path(),
dist_dir: default_dist_dir(),
before_dev_command: None,
before_build_command: None,
features: None,
with_global_tauri: false,
}
}
fn default_updater() -> UpdaterConfig {
UpdaterConfig {
active: false,
dialog: Some(true),
endpoints: None,
pubkey: None,
}
}
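// A minimal sketch (not part of the original file): an (almost) empty
// tauri.conf.json is valid because every section has a serde default. Note
// that `default_updater` only applies when the `tauri` object itself is
// present; a fully absent `tauri` falls back to `TauriConfig::default()`.
#[cfg(test)]
mod config_tests {
    use super::Config;
    #[test]
    fn minimal_config() {
        let config: Config = serde_json::from_str(r#"{ "tauri": {} }"#).unwrap();
        assert_eq!(config.build.dist_dir.to_string(), "../dist");
        assert_eq!(config.tauri.updater.dialog, Some(true));
        assert!(!config.tauri.bundle.active);
    }
}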
| 33.019531 | 184 | 0.703182 |
9b71f516212a920341339b706adc851225236948 | 4,402 | #[doc = "Register `RISE` reader"]
pub struct R(crate::R<RISE_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<RISE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<RISE_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<RISE_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `RISE` writer"]
pub struct W(crate::W<RISE_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<RISE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<RISE_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<RISE_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `RDET` reader - Rising edge detect. Bit n detects the rising edge of the pin selected in PINTSELn. Read 0: No rising edge has been detected on this pin since Reset or the last time a one was written to this bit. Write 0: no operation. Read 1: a rising edge has been detected since Reset or the last time a one was written to this bit. Write 1: clear rising edge detection for this pin."]
pub struct RDET_R(crate::FieldReader<u8, u8>);
impl RDET_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
RDET_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RDET_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RDET` writer - Rising edge detect. Bit n detects the rising edge of the pin selected in PINTSELn. Read 0: No rising edge has been detected on this pin since Reset or the last time a one was written to this bit. Write 0: no operation. Read 1: a rising edge has been detected since Reset or the last time a one was written to this bit. Write 1: clear rising edge detection for this pin."]
pub struct RDET_W<'a> {
w: &'a mut W,
}
impl<'a> RDET_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0xff) | (value as u32 & 0xff);
self.w
}
}
impl R {
#[doc = "Bits 0:7 - Rising edge detect. Bit n detects the rising edge of the pin selected in PINTSELn. Read 0: No rising edge has been detected on this pin since Reset or the last time a one was written to this bit. Write 0: no operation. Read 1: a rising edge has been detected since Reset or the last time a one was written to this bit. Write 1: clear rising edge detection for this pin."]
#[inline(always)]
pub fn rdet(&self) -> RDET_R {
RDET_R::new((self.bits & 0xff) as u8)
}
}
impl W {
#[doc = "Bits 0:7 - Rising edge detect. Bit n detects the rising edge of the pin selected in PINTSELn. Read 0: No rising edge has been detected on this pin since Reset or the last time a one was written to this bit. Write 0: no operation. Read 1: a rising edge has been detected since Reset or the last time a one was written to this bit. Write 1: clear rising edge detection for this pin."]
#[inline(always)]
pub fn rdet(&mut self) -> RDET_W {
RDET_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Pin interrupt rising edge register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [rise](index.html) module"]
pub struct RISE_SPEC;
impl crate::RegisterSpec for RISE_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [rise::R](R) reader structure"]
impl crate::Readable for RISE_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [rise::W](W) writer structure"]
impl crate::Writable for RISE_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets RISE to value 0"]
impl crate::Resettable for RISE_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
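// A minimal sketch (not part of the original file) of the masking arithmetic
// the generated accessors perform: `rdet()` extracts bits 0..=7, and
// `RDET_W::bits` replaces only those bits, leaving the rest of the register
// value untouched.
#[cfg(test)]
mod mask_tests {
    #[test]
    fn read_and_write_masks() {
        let reg: u32 = 0xDEAD_BEEF;
        // Field read: (self.bits & 0xff) as u8
        assert_eq!((reg & 0xff) as u8, 0xEF);
        // Field write: (self.w.bits & !0xff) | (value as u32 & 0xff)
        let written = (reg & !0xff) | (0x12u8 as u32 & 0xff);
        assert_eq!(written, 0xDEAD_BE12);
    }
}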
| 42.326923 | 419 | 0.648342 |
621c4c21bfcc7fab11ba70a3eb406537be122112 | 991 | /*
Copyright ⓒ 2016 rust-custom-derive contributors.
Licensed under the MIT license (see LICENSE or
<http://opensource.org/licenses/MIT>) or the Apache License, Version 2.0
(see LICENSE or <http://www.apache.org/licenses/LICENSE-2.0>), at your
option. All files in the project carrying such notice may not be copied,
modified, or distributed except according to those terms.
*/
#![cfg_attr(feature="parse-generics-poc", feature(plugin))]
#![cfg_attr(feature="parse-generics-poc", plugin(parse_generics_poc))]
#[macro_use] extern crate custom_derive;
#[macro_use] extern crate enum_as_str;
#[macro_use] extern crate parse_generics_shim;
#[macro_use] extern crate parse_macros;
custom_derive! {
#[allow(dead_code)]
#[derive(enum_as_str)]
enum Dagashi {
Umaibou,
PotatoFries,
CoffeeMilk,
YoungDonuts,
KinakoBou,
NamaikiBeer,
FueRamune,
Menko,
}
}
fn main() {
println!("{}", Dagashi::FueRamune.as_str());
}
| 28.314286 | 70 | 0.694248 |
ab8e989787b160bbf2c1e633bf15a6c61f42d9c6 | 1,041 | use std::{
env,
fmt::{self, Debug, Display},
path::PathBuf,
};
#[derive(Clone)]
pub struct RootPath(PathBuf);
impl Default for RootPath {
fn default() -> Self {
Self(
env::current_dir()
.expect("current dir")
.canonicalize()
.expect("canonicalize"),
)
}
}
impl std::ops::Deref for RootPath {
type Target = PathBuf;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for RootPath {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl Debug for RootPath {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
Debug::fmt(&self.0, f)
}
}
impl Display for RootPath {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0.to_str().unwrap())
}
}
impl From<&std::ffi::OsStr> for RootPath {
fn from(s: &std::ffi::OsStr) -> Self {
Self(PathBuf::from(s).canonicalize().expect("canonicalize"))
}
}
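// A minimal sketch (not part of the original file): both constructors
// canonicalize, so a `RootPath` is always absolute, and `Deref` exposes the
// full `PathBuf` API.
#[cfg(test)]
mod root_path_tests {
    use super::RootPath;
    #[test]
    fn always_absolute() {
        let root = RootPath::from(std::ffi::OsStr::new("."));
        assert!(root.is_absolute());
        assert_eq!(root.to_string(), RootPath::default().to_string());
    }
}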
| 21.244898 | 68 | 0.535062 |
1d946b0e1040d7b27ad96a0214997f854ab1fac1 | 624,862 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, models::*, API_VERSION};
pub mod big_data_pools {
use super::{models, models::*, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
big_data_pool_name: &str,
) -> std::result::Result<BigDataPoolResourceInfo, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/bigDataPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
big_data_pool_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BigDataPoolResourceInfo =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
big_data_pool_name: &str,
force: Option<bool>,
big_data_pool_info: &BigDataPoolResourceInfo,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/bigDataPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
big_data_pool_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(force) = force {
url.query_pairs_mut().append_pair("force", force.to_string().as_str());
}
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(big_data_pool_info).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BigDataPoolResourceInfo = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: BigDataPoolResourceInfo = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(BigDataPoolResourceInfo),
Accepted202(BigDataPoolResourceInfo),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
big_data_pool_name: &str,
big_data_pool_patch_info: &BigDataPoolPatchInfo,
) -> std::result::Result<BigDataPoolResourceInfo, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/bigDataPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
big_data_pool_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(big_data_pool_patch_info).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BigDataPoolResourceInfo =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod update {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
big_data_pool_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/bigDataPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
big_data_pool_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Accepted202(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(serde_json::Value),
Accepted202(serde_json::Value),
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_by_workspace(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<BigDataPoolResourceInfoListResult, list_by_workspace::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/bigDataPools",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_workspace::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_workspace::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_workspace::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_workspace::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BigDataPoolResourceInfoListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_workspace::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_workspace {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
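// A minimal sketch (not part of the original file) of the URL each operation
// builds before appending `api-version`; the base path shown is an assumption
// (the public Azure Resource Manager endpoint), not taken from this crate.
#[cfg(test)]
mod url_format_tests {
    #[test]
    fn big_data_pool_url() {
        let url = format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/bigDataPools/{}",
            "https://management.azure.com", "my-sub", "my-rg", "my-ws", "my-pool"
        );
        assert_eq!(
            url,
            "https://management.azure.com/subscriptions/my-sub/resourceGroups/my-rg/providers/Microsoft.Synapse/workspaces/my-ws/bigDataPools/my-pool"
        );
    }
}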
pub mod workspaces {
use super::{models, models::*, API_VERSION};
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
) -> std::result::Result<WorkspaceInfoListResult, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkspaceInfoListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_resource_group::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_resource_group {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<Workspace, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Workspace =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
workspace_info: &Workspace,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(workspace_info).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Workspace = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: Workspace = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(Workspace),
Created201(Workspace),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
workspace_patch_info: &WorkspacePatchInfo,
) -> std::result::Result<update::Response, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(workspace_patch_info).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Workspace =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: Workspace =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(Workspace),
Created201(Workspace),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Accepted202(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(serde_json::Value),
Accepted202(serde_json::Value),
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
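    /// Lists all Synapse workspaces in the subscription (HTTP GET), returning one
    /// `WorkspaceInfoListResult` page; following any continuation link in the
    /// result is left to the caller.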
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<WorkspaceInfoListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Synapse/workspaces",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkspaceInfoListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod workspace_aad_admins {
use super::{models, models::*, API_VERSION};
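    /// Fetches the workspace's Azure AD administrator
    /// (`.../administrators/activeDirectory`, HTTP GET).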
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<WorkspaceAadAdminInfo, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/administrators/activeDirectory",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkspaceAadAdminInfo =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
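    /// Creates or replaces the workspace's Azure AD administrator (HTTP PUT).
    /// `Ok200` and `Accepted202` both deserialize the returned
    /// `WorkspaceAadAdminInfo`; a 202 indicates the change is still being applied.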
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
aad_admin_info: &WorkspaceAadAdminInfo,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/administrators/activeDirectory",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(aad_admin_info).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkspaceAadAdminInfo = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: WorkspaceAadAdminInfo = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(WorkspaceAadAdminInfo),
Accepted202(WorkspaceAadAdminInfo),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
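    /// Removes the workspace's Azure AD administrator (HTTP DELETE). All three
    /// success variants (`Ok200`, `NoContent204`, `Accepted202`) carry no body.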
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/administrators/activeDirectory",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod workspace_sql_aad_admins {
use super::{models, models::*, API_VERSION};
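    /// Fetches the workspace's SQL Azure AD administrator
    /// (`.../sqlAdministrators/activeDirectory`, HTTP GET); mirrors
    /// `workspace_aad_admins::get` apart from the endpoint.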
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<WorkspaceAadAdminInfo, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlAdministrators/activeDirectory",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkspaceAadAdminInfo =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
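    /// Creates or replaces the workspace's SQL Azure AD administrator (HTTP PUT);
    /// the response handling matches `workspace_aad_admins::create_or_update`.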
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
aad_admin_info: &WorkspaceAadAdminInfo,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlAdministrators/activeDirectory",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(aad_admin_info).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkspaceAadAdminInfo = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: WorkspaceAadAdminInfo = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(WorkspaceAadAdminInfo),
Accepted202(WorkspaceAadAdminInfo),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
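    /// Removes the workspace's SQL Azure AD administrator (HTTP DELETE); all
    /// success variants carry no body.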
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlAdministrators/activeDirectory",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod workspace_managed_identity_sql_control_settings {
use super::{models, models::*, API_VERSION};
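    /// Reads the workspace's managed-identity SQL control settings
    /// (`.../managedIdentitySqlControlSettings/default`, HTTP GET).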
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<ManagedIdentitySqlControlSettingsModel, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/managedIdentitySqlControlSettings/default",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ManagedIdentitySqlControlSettingsModel =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
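    /// Creates or updates the managed-identity SQL control settings (HTTP PUT).
    /// Note the asymmetric response: `Ok200` carries the settings model, while
    /// `Created201` carries no body.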
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
managed_identity_sql_control_settings: &ManagedIdentitySqlControlSettingsModel,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/managedIdentitySqlControlSettings/default",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(managed_identity_sql_control_settings).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ManagedIdentitySqlControlSettingsModel = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => Ok(create_or_update::Response::Created201),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(ManagedIdentitySqlControlSettingsModel),
Created201,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod restorable_dropped_sql_pools {
use super::{models, models::*, API_VERSION};
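    /// Fetches a restorable dropped SQL pool by id
    /// (`.../restorableDroppedSqlPools/{id}`, HTTP GET).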
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
restorable_dropped_sql_pool_id: &str,
) -> std::result::Result<RestorableDroppedSqlPool, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/restorableDroppedSqlPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
restorable_dropped_sql_pool_id
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RestorableDroppedSqlPool =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
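    /// Lists the restorable dropped SQL pools of a workspace (HTTP GET).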
pub async fn list_by_workspace(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<RestorableDroppedSqlPoolListResult, list_by_workspace::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/restorableDroppedSqlPools",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_workspace::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_workspace::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_workspace::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_workspace::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RestorableDroppedSqlPoolListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_workspace::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_workspace {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod operations {
use super::{models, models::*, API_VERSION};
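    /// Checks whether a Synapse resource name is available in the subscription
    /// (HTTP POST to `/providers/Microsoft.Synapse/checkNameAvailability`).
    ///
    /// A hedged usage sketch (not compiled; the request fields shown are
    /// assumptions about the generated `CheckNameAvailabilityRequest` model):
    ///
    /// ```ignore
    /// let request = CheckNameAvailabilityRequest {
    ///     name: Some("my-workspace".to_owned()),
    ///     type_: Some("Microsoft.Synapse/workspaces".to_owned()),
    /// };
    /// let rsp = check_name_availability(&operation_config, subscription_id, &request).await?;
    /// ```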
pub async fn check_name_availability(
operation_config: &crate::OperationConfig,
subscription_id: &str,
request: &CheckNameAvailabilityRequest,
) -> std::result::Result<CheckNameAvailabilityResponse, check_name_availability::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Synapse/checkNameAvailability",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(check_name_availability::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(check_name_availability::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(request).map_err(check_name_availability::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(check_name_availability::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(check_name_availability::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: CheckNameAvailabilityResponse = serde_json::from_slice(rsp_body)
.map_err(|source| check_name_availability::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| check_name_availability::Error::DeserializeError(source, rsp_body.clone()))?;
Err(check_name_availability::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod check_name_availability {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
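    /// Lists the operations exposed by the Microsoft.Synapse resource provider
    /// (HTTP GET). The response body is a bare JSON array, so this returns
    /// `Vec<AvailableRpOperation>` rather than a list-result model.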
pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<Vec<AvailableRpOperation>, list::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!("{}/providers/Microsoft.Synapse/operations", operation_config.base_path());
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
        // Every other operation in this file appends the api-version query
        // parameter (and ARM requires it on all requests), so restore it here too.
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<AvailableRpOperation> =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
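    /// Polls the `Location`-header result of a long-running workspace operation
    /// (`.../operationResults/{operationId}`, HTTP GET). The status code alone
    /// encodes the outcome; no body is deserialized on success.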
pub async fn get_location_header_result(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
operation_id: &str,
) -> std::result::Result<get_location_header_result::Response, get_location_header_result::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/operationResults/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
operation_id
);
let mut url = url::Url::parse(url_str).map_err(get_location_header_result::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_location_header_result::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_location_header_result::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_location_header_result::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(get_location_header_result::Response::Ok200),
http::StatusCode::CREATED => Ok(get_location_header_result::Response::Created201),
http::StatusCode::ACCEPTED => Ok(get_location_header_result::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(get_location_header_result::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| get_location_header_result::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_location_header_result::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get_location_header_result {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Created201,
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
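    /// Polls the `Azure-AsyncOperation` status of a long-running workspace
    /// operation (`.../operationStatuses/{operationId}`, HTTP GET). A 404 maps to
    /// the dedicated `Error::NotFound404` variant rather than `DefaultResponse`.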
pub async fn get_azure_async_header_result(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
operation_id: &str,
) -> std::result::Result<OperationResource, get_azure_async_header_result::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/operationStatuses/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
operation_id
);
let mut url = url::Url::parse(url_str).map_err(get_azure_async_header_result::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_azure_async_header_result::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_azure_async_header_result::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_azure_async_header_result::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: OperationResource = serde_json::from_slice(rsp_body)
.map_err(|source| get_azure_async_header_result::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::NOT_FOUND => Err(get_azure_async_header_result::Error::NotFound404 {}),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| get_azure_async_header_result::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_azure_async_header_result::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get_azure_async_header_result {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod ip_firewall_rules {
use super::{models, models::*, API_VERSION};
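    /// Lists the IP firewall rules of a workspace
    /// (`.../firewallRules`, HTTP GET).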
pub async fn list_by_workspace(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<IpFirewallRuleInfoListResult, list_by_workspace::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/firewallRules",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_workspace::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_workspace::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_workspace::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_workspace::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IpFirewallRuleInfoListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_workspace::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_workspace {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
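    /// Fetches a single IP firewall rule by name
    /// (`.../firewallRules/{ruleName}`, HTTP GET).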
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
rule_name: &str,
) -> std::result::Result<IpFirewallRuleInfo, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/firewallRules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IpFirewallRuleInfo =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
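    /// Creates or updates an IP firewall rule (HTTP PUT). `Ok200` means the rule
    /// was updated, `Created201` that it was newly created; both carry the
    /// resulting `IpFirewallRuleInfo`.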
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
rule_name: &str,
ip_firewall_rule_info: &IpFirewallRuleInfo,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/firewallRules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(ip_firewall_rule_info).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IpFirewallRuleInfo = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: IpFirewallRuleInfo = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(IpFirewallRuleInfo),
Created201(IpFirewallRuleInfo),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
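    /// Deletes an IP firewall rule (HTTP DELETE). `Ok200` carries a JSON status
    /// body; `NoContent204` and `Accepted202` carry none.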
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
rule_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/firewallRules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Ok200(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(serde_json::Value),
NoContent204,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
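    /// Replaces a workspace's entire set of IP firewall rules in one call
    /// (HTTP POST to `.../replaceAllIpFirewallRules`). `Ok200` returns the
    /// operation response; `Accepted202` means the replacement is still running.
    ///
    /// A hedged usage sketch (not compiled; the request field shown is an
    /// assumption about the generated `ReplaceAllIpFirewallRulesRequest` model):
    ///
    /// ```ignore
    /// let request = ReplaceAllIpFirewallRulesRequest {
    ///     ip_firewall_rules: Some(rules_by_name), // hypothetical: map of rule name -> rule properties
    /// };
    /// let rsp = replace_all(&operation_config, subscription_id, resource_group, workspace, &request).await?;
    /// ```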
pub async fn replace_all(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
request: &ReplaceAllIpFirewallRulesRequest,
) -> std::result::Result<replace_all::Response, replace_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/replaceAllIpFirewallRules",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(replace_all::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(replace_all::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(request).map_err(replace_all::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(replace_all::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(replace_all::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ReplaceAllFirewallRulesOperationResponse =
serde_json::from_slice(rsp_body).map_err(|source| replace_all::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(replace_all::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(replace_all::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| replace_all::Error::DeserializeError(source, rsp_body.clone()))?;
Err(replace_all::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod replace_all {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(ReplaceAllFirewallRulesOperationResponse),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod integration_runtimes {
use super::{models, models::*, API_VERSION};
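    /// Fetches a single integration runtime by name.
    ///
    /// Sends `GET .../integrationRuntimes/{integration_runtime_name}`. Passing
    /// `Some(etag)` as `if_none_match` sets the `If-None-Match` header; a 304
    /// reply then surfaces as [`get::Error::NotModified304`] instead of a
    /// value.
    ///
    /// A minimal usage sketch (all identifiers are placeholders; `config` is a
    /// `crate::OperationConfig` built elsewhere):
    ///
    /// ```ignore
    /// let runtime = get(
    ///     &config,
    ///     "00000000-0000-0000-0000-000000000000", // subscription id (placeholder)
    ///     "my-rg",
    ///     "my-workspace",
    ///     "my-ir",
    ///     None, // no If-None-Match header
    /// )
    /// .await?;
    /// println!("{:?}", runtime); // Debug impl assumed on the model
    /// ```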
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
if_none_match: Option<&str>,
) -> std::result::Result<IntegrationRuntimeResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(if_none_match) = if_none_match {
req_builder = req_builder.header("If-None-Match", if_none_match);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::NOT_MODIFIED => Err(get::Error::NotModified304 {}),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Error response #response_type")]
NotModified304 {},
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
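    /// Creates or updates an integration runtime.
    ///
    /// Sends `PUT .../integrationRuntimes/{integration_runtime_name}` with the
    /// JSON-serialized resource. Passing `Some(etag)` as `if_match` sets the
    /// `If-Match` header for optimistic concurrency. 200 returns the created
    /// resource; 202 means creation continues asynchronously.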
pub async fn create(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
if_match: Option<&str>,
integration_runtime: &IntegrationRuntimeResource,
) -> std::result::Result<create::Response, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(if_match) = if_match {
req_builder = req_builder.header("If-Match", if_match);
}
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(integration_runtime).map_err(create::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeResource =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(IntegrationRuntimeResource),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
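    /// Updates an integration runtime in place.
    ///
    /// Sends `PATCH .../integrationRuntimes/{integration_runtime_name}` with
    /// the JSON-serialized update request and returns the updated
    /// [`IntegrationRuntimeResource`] on 200.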
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
update_integration_runtime_request: &UpdateIntegrationRuntimeRequest,
) -> std::result::Result<IntegrationRuntimeResource, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(update_integration_runtime_request).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod update {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
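    /// Deletes an integration runtime.
    ///
    /// Sends `DELETE .../integrationRuntimes/{integration_runtime_name}`;
    /// 200, 202 (accepted, long-running), and 204 (no content) all succeed
    /// and map onto [`delete::Response`].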
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
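    /// Triggers an upgrade of an integration runtime.
    ///
    /// Sends `POST .../integrationRuntimes/{integration_runtime_name}/upgrade`
    /// with an empty body (and an explicit `Content-Length: 0`); the only
    /// success status is 200, which yields `()`.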
pub async fn upgrade(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<(), upgrade::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/upgrade",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(upgrade::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(upgrade::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(upgrade::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(upgrade::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| upgrade::Error::DeserializeError(source, rsp_body.clone()))?;
Err(upgrade::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod upgrade {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
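    /// Lists all integration runtimes under a workspace.
    ///
    /// Sends `GET .../workspaces/{workspace_name}/integrationRuntimes` and
    /// returns an [`IntegrationRuntimeListResponse`] on 200.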
pub async fn list_by_workspace(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<IntegrationRuntimeListResponse, list_by_workspace::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_workspace::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_workspace::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_workspace::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_workspace::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeListResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_workspace::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_workspace {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
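    /// Starts an integration runtime.
    ///
    /// Sends `POST .../integrationRuntimes/{integration_runtime_name}/start`
    /// with an empty body. 200 returns an
    /// [`IntegrationRuntimeStatusResponse`]; 202 means the start was accepted
    /// and is still in progress.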
pub async fn start(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<start::Response, start::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/start",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(start::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(start::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(start::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(start::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeStatusResponse =
serde_json::from_slice(rsp_body).map_err(|source| start::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(start::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| start::Error::DeserializeError(source, rsp_body.clone()))?;
Err(start::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod start {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(IntegrationRuntimeStatusResponse),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
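    /// Stops an integration runtime.
    ///
    /// Sends `POST .../integrationRuntimes/{integration_runtime_name}/stop`
    /// with an empty body; 200 and 202 both succeed ([`stop::Response`]).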
pub async fn stop(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<stop::Response, stop::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/stop",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(stop::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(stop::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(stop::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(stop::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(stop::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(stop::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| stop::Error::DeserializeError(source, rsp_body.clone()))?;
Err(stop::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod stop {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
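    /// Lists the outbound network dependency endpoints of an integration
    /// runtime.
    ///
    /// Sends `GET .../outboundNetworkDependenciesEndpoints` and returns the
    /// endpoint collection on 200.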
pub async fn list_outbound_network_dependencies_endpoints(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<
IntegrationRuntimeOutboundNetworkDependenciesEndpointsResponse,
list_outbound_network_dependencies_endpoints::Error,
> {
let http_client = operation_config.http_client();
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/outboundNetworkDependenciesEndpoints" , operation_config . base_path () , subscription_id , resource_group_name , workspace_name , integration_runtime_name) ;
let mut url = url::Url::parse(url_str).map_err(list_outbound_network_dependencies_endpoints::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_outbound_network_dependencies_endpoints::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_outbound_network_dependencies_endpoints::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_outbound_network_dependencies_endpoints::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeOutboundNetworkDependenciesEndpointsResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_outbound_network_dependencies_endpoints::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_outbound_network_dependencies_endpoints::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_outbound_network_dependencies_endpoints::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_outbound_network_dependencies_endpoints {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
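    /// Enables interactive query on an integration runtime.
    ///
    /// Sends `POST .../enableInteractiveQuery` with an empty body; 200 and
    /// 202 (accepted, long-running) both succeed.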
pub async fn enable_interactive_query(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<enable_interactive_query::Response, enable_interactive_query::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/enableInteractiveQuery",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(enable_interactive_query::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(enable_interactive_query::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(enable_interactive_query::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(enable_interactive_query::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(enable_interactive_query::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(enable_interactive_query::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| enable_interactive_query::Error::DeserializeError(source, rsp_body.clone()))?;
Err(enable_interactive_query::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod enable_interactive_query {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
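    /// Disables interactive query on an integration runtime.
    ///
    /// Sends `POST .../disableInteractiveQuery` with an empty body; 200 and
    /// 202 both succeed.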
pub async fn disable_interactive_query(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<disable_interactive_query::Response, disable_interactive_query::Error> {
let http_client = operation_config.http_client();
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/disableInteractiveQuery" , operation_config . base_path () , subscription_id , resource_group_name , workspace_name , integration_runtime_name) ;
let mut url = url::Url::parse(url_str).map_err(disable_interactive_query::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(disable_interactive_query::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(disable_interactive_query::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(disable_interactive_query::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(disable_interactive_query::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(disable_interactive_query::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| disable_interactive_query::Error::DeserializeError(source, rsp_body.clone()))?;
Err(disable_interactive_query::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod disable_interactive_query {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod integration_runtime_node_ip_address {
use super::{models, models::*, API_VERSION};
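    /// Gets the IP address of an integration runtime node.
    ///
    /// Note the verb: this is a `POST` to `.../nodes/{node_name}/ipAddress`
    /// with an empty body, returning an [`IntegrationRuntimeNodeIpAddress`]
    /// on 200.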
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
node_name: &str,
) -> std::result::Result<IntegrationRuntimeNodeIpAddress, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/nodes/{}/ipAddress",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name,
node_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeNodeIpAddress =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod integration_runtime_object_metadata {
use super::{models, models::*, API_VERSION};
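    /// Lists SSIS object metadata stored in an integration runtime.
    ///
    /// Sends `POST .../getObjectMetadata`. When `get_metadata_request` is
    /// `Some`, it is serialized as the JSON body and a `content-type` header
    /// is set; when `None`, the request goes out with an empty body.
    ///
    /// A minimal sketch of the unfiltered call (identifiers are placeholders):
    ///
    /// ```ignore
    /// // No metadata filter: POST with an empty body.
    /// let metadata = list(&config, "sub-id", "my-rg", "my-workspace", "my-ssis-ir", None).await?;
    /// println!("{} objects", metadata.value.map_or(0, |v| v.len())); // field layout assumed
    /// ```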
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
get_metadata_request: Option<&GetSsisObjectMetadataRequest>,
) -> std::result::Result<SsisObjectMetadataListResponse, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/getObjectMetadata",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = if let Some(get_metadata_request) = get_metadata_request {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(get_metadata_request).map_err(list::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SsisObjectMetadataListResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
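    /// Refreshes SSIS object metadata for an integration runtime.
    ///
    /// Sends `POST .../refreshObjectMetadata` with an empty body. 200 returns
    /// a [`SsisObjectMetadataStatusResponse`]; 202 means the refresh continues
    /// asynchronously.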
pub async fn refresh(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<refresh::Response, refresh::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/refreshObjectMetadata",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(refresh::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(refresh::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(refresh::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(refresh::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SsisObjectMetadataStatusResponse =
serde_json::from_slice(rsp_body).map_err(|source| refresh::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(refresh::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(refresh::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| refresh::Error::DeserializeError(source, rsp_body.clone()))?;
Err(refresh::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod refresh {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(SsisObjectMetadataStatusResponse),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod integration_runtime_nodes {
use super::{models, models::*, API_VERSION};
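    /// Gets a single node of a self-hosted integration runtime.
    ///
    /// Sends `GET .../nodes/{node_name}` and returns a
    /// [`SelfHostedIntegrationRuntimeNode`] on 200.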
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
node_name: &str,
) -> std::result::Result<SelfHostedIntegrationRuntimeNode, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/nodes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name,
node_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SelfHostedIntegrationRuntimeNode =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
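    /// Updates a node of a self-hosted integration runtime.
    ///
    /// Sends `PATCH .../nodes/{node_name}` with the JSON-serialized update
    /// request and returns the updated node on 200.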
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
node_name: &str,
update_integration_runtime_node_request: &UpdateIntegrationRuntimeNodeRequest,
) -> std::result::Result<SelfHostedIntegrationRuntimeNode, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/nodes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name,
node_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(update_integration_runtime_node_request).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SelfHostedIntegrationRuntimeNode =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod update {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
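    /// Removes a node from a self-hosted integration runtime.
    ///
    /// Sends `DELETE .../nodes/{node_name}`; 200 and 204 both succeed
    /// ([`delete::Response`]).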
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
node_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/nodes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name,
node_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod integration_runtime_credentials {
use super::{models, models::*, API_VERSION};
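    /// Synchronizes credentials on an integration runtime.
    ///
    /// Sends `POST .../syncCredentials` with an empty body; the only success
    /// status is 200, which yields `()`.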
pub async fn sync(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<(), sync::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/syncCredentials",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(sync::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(sync::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(sync::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(sync::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| sync::Error::DeserializeError(source, rsp_body.clone()))?;
Err(sync::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod sync {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod integration_runtime_connection_infos {
use super::{models, models::*, API_VERSION};
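    /// Gets the connection information for an integration runtime.
    ///
    /// Note the verb: this is a `POST` to `.../getConnectionInfo` with an
    /// empty body, returning an [`IntegrationRuntimeConnectionInfo`] on 200.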
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<IntegrationRuntimeConnectionInfo, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/getConnectionInfo",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeConnectionInfo =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod integration_runtime_auth_keys {
use super::{models, models::*, API_VERSION};
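    /// Regenerates an authentication key for the named integration runtime by
    /// POSTing `regenerate_key_parameters` to its `regenerateAuthKey` endpoint.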
pub async fn regenerate(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
regenerate_key_parameters: &IntegrationRuntimeRegenerateKeyParameters,
) -> std::result::Result<IntegrationRuntimeAuthKeys, regenerate::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/regenerateAuthKey",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(regenerate::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(regenerate::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(regenerate_key_parameters).map_err(regenerate::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(regenerate::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(regenerate::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeAuthKeys =
serde_json::from_slice(rsp_body).map_err(|source| regenerate::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| regenerate::Error::DeserializeError(source, rsp_body.clone()))?;
Err(regenerate::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod regenerate {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
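    /// Lists the authentication keys for the named integration runtime via a
    /// POST to its `listAuthKeys` endpoint (the request carries an empty body).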
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<IntegrationRuntimeAuthKeys, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/listAuthKeys",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeAuthKeys =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod integration_runtime_monitoring_data {
use super::{models, models::*, API_VERSION};
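    /// Fetches monitoring data for the named integration runtime via a POST to
    /// its `monitoringData` endpoint.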
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<IntegrationRuntimeMonitoringData, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/monitoringData",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeMonitoringData =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod integration_runtime_status {
use super::{models, models::*, API_VERSION};
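    /// Gets the current status of the named integration runtime via a POST to
    /// its `getStatus` endpoint.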
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
integration_runtime_name: &str,
) -> std::result::Result<IntegrationRuntimeStatusResponse, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/integrationRuntimes/{}/getStatus",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
integration_runtime_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: IntegrationRuntimeStatusResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod keys {
use super::{models, models::*, API_VERSION};
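    /// Lists all keys under the given workspace (a GET on the workspace's
    /// `keys` collection).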
pub async fn list_by_workspace(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<KeyInfoListResult, list_by_workspace::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/keys",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_workspace::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_workspace::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_workspace::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_workspace::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: KeyInfoListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_workspace::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_workspace {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
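    /// Gets a single workspace key by name.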
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
key_name: &str,
) -> std::result::Result<Key, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/keys/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
key_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Key =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
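    /// Creates or updates a workspace key by PUTting `key_properties` to the
    /// key's resource URL.
    ///
    /// A minimal usage sketch, marked `ignore` because constructing an
    /// `OperationConfig` depends on crate setup not shown here; the string
    /// arguments are placeholders:
    ///
    /// ```ignore
    /// let key = Key::default(); // assuming `Key` derives `Default`; fill in fields as needed
    /// let result = keys::create_or_update(
    ///     &operation_config, // assumed to be built elsewhere
    ///     "subscription-id",
    ///     "resource-group",
    ///     "workspace",
    ///     "key-name",
    ///     &key,
    /// )
    /// .await?;
    /// ```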
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
key_name: &str,
key_properties: &Key,
) -> std::result::Result<Key, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/keys/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
key_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(key_properties).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Key = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
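    /// Deletes a workspace key. The service may echo the deleted `Key` back
    /// (`Ok200`) or answer with an empty `NoContent204` response.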
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
key_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/keys/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
key_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Key =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Ok200(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(Key),
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod library {
use super::{models, models::*, API_VERSION};
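    /// Gets a library resource by name within the given workspace.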
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
library_name: &str,
workspace_name: &str,
) -> std::result::Result<LibraryResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/libraries/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
library_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LibraryResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod libraries {
use super::{models, models::*, API_VERSION};
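    /// Lists all library resources in the given workspace.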
pub async fn list_by_workspace(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<LibraryListResponse, list_by_workspace::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/libraries",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_workspace::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_workspace::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_workspace::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_workspace::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LibraryListResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_workspace::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_workspace {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod private_endpoint_connections {
use super::{models, models::*, API_VERSION};
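    /// Gets a private endpoint connection by name within the given workspace.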
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
private_endpoint_connection_name: &str,
) -> std::result::Result<PrivateEndpointConnection, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/privateEndpointConnections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
private_endpoint_connection_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateEndpointConnection =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
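    /// Creates a private endpoint connection by PUTting `request` to the
    /// connection's resource URL. The service answers `Ok200` or `Created201`,
    /// both carrying the resulting `PrivateEndpointConnection`.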
pub async fn create(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
private_endpoint_connection_name: &str,
request: &PrivateEndpointConnection,
) -> std::result::Result<create::Response, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/privateEndpointConnections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
private_endpoint_connection_name
);
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(request).map_err(create::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateEndpointConnection =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: PrivateEndpointConnection =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(PrivateEndpointConnection),
Created201(PrivateEndpointConnection),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
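    /// Deletes a private endpoint connection. `Accepted202` carries an
    /// `OperationResource` describing the in-flight deletion; `Ok200` and
    /// `NoContent204` carry no body.
    ///
    /// A sketch of handling the three-way response, marked `ignore` because the
    /// surrounding setup is not shown and the identifiers are placeholders:
    ///
    /// ```ignore
    /// use private_endpoint_connections::delete::Response;
    /// match private_endpoint_connections::delete(&operation_config, sub, rg, ws, conn).await? {
    ///     Response::Ok200 => println!("deleted"),
    ///     Response::Accepted202(op) => println!("accepted: {:?}", op),
    ///     Response::NoContent204 => println!("nothing to delete"),
    /// }
    /// ```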
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
private_endpoint_connection_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/privateEndpointConnections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
private_endpoint_connection_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: OperationResource =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Accepted202(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202(OperationResource),
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
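    /// Lists all private endpoint connections in the given workspace.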
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<PrivateEndpointConnectionList, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/privateEndpointConnections",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateEndpointConnectionList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod private_link_resources {
use super::{models, models::*, API_VERSION};
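    /// Lists the private link resources exposed by the given workspace.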
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<PrivateLinkResourceListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/privateLinkResources",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkResourceListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
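    /// Gets a single private link resource of the given workspace by name.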
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
private_link_resource_name: &str,
) -> std::result::Result<PrivateLinkResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/privateLinkResources/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
private_link_resource_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod private_link_hub_private_link_resources {
use super::{models, models::*, API_VERSION};
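    /// Lists the private link resources exposed by the given private link hub.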
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_link_hub_name: &str,
) -> std::result::Result<PrivateLinkResourceListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/privateLinkHubs/{}/privateLinkResources",
operation_config.base_path(),
subscription_id,
resource_group_name,
private_link_hub_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkResourceListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
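    /// Gets a single private link resource of the given private link hub by name.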
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_link_hub_name: &str,
private_link_resource_name: &str,
) -> std::result::Result<PrivateLinkResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/privateLinkHubs/{}/privateLinkResources/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
private_link_hub_name,
private_link_resource_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod private_link_hubs {
use super::{models, models::*, API_VERSION};
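    /// Lists all private link hubs in the given resource group.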
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
) -> std::result::Result<PrivateLinkHubInfoListResult, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/privateLinkHubs",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkHubInfoListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_resource_group::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_resource_group {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
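    /// Gets a private link hub by name.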
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_link_hub_name: &str,
) -> std::result::Result<PrivateLinkHub, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/privateLinkHubs/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
private_link_hub_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkHub =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
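    /// Creates or updates a private link hub by PUTting `private_link_hub_info`
    /// to the hub's resource URL; the service answers `Ok200` or `Created201`,
    /// both carrying the resulting `PrivateLinkHub`.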
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_link_hub_name: &str,
private_link_hub_info: &PrivateLinkHub,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/privateLinkHubs/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
private_link_hub_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(private_link_hub_info).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkHub = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkHub = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(PrivateLinkHub),
Created201(PrivateLinkHub),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
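/// Applies a partial update (`PATCH`) to the named privateLinkHub using
/// `PrivateLinkHubPatchInfo`; both 200 and 201 responses carry the resulting hub.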
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_link_hub_name: &str,
private_link_hub_patch_info: &PrivateLinkHubPatchInfo,
) -> std::result::Result<update::Response, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/privateLinkHubs/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
private_link_hub_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(private_link_hub_patch_info).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkHub =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkHub =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(PrivateLinkHub),
Created201(PrivateLinkHub),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
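/// Deletes the named privateLinkHub. 200 and 202 signal success (202 meaning
/// the deletion is still in progress) and 204 means there was nothing to delete.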
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_link_hub_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/privateLinkHubs/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
private_link_hub_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
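/// Lists every privateLinkHub in the subscription with a subscription-scoped `GET`.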
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<PrivateLinkHubInfoListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Synapse/privateLinkHubs",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateLinkHubInfoListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod private_endpoint_connections_private_link_hub {
use super::{models, models::*, API_VERSION};
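/// Lists the private endpoint connections attached to a privateLinkHub.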
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_link_hub_name: &str,
) -> std::result::Result<PrivateEndpointConnectionForPrivateLinkHubResourceCollectionResponse, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/privateLinkHubs/{}/privateEndpointConnections",
operation_config.base_path(),
subscription_id,
resource_group_name,
private_link_hub_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateEndpointConnectionForPrivateLinkHubResourceCollectionResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
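/// Fetches a single private endpoint connection on a privateLinkHub by name.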
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
private_link_hub_name: &str,
private_endpoint_connection_name: &str,
) -> std::result::Result<PrivateEndpointConnectionForPrivateLinkHub, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/privateLinkHubs/{}/privateEndpointConnections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
private_link_hub_name,
private_endpoint_connection_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateEndpointConnectionForPrivateLinkHub =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pools {
use super::{models, models::*, API_VERSION};
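/// Fetches the named SQL pool under a Synapse workspace.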
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<SqlPool, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPool =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
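/// Creates the named SQL pool with a `PUT`. Creation is long-running: 200
/// returns the finished pool, 202 means the request was accepted and is still
/// in flight, and 404 is surfaced as the dedicated `NotFound404` variant.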
pub async fn create(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
sql_pool_info: &SqlPool,
) -> std::result::Result<create::Response, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(sql_pool_info).map_err(create::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPool =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202),
http::StatusCode::NOT_FOUND => Err(create::Error::NotFound404 {}), // 404 carries no parsed body for this operation
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(SqlPool),
Accepted202,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
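/// Patches the named SQL pool with `SqlPoolPatchInfo`; 200 returns the updated
/// pool and 202 indicates the update was accepted but not yet complete.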
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
sql_pool_info: &SqlPoolPatchInfo,
) -> std::result::Result<update::Response, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(sql_pool_info).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPool =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(update::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(SqlPool),
Accepted202,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
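/// Deletes the named SQL pool. 200 and 202 return the raw operation payload as
/// `serde_json::Value`; 204 means the pool did not exist.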
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Accepted202(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(serde_json::Value),
Accepted202(serde_json::Value),
NoContent204,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
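/// Lists all SQL pools in a workspace.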
pub async fn list_by_workspace(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<SqlPoolInfoListResult, list_by_workspace::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_workspace::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_workspace::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_workspace::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_workspace::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolInfoListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_workspace::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_workspace {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
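/// Pauses the named SQL pool with an empty-bodied `POST` to `.../pause`
/// (`Content-Length: 0` is set explicitly). This is long-running: 202 means
/// the pause was accepted and completion must be tracked separately, e.g. via
/// `sql_pool_operation_results::get_location_header_result`.
///
/// A minimal call sketch (hypothetical identifiers):
/// ```ignore
/// match pause(&config, "sub-id", "rg", "ws", "pool").await? {
///     pause::Response::Ok200(body) => println!("paused: {}", body),
///     pause::Response::Accepted202 => println!("pause accepted; poll for completion"),
/// }
/// ```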
pub async fn pause(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<pause::Response, pause::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/pause",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(pause::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(pause::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(pause::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(pause::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value =
serde_json::from_slice(rsp_body).map_err(|source| pause::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(pause::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(pause::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| pause::Error::DeserializeError(source, rsp_body.clone()))?;
Err(pause::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod pause {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(serde_json::Value),
Accepted202,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
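/// Resumes a paused SQL pool via `POST .../resume`; the response handling
/// mirrors `pause` (200 with a raw payload, or 202 while still in progress).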
pub async fn resume(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<resume::Response, resume::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/resume",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(resume::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(resume::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(resume::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(resume::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value =
serde_json::from_slice(rsp_body).map_err(|source| resume::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(resume::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(resume::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| resume::Error::DeserializeError(source, rsp_body.clone()))?;
Err(resume::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod resume {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(serde_json::Value),
Accepted202,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
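/// Moves (renames) a SQL pool via `POST .../move` with a
/// `ResourceMoveDefinition` body. Only 200 counts as success; any other status
/// is reported as `DefaultResponse` carrying just the status code, since this
/// operation does not parse an error body.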
pub async fn rename(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
parameters: &ResourceMoveDefinition,
) -> std::result::Result<(), rename::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/move",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(rename::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(rename::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(rename::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(rename::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(rename::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => Err(rename::Error::DefaultResponse { status_code }),
}
}
pub mod rename {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_metadata_sync_configs {
use super::{models, models::*, API_VERSION};
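/// Reads the metadata sync configuration of a SQL pool
/// (`GET .../metadataSync/config`); 404 maps to the `NotFound404` variant.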
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<MetadataSyncConfig, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/metadataSync/config",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MetadataSyncConfig =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::NOT_FOUND => Err(get::Error::NotFound404 {}),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
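/// Writes the metadata sync configuration of a SQL pool with a `PUT`; returns
/// the stored configuration, or `NotFound404` when the pool does not exist.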
pub async fn create(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
metadata_sync_configuration: &MetadataSyncConfig,
) -> std::result::Result<MetadataSyncConfig, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/metadataSync/config",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(metadata_sync_configuration).map_err(create::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MetadataSyncConfig =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::NOT_FOUND => Err(create::Error::NotFound404 {}),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_operation_results {
use super::{models, models::*, API_VERSION};
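/// Polls the result of a long-running SQL pool operation at
/// `.../operationResults/{operation_id}`. Both 200 (finished) and 202 (still
/// running) return the raw payload as `serde_json::Value`.
///
/// A minimal polling sketch (hypothetical identifiers; a production loop would
/// add a delay between attempts and a retry limit):
/// ```ignore
/// loop {
///     match get_location_header_result(&config, "sub-id", "rg", "ws", "pool", "op-id").await? {
///         get_location_header_result::Response::Ok200(body) => { println!("done: {}", body); break; }
///         get_location_header_result::Response::Accepted202(_) => { /* wait, then poll again */ }
///     }
/// }
/// ```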
pub async fn get_location_header_result(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
operation_id: &str,
) -> std::result::Result<get_location_header_result::Response, get_location_header_result::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/operationResults/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
operation_id
);
let mut url = url::Url::parse(url_str).map_err(get_location_header_result::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_location_header_result::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_location_header_result::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_location_header_result::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value = serde_json::from_slice(rsp_body)
.map_err(|source| get_location_header_result::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(get_location_header_result::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value = serde_json::from_slice(rsp_body)
.map_err(|source| get_location_header_result::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(get_location_header_result::Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| get_location_header_result::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_location_header_result::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get_location_header_result {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(serde_json::Value),
Accepted202(serde_json::Value),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_geo_backup_policies {
use super::{models, models::*, API_VERSION};
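/// Lists the geo backup policies configured on a SQL pool.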
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<GeoBackupPolicyListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/geoBackupPolicies",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: GeoBackupPolicyListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
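/// Fetches a single geo backup policy on a SQL pool by name.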
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
geo_backup_policy_name: &str,
) -> std::result::Result<GeoBackupPolicy, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/geoBackupPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
geo_backup_policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: GeoBackupPolicy =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
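/// Creates or updates a geo backup policy on a SQL pool with a `PUT`;
/// 201 signals creation and 200 an update, each carrying the stored policy.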
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
geo_backup_policy_name: &str,
parameters: &GeoBackupPolicy,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/geoBackupPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
geo_backup_policy_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: GeoBackupPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: GeoBackupPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Created201(GeoBackupPolicy),
Ok200(GeoBackupPolicy),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_data_warehouse_user_activities {
use super::{models, models::*, API_VERSION};
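/// Fetches the named dataWarehouseUserActivities resource of a SQL pool. Any
/// non-200 status is reported as `DefaultResponse` with the status code only;
/// no error body is parsed for this operation.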
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
data_warehouse_user_activity_name: &str,
) -> std::result::Result<DataWarehouseUserActivities, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/dataWarehouseUserActivities/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
data_warehouse_user_activity_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DataWarehouseUserActivities =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_restore_points {
use super::{models, models::*, API_VERSION};
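/// Lists the restore points of the given SQL pool.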
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<RestorePointListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/restorePoints",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RestorePointListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
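/// Creates a restore point for the given SQL pool. A 202 Accepted response
/// carries no body (`create::Response::Accepted202`); polling the long-running
/// operation to completion is left to the caller. A hedged usage sketch,
/// assuming a `CreateSqlPoolRestorePointDefinition` built from the generated
/// models:
///
/// ```ignore
/// match create(&operation_config, "sub-id", "rg", "ws", "pool", &definition).await? {
///     create::Response::Accepted202 => { /* restore point still being created */ }
///     create::Response::Ok200(rp) | create::Response::Created201(rp) => {
///         println!("restore point: {:?}", rp);
///     }
/// }
/// ```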
pub async fn create(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
parameters: &CreateSqlPoolRestorePointDefinition,
) -> std::result::Result<create::Response, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/restorePoints",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RestorePoint =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202),
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: RestorePoint =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Created201(rsp_value))
}
status_code => Err(create::Error::DefaultResponse { status_code }),
}
}
pub mod create {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(RestorePoint),
Accepted202,
Created201(RestorePoint),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
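/// Gets the named restore point of the given SQL pool.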
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
restore_point_name: &str,
) -> std::result::Result<RestorePoint, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/restorePoints/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
restore_point_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RestorePoint =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
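/// Deletes the named restore point; both 200 OK and 204 No Content count as success.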
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
restore_point_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/restorePoints/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
restore_point_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => Err(delete::Error::DefaultResponse { status_code }),
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_replication_links {
use super::{models, models::*, API_VERSION};
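/// Lists the replication links on the given SQL pool.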
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<ReplicationLinkListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/replicationLinks",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ReplicationLinkListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
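/// Gets a replication link on the given SQL pool by its link id.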
pub async fn get_by_name(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
link_id: &str,
) -> std::result::Result<ReplicationLink, get_by_name::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/replicationLinks/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
link_id
);
let mut url = url::Url::parse(url_str).map_err(get_by_name::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_by_name::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_by_name::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_by_name::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ReplicationLink =
serde_json::from_slice(rsp_body).map_err(|source| get_by_name::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get_by_name::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_by_name::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get_by_name {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_maintenance_windows {
use super::{models, models::*, API_VERSION};
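/// Gets the current maintenance window settings of the SQL pool. The window
/// name travels as the `maintenanceWindowName` query parameter; the URL path
/// itself always targets `maintenancewindows/current`.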
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
maintenance_window_name: &str,
) -> std::result::Result<MaintenanceWindows, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/maintenancewindows/current",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
url.query_pairs_mut().append_pair("maintenanceWindowName", maintenance_window_name);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MaintenanceWindows =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
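/// Creates or updates the current maintenance window settings via PUT; a
/// 200 OK response carries no body, so the call resolves to `()`.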
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
maintenance_window_name: &str,
parameters: &MaintenanceWindows,
) -> std::result::Result<(), create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/maintenancewindows/current",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
url.query_pairs_mut().append_pair("maintenanceWindowName", maintenance_window_name);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_maintenance_window_options {
use super::{models, models::*, API_VERSION};
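/// Gets the available maintenance window options of the SQL pool; the options
/// name travels as the `maintenanceWindowOptionsName` query parameter.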
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
maintenance_window_options_name: &str,
) -> std::result::Result<MaintenanceWindowOptions, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/maintenanceWindowOptions/current",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
url.query_pairs_mut()
.append_pair("maintenanceWindowOptionsName", maintenance_window_options_name);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MaintenanceWindowOptions =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_transparent_data_encryptions {
use super::{models, models::*, API_VERSION};
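/// Gets the transparent data encryption configuration of the SQL pool.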
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
transparent_data_encryption_name: &str,
) -> std::result::Result<TransparentDataEncryption, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/transparentDataEncryption/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
transparent_data_encryption_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: TransparentDataEncryption =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
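/// Creates or updates the transparent data encryption configuration; both
/// 200 and 201 return the resulting `TransparentDataEncryption`.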
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
transparent_data_encryption_name: &str,
parameters: &TransparentDataEncryption,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/transparentDataEncryption/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
transparent_data_encryption_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: TransparentDataEncryption = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: TransparentDataEncryption = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(TransparentDataEncryption),
Created201(TransparentDataEncryption),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
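/// Lists the transparent data encryption configurations of the SQL pool.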
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<TransparentDataEncryptionListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/transparentDataEncryption",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: TransparentDataEncryptionListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_blob_auditing_policies {
use super::{models, models::*, API_VERSION};
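/// Gets the named blob auditing policy of the SQL pool.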
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
blob_auditing_policy_name: &str,
) -> std::result::Result<SqlPoolBlobAuditingPolicy, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/auditingSettings/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
blob_auditing_policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolBlobAuditingPolicy =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
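/// Creates or updates the blob auditing policy of the SQL pool.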
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
blob_auditing_policy_name: &str,
parameters: &SqlPoolBlobAuditingPolicy,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/auditingSettings/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
blob_auditing_policy_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolBlobAuditingPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolBlobAuditingPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(SqlPoolBlobAuditingPolicy),
Created201(SqlPoolBlobAuditingPolicy),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
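/// Lists the blob auditing policies defined on the SQL pool.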
pub async fn list_by_sql_pool(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<SqlPoolBlobAuditingPolicyListResult, list_by_sql_pool::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/auditingSettings",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_sql_pool::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_sql_pool::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_sql_pool::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_sql_pool::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolBlobAuditingPolicyListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_sql_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list_by_sql_pool::Error::DefaultResponse { status_code }),
}
}
pub mod list_by_sql_pool {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_operations {
use super::{models, models::*, API_VERSION};
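/// Lists the operations performed on the SQL pool.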
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<SqlPoolBlobAuditingPolicySqlPoolOperationListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/operations",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolBlobAuditingPolicySqlPoolOperationListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_usages {
use super::{models, models::*, API_VERSION};
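/// Lists the usage metrics of the SQL pool.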
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<SqlPoolUsageListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/usages",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolUsageListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_sensitivity_labels {
use super::{models, models::*, API_VERSION};
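/// Lists the current sensitivity labels of the SQL pool; an optional OData
/// `$filter` narrows the result.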
pub async fn list_current(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
filter: Option<&str>,
) -> std::result::Result<SensitivityLabelListResult, list_current::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/currentSensitivityLabels",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list_current::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_current::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_current::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_current::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SensitivityLabelListResult =
serde_json::from_slice(rsp_body).map_err(|source| list_current::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list_current::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_current::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_current {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
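/// Applies a batch of sensitivity label updates via PATCH; a 200 OK response
/// carries no body, so the call resolves to `()`.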
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
parameters: &SensitivityLabelUpdateList,
) -> std::result::Result<(), update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/currentSensitivityLabels",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => Err(update::Error::DefaultResponse { status_code }),
}
}
pub mod update {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
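/// Lists the recommended sensitivity labels; `includeDisabledRecommendations`,
/// `$skipToken` and `$filter` are forwarded as query parameters when provided.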
pub async fn list_recommended(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
include_disabled_recommendations: Option<bool>,
skip_token: Option<&str>,
filter: Option<&str>,
) -> std::result::Result<SensitivityLabelListResult, list_recommended::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/recommendedSensitivityLabels",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list_recommended::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_recommended::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(include_disabled_recommendations) = include_disabled_recommendations {
url.query_pairs_mut().append_pair(
"includeDisabledRecommendations",
include_disabled_recommendations.to_string().as_str(),
);
}
if let Some(skip_token) = skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_recommended::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_recommended::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SensitivityLabelListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_recommended::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list_recommended::Error::DefaultResponse { status_code }),
}
}
pub mod list_recommended {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
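/// Gets the sensitivity label of a specific column.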
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
schema_name: &str,
table_name: &str,
column_name: &str,
sensitivity_label_source: &str,
) -> std::result::Result<SensitivityLabel, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas/{}/tables/{}/columns/{}/sensitivityLabels/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name,
sensitivity_label_source
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SensitivityLabel =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
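/// Creates or updates the sensitivity label of a given column in a SQL pool
/// (PUT; returns the resulting label via `Ok200` or `Created201`).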
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
schema_name: &str,
table_name: &str,
column_name: &str,
sensitivity_label_source: &str,
parameters: &SensitivityLabel,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas/{}/tables/{}/columns/{}/sensitivityLabels/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name,
sensitivity_label_source
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SensitivityLabel = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: SensitivityLabel = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(SensitivityLabel),
Created201(SensitivityLabel),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
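/// Deletes the sensitivity label of a given column in a SQL pool.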
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
schema_name: &str,
table_name: &str,
column_name: &str,
sensitivity_label_source: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas/{}/tables/{}/columns/{}/sensitivityLabels/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name,
sensitivity_label_source
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => Err(delete::Error::DefaultResponse { status_code }),
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
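/// Enables the sensitivity recommendation on a given column in a SQL pool
/// (POST to the `/enable` endpoint with an empty body).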
pub async fn enable_recommendation(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
schema_name: &str,
table_name: &str,
column_name: &str,
sensitivity_label_source: &str,
) -> std::result::Result<(), enable_recommendation::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas/{}/tables/{}/columns/{}/sensitivityLabels/{}/enable",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name,
sensitivity_label_source
);
let mut url = url::Url::parse(url_str).map_err(enable_recommendation::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(enable_recommendation::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(enable_recommendation::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(enable_recommendation::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => Err(enable_recommendation::Error::DefaultResponse { status_code }),
}
}
pub mod enable_recommendation {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
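/// Disables the sensitivity recommendation on a given column in a SQL pool
/// (POST to the `/disable` endpoint with an empty body).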
pub async fn disable_recommendation(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
schema_name: &str,
table_name: &str,
column_name: &str,
sensitivity_label_source: &str,
) -> std::result::Result<(), disable_recommendation::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas/{}/tables/{}/columns/{}/sensitivityLabels/{}/disable",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name,
sensitivity_label_source
);
let mut url = url::Url::parse(url_str).map_err(disable_recommendation::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(disable_recommendation::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(disable_recommendation::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(disable_recommendation::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => Err(disable_recommendation::Error::DefaultResponse { status_code }),
}
}
pub mod disable_recommendation {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_recommended_sensitivity_labels {
use super::{models, models::*, API_VERSION};
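/// Updates recommended sensitivity labels for a given SQL pool in a single
/// PATCH request, taking a `RecommendedSensitivityLabelUpdateList` as the
/// JSON body.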
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
parameters: &RecommendedSensitivityLabelUpdateList,
) -> std::result::Result<(), update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/recommendedSensitivityLabels",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => Err(update::Error::DefaultResponse { status_code }),
}
}
pub mod update {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_schemas {
use super::{models, models::*, API_VERSION};
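/// Lists the schemas of a given SQL pool. When `filter` is `Some`, it is
/// forwarded as the `$filter` query parameter.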
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
filter: Option<&str>,
) -> std::result::Result<SqlPoolSchemaListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolSchemaListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
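/// Gets a schema of a given SQL pool.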
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
schema_name: &str,
) -> std::result::Result<SqlPoolSchema, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolSchema =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_tables {
use super::{models, models::*, API_VERSION};
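/// Lists the tables of a given schema in a SQL pool. When `filter` is `Some`,
/// it is forwarded as the `$filter` query parameter.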
pub async fn list_by_schema(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
schema_name: &str,
filter: Option<&str>,
) -> std::result::Result<SqlPoolTableListResult, list_by_schema::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas/{}/tables",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_schema::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_schema::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_schema::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_schema::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolTableListResult =
serde_json::from_slice(rsp_body).map_err(|source| list_by_schema::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list_by_schema::Error::DefaultResponse { status_code }),
}
}
pub mod list_by_schema {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
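/// Gets a table in a given schema of a SQL pool.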
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
schema_name: &str,
table_name: &str,
) -> std::result::Result<SqlPoolTable, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas/{}/tables/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolTable =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_table_columns {
use super::{models, models::*, API_VERSION};
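/// Lists the columns of a given table in a SQL pool. When `filter` is `Some`,
/// it is forwarded as the `$filter` query parameter.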
pub async fn list_by_table_name(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
schema_name: &str,
table_name: &str,
filter: Option<&str>,
) -> std::result::Result<SqlPoolColumnListResult, list_by_table_name::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas/{}/tables/{}/columns",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_table_name::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_table_name::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_table_name::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_table_name::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolColumnListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_table_name::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list_by_table_name::Error::DefaultResponse { status_code }),
}
}
pub mod list_by_table_name {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_connection_policies {
use super::{models, models::*, API_VERSION};
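/// Gets the connection policy of a given SQL pool. Unlike most operations
/// here, a non-success status is deserialized into a `models::ErrorResponse`
/// and carried inside `get::Error::DefaultResponse`.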
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
connection_policy_name: &str,
) -> std::result::Result<SqlPoolConnectionPolicy, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/connectionPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
connection_policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolConnectionPolicy =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_vulnerability_assessments {
use super::{models, models::*, API_VERSION};
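/// Lists the vulnerability assessments of a given SQL pool.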
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<SqlPoolVulnerabilityAssessmentListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolVulnerabilityAssessmentListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
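/// Gets the vulnerability assessment of a given SQL pool.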
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
vulnerability_assessment_name: &str,
) -> std::result::Result<SqlPoolVulnerabilityAssessment, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
vulnerability_assessment_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolVulnerabilityAssessment =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
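/// Creates or updates the vulnerability assessment of a given SQL pool
/// (PUT; returns the resulting policy via `Ok200` or `Created201`).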
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
vulnerability_assessment_name: &str,
parameters: &SqlPoolVulnerabilityAssessment,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
vulnerability_assessment_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolVulnerabilityAssessment = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolVulnerabilityAssessment = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(SqlPoolVulnerabilityAssessment),
Created201(SqlPoolVulnerabilityAssessment),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
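/// Removes the vulnerability assessment of a given SQL pool.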
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
vulnerability_assessment_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
vulnerability_assessment_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => Err(delete::Error::DefaultResponse { status_code }),
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_vulnerability_assessment_scans {
use super::{models, models::*, API_VERSION};
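/// Lists the vulnerability assessment scan records of a given SQL pool.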
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
vulnerability_assessment_name: &str,
) -> std::result::Result<VulnerabilityAssessmentScanRecordListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments/{}/scans",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
vulnerability_assessment_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VulnerabilityAssessmentScanRecordListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
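/// Starts a vulnerability assessment scan on a given SQL pool (POST to the
/// `/initiateScan` endpoint); the service answers with 200 (`Ok200`) or
/// 202 (`Accepted202`).
///
/// A minimal usage sketch (fenced as `ignore`; `operation_config` is assumed
/// to be built elsewhere and the remaining values are placeholders):
///
/// ```ignore
/// match initiate_scan(
///     &operation_config,
///     "00000000-0000-0000-0000-000000000000", // subscription id (placeholder)
///     "my-resource-group",
///     "my-workspace",
///     "my-sql-pool",
///     "default",   // vulnerability assessment name (placeholder)
///     "scan-2021", // caller-chosen scan id (placeholder)
/// )
/// .await?
/// {
///     initiate_scan::Response::Ok200 => println!("scan completed"),
///     initiate_scan::Response::Accepted202 => println!("scan accepted"),
/// }
/// ```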
pub async fn initiate_scan(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
vulnerability_assessment_name: &str,
scan_id: &str,
) -> std::result::Result<initiate_scan::Response, initiate_scan::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments/{}/scans/{}/initiateScan",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
vulnerability_assessment_name,
scan_id
);
let mut url = url::Url::parse(url_str).map_err(initiate_scan::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(initiate_scan::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(initiate_scan::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(initiate_scan::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(initiate_scan::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(initiate_scan::Response::Accepted202),
status_code => Err(initiate_scan::Error::DefaultResponse { status_code }),
}
}
pub mod initiate_scan {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
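/// Exports the result of an existing vulnerability assessment scan (POST to
/// the `/export` endpoint); returns 200 or 201 with a
/// `SqlPoolVulnerabilityAssessmentScansExport` describing the export.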
pub async fn export(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
vulnerability_assessment_name: &str,
scan_id: &str,
) -> std::result::Result<export::Response, export::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments/{}/scans/{}/export",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
vulnerability_assessment_name,
scan_id
);
let mut url = url::Url::parse(url_str).map_err(export::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(export::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(export::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(export::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolVulnerabilityAssessmentScansExport =
serde_json::from_slice(rsp_body).map_err(|source| export::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(export::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolVulnerabilityAssessmentScansExport =
serde_json::from_slice(rsp_body).map_err(|source| export::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(export::Response::Created201(rsp_value))
}
status_code => Err(export::Error::DefaultResponse { status_code }),
}
}
pub mod export {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(SqlPoolVulnerabilityAssessmentScansExport),
Created201(SqlPoolVulnerabilityAssessmentScansExport),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
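/// Gets a vulnerability assessment scan record of a given SQL pool.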
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
vulnerability_assessment_name: &str,
scan_id: &str,
) -> std::result::Result<VulnerabilityAssessmentScanRecord, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments/{}/scans/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
vulnerability_assessment_name,
scan_id
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VulnerabilityAssessmentScanRecord =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_security_alert_policies {
use super::{models, models::*, API_VERSION};
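/// Lists the security alert policies of a given SQL pool.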
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<ListSqlPoolSecurityAlertPolicies, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/securityAlertPolicies",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ListSqlPoolSecurityAlertPolicies =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
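/// Gets the security alert policy of a given SQL pool.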
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
security_alert_policy_name: &str,
) -> std::result::Result<SqlPoolSecurityAlertPolicy, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/securityAlertPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
security_alert_policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolSecurityAlertPolicy =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
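/// Creates or updates a SQL pool's security alert policy.
///
/// Hedged call sketch: `policy` is a `SqlPoolSecurityAlertPolicy` the caller
/// has populated, `config` is a caller-built `OperationConfig`, and the
/// names are placeholders.
/// ```ignore
/// match create_or_update(&config, "sub-id", "rg", "workspace", "pool", "default", &policy).await? {
///     create_or_update::Response::Ok200(p) | create_or_update::Response::Created201(p) => println!("{:?}", p),
/// }
/// ```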
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
security_alert_policy_name: &str,
parameters: &SqlPoolSecurityAlertPolicy,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/securityAlertPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
security_alert_policy_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolSecurityAlertPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolSecurityAlertPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(SqlPoolSecurityAlertPolicy),
Created201(SqlPoolSecurityAlertPolicy),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_vulnerability_assessment_rule_baselines {
use super::{models, models::*, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
vulnerability_assessment_name: &str,
rule_id: &str,
baseline_name: &str,
) -> std::result::Result<SqlPoolVulnerabilityAssessmentRuleBaseline, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments/{}/rules/{}/baselines/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
vulnerability_assessment_name,
rule_id,
baseline_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolVulnerabilityAssessmentRuleBaseline =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
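/// Creates or updates a vulnerability assessment rule baseline.
///
/// Sketch only; `baseline` is a caller-built
/// `SqlPoolVulnerabilityAssessmentRuleBaseline`, and every string below
/// (including the rule id) is a placeholder.
/// ```ignore
/// let result = create_or_update(
///     &config, "sub-id", "rg", "workspace", "pool",
///     "default", "VA1001", "default", &baseline, // placeholder identifiers
/// ).await?;
/// ```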
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
vulnerability_assessment_name: &str,
rule_id: &str,
baseline_name: &str,
parameters: &SqlPoolVulnerabilityAssessmentRuleBaseline,
) -> std::result::Result<SqlPoolVulnerabilityAssessmentRuleBaseline, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments/{}/rules/{}/baselines/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
vulnerability_assessment_name,
rule_id,
baseline_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolVulnerabilityAssessmentRuleBaseline = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
vulnerability_assessment_name: &str,
rule_id: &str,
baseline_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/vulnerabilityAssessments/{}/rules/{}/baselines/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
vulnerability_assessment_name,
rule_id,
baseline_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => Err(delete::Error::DefaultResponse { status_code }),
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod extended_sql_pool_blob_auditing_policies {
use super::{models, models::*, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
blob_auditing_policy_name: &str,
) -> std::result::Result<ExtendedSqlPoolBlobAuditingPolicy, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/extendedAuditingSettings/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
blob_auditing_policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExtendedSqlPoolBlobAuditingPolicy =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
blob_auditing_policy_name: &str,
parameters: &ExtendedSqlPoolBlobAuditingPolicy,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/extendedAuditingSettings/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
blob_auditing_policy_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExtendedSqlPoolBlobAuditingPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: ExtendedSqlPoolBlobAuditingPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(ExtendedSqlPoolBlobAuditingPolicy),
Created201(ExtendedSqlPoolBlobAuditingPolicy),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
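/// Lists extended blob auditing policies for a SQL pool.
///
/// Illustrative sketch with placeholder names; the result is printed via
/// `Debug` rather than assuming field names on the list-result model.
/// ```ignore
/// let page = list_by_sql_pool(&config, "sub-id", "rg", "workspace", "pool").await?;
/// println!("{:?}", page);
/// ```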
pub async fn list_by_sql_pool(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<ExtendedSqlPoolBlobAuditingPolicyListResult, list_by_sql_pool::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/extendedAuditingSettings",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_sql_pool::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_sql_pool::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_sql_pool::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_sql_pool::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExtendedSqlPoolBlobAuditingPolicyListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_sql_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list_by_sql_pool::Error::DefaultResponse { status_code }),
}
}
pub mod list_by_sql_pool {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod data_masking_policies {
use super::{models, models::*, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
data_masking_policy_name: &str,
) -> std::result::Result<DataMaskingPolicy, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/dataMaskingPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
data_masking_policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DataMaskingPolicy =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
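/// Creates or updates a SQL pool's data masking policy.
///
/// Sketch only; `policy` is a caller-built `DataMaskingPolicy` and the
/// names are placeholders.
/// ```ignore
/// let updated = create_or_update(&config, "sub-id", "rg", "workspace", "pool", "Default", &policy).await?;
/// ```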
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
data_masking_policy_name: &str,
parameters: &DataMaskingPolicy,
) -> std::result::Result<DataMaskingPolicy, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/dataMaskingPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
data_masking_policy_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DataMaskingPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod data_masking_rules {
use super::{models, models::*, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
data_masking_policy_name: &str,
data_masking_rule_name: &str,
) -> std::result::Result<DataMaskingRule, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/dataMaskingPolicies/{}/rules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
data_masking_policy_name,
data_masking_rule_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DataMaskingRule =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
data_masking_policy_name: &str,
data_masking_rule_name: &str,
parameters: &DataMaskingRule,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/dataMaskingPolicies/{}/rules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
data_masking_policy_name,
data_masking_rule_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DataMaskingRule = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: DataMaskingRule = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(DataMaskingRule),
Created201(DataMaskingRule),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
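/// Lists the data masking rules under a data masking policy.
///
/// Placeholder-name sketch, assuming a caller-built `OperationConfig`:
/// ```ignore
/// let rules = list_by_sql_pool(&config, "sub-id", "rg", "workspace", "pool", "Default").await?;
/// println!("{:?}", rules);
/// ```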
pub async fn list_by_sql_pool(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
data_masking_policy_name: &str,
) -> std::result::Result<DataMaskingRuleListResult, list_by_sql_pool::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/dataMaskingPolicies/{}/rules",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
data_masking_policy_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_sql_pool::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_sql_pool::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_sql_pool::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_sql_pool::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DataMaskingRuleListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_sql_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_sql_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_sql_pool::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_sql_pool {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_columns {
use super::{models, models::*, API_VERSION};
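/// Gets a column on a SQL pool table.
///
/// Sketch only; the schema, table, and column names below are placeholders.
/// ```ignore
/// let column = get(&config, "sub-id", "rg", "workspace", "pool", "dbo", "orders", "id").await?;
/// ```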
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
schema_name: &str,
table_name: &str,
column_name: &str,
) -> std::result::Result<SqlPoolColumn, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/schemas/{}/tables/{}/columns/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: SqlPoolColumn =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_workload_group {
use super::{models, models::*, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
workload_group_name: &str,
) -> std::result::Result<WorkloadGroup, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/workloadGroups/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
workload_group_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkloadGroup =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
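/// Creates or updates a workload group. Note that provisioning may complete
/// asynchronously: a 202 Accepted response carries no body.
///
/// Hedged sketch; `group` is a caller-built `WorkloadGroup` and the names
/// are placeholders.
/// ```ignore
/// match create_or_update(&config, "sub-id", "rg", "workspace", "pool", "wgExample", &group).await? {
///     create_or_update::Response::Accepted202 => println!("provisioning started"),
///     create_or_update::Response::Ok200(g) | create_or_update::Response::Created201(g) => println!("{:?}", g),
/// }
/// ```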
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
workload_group_name: &str,
parameters: &WorkloadGroup,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/workloadGroups/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
workload_group_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkloadGroup = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202),
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: WorkloadGroup = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(WorkloadGroup),
Accepted202,
Created201(WorkloadGroup),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
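/// Deletes a workload group; a 202 response means the delete is still in
/// progress.
///
/// Placeholder-name sketch:
/// ```ignore
/// match delete(&config, "sub-id", "rg", "workspace", "pool", "wgExample").await? {
///     delete::Response::Accepted202 => println!("deletion in progress"),
///     _ => println!("deleted"),
/// }
/// ```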
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
workload_group_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/workloadGroups/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
workload_group_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => Err(delete::Error::DefaultResponse { status_code }),
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<WorkloadGroupListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/workloadGroups",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkloadGroupListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod sql_pool_workload_classifier {
use super::{models, models::*, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
workload_group_name: &str,
workload_classifier_name: &str,
) -> std::result::Result<WorkloadClassifier, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/workloadGroups/{}/workloadClassifiers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
workload_group_name,
workload_classifier_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkloadClassifier =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
workload_group_name: &str,
workload_classifier_name: &str,
parameters: &WorkloadClassifier,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/workloadGroups/{}/workloadClassifiers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
workload_group_name,
workload_classifier_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkloadClassifier = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202),
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: WorkloadClassifier = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(WorkloadClassifier),
Accepted202,
Created201(WorkloadClassifier),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
workload_group_name: &str,
workload_classifier_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/workloadGroups/{}/workloadClassifiers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
workload_group_name,
workload_classifier_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => Err(delete::Error::DefaultResponse { status_code }),
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
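/// Lists workload classifiers for a workload group.
///
/// Placeholder-name sketch, assuming a caller-built `OperationConfig`:
/// ```ignore
/// let classifiers = list(&config, "sub-id", "rg", "workspace", "pool", "wgExample").await?;
/// println!("{:?}", classifiers);
/// ```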
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
workload_group_name: &str,
) -> std::result::Result<WorkloadClassifierListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlPools/{}/workloadGroups/{}/workloadClassifiers",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name,
workload_group_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkloadClassifierListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod workspace_managed_sql_server_blob_auditing_policies {
use super::{models, models::*, API_VERSION};
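    /// Gets a workspace managed SQL server's blob auditing policy.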
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
blob_auditing_policy_name: &str,
) -> std::result::Result<ServerBlobAuditingPolicy, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/auditingSettings/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
blob_auditing_policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ServerBlobAuditingPolicy =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
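    /// Creates or updates a workspace managed SQL server's blob auditing policy
    /// via an HTTP PUT. A 202 response carries no body and is surfaced as
    /// `Response::Accepted202`; polling for completion is left to the caller.
    ///
    /// A minimal calling sketch (values are illustrative; building the
    /// `ServerBlobAuditingPolicy` payload is assumed to happen elsewhere):
    ///
    /// ```ignore
    /// let policy: ServerBlobAuditingPolicy = build_policy(); // hypothetical helper
    /// let rsp = create_or_update(
    ///     &operation_config,
    ///     "my-subscription-id",
    ///     "my-resource-group",
    ///     "my-workspace",
    ///     "default",
    ///     &policy,
    /// )
    /// .await?;
    /// ```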
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
blob_auditing_policy_name: &str,
parameters: &ServerBlobAuditingPolicy,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/auditingSettings/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
blob_auditing_policy_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ServerBlobAuditingPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202),
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(ServerBlobAuditingPolicy),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
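    /// Lists the blob auditing policies of a workspace managed SQL server.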
pub async fn list_by_workspace(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<ServerBlobAuditingPolicyListResult, list_by_workspace::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/auditingSettings",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_workspace::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_workspace::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_workspace::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_workspace::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ServerBlobAuditingPolicyListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list_by_workspace::Error::DefaultResponse { status_code }),
}
}
pub mod list_by_workspace {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod workspace_managed_sql_server_extended_blob_auditing_policies {
use super::{models, models::*, API_VERSION};
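    /// Gets a workspace managed SQL server's extended blob auditing policy.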
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
blob_auditing_policy_name: &str,
) -> std::result::Result<ExtendedServerBlobAuditingPolicy, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/extendedAuditingSettings/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
blob_auditing_policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExtendedServerBlobAuditingPolicy =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
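    /// Creates or updates a workspace managed SQL server's extended blob
    /// auditing policy.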
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
blob_auditing_policy_name: &str,
parameters: &ExtendedServerBlobAuditingPolicy,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/extendedAuditingSettings/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
blob_auditing_policy_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExtendedServerBlobAuditingPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202),
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(ExtendedServerBlobAuditingPolicy),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
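    /// Lists the extended blob auditing policies of a workspace managed SQL
    /// server.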
pub async fn list_by_workspace(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<ExtendedServerBlobAuditingPolicyListResult, list_by_workspace::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/extendedAuditingSettings",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_workspace::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_workspace::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_workspace::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_workspace::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExtendedServerBlobAuditingPolicyListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_workspace::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list_by_workspace::Error::DefaultResponse { status_code }),
}
}
pub mod list_by_workspace {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod workspace_managed_sql_server_security_alert_policy {
use super::{models, models::*, API_VERSION};
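    /// Gets a workspace managed SQL server's security alert policy.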
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
security_alert_policy_name: &str,
) -> std::result::Result<ServerSecurityAlertPolicy, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/securityAlertPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
security_alert_policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ServerSecurityAlertPolicy =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
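    /// Creates or updates a workspace managed SQL server's security alert
    /// policy.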
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
security_alert_policy_name: &str,
parameters: &ServerSecurityAlertPolicy,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/securityAlertPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
security_alert_policy_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ServerSecurityAlertPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202),
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(ServerSecurityAlertPolicy),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
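    /// Lists the security alert policies of a workspace managed SQL server.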
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<ServerSecurityAlertPolicyListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/securityAlertPolicies",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ServerSecurityAlertPolicyListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod workspace_managed_sql_server_vulnerability_assessments {
use super::{models, models::*, API_VERSION};
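    /// Gets a workspace managed SQL server's vulnerability assessment.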
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
vulnerability_assessment_name: &str,
) -> std::result::Result<ServerVulnerabilityAssessment, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/vulnerabilityAssessments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
vulnerability_assessment_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ServerVulnerabilityAssessment =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
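    /// Creates or updates a workspace managed SQL server's vulnerability
    /// assessment. Both 200 and 201 responses are deserialized into a
    /// `ServerVulnerabilityAssessment`.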
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
vulnerability_assessment_name: &str,
parameters: &ServerVulnerabilityAssessment,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/vulnerabilityAssessments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
vulnerability_assessment_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ServerVulnerabilityAssessment = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: ServerVulnerabilityAssessment = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(ServerVulnerabilityAssessment),
Created201(ServerVulnerabilityAssessment),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
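    /// Deletes a workspace managed SQL server's vulnerability assessment.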
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
vulnerability_assessment_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/vulnerabilityAssessments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
vulnerability_assessment_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => Err(delete::Error::DefaultResponse { status_code }),
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
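    /// Lists the vulnerability assessments of a workspace managed SQL server.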
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<ServerVulnerabilityAssessmentListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/vulnerabilityAssessments",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ServerVulnerabilityAssessmentListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod workspace_managed_sql_server_encryption_protector {
use super::{models, models::*, API_VERSION};
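    /// Gets a workspace managed SQL server's encryption protector. Non-success
    /// responses are deserialized into an `ErrorResponse` and returned through
    /// `Error::DefaultResponse`.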
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
encryption_protector_name: &str,
) -> std::result::Result<EncryptionProtector, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/encryptionProtector/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
encryption_protector_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: EncryptionProtector =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
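    /// Creates or updates a workspace managed SQL server's encryption
    /// protector.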
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
encryption_protector_name: &str,
parameters: &EncryptionProtector,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/encryptionProtector/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
encryption_protector_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: EncryptionProtector = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(EncryptionProtector),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
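    /// Lists the encryption protectors of a workspace managed SQL server.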
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<EncryptionProtectorListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/encryptionProtector",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: EncryptionProtectorListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
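    /// Revalidates a workspace managed SQL server's encryption protector via an
    /// HTTP POST with an empty body (`Content-Length: 0`).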
pub async fn revalidate(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
encryption_protector_name: &str,
) -> std::result::Result<revalidate::Response, revalidate::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/encryptionProtector/{}/revalidate",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
encryption_protector_name
);
let mut url = url::Url::parse(url_str).map_err(revalidate::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(revalidate::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(revalidate::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(revalidate::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(revalidate::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(revalidate::Response::Accepted202),
status_code => Err(revalidate::Error::DefaultResponse { status_code }),
}
}
pub mod revalidate {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod workspace_managed_sql_server_usages {
use super::{models, models::*, API_VERSION};
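    /// Lists the SQL usages of a workspace managed SQL server.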
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<ServerUsageListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/sqlUsages",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ServerUsageListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod workspace_managed_sql_server_recoverable_sql_pools {
use super::{models, models::*, API_VERSION};
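    /// Lists the recoverable SQL pools of a workspace managed SQL server.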
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<RecoverableSqlPoolListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/recoverableSqlPools",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RecoverableSqlPoolListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(list::Error::DefaultResponse { status_code }),
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
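    /// Gets a recoverable SQL pool of a workspace managed SQL server.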
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
sql_pool_name: &str,
) -> std::result::Result<RecoverableSqlPool, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/recoverableSqlPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
sql_pool_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RecoverableSqlPool =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => Err(get::Error::DefaultResponse { status_code }),
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse { status_code: http::StatusCode },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod azure_ad_only_authentications {
use super::{models, models::*, API_VERSION};
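    /// Gets the named Azure AD only authentication property of a workspace.
    /// Non-success responses are deserialized into an `ErrorResponse` and
    /// returned through `Error::DefaultResponse`.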
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
azure_ad_only_authentication_name: &str,
) -> std::result::Result<AzureAdOnlyAuthentication, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/azureADOnlyAuthentications/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
azure_ad_only_authentication_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AzureAdOnlyAuthentication =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
azure_ad_only_authentication_name: &str,
azure_ad_only_authentication_info: &AzureAdOnlyAuthentication,
) -> std::result::Result<create::Response, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/azureADOnlyAuthentications/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name,
azure_ad_only_authentication_name
);
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(azure_ad_only_authentication_info).map_err(create::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AzureAdOnlyAuthentication =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: AzureAdOnlyAuthentication =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Created201(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(AzureAdOnlyAuthentication),
Created201(AzureAdOnlyAuthentication),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<AzureAdOnlyAuthenticationListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Synapse/workspaces/{}/azureADOnlyAuthentications",
operation_config.base_path(),
subscription_id,
resource_group_name,
workspace_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AzureAdOnlyAuthenticationListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
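// A minimal usage sketch (not part of the generated SDK; the subscription,
// resource group, and workspace names below are placeholder values, and
// `OperationConfig` construction is assumed to happen elsewhere in the
// consuming crate): listing a workspace's Azure AD-only authentication
// settings and then fetching one by name.
#[allow(dead_code)]
async fn example_azure_ad_only_authentications(
    operation_config: &crate::OperationConfig,
) -> std::result::Result<(), Box<dyn std::error::Error>> {
    let _all = azure_ad_only_authentications::list(
        operation_config,
        "subscription-id",
        "resource-group",
        "workspace",
    )
    .await?;
    let _one = azure_ad_only_authentications::get(
        operation_config,
        "subscription-id",
        "resource-group",
        "workspace",
        "default",
    )
    .await?;
    Ok(())
}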
| 48.855512 | 366 | 0.594818 |
4805f9dc6f904c6e41318e002446e22879736bb0 | 2,556 | ///
/// Since Rust is a little different and global variables are difficult,
/// we need a place for separate instances of players, each with its own
/// paddle image, score, and movement handling.
///
use ggez::event::{EventHandler, KeyCode};
use ggez::graphics::{self, Color, DrawParam, Mesh};
use ggez::{timer, Context, GameResult};
use ggez::input::{keyboard};
use ggez::nalgebra::{Point2};
use crate::{WINDOW_HEIGHT};
pub const PADDLE_SPEED: f32 = 400.0;
pub struct Player {
paddle: Mesh,
bbox: graphics::Rect,
pos: Point2<f32>,
score: u32,
    // For now the keyboard keys for (up, down) are stored per player
    // instance, since there can be multiple players and each will
    // use different keys to move up/down.
keys: (KeyCode, KeyCode),
}
// ggez has internal logic such that, even when things are drawn on a smaller
// canvas, the coordinate system it works with still corresponds to the full window size.
impl Player {
pub fn new(ctx: &mut Context, width: f32, height: f32, init_pos: Point2<f32>, keys: Option<(KeyCode, KeyCode)>) -> GameResult<Self> {
let dpi_factor = graphics::window(ctx).get_hidpi_factor() as f32;
let bbox = graphics::Rect::new(0.0, 0.0, width * dpi_factor, height * dpi_factor);
let paddle = Mesh::new_rectangle(
ctx,
graphics::DrawMode::Fill(graphics::FillOptions::default()),
bbox.clone(),
Color::from_rgb(255, 255, 255),
)?;
Ok(Player {
paddle,
bbox,
pos: init_pos,
score: 0,
keys: if let Some(_keys) = keys { _keys } else { (KeyCode::Up, KeyCode::Down) }
})
}
pub fn get_score_string(&self) -> String {
self.score.to_string()
}
pub fn update(&mut self, ctx: &mut Context, elapsed_time: f32) -> GameResult {
if keyboard::is_key_pressed(ctx, self.keys.0) {
            // TODO(shub): update to use the time delta instead of a constant
            let new_y = self.pos.y - PADDLE_SPEED * elapsed_time;
self.pos = Point2::new(self.pos.x, new_y.max(0.0));
} else if keyboard::is_key_pressed(ctx, self.keys.1) {
let new_y = self.pos.y + PADDLE_SPEED * elapsed_time;
self.pos = Point2::new(self.pos.x, new_y.min(WINDOW_HEIGHT - self.bbox.h));
}
Ok(())
}
pub fn draw(&mut self, ctx: &mut Context) -> GameResult {
graphics::draw(ctx, &self.paddle, DrawParam::default().dest(self.pos))?;
Ok(())
}
}
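// A minimal construction sketch (hypothetical sizes and positions, not part
// of the original file): two players sharing one window, bound to different
// movement keys. The right player falls back to the default (Up, Down) keys.
#[allow(dead_code)]
fn make_players(ctx: &mut Context) -> GameResult<(Player, Player)> {
    let left = Player::new(
        ctx,
        10.0,
        80.0,
        Point2::new(20.0, 100.0),
        Some((KeyCode::W, KeyCode::S)),
    )?;
    let right = Player::new(ctx, 10.0, 80.0, Point2::new(600.0, 100.0), None)?;
    Ok((left, right))
}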
| 36.514286 | 137 | 0.616197 |
9138b96ad1bb632fcc39add9b87979d7a34b0782 | 5,399 | use crate::Entity;
use std::any::{Any, TypeId};
use std::fmt::Debug;
/// Determines how the event propagates through the tree
#[derive(Debug, Clone, PartialEq)]
pub enum Propagation {
/// Events propagate down the tree to the target entity, e.g. from grand-parent to parent to child (target)
Down,
/// Events propagate up the tree to the target entity, e.g. from child (target) to parent to grand-parent
Up,
/// Events propagate down the tree to the target entity and then back up to the root
DownUp,
/// Events propagate from the target entity to all entities below but on the same branch
Fall,
/// Events propagate directly to the target entity and to no others
Direct,
/// Events propagate to all entities in the tree
All,
}
/// A message can be any static type.
pub trait Message: Any {
// An &Any can be cast to a reference to a concrete type.
fn as_any(&self) -> &dyn Any;
// Perform the test
// fn equals_a(&self, _: &dyn Message) -> bool;
}
// An Any is not normally clonable. This is a way around that.
// pub trait MessageClone {
// fn clone_message(&self) -> Box<Message>;
// }
// Implements MessageClone for any type that Implements Message and Clone
// impl<T> MessageClone for T
// where
// T: 'static + Message + Clone,
// {
// fn clone_message(&self) -> Box<Message> {
// Box::new(self.clone())
// }
// }
// An implementation of clone for boxed messages
// impl Clone for Box<Message> {
// fn clone(&self) -> Box<Message> {
// self.clone_message()
// }
// }
impl dyn Message {
// Check if a message is a certain type
pub fn is<T: Message>(&self) -> bool {
// Get TypeId of the type this function is instantiated with
let t = TypeId::of::<T>();
// Get TypeId of the type in the trait object
let concrete = self.type_id();
        // Compare both TypeIds for equality
t == concrete
}
// Casts a message to the specified type if the message is of that type
pub fn downcast<T>(&mut self) -> Option<&mut T>
where
T: Message,
{
if self.is::<T>() {
unsafe { Some(&mut *(self as *mut dyn Message as *mut T)) }
} else {
None
}
}
}
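// A minimal usage sketch (the `WindowResized` type below is hypothetical,
// defined only for illustration; the blanket impl further down makes any
// 'static type a `Message`): checking a boxed message's concrete type and
// mutating it in place through `downcast`.
#[allow(dead_code)]
fn downcast_example() {
    struct WindowResized {
        width: f32,
    }
    let mut boxed: Box<dyn Message> = Box::new(WindowResized { width: 800.0 });
    // `is` compares the TypeId of the concrete type behind the trait object.
    assert!(boxed.is::<WindowResized>());
    if let Some(resized) = boxed.downcast::<WindowResized>() {
        resized.width *= 2.0;
    }
}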
// Implements message for any static type that implements Clone
impl<S: 'static> Message for S {
fn as_any(&self) -> &dyn Any {
self
}
// fn equals_a(&self, other: &dyn Message) -> bool {
// //other.as_any().type_id() == self.as_any().type_id()
// //println!("{:?} {:?}", other.as_any().type_id(), self.as_any().type_id());
// //println!("{:?} {:?}", other, self);
// other
// .as_any()
// .downcast_ref::<S>()
// .map_or(false, |a| self == a)
// }
}
/// An event is a wrapper around a message and provides metadata on how the event should be propagated through the tree
pub struct Event {
// The entity that produced the event. Entity::null() for OS events or unspecified.
pub origin: Entity,
// The entity the event should be sent to. Entity::null() to send to all entities.
pub target: Entity,
// How the event propagates through the tree.
pub propagation: Propagation,
// Whether the event can be consumed
pub consumable: bool,
// Determines whether the event should continue to be propagated
pub(crate) consumed: bool,
// Whether the event is unique (only the latest copy can exist in a queue at a time)
pub unique: bool,
// Specifies an order index which is used to sort the event queue
pub order: i32,
// The event message
pub message: Box<dyn Message>,
}
// // Allows events to be compared for equality
// impl PartialEq for Event {
// fn eq(&self, other: &Event) -> bool {
// self.message.equals_a(&*other.message)
// //&& self.origin == other.origin
// && self.target == other.target
// }
// }
impl Event {
/// Creates a new event with a specified message
pub fn new<M>(message: M) -> Self
where
M: Message,
{
Event {
origin: Entity::null(),
target: Entity::null(),
propagation: Propagation::Up,
consumable: true,
consumed: false,
unique: false,
order: 0,
message: Box::new(message),
}
}
/// Sets the target of the event
pub fn target(mut self, entity: Entity) -> Self {
self.target = entity;
self
}
/// Sets the origin of the event
pub fn origin(mut self, entity: Entity) -> Self {
self.origin = entity;
self
}
/// Specifies that the event is unique
/// (only one of this event type should exist in the event queue at once)
pub fn unique(mut self) -> Self {
self.unique = true;
self
}
/// Sets the propagation of the event
pub fn propagate(mut self, propagation: Propagation) -> Self {
self.propagation = propagation;
self
}
pub fn direct(mut self, entity: Entity) -> Self {
self.propagation = Propagation::Direct;
self.target = entity;
self
}
/// Consumes the event
/// (prevents the event from continuing on its propagation path)
pub fn consume(&mut self) {
self.consumed = true;
}
}
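// A minimal builder sketch (the `ButtonPressed` message type and the entity
// argument are hypothetical): constructing an event that is delivered
// directly to a single target entity and deduplicated in the queue.
#[allow(dead_code)]
fn event_example(widget: Entity) -> Event {
    struct ButtonPressed;
    Event::new(ButtonPressed).direct(widget).unique()
}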
| 29.342391 | 119 | 0.594925 |
118bf52f38af3fde1d40a0bfb67f4cf81caacc94 | 223 | // As a quick test, let's try dumping the generated bindings here...
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
include!(concat!(env!("OUT_DIR"), "/linux-bindgen.rs"));
| 37.166667 | 73 | 0.726457 |
1422dda276b9b0a78eba0956d6ac92543f909b3f | 300 | // Make sure that we're handling bound lifetimes correctly when validating trait
// bounds.
// run-pass
trait X<'a> {
type F: FnOnce(&i32) -> &'a i32;
}
fn f<T: for<'r> X<'r> + ?Sized>() {
None::<T::F>.map(|f| f(&0));
}
fn main() {
f::<dyn for<'x> X<'x, F = fn(&i32) -> &'x i32>>();
}
| 18.75 | 80 | 0.523333 |
bb284f78f285ae9719eb1d1c20a7e9dbffafe62b | 550 | // Function calls aren't allowed in static initializers.
#[allow(dead_code)]
fn screen_area() -> usize {
640 * 480
}
const NBADGERS: usize = 8; // ok, 8 is a constant
static NBUNNIES: usize = NBADGERS * 50; // ok, constant expression
static NCARROTS: usize = screen_area() / 100; // error: function call in static
//~^ ERROR: calls in statics are limited to constant functions, struct and enum constructors
fn main() {
assert_eq!(NBADGERS, 8);
assert_eq!(NBUNNIES, 400);
assert_eq!(NCARROTS, 64 * 48);
}
| 30.555556 | 92 | 0.654545 |
4a481beffd2a2c06d540bbaa50b2a32790d8da3c | 795 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// aux-build:go_trait.rs
#![feature(specialization)]
extern crate go_trait;
use go_trait::{Go,GoMut};
use std::fmt::Debug;
use std::default::Default;
struct MyThingy;
impl Go for MyThingy {
fn go(&self, arg: isize) { }
}
impl GoMut for MyThingy {
fn go_mut(&mut self, arg: isize) { }
}
fn main() { }
| 23.382353 | 68 | 0.70566 |
16a7c6d076c73d488c372642010b69f1a84efb9c | 1,573 | use dotenv::dotenv;
use serenity::{client::bridge::gateway::GatewayIntents, prelude::*};
use std::env;
mod commands;
mod database;
mod modules;
use modules::event_handler::Handler;
#[tokio::main]
async fn main() {
dotenv().ok();
database::core::migrate_database();
let token = env::var("DISCORD_TOKEN").expect("Expected environment variable DISCORD_TOKEN");
// An application ID is required to register slash commands.
// It's usually your bot's client ID, which can be derived from your bot's token.
let application_id: u64 = if let Ok(application_id) = env::var("DISCORD_APPLICATION_ID") {
application_id
.parse()
.expect("DISCORD_APPLICATION_ID must be an integer")
} else {
let index = token
.find('.')
.expect("A proper bot token must consist of three parts separated by periods.");
let client_id = &token[..index];
let base64_config = base64::Config::new(base64::CharacterSet::UrlSafe, true);
let client_id = base64::decode_config(client_id, base64_config).unwrap();
std::str::from_utf8(&client_id)
.expect("Expected decoded token slice to be UTF-8.")
.parse()
.expect("Expected decoded token slice to be an integer.")
};
let mut client = Client::builder(token)
.event_handler(Handler)
.application_id(application_id)
.intents(GatewayIntents::empty())
.await
.expect("Error creating client");
client.start().await.expect("Error starting client");
}
| 34.955556 | 96 | 0.6459 |
48ea9cd4e44754817b086816a467abba7e652a80 | 1,124 | use sdl2::VideoSubsystem;
use sdl2::video::Window;
use super::graphics::graphics_context::GraphicsContext;
use crate::core::graphics::renderer::Renderer;
pub struct AppWindow
{
sdl_window: Window,
graphics_context: GraphicsContext
}
impl AppWindow
{
pub fn new(video: &VideoSubsystem,
args: AppWindowArgs) -> Self
{
let sdl_window = video
.window(&args.name[..], args.width, args.height)
.opengl()
.resizable()
.build()
.unwrap();
let graphics_context = GraphicsContext::new(&sdl_window,
video,
args.width as i32,
args.height as i32);
AppWindow{sdl_window, graphics_context}
}
pub fn update_view(&self, renderer: Box<&mut dyn Renderer>)
{
renderer.render(&self.graphics_context);
self.sdl_window.gl_swap_window();
}
}
pub struct AppWindowArgs
{
name: String,
width: u32,
height: u32
}
impl AppWindowArgs
{
pub fn new(name: String, width: u32, height:u32) -> Self
{
AppWindowArgs{name, width, height}
}
} | 22.48 | 65 | 0.605872 |
dd2ea59f787f1854199c36055f455630f722a26a | 8,169 | /// Calls a function and aborts if it panics.
///
/// This is useful in unsafe code where we can't recover from panics.
#[cfg(feature = "default")]
#[inline]
pub fn abort_on_panic<T>(f: impl FnOnce() -> T) -> T {
struct Bomb;
impl Drop for Bomb {
fn drop(&mut self) {
std::process::abort();
}
}
let bomb = Bomb;
let t = f();
std::mem::forget(bomb);
t
}
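// A minimal usage sketch (hypothetical callback plumbing, not part of the
// original module): wrapping a user-supplied closure that is invoked from a
// context that must never unwind, e.g. across an FFI or drop boundary.
#[cfg(feature = "default")]
#[allow(dead_code)]
fn run_no_unwind(f: impl FnOnce()) {
    // If `f` panics, unwinding drops the internal bomb and the process
    // aborts instead of propagating the panic upward.
    abort_on_panic(f);
}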
/// Generates a random number in `0..n`.
#[cfg(any(feature = "unstable", feature = "default"))]
pub fn random(n: u32) -> u32 {
use std::cell::Cell;
use std::num::Wrapping;
thread_local! {
static RNG: Cell<Wrapping<u32>> = {
// Take the address of a local value as seed.
let mut x = 0i32;
let r = &mut x;
let addr = r as *mut i32 as usize;
Cell::new(Wrapping(addr as u32))
}
}
RNG.with(|rng| {
// This is the 32-bit variant of Xorshift.
//
// Source: https://en.wikipedia.org/wiki/Xorshift
let mut x = rng.get();
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
rng.set(x);
// This is a fast alternative to `x % n`.
//
// Author: Daniel Lemire
// Source: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
((u64::from(x.0)).wrapping_mul(u64::from(n)) >> 32) as u32
})
}
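// A minimal usage sketch (hypothetical helper): picking a uniformly random
// element of a non-empty slice. `random(n)` returns a value in `0..n`, so
// the index is always in bounds.
#[cfg(any(feature = "unstable", feature = "default"))]
#[allow(dead_code)]
fn pick<T>(items: &[T]) -> &T {
    assert!(!items.is_empty());
    &items[random(items.len() as u32) as usize]
}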
/// Add additional context to errors
pub(crate) trait Context {
fn context(self, message: impl Fn() -> String) -> Self;
}
/// Defers evaluation of a block of code until the end of the scope.
#[cfg(feature = "default")]
#[doc(hidden)]
macro_rules! defer {
($($body:tt)*) => {
let _guard = {
pub struct Guard<F: FnOnce()>(Option<F>);
impl<F: FnOnce()> Drop for Guard<F> {
fn drop(&mut self) {
(self.0).take().map(|f| f());
}
}
Guard(Some(|| {
let _ = { $($body)* };
}))
};
};
}
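// A minimal usage sketch (illustrative only): the deferred block runs when
// `_guard` is dropped at scope exit, even on early return.
#[cfg(feature = "default")]
#[allow(dead_code)]
fn defer_example() {
    defer! {
        println!("runs last, at scope exit");
    }
    println!("runs first");
}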
/// Declares unstable items.
#[doc(hidden)]
macro_rules! cfg_unstable {
($($item:item)*) => {
$(
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
$item
)*
}
}
/// Declares unstable and default items.
#[doc(hidden)]
macro_rules! cfg_unstable_default {
($($item:item)*) => {
$(
#[cfg(all(feature = "default", feature = "unstable"))]
            #[cfg_attr(feature = "docs", doc(cfg(unstable)))]
$item
)*
}
}
/// Declares Unix-specific items.
#[doc(hidden)]
macro_rules! cfg_unix {
($($item:item)*) => {
$(
#[cfg(any(unix, feature = "docs"))]
#[cfg_attr(feature = "docs", doc(cfg(unix)))]
$item
)*
}
}
/// Declares Windows-specific items.
#[doc(hidden)]
macro_rules! cfg_windows {
($($item:item)*) => {
$(
#[cfg(any(windows, feature = "docs"))]
#[cfg_attr(feature = "docs", doc(cfg(windows)))]
$item
)*
}
}
/// Declares items when the "docs" feature is enabled.
#[doc(hidden)]
macro_rules! cfg_docs {
($($item:item)*) => {
$(
#[cfg(feature = "docs")]
$item
)*
}
}
/// Declares items when the "docs" feature is disabled.
#[doc(hidden)]
macro_rules! cfg_not_docs {
($($item:item)*) => {
$(
#[cfg(not(feature = "docs"))]
$item
)*
}
}
/// Declares std items.
#[allow(unused_macros)]
#[doc(hidden)]
macro_rules! cfg_std {
($($item:item)*) => {
$(
#[cfg(feature = "std")]
$item
)*
}
}
/// Declares default items.
#[allow(unused_macros)]
#[doc(hidden)]
macro_rules! cfg_default {
($($item:item)*) => {
$(
#[cfg(feature = "default")]
$item
)*
}
}
/// Defines an extension trait for a base trait.
///
/// In generated docs, the base trait will contain methods from the extension trait. In actual
/// code, the base trait will be re-exported and the extension trait will be hidden. We then
/// re-export the extension trait from the prelude.
///
/// Inside invocations of this macro, we write a definition that looks similar to the final
/// rendered docs, and the macro then generates all the boilerplate for us.
#[doc(hidden)]
macro_rules! extension_trait {
(
// Interesting patterns:
// - `$name`: trait name that gets rendered in the docs
// - `$ext`: name of the hidden extension trait
// - `$base`: base trait
#[doc = $doc:tt]
pub trait $name:ident {
$($body_base:tt)*
}
#[doc = $doc_ext:tt]
pub trait $ext:ident: $base:path {
$($body_ext:tt)*
}
// Shim trait impls that only appear in docs.
$($imp:item)*
) => {
// A fake `impl Future` type that doesn't borrow.
#[allow(dead_code)]
mod owned {
#[doc(hidden)]
pub struct ImplFuture<T>(std::marker::PhantomData<T>);
}
// A fake `impl Future` type that borrows its environment.
#[allow(dead_code)]
mod borrowed {
#[doc(hidden)]
pub struct ImplFuture<'a, T>(std::marker::PhantomData<&'a T>);
}
// Render a fake trait combining the bodies of the base trait and the extension trait.
#[cfg(feature = "docs")]
#[doc = $doc]
pub trait $name {
extension_trait!(@doc () $($body_base)* $($body_ext)*);
}
// When not rendering docs, re-export the base trait from the futures crate.
#[cfg(not(feature = "docs"))]
pub use $base as $name;
// The extension trait that adds methods to any type implementing the base trait.
#[doc = $doc_ext]
pub trait $ext: $name {
extension_trait!(@ext () $($body_ext)*);
}
// Blanket implementation of the extension trait for any type implementing the base trait.
impl<T: $name + ?Sized> $ext for T {}
// Shim trait impls that only appear in docs.
$(#[cfg(feature = "docs")] $imp)*
};
// Optimization: expand `$head` eagerly before starting a new method definition.
(@ext ($($head:tt)*) #[doc = $d:literal] $($tail:tt)*) => {
$($head)* extension_trait!(@ext (#[doc = $d]) $($tail)*);
};
// Parse the return type in an extension method.
(@doc ($($head:tt)*) -> impl Future<Output = $out:ty> $(+ $lt:lifetime)? [$f:ty] $($tail:tt)*) => {
extension_trait!(@doc ($($head)* -> owned::ImplFuture<$out>) $($tail)*);
};
(@ext ($($head:tt)*) -> impl Future<Output = $out:ty> $(+ $lt:lifetime)? [$f:ty] $($tail:tt)*) => {
extension_trait!(@ext ($($head)* -> $f) $($tail)*);
};
// Parse the return type in an extension method.
(@doc ($($head:tt)*) -> impl Future<Output = $out:ty> + $lt:lifetime [$f:ty] $($tail:tt)*) => {
extension_trait!(@doc ($($head)* -> borrowed::ImplFuture<$lt, $out>) $($tail)*);
};
(@ext ($($head:tt)*) -> impl Future<Output = $out:ty> + $lt:lifetime [$f:ty] $($tail:tt)*) => {
extension_trait!(@ext ($($head)* -> $f) $($tail)*);
};
// Parse a token.
(@doc ($($head:tt)*) $token:tt $($tail:tt)*) => {
extension_trait!(@doc ($($head)* $token) $($tail)*);
};
(@ext ($($head:tt)*) $token:tt $($tail:tt)*) => {
extension_trait!(@ext ($($head)* $token) $($tail)*);
};
// Handle the end of the token list.
(@doc ($($head:tt)*)) => { $($head)* };
(@ext ($($head:tt)*)) => { $($head)* };
// Parse imports at the beginning of the macro.
($import:item $($tail:tt)*) => {
#[cfg(feature = "docs")]
$import
extension_trait!($($tail)*);
};
}
| 28.968085 | 104 | 0.492716 |
09bd1c8492f919d8b4d6cf37df772ac1a3fc8892 | 202 | fn main() -> i32 {
//~^ ERROR `main` has invalid return type `i32`
//~| NOTE `main` can only return types that implement `std::process::Termination`
//~| HELP consider using `()`, or a `Result`
0
}
| 28.857143 | 81 | 0.633663 |
098aeb95b9e445349ef296c8df94b97c16981303 | 701 | use std::env;
use tools::{teprintln, tprintln, CompatibleDB, DB};
fn main() {
let args = env::args().collect::<Vec<String>>();
if args.len() < 2 {
teprintln!("Expected bano csv file");
return;
}
let mut db = DB::new("addresses.db", 10000, true).expect("failed to create DB");
bano::import_addresses(&args[1], &mut db);
tprintln!(
"Got {} addresses in {} cities (and {} errors)",
db.get_nb_addresses(),
db.get_nb_cities(),
db.get_nb_errors(),
);
teprintln!("Errors by categories:");
let rows = db.get_nb_by_errors_kind();
for (kind, nb) in rows {
teprintln!(" {} => {} occurences", kind, nb);
}
}
| 25.962963 | 84 | 0.563481 |
503509544b95eac4ca6650ad9f30401b20694839 | 629 | use std::ops::Sub;
use num_traits::{CheckedAdd, One, Zero};
use num_traits::cast::{FromPrimitive, AsPrimitive};
use std::hash::Hash;
use std::fmt::{Debug, Display};
/// Trait for exponents in polynomials.
pub trait Exponent
: Hash
+ Zero
+ Debug
+ Display
+ One
+ FromPrimitive
+ AsPrimitive<u32>
+ CheckedAdd
+ Sub<Output = Self>
+ Ord
+ Clone {
}
impl<
T: Hash
+ Zero
+ Debug
+ Display
+ One
+ FromPrimitive
+ AsPrimitive<u32>
+ CheckedAdd
+ Sub<Output = Self>
+ Ord
+ Clone,
> Exponent for T
{
}
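// A minimal sketch (hypothetical function, not part of the original module):
// thanks to the blanket impl above, any suitable integer type can be used
// as an exponent, and the `CheckedAdd` bound guards against overflow.
#[allow(dead_code)]
fn add_exponents<E: Exponent>(a: E, b: E) -> Option<E> {
    a.checked_add(&b)
}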
| 17 | 51 | 0.54531 |
edd0d73bdb86dbc9e31c6986ed1404ac7b83ce40 | 4,792 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::rate_limiter::RateLimiterMonotonic,
anyhow::{Context as _, Error},
fidl_fuchsia_update_channel::{ProviderRequest, ProviderRequestStream},
fuchsia_syslog::fx_log_warn,
fuchsia_zircon as zx,
futures::prelude::*,
serde::{Deserialize, Serialize},
std::{fs::File, io, path::PathBuf},
};
pub(crate) struct ProviderHandler {
misc_info_dir: PathBuf,
warn_rate_limiter: RateLimiterMonotonic,
}
impl Default for ProviderHandler {
fn default() -> Self {
Self {
misc_info_dir: "/misc/ota".into(),
warn_rate_limiter: RateLimiterMonotonic::from_delay(GET_CURRENT_WARN_DELAY),
}
}
}
const GET_CURRENT_WARN_DELAY: zx::Duration = zx::Duration::from_minutes(30);
impl ProviderHandler {
pub(crate) async fn handle_request_stream(
&self,
mut stream: ProviderRequestStream,
) -> Result<(), Error> {
while let Some(request) =
stream.try_next().await.context("extracting request from stream")?
{
match request {
ProviderRequest::GetCurrent { responder } => {
let channel = self.get_current().unwrap_or_else(|err| {
self.warn_rate_limiter.rate_limit(|| {
fx_log_warn!("error getting current channel: {}", err);
});
"".into()
});
responder.send(&channel).context("sending GetCurrent response")?;
}
}
}
Ok(())
}
fn get_current(&self) -> Result<String, Error> {
// TODO: use async IO instead of sync IO once async IO is easy.
let file = File::open(self.misc_info_dir.join("current_channel.json"))
.context("opening current_channel.json")?;
let contents: ChannelProviderContents = serde_json::from_reader(io::BufReader::new(file))
.context("reading current_channel.json")?;
let ChannelProviderContents::Version1(info) = contents;
Ok(info.legacy_amber_source_name.unwrap_or_else(|| "".into()))
}
}
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(tag = "version", content = "content", deny_unknown_fields)]
enum ChannelProviderContents {
#[serde(rename = "1")]
Version1(ChannelProviderV1),
}
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
struct ChannelProviderV1 {
legacy_amber_source_name: Option<String>,
}
#[cfg(test)]
mod tests {
use {
super::*,
fidl::endpoints::create_proxy_and_stream,
fidl_fuchsia_update_channel::{ProviderMarker, ProviderProxy},
fuchsia_async as fasync,
std::fs,
tempfile::TempDir,
};
fn spawn_info_handler(info_dir: &TempDir) -> ProviderProxy {
let info_handler = ProviderHandler {
misc_info_dir: info_dir.path().into(),
warn_rate_limiter: RateLimiterMonotonic::from_delay(GET_CURRENT_WARN_DELAY),
};
let (proxy, stream) =
create_proxy_and_stream::<ProviderMarker>().expect("create_proxy_and_stream");
fasync::spawn(async move { info_handler.handle_request_stream(stream).map(|_| ()).await });
proxy
}
#[fasync::run_singlethreaded(test)]
async fn test_fidl_get_channel_works() {
let tempdir = TempDir::new().expect("create tempdir");
fs::write(
tempdir.path().join("current_channel.json"),
r#"{"version":"1","content":{"legacy_amber_source_name":"example"}}"#,
)
.expect("write current_channel.json");
let proxy = spawn_info_handler(&tempdir);
let res = proxy.get_current().await;
assert_eq!(res.map_err(|e| e.to_string()), Ok("example".into()));
}
#[fasync::run_singlethreaded(test)]
async fn test_fidl_get_channel_handles_missing_file() {
let tempdir = TempDir::new().expect("create tempdir");
let proxy = spawn_info_handler(&tempdir);
let res = proxy.get_current().await;
assert_eq!(res.map_err(|e| e.to_string()), Ok("".into()));
}
#[fasync::run_singlethreaded(test)]
async fn test_fidl_get_channel_handles_unexpected_contents() {
let tempdir = TempDir::new().expect("create tempdir");
let proxy = spawn_info_handler(&tempdir);
fs::write(tempdir.path().join("current_channel.json"), r#"{"version":"1","content":{}}"#)
.expect("write current_channel.json");
let res = proxy.get_current().await;
assert_eq!(res.map_err(|e| e.to_string()), Ok("".into()));
}
}
| 35.235294 | 99 | 0.622496 |
9087dec4469403a84ac6a99ed4653f96798eb0ab | 5,898 | use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
pub mod file;
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Idl {
pub version: String,
pub name: String,
pub instructions: Vec<IdlInstruction>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub state: Option<IdlState>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub accounts: Vec<IdlTypeDefinition>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub types: Vec<IdlTypeDefinition>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub events: Option<Vec<IdlEvent>>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub errors: Option<Vec<IdlErrorCode>>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub metadata: Option<JsonValue>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct IdlState {
#[serde(rename = "struct")]
pub strct: IdlTypeDefinition,
pub methods: Vec<IdlInstruction>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct IdlInstruction {
pub name: String,
pub accounts: Vec<IdlAccountItem>,
pub args: Vec<IdlField>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct IdlAccounts {
pub name: String,
pub accounts: Vec<IdlAccountItem>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(untagged)]
pub enum IdlAccountItem {
IdlAccount(IdlAccount),
IdlAccounts(IdlAccounts),
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct IdlAccount {
pub name: String,
pub is_mut: bool,
pub is_signer: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct IdlField {
pub name: String,
#[serde(rename = "type")]
pub ty: IdlType,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct IdlEvent {
pub name: String,
pub fields: Vec<IdlEventField>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct IdlEventField {
pub name: String,
#[serde(rename = "type")]
pub ty: IdlType,
pub index: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct IdlTypeDefinition {
pub name: String,
#[serde(rename = "type")]
pub ty: IdlTypeDefinitionTy,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase", tag = "kind")]
pub enum IdlTypeDefinitionTy {
Struct { fields: Vec<IdlField> },
Enum { variants: Vec<IdlEnumVariant> },
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct IdlEnumVariant {
pub name: String,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub fields: Option<EnumFields>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(untagged)]
pub enum EnumFields {
Named(Vec<IdlField>),
Tuple(Vec<IdlType>),
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum IdlType {
Bool,
U8,
I8,
U16,
I16,
U32,
I32,
U64,
I64,
U128,
I128,
Bytes,
String,
PublicKey,
Defined(String),
Option(Box<IdlType>),
Vec(Box<IdlType>),
Array(Box<IdlType>, usize),
}
impl std::str::FromStr for IdlType {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut s = s.to_string();
s.retain(|c| !c.is_whitespace());
let r = match s.as_str() {
"bool" => IdlType::Bool,
"u8" => IdlType::U8,
"i8" => IdlType::I8,
"u16" => IdlType::U16,
"i16" => IdlType::I16,
"u32" => IdlType::U32,
"i32" => IdlType::I32,
"u64" => IdlType::U64,
"i64" => IdlType::I64,
"u128" => IdlType::U128,
"i128" => IdlType::I128,
"Vec<u8>" => IdlType::Bytes,
"String" => IdlType::String,
"Pubkey" => IdlType::PublicKey,
_ => match s.to_string().strip_prefix("Option<") {
None => match s.to_string().strip_prefix("Vec<") {
None => match s.to_string().strip_prefix('[') {
None => IdlType::Defined(s.to_string()),
Some(inner) => {
let inner = &inner[..inner.len() - 1];
let mut parts = inner.split(';');
let ty = IdlType::from_str(parts.next().unwrap()).unwrap();
let len = parts.next().unwrap().parse::<usize>().unwrap();
assert!(parts.next().is_none());
IdlType::Array(Box::new(ty), len)
}
},
Some(inner) => {
let inner_ty = Self::from_str(
inner
.strip_suffix('>')
.ok_or_else(|| anyhow::anyhow!("Invalid option"))?,
)?;
IdlType::Vec(Box::new(inner_ty))
}
},
Some(inner) => {
let inner_ty = Self::from_str(
inner
.strip_suffix('>')
.ok_or_else(|| anyhow::anyhow!("Invalid option"))?,
)?;
IdlType::Option(Box::new(inner_ty))
}
},
};
Ok(r)
}
}
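// A quick sanity sketch (hypothetical test, mirroring the parsing rules
// above): whitespace is stripped first, so `[u8; 32]` and `[u8;32]` parse
// identically.
#[cfg(test)]
mod idl_type_from_str_sketch {
    use super::IdlType;
    use std::str::FromStr;

    #[test]
    fn parses_nested_types() {
        assert_eq!(IdlType::from_str("u64").unwrap(), IdlType::U64);
        assert_eq!(
            IdlType::from_str("Option<u64>").unwrap(),
            IdlType::Option(Box::new(IdlType::U64))
        );
        assert_eq!(
            IdlType::from_str("[u8; 32]").unwrap(),
            IdlType::Array(Box::new(IdlType::U8), 32)
        );
    }
}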
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct IdlErrorCode {
pub code: u32,
pub name: String,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub msg: Option<String>,
}
| 29.939086 | 87 | 0.561038 |
165dde64dbb336aac5c10c325cb93ea867ea930c | 8,782 | // Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::pool::AccountSeqNumberClient;
use crate::TxStatus;
use anyhow::Result;
use crypto::keygen::KeyGen;
use network_api::messages::{PeerTransactionsMessage, TransactionsMessage};
use network_api::PeerId;
use parking_lot::RwLock;
use starcoin_config::NodeConfig;
use starcoin_executor::{
create_signed_txn_with_association_account, encode_transfer_script_function,
DEFAULT_EXPIRATION_TIME, DEFAULT_MAX_GAS_AMOUNT,
};
use starcoin_open_block::OpenedBlock;
use starcoin_state_api::ChainStateWriter;
use starcoin_statedb::ChainStateDB;
use starcoin_txpool_api::{TxPoolSyncService, TxnStatusFullEvent};
use std::time::Duration;
use std::{collections::HashMap, sync::Arc};
use stest::actix_export::time::delay_for;
use storage::BlockStore;
use types::{
account_address::{self, AccountAddress},
account_config,
transaction::{SignedUserTransaction, Transaction, TransactionPayload},
U256,
};
#[derive(Clone, Debug)]
struct MockNonceClient {
cache: Arc<RwLock<HashMap<AccountAddress, u64>>>,
}
impl Default for MockNonceClient {
fn default() -> Self {
Self {
cache: Arc::new(RwLock::new(HashMap::new())),
}
}
}
impl AccountSeqNumberClient for MockNonceClient {
fn account_seq_number(&self, address: &AccountAddress) -> u64 {
let cached = self.cache.read().get(address).cloned();
match cached {
Some(v) => v,
None => {
self.cache.write().insert(*address, 0);
0
}
}
}
}
#[stest::test]
async fn test_txn_expire() -> Result<()> {
let (txpool_service, _storage, config, _, _) = test_helper::start_txpool().await;
let txn = generate_txn(config, 0);
txpool_service.add_txns(vec![txn]).pop().unwrap()?;
let pendings = txpool_service.get_pending_txns(None, Some(0));
assert_eq!(pendings.len(), 1);
let pendings = txpool_service.get_pending_txns(None, Some(2));
assert_eq!(pendings.len(), 0);
Ok(())
}
#[stest::test]
async fn test_tx_pool() -> Result<()> {
let (txpool_service, _storage, config, _, _) = test_helper::start_txpool().await;
let (_private_key, public_key) = KeyGen::from_os_rng().generate_keypair();
let account_address = account_address::from_public_key(&public_key);
let txn = starcoin_executor::build_transfer_from_association(
account_address,
0,
10000,
1,
config.net(),
);
let txn = txn.as_signed_user_txn()?.clone();
let txn_hash = txn.id();
let mut result = txpool_service.add_txns(vec![txn]);
assert!(result.pop().unwrap().is_ok());
let mut pending_txns = txpool_service.get_pending_txns(Some(10), Some(0));
assert_eq!(pending_txns.pop().unwrap().id(), txn_hash);
let next_sequence_number =
txpool_service.next_sequence_number(account_config::association_address());
assert_eq!(next_sequence_number, Some(1));
Ok(())
}
#[stest::test]
async fn test_subscribe_txns() {
let (pool, ..) = test_helper::start_txpool().await;
let _ = pool.subscribe_txns();
}
#[stest::test(timeout = 200)]
async fn test_pool_pending() -> Result<()> {
let count = 5;
let (txpool_service, _storage, node_config, _, _) =
test_helper::start_txpool_with_size(count).await;
let mut txn_vec = vec![];
let mut index = 0;
loop {
txn_vec.push(generate_txn(node_config.clone(), index));
index += 1;
if index > count * 2 {
break;
}
}
let _ = txpool_service.add_txns(txn_vec.clone());
delay_for(Duration::from_millis(200)).await;
txn_vec.clear();
loop {
txn_vec.push(generate_txn(node_config.clone(), index));
index += 1;
if index > count * 4 {
break;
}
}
let _ = txpool_service.add_txns(txn_vec.clone());
let pending = txpool_service.get_pending_txns(Some(count), None);
assert!(!pending.is_empty());
delay_for(Duration::from_millis(200)).await;
Ok(())
}
#[stest::test]
async fn test_rollback() -> Result<()> {
let (pool, storage, config, _, _) = test_helper::start_txpool().await;
let start_timestamp = 0;
let retracted_txn = {
let (_private_key, public_key) = KeyGen::from_os_rng().generate_keypair();
let account_address = account_address::from_public_key(&public_key);
let txn = starcoin_executor::build_transfer_from_association(
account_address,
0,
10000,
start_timestamp + DEFAULT_EXPIRATION_TIME,
config.net(),
);
txn.as_signed_user_txn()?.clone()
};
let _ = pool.add_txns(vec![retracted_txn.clone()]);
let enacted_txn = {
let (_private_key, public_key) = KeyGen::from_os_rng().generate_keypair();
let account_address = account_address::from_public_key(&public_key);
let txn = starcoin_executor::build_transfer_from_association(
account_address,
0,
20000,
start_timestamp + DEFAULT_EXPIRATION_TIME,
config.net(),
);
txn.as_signed_user_txn()?.clone()
};
let pack_txn_to_block = |txn: SignedUserTransaction| {
let (_private_key, public_key) = KeyGen::from_os_rng().generate_keypair();
let account_address = account_address::from_public_key(&public_key);
let storage = storage.clone();
let main = storage.get_startup_info()?.unwrap().main;
let block_header = storage.get_block_header_by_hash(main)?.unwrap();
let mut open_block = OpenedBlock::new(
storage,
block_header,
u64::MAX,
account_address,
(start_timestamp + 60 * 10) * 1000,
vec![],
U256::from(1024u64),
config.net().genesis_config().consensus(),
)?;
let excluded_txns = open_block.push_txns(vec![txn])?;
assert_eq!(excluded_txns.discarded_txns.len(), 0);
assert_eq!(excluded_txns.untouched_txns.len(), 0);
let block_template = open_block.finalize()?;
let block = block_template.into_block(0, types::block::BlockHeaderExtra::new([0u8; 4]));
Ok::<_, anyhow::Error>(block)
};
let retracted_block = pack_txn_to_block(retracted_txn)?;
let enacted_block = pack_txn_to_block(enacted_txn)?;
// flush the state, to make txpool happy
{
let main = storage.get_startup_info()?.unwrap().main;
let block_header = storage.get_block_header_by_hash(main)?.unwrap();
let chain_state = ChainStateDB::new(storage.clone(), Some(block_header.state_root()));
let mut txns: Vec<_> = enacted_block
.transactions()
.iter()
.map(|t| Transaction::UserTransaction(t.clone()))
.collect();
let parent_block_header = storage
.get_block_header_by_hash(enacted_block.header().parent_hash())
.unwrap()
.unwrap();
txns.insert(
0,
Transaction::BlockMetadata(enacted_block.to_metadata(parent_block_header.gas_used())),
);
let root = starcoin_executor::block_execute(&chain_state, txns, u64::MAX)?.state_root;
assert_eq!(root, enacted_block.header().state_root());
chain_state.flush()?;
}
pool.chain_new_block(vec![enacted_block], vec![retracted_block])
.unwrap();
let txns = pool.get_pending_txns(Some(100), Some(start_timestamp + 60 * 10));
assert_eq!(txns.len(), 0);
Ok(())
}
#[stest::test(timeout = 480)]
async fn test_txpool_actor_service() {
let (_txpool_service, _storage, config, tx_pool_actor, _registry) =
test_helper::start_txpool().await;
let txn = generate_txn(config, 0);
tx_pool_actor
.notify(PeerTransactionsMessage::new(
PeerId::random(),
TransactionsMessage::new(vec![txn.clone()]),
))
.unwrap();
delay_for(Duration::from_millis(200)).await;
tx_pool_actor
.notify(Into::<TxnStatusFullEvent>::into(vec![(
txn.id(),
TxStatus::Added,
)]))
.unwrap();
delay_for(Duration::from_millis(300)).await;
}
fn generate_txn(config: Arc<NodeConfig>, seq: u64) -> SignedUserTransaction {
let (_private_key, public_key) = KeyGen::from_os_rng().generate_keypair();
let account_address = account_address::from_public_key(&public_key);
let txn = create_signed_txn_with_association_account(
TransactionPayload::ScriptFunction(encode_transfer_script_function(account_address, 10000)),
seq,
DEFAULT_MAX_GAS_AMOUNT,
1,
2,
config.net(),
);
txn
}
| 33.64751 | 100 | 0.64097 |
2306c9c93ec1f7254695257f999f67ed5f022c13 | 5,126 | use bytes::buf::BufMut;
use pin_project_lite::pin_project;
use std::{
io::Result,
pin::Pin,
task::{Context, Poll},
};
use tokio::io::{
AsyncBufRead as AsyncBufRead1, AsyncRead as AsyncRead1, AsyncWrite as AsyncWrite1, ReadBuf,
};
use tokio_02::{
io::{AsyncBufRead as AsyncBufRead02, AsyncRead as AsyncRead02, AsyncWrite as AsyncWrite02},
runtime::Handle,
};
use tokio_stream::Stream;
pin_project! {
/// `IoCompat` allows conversion between the 0.2 and 1.0 IO traits.
///
/// By wrapping any Tokio IO type in this compatibility wrapper, it becomes usable
/// with the traits of the other version of Tokio.
pub struct IoCompat<T> {
#[pin]
inner: T,
handle: Handle,
}
}
impl<T> IoCompat<T> {
pub fn new(inner: T) -> Self {
Self {
inner,
handle: crate::get_handle(),
}
}
}
impl<T: AsyncRead02> AsyncRead1 for IoCompat<T> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<Result<()>> {
let me = self.project();
let handle = me.handle;
let inner = me.inner;
let unfilled = buf.initialize_unfilled();
let poll = handle.enter(|| inner.poll_read(cx, unfilled));
if let Poll::Ready(Ok(num)) = &poll {
buf.advance(*num);
}
poll.map_ok(|_| ())
}
}
impl<T: AsyncRead1> AsyncRead02 for IoCompat<T> {
fn poll_read(self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) -> Poll<Result<usize>> {
let mut read_buf = ReadBuf::new(buf);
match self.project().inner.poll_read(cx, &mut read_buf) {
Poll::Ready(Ok(())) => Poll::Ready(Ok(read_buf.filled().len())),
Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
Poll::Pending => Poll::Pending,
}
}
fn poll_read_buf<B: BufMut>(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut B,
) -> Poll<Result<usize>>
where
Self: Sized,
{
let slice = buf.bytes_mut();
let ptr = slice.as_ptr() as *const u8;
let mut read_buf = ReadBuf::uninit(slice);
match self.project().inner.poll_read(cx, &mut read_buf) {
Poll::Ready(Ok(())) => {
assert!(std::ptr::eq(ptr, read_buf.filled().as_ptr()));
let len = read_buf.filled().len();
unsafe {
buf.advance_mut(len);
}
Poll::Ready(Ok(len))
}
Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
Poll::Pending => Poll::Pending,
}
}
}
impl<T: AsyncWrite02> AsyncWrite1 for IoCompat<T> {
fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize>> {
let me = self.project();
let handle = me.handle;
let inner = me.inner;
handle.enter(|| inner.poll_write(cx, buf))
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
let me = self.project();
let handle = me.handle;
let inner = me.inner;
handle.enter(|| inner.poll_flush(cx))
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
let me = self.project();
let handle = me.handle;
let inner = me.inner;
handle.enter(|| inner.poll_shutdown(cx))
}
}
impl<T: AsyncWrite1> AsyncWrite02 for IoCompat<T> {
fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize>> {
self.project().inner.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
self.project().inner.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
self.project().inner.poll_shutdown(cx)
}
}
impl<T: AsyncBufRead02> AsyncBufRead1 for IoCompat<T> {
fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<&[u8]>> {
let me = self.project();
let handle = me.handle;
let inner = me.inner;
handle.enter(|| inner.poll_fill_buf(cx))
}
fn consume(self: Pin<&mut Self>, amt: usize) {
let me = self.project();
let handle = me.handle;
let inner = me.inner;
handle.enter(|| inner.consume(amt))
}
}
impl<T: AsyncBufRead1> AsyncBufRead02 for IoCompat<T> {
fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<&[u8]>> {
self.project().inner.poll_fill_buf(cx)
}
fn consume(self: Pin<&mut Self>, amt: usize) {
self.project().inner.consume(amt)
}
}
impl<T: Stream> Stream for IoCompat<T> {
type Item = T::Item;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T::Item>> {
let me = self.project();
let handle = me.handle;
let inner = me.inner;
handle.enter(|| inner.poll_next(cx))
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
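// A minimal usage sketch (hypothetical async fn; assumes a Tokio 0.2 runtime
// handle has been made available so `crate::get_handle()` succeeds inside
// `IoCompat::new`): wrapping a Tokio 1.0 `TcpStream` so that Tokio 0.2 code
// can drive it through the 0.2 `AsyncWrite` extension methods.
#[allow(dead_code)]
async fn compat_write(stream: tokio::net::TcpStream) -> Result<()> {
    use tokio_02::io::AsyncWriteExt as _;
    let mut wrapped = IoCompat::new(stream);
    wrapped.write_all(b"ping").await?;
    wrapped.shutdown().await
}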
| 29.45977 | 98 | 0.557745 |
d6f4ffcd3718769d5dde9df0462c2d10e9f44a70 | 406 | // Copyright Kani Contributors
// SPDX-License-Identifier: Apache-2.0 OR MIT
// kani-verify-fail
#![feature(core_intrinsics)]
use std::intrinsics;
// The code below attempts to zero-initialize type `&i32`, causing the intrinsic
// `assert_zero_valid` to generate a panic during compilation.
#[kani::proof]
fn main() {
let _var: () = unsafe {
intrinsics::assert_zero_valid::<&i32>();
};
}
| 25.375 | 80 | 0.692118 |
fc613409194062a9d98aa99ad042bfda2500d7df | 3,099 | // Copyright 2018-2021 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use crate::actix_web::HttpResponse;
#[cfg(feature = "authorization")]
use crate::biome::profile::rest_api::BIOME_PROFILE_READ_PERMISSION;
use crate::biome::profile::store::{UserProfileStore, UserProfileStoreError};
use crate::futures::IntoFuture;
use crate::rest_api::{
ErrorResponse, HandlerFunction, Method, ProtocolVersionRangeGuard, Resource,
SPLINTER_PROTOCOL_VERSION,
};
const BIOME_FETCH_PROFILES_PROTOCOL_MIN: u32 = 1;
pub fn make_profiles_routes(profile_store: Arc<dyn UserProfileStore>) -> Resource {
let resource =
Resource::build("/biome/profiles/{id}").add_request_guard(ProtocolVersionRangeGuard::new(
BIOME_FETCH_PROFILES_PROTOCOL_MIN,
SPLINTER_PROTOCOL_VERSION,
));
#[cfg(feature = "authorization")]
{
resource.add_method(
Method::Get,
BIOME_PROFILE_READ_PERMISSION,
add_fetch_profile_method(profile_store.clone()),
)
}
#[cfg(not(feature = "authorization"))]
{
resource.add_method(Method::Get, add_fetch_profile_method(profile_store.clone()))
}
}
fn add_fetch_profile_method(profile_store: Arc<dyn UserProfileStore>) -> HandlerFunction {
Box::new(move |request, _| {
let profile_store = profile_store.clone();
let user_id = if let Some(t) = request.match_info().get("id") {
t.to_string()
} else {
return Box::new(
HttpResponse::BadRequest()
.json(ErrorResponse::bad_request(
&"Failed to process request: no user id".to_string(),
))
.into_future(),
);
};
Box::new(match profile_store.get_profile(&user_id) {
Ok(profile) => HttpResponse::Ok().json(profile).into_future(),
Err(err) => {
debug!("Failed to get profile from the database {}", err);
match err {
UserProfileStoreError::InvalidArgument(_) => HttpResponse::NotFound()
.json(ErrorResponse::not_found(&format!(
"User ID not found: {}",
&user_id
)))
.into_future(),
_ => HttpResponse::InternalServerError()
.json(ErrorResponse::internal_error())
.into_future(),
}
}
})
})
}
| 37.792683 | 97 | 0.60342 |
f560889218421b7c52cb7c6716ccbbe6034791fb | 124 | use crate::fetch_info;
use warp::reply::{json, Json};
pub fn invoke() -> Json {
let info = fetch_info();
json(&info)
}
| 15.5 | 30 | 0.629032 |
08ab9793182ca74685ffc2a76268713a685f5c7c | 9,577 | extern crate diff;
extern crate lalrpop_util;
use std::cell::RefCell;
use lalrpop_util::ParseError;
use util::tok::Tok;
/// demonstration from the Greene text; one of the simplest grammars
/// that still ensures we get the parse tree correct
mod sub;
/// test something other than test-all
mod sub_ascent;
mod sub_table;
/// more interesting demonstration of parsing full expressions
mod expr;
/// more interesting demonstration of parsing full expressions, using LALR not LR
mod expr_lalr;
/// more interesting demonstration of parsing full expressions, using intern tok
mod expr_intern_tok;
/// test that passes in lifetime/type/formal parameters and threads
/// them through, building an AST from the result
mod expr_arena;
/// definitions of the AST
mod expr_arena_ast;
/// expr defined with a generic type `F`
mod expr_generic;
mod generics_issue_104;
mod generics_issue_104_lib;
/// test of inlining
mod inline;
/// test that exercises internal token generation, as well as locations and spans
mod intern_tok;
/// test that exercises using a lifetime parameter in the token type
mod lifetime_tok;
/// library for lifetime_tok test
mod lifetime_tok_lib;
/// test that exercises locations and spans
mod loc;
/// regression test for location issue #90
mod loc_issue_90;
mod loc_issue_90_lib;
/// test that uses `super` in paths in various places
mod use_super;
/// test that exercises locations and spans
mod error;
/// Test error recovery
mod error_recovery;
/// test for inlining expansion issue #55
mod issue_55;
/// test for unit action code
mod unit;
mod util;
/// This constant is here so that some of the generator parsers can
/// refer to it in order to test `super::` handling in action code.
const ZERO: i32 = 0;
#[test]
fn expr_test1() {
util::test(|v| expr::parse_Expr(1, v), "22 - 3", 22 - 3);
}
#[test]
fn expr_test2() {
util::test(|v| expr::parse_Expr(1, v), "22 - (3 + 5)", 22 - (3 + 5));
}
#[test]
fn expr_test3() {
util::test(|v| expr::parse_Expr(1, v), "22 - (3 - 5) - 13", 22 - (3 - 5) - 13);
}
#[test]
fn expr_test4() {
util::test(|v| expr::parse_Expr(1, v), "22 * 3 - 6", 22 * 3 - 6);
}
#[test]
fn expr_test5() {
util::test(|v| expr::parse_Expr(11, v), "22 * 3 - 6", 22*11 * 3*11 - 6*11);
}
#[test]
fn expr_intern_tok_test1() {
assert_eq!(expr_intern_tok::parse_Expr(1, "22 - 3").unwrap(), 22 - 3);
}
#[test]
fn expr_intern_tok_test2() {
assert_eq!(expr_intern_tok::parse_Expr(1, "22 - (3 - 5) - 13").unwrap(), 22 - (3 - 5) - 13);
}
#[test]
fn expr_intern_tok_test_err() {
match expr_intern_tok::parse_Expr(1, "22 - (3 - 5) - X") {
// 0123456789012345
Err(ParseError::InvalidToken { location }) => {
assert_eq!(location, 15);
}
r => {
panic!("invalid result {:?}", r);
}
}
}
#[test]
fn expr_lifetime_tok1() {
// the problem here was that we were improperly pruning the 'input from the
let tokens = lifetime_tok_lib::lt_tokenize("x");
let tree = lifetime_tok::parse_Expr(tokens).unwrap();
assert_eq!(tree, vec!["x"]);
}
#[test]
fn expr_lalr_test1() {
util::test(|v| expr_lalr::parse_Expr(1, v), "22 - 3", 22 - 3);
}
#[test]
fn expr_lalr_test2() {
util::test(|v| expr_lalr::parse_Expr(1, v), "22 - (3 + 5)", 22 - (3 + 5));
}
#[test]
fn expr_lalr_test3() {
util::test(|v| expr_lalr::parse_Expr(1, v), "22 - (3 - 5) - 13", 22 - (3 - 5) - 13);
}
#[test]
fn expr_lalr_test4() {
util::test(|v| expr_lalr::parse_Expr(1, v), "22 * 3 - 6", 22 * 3 - 6);
}
#[test]
fn expr_lalr_test5() {
util::test(|v| expr_lalr::parse_Expr(11, v), "22 * 3 - 6", 22*11 * 3*11 - 6*11);
}
#[test]
fn inline_test1() {
assert_eq!(inline::parse_E("& L L").unwrap(), "& L L");
}
#[test]
fn sub_test1() {
util::test(sub::parse_S, "22 - 3", 22 - 3);
}
#[test]
fn sub_test2() {
util::test(sub::parse_S, "22 - (3 - 5)", 22 - (3 - 5));
}
#[test]
fn sub_test3() {
util::test(sub::parse_S, "22 - (3 - 5) - 13", 22 - (3 - 5) - 13);
}
#[test]
fn sub_ascent_test1() {
util::test(sub_ascent::parse_S, "22 - 3", 22 - 3);
}
#[test]
fn sub_table_test1() {
util::test(sub_table::parse_S, "22 - 3", 22 - 3);
}
#[test]
fn expr_arena_test1() {
use expr_arena_ast::*;
let arena = Arena::new();
let expected =
arena.alloc(Node::Binary(Op::Sub,
arena.alloc(Node::Binary(Op::Mul,
arena.alloc(Node::Value(22)),
arena.alloc(Node::Value(3)))),
arena.alloc(Node::Value(6))));
util::test_loc(|v| expr_arena::parse_Expr(&arena, v), "22 * 3 - 6", expected);
}
#[test]
fn expr_arena_test2() {
use expr_arena_ast::*;
let arena = Arena::new();
let expected =
arena.alloc(Node::Reduce(Op::Mul,
vec![arena.alloc(Node::Value(22)),
arena.alloc(Node::Value(3)),
                                      arena.alloc(Node::Value(6))]));
util::test_loc(|v| expr_arena::parse_Expr(&arena, v), "*(22, 3, 6)", expected);
util::test_loc(|v| expr_arena::parse_Expr(&arena, v), "*(22, 3, 6,)", expected);
}
#[test]
fn expr_arena_test3() {
use expr_arena_ast::*;
let arena = Arena::new();
let expected =
arena.alloc(
Node::Binary(Op::Mul,
arena.alloc(Node::Value(22)),
arena.alloc(Node::Paren(
arena.alloc(
Node::Binary(Op::Sub,
arena.alloc(Node::Value(3)),
arena.alloc(Node::Value(6))))))));
util::test_loc(|v| expr_arena::parse_Expr(&arena, v), "22 * (3 - 6)", expected);
}
#[test]
fn expr_generic_test1() {
let expected: i32 = 22 * 3 - 6;
let actual = expr_generic::parse_Expr::<i32>("22 * 3 - 6").unwrap();
assert_eq!(expected, actual);
}
#[test]
fn intern_tok_test1() {
let expected = vec![(0, 0), // spans of `+` characters, measured in bytes
(2, 3),
(4, 5),
(8, 9)];
let actual = intern_tok::parse_Items("--+-+---+").unwrap();
// 012345678
assert_eq!(actual, expected);
}
#[test]
fn loc_test1() {
let expected = vec![(0, 0), // note that tok.rs generates odd spans, measured in 2*chars
(4, 5),
(8, 9),
(16, 17)];
util::test_loc(|v| loc::parse_Items(v), "--+-+---+", expected);
// 000001111
// 024680246
}
#[test]
fn loc_test2() {
util::test_loc(|v| loc::parse_Items(v), "+", vec![(0, 0),
(0, 1)]);
}
#[test]
fn loc_empty() {
// test what happens when `@L` and `@R` are invoked on an empty input
util::test_loc(|v| loc::parse_Items(v), "", vec![(0, 0)]);
}
#[test]
fn use_super_test1() {
util::test(|v| use_super::parse_S(v), "()", 0);
}
#[test]
fn error_test1() {
use lalrpop_util::ParseError;
match util::test_err_gen(error::parse_Items, "---+") {
Err(ParseError::User { error: '+' }) => { /* OK! */ }
r => {
panic!("unexpected response from parser: {:?}", r);
}
}
}
#[test]
fn error_recovery_eof() {
let errors = RefCell::new(vec![]);
util::test(|v| error_recovery::__parse_table::parse_Item(&errors, v), "-", '!'.to_string());
assert_eq!(errors.borrow().len(), 1);
assert_eq!(errors.borrow()[0], ParseError::UnrecognizedToken {
token: None,
expected: vec![],
});
}
#[test]
fn error_recovery_extra_token() {
let errors = RefCell::new(vec![]);
util::test(|v| error_recovery::__parse_table::parse_Item(&errors, v), "(++)", "()".to_string());
assert_eq!(errors.borrow().len(), 1);
assert_eq!(errors.borrow()[0], ParseError::UnrecognizedToken {
token: Some(((), Tok::Plus,())),
expected: vec![],
});
}
#[test]
fn error_recovery_multiple_extra_tokens() {
let errors = RefCell::new(vec![]);
util::test(|v| error_recovery::__parse_table::parse_Item(&errors, v), "(+++)", "()".to_string());
assert_eq!(errors.borrow().len(), 1);
assert_eq!(errors.borrow()[0], ParseError::UnrecognizedToken {
token: Some(((), Tok::Plus,())),
expected: vec![],
});
}
#[test]
fn issue_55_test1() {
// Issue 55 caused us to either accept NO assoc types or assoc
// types both before and after, so check that we can parse with
// assoc types on either side.
let (a, b, c) = issue_55::parse_E("{ type X; type Y; enum Z { } }").unwrap();
assert_eq!(a, vec!["X", "Y"]);
assert_eq!(b, "Z");
assert!(c.is_empty());
let (a, b, c) = issue_55::parse_E("{ enum Z { } type X; type Y; }").unwrap();
assert!(a.is_empty());
assert_eq!(b, "Z");
assert_eq!(c, vec!["X", "Y"]);
}
#[test]
fn unit_test1() {
assert!(unit::parse_Expr("3 + 4 * 5").is_ok());
assert!(unit::parse_Expr("3 + +").is_err());
}
#[test]
fn generics_issue_104_test1() {
// The real thing `generics_issue_104` is testing is that the code
// *compiles*, even though the type parameter `T` does not appear
// in any of the arguments.
assert!(generics_issue_104::parse_Schema::<()>("grammar { foo }").is_ok());
}
| 26.901685 | 101 | 0.556646 |
0eff093beb4bdd639d7e79638bac733c5ebb87d2 | 14,424 | //! Defines `Body`: a lowered representation of bodies of functions, statics and
//! consts.
mod lower;
#[cfg(test)]
mod tests;
pub mod scope;
use std::{mem, ops::Index, sync::Arc};
use base_db::CrateId;
use cfg::{CfgExpr, CfgOptions};
use drop_bomb::DropBomb;
use either::Either;
use hir_expand::{
ast_id_map::AstIdMap, hygiene::Hygiene, AstId, ExpandResult, HirFileId, InFile, MacroDefId,
};
use la_arena::{Arena, ArenaMap};
use limit::Limit;
use profile::Count;
use rustc_hash::FxHashMap;
use syntax::{ast, AstNode, AstPtr, SyntaxNodePtr};
use crate::{
attr::{Attrs, RawAttrs},
db::DefDatabase,
expr::{Expr, ExprId, Label, LabelId, Pat, PatId},
item_scope::BuiltinShadowMode,
nameres::DefMap,
path::{ModPath, Path},
src::HasSource,
AsMacroCall, BlockId, DefWithBodyId, HasModule, LocalModuleId, Lookup, ModuleId,
UnresolvedMacro,
};
pub use lower::LowerCtx;
/// A subset of Expander that only deals with cfg attributes. We only need it to
/// avoid cyclic queries in crate def map during enum processing.
#[derive(Debug)]
pub(crate) struct CfgExpander {
cfg_options: CfgOptions,
hygiene: Hygiene,
krate: CrateId,
}
#[derive(Debug)]
pub struct Expander {
cfg_expander: CfgExpander,
def_map: Arc<DefMap>,
current_file_id: HirFileId,
ast_id_map: Arc<AstIdMap>,
module: LocalModuleId,
recursion_limit: usize,
}
#[cfg(test)]
const EXPANSION_RECURSION_LIMIT: Limit = Limit::new(32);
#[cfg(not(test))]
const EXPANSION_RECURSION_LIMIT: Limit = Limit::new(128);
impl CfgExpander {
pub(crate) fn new(
db: &dyn DefDatabase,
current_file_id: HirFileId,
krate: CrateId,
) -> CfgExpander {
let hygiene = Hygiene::new(db.upcast(), current_file_id);
let cfg_options = db.crate_graph()[krate].cfg_options.clone();
CfgExpander { cfg_options, hygiene, krate }
}
pub(crate) fn parse_attrs(&self, db: &dyn DefDatabase, owner: &dyn ast::HasAttrs) -> Attrs {
RawAttrs::new(db, owner, &self.hygiene).filter(db, self.krate)
}
pub(crate) fn is_cfg_enabled(&self, db: &dyn DefDatabase, owner: &dyn ast::HasAttrs) -> bool {
let attrs = self.parse_attrs(db, owner);
attrs.is_cfg_enabled(&self.cfg_options)
}
}
impl Expander {
pub fn new(db: &dyn DefDatabase, current_file_id: HirFileId, module: ModuleId) -> Expander {
let cfg_expander = CfgExpander::new(db, current_file_id, module.krate);
let def_map = module.def_map(db);
let ast_id_map = db.ast_id_map(current_file_id);
Expander {
cfg_expander,
def_map,
current_file_id,
ast_id_map,
module: module.local_id,
recursion_limit: 0,
}
}
pub fn enter_expand<T: ast::AstNode>(
&mut self,
db: &dyn DefDatabase,
macro_call: ast::MacroCall,
) -> Result<ExpandResult<Option<(Mark, T)>>, UnresolvedMacro> {
if EXPANSION_RECURSION_LIMIT.check(self.recursion_limit + 1).is_err() {
cov_mark::hit!(your_stack_belongs_to_me);
return Ok(ExpandResult::str_err(
"reached recursion limit during macro expansion".into(),
));
}
        let macro_call = InFile::new(self.current_file_id, &macro_call);
let resolver =
|path: ModPath| -> Option<MacroDefId> { self.resolve_path_as_macro(db, &path) };
let mut err = None;
let call_id =
macro_call.as_call_id_with_errors(db, self.def_map.krate(), resolver, &mut |e| {
err.get_or_insert(e);
})?;
let call_id = match call_id {
Ok(it) => it,
Err(_) => {
return Ok(ExpandResult { value: None, err });
}
};
if err.is_none() {
err = db.macro_expand_error(call_id);
}
let file_id = call_id.as_file();
let raw_node = match db.parse_or_expand(file_id) {
Some(it) => it,
None => {
// Only `None` if the macro expansion produced no usable AST.
if err.is_none() {
tracing::warn!("no error despite `parse_or_expand` failing");
}
return Ok(ExpandResult::only_err(err.unwrap_or_else(|| {
mbe::ExpandError::Other("failed to parse macro invocation".into())
})));
}
};
let node = match T::cast(raw_node) {
Some(it) => it,
None => {
// This can happen without being an error, so only forward previous errors.
return Ok(ExpandResult { value: None, err });
}
};
tracing::debug!("macro expansion {:#?}", node.syntax());
self.recursion_limit += 1;
let mark = Mark {
file_id: self.current_file_id,
ast_id_map: mem::take(&mut self.ast_id_map),
bomb: DropBomb::new("expansion mark dropped"),
};
self.cfg_expander.hygiene = Hygiene::new(db.upcast(), file_id);
self.current_file_id = file_id;
self.ast_id_map = db.ast_id_map(file_id);
Ok(ExpandResult { value: Some((mark, node)), err })
}
pub fn exit(&mut self, db: &dyn DefDatabase, mut mark: Mark) {
self.cfg_expander.hygiene = Hygiene::new(db.upcast(), mark.file_id);
self.current_file_id = mark.file_id;
self.ast_id_map = mem::take(&mut mark.ast_id_map);
self.recursion_limit -= 1;
mark.bomb.defuse();
}
pub(crate) fn to_source<T>(&self, value: T) -> InFile<T> {
InFile { file_id: self.current_file_id, value }
}
pub(crate) fn parse_attrs(&self, db: &dyn DefDatabase, owner: &dyn ast::HasAttrs) -> Attrs {
self.cfg_expander.parse_attrs(db, owner)
}
pub(crate) fn cfg_options(&self) -> &CfgOptions {
&self.cfg_expander.cfg_options
}
pub fn current_file_id(&self) -> HirFileId {
self.current_file_id
}
fn parse_path(&mut self, db: &dyn DefDatabase, path: ast::Path) -> Option<Path> {
let ctx = LowerCtx::with_hygiene(db, &self.cfg_expander.hygiene);
Path::from_src(path, &ctx)
}
fn resolve_path_as_macro(&self, db: &dyn DefDatabase, path: &ModPath) -> Option<MacroDefId> {
self.def_map.resolve_path(db, self.module, path, BuiltinShadowMode::Other).0.take_macros()
}
fn ast_id<N: AstNode>(&self, item: &N) -> AstId<N> {
let file_local_id = self.ast_id_map.ast_id(item);
AstId::new(self.current_file_id, file_local_id)
}
}
#[derive(Debug)]
pub struct Mark {
file_id: HirFileId,
ast_id_map: Arc<AstIdMap>,
bomb: DropBomb,
}
/// The body of an item (function, const etc.).
#[derive(Debug, Eq, PartialEq)]
pub struct Body {
pub exprs: Arena<Expr>,
pub pats: Arena<Pat>,
pub labels: Arena<Label>,
/// The patterns for the function's parameters. While the parameter types are
/// part of the function signature, the patterns are not (they don't change
/// the external type of the function).
///
/// If this `Body` is for the body of a constant, this will just be
/// empty.
pub params: Vec<PatId>,
/// The `ExprId` of the actual body expression.
pub body_expr: ExprId,
/// Block expressions in this body that may contain inner items.
block_scopes: Vec<BlockId>,
_c: Count<Self>,
}
pub type ExprPtr = AstPtr<ast::Expr>;
pub type ExprSource = InFile<ExprPtr>;
pub type PatPtr = Either<AstPtr<ast::Pat>, AstPtr<ast::SelfParam>>;
pub type PatSource = InFile<PatPtr>;
pub type LabelPtr = AstPtr<ast::Label>;
pub type LabelSource = InFile<LabelPtr>;
/// An item body together with the mapping from syntax nodes to HIR expression
/// IDs. This is needed to go from e.g. a position in a file to the HIR
/// expression containing it; but for type inference etc., we want to operate on
/// a structure that is agnostic to the actual positions of expressions in the
/// file, so that we don't recompute types whenever some whitespace is typed.
///
/// One complication here is that, due to macro expansion, a single `Body` might
/// be spread across several files. So, for each ExprId and PatId, we record
/// both the HirFileId and the position inside the file. However, we only store
/// AST -> ExprId mapping for non-macro files, as it is not clear how to handle
/// this properly for macros.
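///
/// For example (a sketch; `file_id`, `ast_expr`, and the surrounding analysis
/// are assumed to exist elsewhere):
///
/// ```ignore
/// // Syntax -> HIR: look up the id of an AST expression.
/// let expr_id = source_map.node_expr(InFile::new(file_id, &ast_expr)).unwrap();
/// // ... run position-agnostic analyses keyed by `expr_id` ...
/// // HIR -> syntax: map the id back to a concrete file location.
/// let src = source_map.expr_syntax(expr_id).unwrap();
/// ```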
#[derive(Default, Debug, Eq, PartialEq)]
pub struct BodySourceMap {
expr_map: FxHashMap<ExprSource, ExprId>,
expr_map_back: ArenaMap<ExprId, Result<ExprSource, SyntheticSyntax>>,
pat_map: FxHashMap<PatSource, PatId>,
pat_map_back: ArenaMap<PatId, Result<PatSource, SyntheticSyntax>>,
label_map: FxHashMap<LabelSource, LabelId>,
label_map_back: ArenaMap<LabelId, LabelSource>,
/// We don't create explicit nodes for record fields (`S { record_field: 92 }`).
/// Instead, we use id of expression (`92`) to identify the field.
field_map: FxHashMap<InFile<AstPtr<ast::RecordExprField>>, ExprId>,
field_map_back: FxHashMap<ExprId, InFile<AstPtr<ast::RecordExprField>>>,
expansions: FxHashMap<InFile<AstPtr<ast::MacroCall>>, HirFileId>,
/// Diagnostics accumulated during body lowering. These contain `AstPtr`s and so are stored in
/// the source map (since they're just as volatile).
diagnostics: Vec<BodyDiagnostic>,
}
#[derive(Default, Debug, Eq, PartialEq, Clone, Copy)]
pub struct SyntheticSyntax;
#[derive(Debug, Eq, PartialEq)]
pub enum BodyDiagnostic {
InactiveCode { node: InFile<SyntaxNodePtr>, cfg: CfgExpr, opts: CfgOptions },
MacroError { node: InFile<AstPtr<ast::MacroCall>>, message: String },
UnresolvedProcMacro { node: InFile<AstPtr<ast::MacroCall>> },
UnresolvedMacroCall { node: InFile<AstPtr<ast::MacroCall>>, path: ModPath },
}
impl Body {
pub(crate) fn body_with_source_map_query(
db: &dyn DefDatabase,
def: DefWithBodyId,
) -> (Arc<Body>, Arc<BodySourceMap>) {
let _p = profile::span("body_with_source_map_query");
let mut params = None;
let (file_id, module, body) = match def {
DefWithBodyId::FunctionId(f) => {
let f = f.lookup(db);
let src = f.source(db);
params = src.value.param_list();
(src.file_id, f.module(db), src.value.body().map(ast::Expr::from))
}
DefWithBodyId::ConstId(c) => {
let c = c.lookup(db);
let src = c.source(db);
(src.file_id, c.module(db), src.value.body())
}
DefWithBodyId::StaticId(s) => {
let s = s.lookup(db);
let src = s.source(db);
(src.file_id, s.module(db), src.value.body())
}
};
let expander = Expander::new(db, file_id, module);
let (mut body, source_map) = Body::new(db, expander, params, body);
body.shrink_to_fit();
(Arc::new(body), Arc::new(source_map))
}
pub(crate) fn body_query(db: &dyn DefDatabase, def: DefWithBodyId) -> Arc<Body> {
db.body_with_source_map(def).0
}
/// Returns an iterator over all block expressions in this body that define inner items.
pub fn blocks<'a>(
&'a self,
db: &'a dyn DefDatabase,
) -> impl Iterator<Item = (BlockId, Arc<DefMap>)> + '_ {
self.block_scopes
.iter()
.map(move |block| (*block, db.block_def_map(*block).expect("block ID without DefMap")))
}
fn new(
db: &dyn DefDatabase,
expander: Expander,
params: Option<ast::ParamList>,
body: Option<ast::Expr>,
) -> (Body, BodySourceMap) {
lower::lower(db, expander, params, body)
}
fn shrink_to_fit(&mut self) {
let Self { _c: _, body_expr: _, block_scopes, exprs, labels, params, pats } = self;
block_scopes.shrink_to_fit();
exprs.shrink_to_fit();
labels.shrink_to_fit();
params.shrink_to_fit();
pats.shrink_to_fit();
}
}
impl Index<ExprId> for Body {
type Output = Expr;
fn index(&self, expr: ExprId) -> &Expr {
&self.exprs[expr]
}
}
impl Index<PatId> for Body {
type Output = Pat;
fn index(&self, pat: PatId) -> &Pat {
&self.pats[pat]
}
}
impl Index<LabelId> for Body {
type Output = Label;
fn index(&self, label: LabelId) -> &Label {
&self.labels[label]
}
}
// FIXME: Change `node_` prefix to something more reasonable.
// Perhaps `expr_syntax` and `expr_id`?
impl BodySourceMap {
pub fn expr_syntax(&self, expr: ExprId) -> Result<ExprSource, SyntheticSyntax> {
self.expr_map_back[expr].clone()
}
pub fn node_expr(&self, node: InFile<&ast::Expr>) -> Option<ExprId> {
let src = node.map(|it| AstPtr::new(it));
self.expr_map.get(&src).cloned()
}
pub fn node_macro_file(&self, node: InFile<&ast::MacroCall>) -> Option<HirFileId> {
let src = node.map(|it| AstPtr::new(it));
self.expansions.get(&src).cloned()
}
pub fn pat_syntax(&self, pat: PatId) -> Result<PatSource, SyntheticSyntax> {
self.pat_map_back[pat].clone()
}
pub fn node_pat(&self, node: InFile<&ast::Pat>) -> Option<PatId> {
let src = node.map(|it| Either::Left(AstPtr::new(it)));
self.pat_map.get(&src).cloned()
}
pub fn node_self_param(&self, node: InFile<&ast::SelfParam>) -> Option<PatId> {
let src = node.map(|it| Either::Right(AstPtr::new(it)));
self.pat_map.get(&src).cloned()
}
pub fn label_syntax(&self, label: LabelId) -> LabelSource {
self.label_map_back[label].clone()
}
pub fn node_label(&self, node: InFile<&ast::Label>) -> Option<LabelId> {
let src = node.map(|it| AstPtr::new(it));
self.label_map.get(&src).cloned()
}
pub fn field_syntax(&self, expr: ExprId) -> InFile<AstPtr<ast::RecordExprField>> {
self.field_map_back[&expr].clone()
}
pub fn node_field(&self, node: InFile<&ast::RecordExprField>) -> Option<ExprId> {
let src = node.map(|it| AstPtr::new(it));
self.field_map.get(&src).cloned()
}
/// Get a reference to the body source map's diagnostics.
pub fn diagnostics(&self) -> &[BodyDiagnostic] {
&self.diagnostics
}
}
| 33.466357 | 99 | 0.618067 |
ac06bc8f60ff725a5ad0719ec523f367ec0b10a9 | 30,764 | // Copyright (c) Microsoft. All rights reserved.
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use base64;
use bytes::Bytes;
use chrono::{DateTime, Utc};
use futures::future::Either;
use futures::{future, Future};
use hyper::client::Service;
use hyper::{Error as HyperError, Method, Request, Response, StatusCode};
use percent_encoding::{percent_encode, PATH_SEGMENT_ENCODE_SET};
use serde_json;
use tokio::prelude::*;
use tokio::timer::Interval;
use url::form_urlencoded::Serializer as UrlSerializer;
use edgelet_core::crypto::{Activate, KeyIdentity, KeyStore, Sign, Signature, SignatureAlgorithm};
use edgelet_http::client::{Client, TokenSource};
use edgelet_http::ErrorKind as HttpErrorKind;
use error::{Error, ErrorKind};
use model::{
DeviceRegistration, DeviceRegistrationResult, RegistrationOperationStatus, TpmAttestation,
TpmRegistrationResult,
};
/// This is the interval at which to poll DPS for registration assignment status
const DPS_ASSIGNMENT_RETRY_INTERVAL_SECS: u64 = 10;
/// This is the number of seconds to wait for DPS to complete assignment to a hub
const DPS_ASSIGNMENT_TIMEOUT_SECS: u64 = 120;
define_encode_set! {
pub IOTHUB_ENCODE_SET = [PATH_SEGMENT_ENCODE_SET] | { '=' }
}
#[derive(Clone)]
pub struct DpsTokenSource<K>
where
K: Sign + Clone,
{
scope_id: String,
registration_id: String,
key: K,
}
impl<K> DpsTokenSource<K>
where
K: Sign + Clone,
{
fn new(scope_id: String, registration_id: String, key: K) -> Self {
DpsTokenSource {
scope_id,
registration_id,
key,
}
}
}
impl<K> TokenSource for DpsTokenSource<K>
where
K: Sign + Clone,
{
type Error = Error;
fn get(&self, expiry: &DateTime<Utc>) -> Result<String, Error> {
let expiry = expiry.timestamp().to_string();
let audience = format!("{}/registrations/{}", self.scope_id, self.registration_id);
let resource_uri =
percent_encode(audience.to_lowercase().as_bytes(), IOTHUB_ENCODE_SET).to_string();
let sig_data = format!("{}\n{}", &resource_uri, expiry);
let signature = self
.key
.sign(SignatureAlgorithm::HMACSHA256, sig_data.as_bytes())
.map(|s| base64::encode(s.as_bytes()))
.map_err(Error::from)?;
let token = UrlSerializer::new(format!("sr={}", resource_uri))
.append_pair("sig", &signature)
.append_pair("se", &expiry)
.append_pair("skn", "registration")
.finish();
Ok(token)
}
}
pub struct DpsClient<S, K, A>
where
S: 'static + Service<Error = HyperError, Request = Request, Response = Response>,
K: 'static + Sign + Clone,
A: 'static + KeyStore<Key = K> + Activate<Key = K> + Clone,
{
client: Arc<RwLock<Client<S, DpsTokenSource<K>>>>,
scope_id: String,
registration_id: String,
tpm_ek: Bytes,
tpm_srk: Bytes,
key_store: A,
}
impl<S, K, A> DpsClient<S, K, A>
where
S: 'static + Service<Error = HyperError, Request = Request, Response = Response>,
K: 'static + Sign + Clone,
A: 'static + KeyStore<Key = K> + Activate<Key = K> + Clone,
{
pub fn new(
client: Client<S, DpsTokenSource<K>>,
scope_id: String,
registration_id: String,
tpm_ek: Bytes,
tpm_srk: Bytes,
key_store: A,
) -> Result<DpsClient<S, K, A>, Error> {
Ok(DpsClient {
client: Arc::new(RwLock::new(client)),
scope_id,
registration_id,
tpm_ek,
tpm_srk,
key_store,
})
}
fn get_tpm_challenge_key(body: &str, key_store: &mut A) -> Result<K, Error> {
serde_json::from_str(body).map_err(Error::from).and_then(
|tpm_challenge: TpmRegistrationResult| {
tpm_challenge
.authentication_key()
.ok_or_else(|| Error::from(ErrorKind::InvalidTpmToken))
.and_then(|key_str| base64::decode(key_str).map_err(Error::from))
.and_then(|key_bytes| {
debug!("Storing authentication key");
key_store
.activate_identity_key(
KeyIdentity::Device,
"primary".to_string(),
key_bytes,
)
.map_err(Error::from)
})
.and_then(|_| {
key_store
.get(&KeyIdentity::Device, "primary")
.map_err(Error::from)
})
},
)
}
fn get_operation_id(
client: &Arc<RwLock<Client<S, DpsTokenSource<K>>>>,
scope_id: &str,
registration_id: &str,
registration: &DeviceRegistration,
key: K,
) -> Box<Future<Item = Option<RegistrationOperationStatus>, Error = Error>> {
let token_source =
DpsTokenSource::new(scope_id.to_string(), registration_id.to_string(), key);
debug!(
"Registration PUT, scope_id, \"{}\", registration_id \"{}\"",
scope_id, registration_id
);
let f = client
.write()
.expect("RwLock write failure")
.clone()
.with_token_source(token_source)
.request::<DeviceRegistration, RegistrationOperationStatus>(
Method::Put,
&format!("{}/registrations/{}/register", scope_id, registration_id),
None,
Some(registration.clone()),
false,
)
.map_err(Error::from);
Box::new(f)
}
fn get_operation_status(
client: &Arc<RwLock<Client<S, DpsTokenSource<K>>>>,
scope_id: &str,
registration_id: &str,
operation_id: &str,
key: K,
) -> Box<Future<Item = Option<DeviceRegistrationResult>, Error = Error>> {
let token_source =
DpsTokenSource::new(scope_id.to_string(), registration_id.to_string(), key);
let request = client.read().expect("RwLock read failure")
.clone()
.with_token_source(token_source)
.request::<(), RegistrationOperationStatus>(
Method::Get,
&format!(
"{}/registrations/{}/operations/{}",
scope_id, registration_id, operation_id
),
None,
None,
false,
)
.map_err(Error::from)
.map(
|operation_status: Option<RegistrationOperationStatus>| ->
Option<DeviceRegistrationResult> {
let status: Option<DeviceRegistrationResult> = operation_status.map_or_else(
|| None,
|op| {
op.registration_state().map_or_else(|| None, |r| {
Some(r.clone())
})
},
);
status
},
);
Box::new(request)
}
// Return Ok(true) if we get no result, or the result is not complete.
// The result is complete if we receive a status of anything other than "assigning"
fn is_skippable_result(
registration_result: &Option<DeviceRegistrationResult>,
) -> Result<bool, Error> {
if let Some(r) = registration_result.as_ref() {
debug!(
"Device Registration Result: device {:?}, hub {:?}, status {}",
r.device_id(),
r.assigned_hub(),
r.status()
);
Ok(r.status().eq_ignore_ascii_case("assigning"))
} else {
debug!("Not a device registration response");
Ok(true)
}
}
// The purpose of this function is to poll DPS till it sends either an error or the device
    // credentials back. This function calls get_operation_status on a timer which in turn calls
    // into DPS. The way polling is implemented is by generating a stream of timer events and
// calling get_operation_status on each timer event. Stream processing is aborted if either the
// timer generates an error or if get_operation_status returns an error. All results from
// get_operation_status are discarded, but for the one that returns the desired result. The
// skip_while and take(1) implement discarding all but the desired result. Finally fold is
// called on the desired result to format and return it from the function.
fn get_device_registration_result(
client: Arc<RwLock<Client<S, DpsTokenSource<K>>>>,
scope_id: String,
registration_id: String,
operation_id: String,
key: K,
retry_count: u64,
) -> Box<Future<Item = Option<DeviceRegistrationResult>, Error = Error>> {
debug!(
"DPS registration result will retry {} times every {} seconds",
retry_count, DPS_ASSIGNMENT_RETRY_INTERVAL_SECS
);
let chain = Interval::new(
Instant::now(),
Duration::from_secs(DPS_ASSIGNMENT_RETRY_INTERVAL_SECS),
).take(retry_count)
.map_err(|_| Error::from(ErrorKind::TimerError))
.and_then(move |_instant: Instant| {
debug!("Ask DPS for registration status");
Self::get_operation_status(
&client.clone(),
&scope_id,
                &registration_id,
&operation_id,
key.clone(),
)
})
.skip_while(Self::is_skippable_result)
.take(1)
.fold(
None,
|_final_result: Option<DeviceRegistrationResult>,
result_from_service: Option<DeviceRegistrationResult>| {
future::ok::<Option<DeviceRegistrationResult>, Error>(result_from_service)
},
);
Box::new(chain)
}
fn register_with_auth(
client: &Arc<RwLock<Client<S, DpsTokenSource<K>>>>,
scope_id: String,
registration_id: String,
tpm_ek: &Bytes,
tpm_srk: &Bytes,
key_store: &A,
) -> Box<Future<Item = Option<RegistrationOperationStatus>, Error = Error>> {
let tpm_attestation = TpmAttestation::new(base64::encode(&tpm_ek))
.with_storage_root_key(base64::encode(&tpm_srk));
let registration = DeviceRegistration::new()
.with_registration_id(registration_id.clone())
.with_tpm(tpm_attestation);
let client_inner = client.clone();
let mut key_store_inner = key_store.clone();
let r = client
.read()
.expect("RwLock read failure")
.request::<DeviceRegistration, TpmRegistrationResult>(
Method::Put,
&format!("{}/registrations/{}/register", scope_id, registration_id),
None,
Some(registration.clone()),
false,
)
.then(move |result| {
match result {
Ok(_) => Either::B(future::err(Error::from(ErrorKind::Unexpected))),
Err(err) => {
// If request is returned with status unauthorized, extract the tpm
// challenge from the payload, generate a signature and re-issue the
// request
let body =
if let HttpErrorKind::ServiceError(status, ref body) = *err.kind() {
if status == StatusCode::Unauthorized {
debug!(
"Registration unauthorized, checking response for challenge {}",
status
);
Some(body.clone())
} else {
debug!("Unexpected registration status, {}", status);
None
}
} else {
debug!("Response error {:?}", err);
None
};
body.map(move |body| {
Self::get_tpm_challenge_key(body.as_str(), &mut key_store_inner)
.map(move |key| {
Either::A(Self::get_operation_id(
&client_inner.clone(),
scope_id.as_str(),
registration_id.as_str(),
                            &registration,
key.clone(),
))
})
.unwrap_or_else(|err| Either::B(future::err(err)))
}).unwrap_or_else(|| Either::B(future::err(Error::from(err))))
}
}
});
Box::new(r)
}
pub fn register(&self) -> Box<Future<Item = (String, String), Error = Error>> {
let key_store = self.key_store.clone();
let mut key_store_status = self.key_store.clone();
let client_with_token_status = self.client.clone();
let scope_id = self.scope_id.clone();
let scope_id_status = self.scope_id.clone();
let registration_id = self.registration_id.clone();
let registration_id_status = self.registration_id.clone();
let tpm_ek = self.tpm_ek.clone();
let tpm_srk = self.tpm_srk.clone();
info!(
"Starting DPS registration with scope_id \"{}\", registration_id \"{}\"",
scope_id, registration_id,
);
let r = Self::register_with_auth(
&self.client,
scope_id,
registration_id,
&tpm_ek,
&tpm_srk,
&self.key_store,
).and_then(
move |operation_status: Option<RegistrationOperationStatus>| {
key_store
.get(&KeyIdentity::Device, "primary")
.map(|k| {
operation_status
.map(move |s| {
let retry_count = (DPS_ASSIGNMENT_TIMEOUT_SECS
/ DPS_ASSIGNMENT_RETRY_INTERVAL_SECS)
+ 1;
Either::A(Self::get_device_registration_result(
client_with_token_status,
scope_id_status,
registration_id_status,
s.operation_id().clone(),
k.clone(),
retry_count,
))
})
.unwrap_or_else(|| {
Either::B(future::err(Error::from(ErrorKind::NotAssigned)))
})
})
.unwrap_or_else(|err| Either::B(future::err(Error::from(err))))
},
)
.and_then(move |operation_status: Option<DeviceRegistrationResult>| {
operation_status
.ok_or_else(|| Error::from(ErrorKind::NotAssigned))
.and_then(|s| -> Result<(String, String), Error> {
let tpm_result_inner = s.clone();
let tpm_result = s.tpm();
tpm_result
.ok_or_else(|| Error::from(ErrorKind::NotAssigned))
.and_then(|r| -> Result<(), Error> {
r.authentication_key()
.ok_or_else(|| Error::from(ErrorKind::NotAssigned))
.and_then(|ks| base64::decode(ks).map_err(Error::from))
.and_then(|kb| -> Result<(), Error> {
key_store_status
.activate_identity_key(
KeyIdentity::Device,
"primary".to_string(),
kb,
)
.map_err(Error::from)
})
})
.and_then(|_| -> Result<(String, String), Error> {
get_device_info(&tpm_result_inner)
})
})
});
Box::new(r)
}
}
fn get_device_info(
registration_result: &DeviceRegistrationResult,
) -> Result<(String, String), Error> {
Ok((
registration_result
.device_id()
.cloned()
.ok_or_else(|| Error::from(ErrorKind::NotAssigned))?,
registration_result
.assigned_hub()
.cloned()
.ok_or_else(|| Error::from(ErrorKind::NotAssigned))?,
))
}
#[cfg(test)]
mod tests {
use super::*;
use std::cell::RefCell;
use std::mem;
use hyper::header::Authorization;
use hyper::server::service_fn;
use hyper::StatusCode;
use serde_json;
use tokio_core::reactor::Core;
use url::Url;
use edgelet_core::crypto::{MemoryKey, MemoryKeyStore};
#[test]
fn server_register_with_auth_success() {
let mut core = Core::new().unwrap();
let expected_uri = "https://global.azure-devices-provisioning.net/scope/registrations/reg/register?api-version=2017-11-15";
let handler = move |req: Request| {
let (method, uri, _httpversion, headers, _body) = req.deconstruct();
assert_eq!(uri, expected_uri);
assert_eq!(method, Method::Put);
// If authorization header does not have the shared access signature, request one
let auth = headers.get::<Authorization<String>>();
match auth {
None => {
let mut result = TpmRegistrationResult::new();
result.set_authentication_key(base64::encode("key"));
future::ok(
Response::new()
.with_status(StatusCode::Unauthorized)
.with_body(serde_json::to_string(&result).unwrap().into_bytes()),
)
}
Some(_) => {
let mut result = RegistrationOperationStatus::new("something".to_string())
.with_status("assigning".to_string());
future::ok(
Response::new()
.with_status(StatusCode::Ok)
.with_body(serde_json::to_string(&result).unwrap().into_bytes()),
)
}
}
};
let client = Arc::new(RwLock::new(
Client::new(
service_fn(handler),
None,
"2017-11-15",
Url::parse("https://global.azure-devices-provisioning.net/").unwrap(),
).unwrap(),
));
let task = DpsClient::register_with_auth(
&client,
"scope".to_string(),
"reg".to_string(),
&Bytes::from("ek".to_string().into_bytes()),
&Bytes::from("srk".to_string().into_bytes()),
&MemoryKeyStore::new(),
).map(|result| match result {
Some(op) => {
assert_eq!(op.operation_id(), "something");
assert_eq!(op.status().unwrap(), "assigning");
()
}
None => panic!("Unexpected"),
});
core.run(task).unwrap();
}
#[test]
fn server_register_gets_404_fails() {
let mut core = Core::new().unwrap();
let handler = |_req: Request| future::ok(Response::new().with_status(StatusCode::NotFound));
let client = Client::new(
service_fn(handler),
None,
"2017-11-15",
Url::parse("https://global.azure-devices-provisioning.net/").unwrap(),
).unwrap();
let dps = DpsClient::new(
client,
"scope".to_string(),
"test".to_string(),
Bytes::from("ek".to_string().into_bytes()),
Bytes::from("srk".to_string().into_bytes()),
MemoryKeyStore::new(),
).unwrap();
let task = dps.register().then(|result| {
match result {
Ok(_) => panic!("Excepted err got success"),
Err(err) => {
if mem::discriminant(err.kind()) != mem::discriminant(&ErrorKind::Http) {
panic!("Wrong error kind. Expected `Http` found {:?}", err);
}
}
}
Ok(()) as Result<(), Error>
});
core.run(task).unwrap();
}
#[test]
fn server_register_with_auth_gets_404_fails() {
let mut core = Core::new().unwrap();
let handler = |req: Request| {
// If authorization header does not have the shared access signature, request one
let auth = req.headers().get::<Authorization<String>>();
match auth {
None => {
let mut result = TpmRegistrationResult::new();
result.set_authentication_key("key".to_string());
future::ok(
Response::new()
.with_status(StatusCode::Unauthorized)
.with_body(serde_json::to_string(&result).unwrap().into_bytes()),
)
}
Some(_) => future::ok(Response::new().with_status(StatusCode::NotFound)),
}
};
let client = Client::new(
service_fn(handler),
None,
"2017-11-15",
Url::parse("https://global.azure-devices-provisioning.net/").unwrap(),
).unwrap();
let dps = DpsClient::new(
client,
"scope".to_string(),
"test".to_string(),
Bytes::from("ek".to_string().into_bytes()),
Bytes::from("srk".to_string().into_bytes()),
MemoryKeyStore::new(),
).unwrap();
let task = dps.register().then(|result| {
match result {
Ok(_) => panic!("Excepted err got success"),
Err(err) => {
if mem::discriminant(err.kind()) != mem::discriminant(&ErrorKind::Http) {
panic!("Wrong error kind. Expected `Http` found {:?}", err);
}
}
}
Ok(()) as Result<(), Error>
});
core.run(task).unwrap();
}
#[test]
fn get_device_registration_result_success() {
let mut core = Core::new().unwrap();
let reg_op_status_vanilla = Response::new().with_status(StatusCode::Ok).with_body(
serde_json::to_string(&RegistrationOperationStatus::new("operation".to_string()))
.unwrap()
.into_bytes(),
);
let reg_op_status_final = Response::new().with_status(StatusCode::Ok).with_body(
serde_json::to_string(
&RegistrationOperationStatus::new("operation".to_string()).with_registration_state(
DeviceRegistrationResult::new("reg".to_string(), "doesn't matter".to_string()),
),
).unwrap()
.into_bytes(),
);
let stream = RefCell::new(stream::iter_result(vec![
Ok(reg_op_status_vanilla),
Ok(reg_op_status_final),
Err(Error::from(ErrorKind::Unexpected)),
]));
let handler = move |_req: Request| {
if let Async::Ready(opt) = stream.borrow_mut().poll().unwrap() {
future::ok(opt.unwrap())
} else {
unimplemented!();
}
};
let key = MemoryKey::new("key".to_string());
let service = service_fn(handler);
let client = Arc::new(RwLock::new(
Client::new(
service,
None,
"2017-11-15",
Url::parse("https://global.azure-devices-provisioning.net/").unwrap(),
).unwrap()
.with_token_source(DpsTokenSource::new(
"scope_id".to_string(),
"reg".to_string(),
key.clone(),
))
.clone(),
));
let dps_operation = DpsClient::<_, _, MemoryKeyStore>::get_device_registration_result(
client,
"scope_id".to_string(),
"reg".to_string(),
"operation".to_string(),
key,
3,
);
let task = dps_operation.map(|result| {
match result {
Some(r) => assert_eq!(*r.registration_id(), "reg".to_string()),
None => panic!("Expected registration id"),
}
()
});
core.run(task).unwrap();
}
#[test]
fn get_device_registration_result_on_all_attempts_returns_none() {
let mut core = Core::new().unwrap();
let handler = |_req: Request| {
future::ok(
Response::new().with_status(StatusCode::Ok).with_body(
serde_json::to_string(&RegistrationOperationStatus::new(
"operation".to_string(),
)).unwrap()
.into_bytes(),
),
)
};
let key = MemoryKey::new("key".to_string());
let service = service_fn(handler);
let client = Arc::new(RwLock::new(
Client::new(
service,
None,
"2017-11-15",
Url::parse("https://global.azure-devices-provisioning.net/").unwrap(),
).unwrap()
.with_token_source(DpsTokenSource::new(
"scope_id".to_string(),
"reg".to_string(),
key.clone(),
))
.clone(),
));
let dps_operation = DpsClient::<_, _, MemoryKeyStore>::get_device_registration_result(
client,
"scope_id".to_string(),
"reg".to_string(),
"operation".to_string(),
key,
3,
);
let task = dps_operation.map(|result| {
match result {
Some(_) => panic!("Shouldn't have passed because every attempt failed"),
                None => (), // no result after every retry is the expected outcome
}
()
});
core.run(task).unwrap();
}
#[test]
fn get_operation_status_success() {
let mut core = Core::new().unwrap();
let expected_uri = "https://global.azure-devices-provisioning.net/scope_id/registrations/reg/operations/operation?api-version=2017-11-15";
let handler = move |req: Request| {
let (method, uri, _httpversion, _headers, _body) = req.deconstruct();
assert_eq!(uri, expected_uri);
assert_eq!(method, Method::Get);
let operation_status: RegistrationOperationStatus =
RegistrationOperationStatus::new("operation".to_string());
let serializable = operation_status.with_registration_state(
DeviceRegistrationResult::new("reg".to_string(), "doesn't matter".to_string()),
);
future::ok(
Response::new()
.with_status(StatusCode::Ok)
.with_body(serde_json::to_string(&serializable).unwrap().into_bytes()),
)
};
let client = Client::new(
service_fn(handler),
None,
"2017-11-15",
Url::parse("https://global.azure-devices-provisioning.net/").unwrap(),
).unwrap();
let dps_operation = DpsClient::<_, _, MemoryKeyStore>::get_operation_status(
&Arc::new(RwLock::new(client.clone())),
"scope_id",
"reg",
"operation",
MemoryKey::new("key".to_string()),
);
let task = dps_operation.map(|result| match result {
Some(op) => {
assert_eq!(*op.registration_id(), "reg".to_string());
()
}
None => panic!("Unexpected"),
});
core.run(task).unwrap();
}
#[test]
fn get_operation_status_gets_404_fails() {
let mut core = Core::new().unwrap();
let handler = |_req: Request| future::ok(Response::new().with_status(StatusCode::NotFound));
let client = Client::new(
service_fn(handler),
None,
"2017-11-15",
Url::parse("https://global.azure-devices-provisioning.net/").unwrap(),
).unwrap();
let dps_operation = DpsClient::<_, _, MemoryKeyStore>::get_operation_status(
&Arc::new(RwLock::new(client.clone())),
"scope_id",
"reg",
"operation",
MemoryKey::new("key".to_string()),
);
let task = dps_operation.then(|result| {
match result {
Ok(_) => panic!("Excepted err got success"),
Err(err) => {
if mem::discriminant(err.kind()) != mem::discriminant(&ErrorKind::Http) {
panic!("Wrong error kind. Expected `Http` found {:?}", err);
}
}
}
Ok(()) as Result<(), Error>
});
core.run(task).unwrap();
}
#[test]
fn get_device_info_success() {
assert_eq!(
get_device_info(
&DeviceRegistrationResult::new("reg".to_string(), "assigned".to_string())
.with_device_id("device".to_string())
.with_assigned_hub("hub".to_string())
).unwrap(),
("device".to_string(), "hub".to_string())
)
}
}
| 38.551378 | 146 | 0.493791 |
0ec6ff8eac3b2d01a65cbea9a5dc745bd4ecce00 | 705 | extern crate flate2;
use std::io::prelude::*;
use std::io;
use flate2::Compression;
use flate2::write::GzEncoder;
use flate2::bufread::GzDecoder;
// Compress a sample string and print it after transformation.
fn main() {
let mut e = GzEncoder::new(Vec::new(), Compression::default());
    e.write_all(b"Hello World").unwrap();
let bytes = e.finish().unwrap();
println!("{}", decode_reader(bytes).unwrap());
}
// Uncompresses a Gz Encoded vector of bytes and returns a string or error
// Here &[u8] implements BufRead
fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
let mut gz = GzDecoder::new(&bytes[..]);
let mut s = String::new();
gz.read_to_string(&mut s)?;
Ok(s)
}
| 28.2 | 74 | 0.662411 |
ab828fb1221380053e4d2f6d8f7b25502f32bedd | 1,686 | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::{bail, Result},
ffx_core::ffx_plugin,
ffx_off_args::OffCommand,
fidl::Error as FidlError,
fidl_fuchsia_hardware_power_statecontrol::AdminProxy,
};
#[ffx_plugin(AdminProxy = "core/appmgr:out:fuchsia.hardware.power.statecontrol.Admin")]
pub async fn off(admin_proxy: AdminProxy, _cmd: OffCommand) -> Result<()> {
let res = admin_proxy.poweroff().await;
match res {
Ok(Ok(_)) => Ok(()),
Ok(Err(e)) => bail!(e),
Err(e) => match e {
FidlError::ClientChannelClosed { .. } => {
log::info!(
"Off returned a client channel closed - assuming power down succeeded: {:?}",
e
);
Ok(())
}
_ => bail!(e),
},
}
}
////////////////////////////////////////////////////////////////////////////////
// tests
#[cfg(test)]
mod test {
use {super::*, fidl_fuchsia_hardware_power_statecontrol::AdminRequest};
fn setup_fake_admin_server() -> AdminProxy {
setup_fake_admin_proxy(|req| match req {
AdminRequest::Poweroff { responder } => {
responder.send(&mut Ok(())).unwrap();
}
_ => assert!(false),
})
}
#[fuchsia_async::run_singlethreaded(test)]
async fn test_off() -> Result<()> {
let admin_proxy = setup_fake_admin_server();
let result = off(admin_proxy, OffCommand {}).await;
assert!(result.is_ok());
Ok(())
}
}
| 29.578947 | 97 | 0.537367 |
b937e66a4d4a420ba91db6ed6ed360a7e6f64d49 | 512 | use structopt::StructOpt;
#[derive(StructOpt, Debug, Default, Clone)]
pub struct PredictCost {
/// The node type used for calculation.
#[structopt(short = "n", long = "node-type")]
pub node_type: String,
/// CPU requirement.
#[structopt(short = "c", long = "cpu")]
pub cpu: f32,
    /// Memory requirement.
#[structopt(short = "m", long = "memory")]
pub memory: f32,
    /// Scale factor.
#[structopt(short = "s", long = "scale", default_value = "1")]
pub scale: i32,
}
| 24.380952 | 66 | 0.59375 |
91a750c20c4bc2e6b3e4e9245f02b0a4a9d7d731 | 2,231 | #[derive(Copy, Clone, Debug)]
struct Component {
left_pins: u16,
right_pins: u16,
}
impl Component {
fn from_str(s: &str) -> Self {
let mut iter = s.trim().split("/");
Component {
left_pins: iter.next().unwrap().parse().unwrap(),
right_pins: iter.next().unwrap().parse().unwrap(),
}
}
fn score(self) -> u16 {
self.left_pins + self.right_pins
}
}
#[derive(Debug, Clone, Default)]
struct Components(Vec<Component>);
impl Components {
fn from_str(s: &str) -> Self {
Components(
s.trim()
.lines()
.map(|l| Component::from_str(l))
.collect()
)
}
fn score(&self) -> u16 {
self.0.iter().map(|n| n.score()).sum()
}
}
fn recurse(prev_pins: u16, remaining: Components, bridge: Components) -> Components {
let mut highest_length = 0;
let mut best_score = 0;
let mut best_bridge = None;
for (index, component) in remaining.0.iter().enumerate() {
let curr_bridge = if component.left_pins == prev_pins {
let mut remaining_clone = remaining.clone();
let mut bridge_clone = bridge.clone();
let removed = remaining_clone.0.remove(index);
bridge_clone.0.push(removed);
recurse(removed.right_pins, remaining_clone, bridge_clone)
} else if component.right_pins == prev_pins {
let mut remaining_clone = remaining.clone();
let mut bridge_clone = bridge.clone();
let removed = remaining_clone.0.remove(index);
bridge_clone.0.push(removed);
recurse(removed.left_pins, remaining_clone, bridge_clone)
} else {
continue;
};
if curr_bridge.0.len() > highest_length || curr_bridge.score() > best_score {
highest_length = curr_bridge.0.len();
best_score = curr_bridge.score();
best_bridge = Some(curr_bridge);
}
}
best_bridge.unwrap_or(bridge)
}
fn main() {
let bridge = recurse(
0,
Components::from_str(include_str!("../input")),
Components(vec![]),
);
println!("solution: {}", bridge.score());
}
| 29.355263 | 85 | 0.56701 |
1cabcbdc79a4f375d1b08b3fe63c42403656c3f0 | 79 | //pub mod contract;
pub mod error;
pub mod helper;
pub mod msg;
pub mod state;
| 13.166667 | 19 | 0.721519 |
4b5a634889a7920fa50cc16782547c17d850a504 | 4,197 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Optimizer rule to replace `LIMIT 0` on a plan with an empty relation.
//! This saves time in planning and executing the query.
use crate::error::Result;
use crate::logical_plan::LogicalPlan;
use crate::optimizer::optimizer::OptimizerRule;
use super::utils;
use crate::execution::context::ExecutionProps;
/// Optimization rule that replaces LIMIT 0 with an [LogicalPlan::EmptyRelation]
pub struct EliminateLimit;
impl EliminateLimit {
#[allow(missing_docs)]
pub fn new() -> Self {
Self {}
}
}
impl OptimizerRule for EliminateLimit {
fn optimize(
&self,
plan: &LogicalPlan,
execution_props: &ExecutionProps,
) -> Result<LogicalPlan> {
match plan {
LogicalPlan::Limit { n, input } if *n == 0 => {
Ok(LogicalPlan::EmptyRelation {
produce_one_row: false,
schema: input.schema().clone(),
})
}
// Rest: recurse and find possible LIMIT 0 nodes
_ => {
let expr = plan.expressions();
// apply the optimization to all inputs of the plan
let inputs = plan.inputs();
let new_inputs = inputs
.iter()
.map(|plan| self.optimize(plan, execution_props))
.collect::<Result<Vec<_>>>()?;
utils::from_plan(plan, &expr, &new_inputs)
}
}
}
fn name(&self) -> &str {
"eliminate_limit"
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::logical_plan::LogicalPlanBuilder;
use crate::logical_plan::{col, sum};
use crate::test::*;
fn assert_optimized_plan_eq(plan: &LogicalPlan, expected: &str) {
let rule = EliminateLimit::new();
let optimized_plan = rule
.optimize(plan, &ExecutionProps::new())
.expect("failed to optimize plan");
let formatted_plan = format!("{:?}", optimized_plan);
assert_eq!(formatted_plan, expected);
assert_eq!(plan.schema(), optimized_plan.schema());
}
#[test]
fn limit_0_root() {
let table_scan = test_table_scan().unwrap();
let plan = LogicalPlanBuilder::from(&table_scan)
.aggregate(vec![col("a")], vec![sum(col("b"))])
.unwrap()
.limit(0)
.unwrap()
.build()
.unwrap();
// No aggregate / scan / limit
let expected = "EmptyRelation";
assert_optimized_plan_eq(&plan, expected);
}
#[test]
fn limit_0_nested() {
let table_scan = test_table_scan().unwrap();
let plan1 = LogicalPlanBuilder::from(&table_scan)
.aggregate(vec![col("a")], vec![sum(col("b"))])
.unwrap()
.build()
.unwrap();
let plan = LogicalPlanBuilder::from(&table_scan)
.aggregate(vec![col("a")], vec![sum(col("b"))])
.unwrap()
.limit(0)
.unwrap()
.union(plan1)
.unwrap()
.build()
.unwrap();
// Left side is removed
let expected = "Union\
\n EmptyRelation\
\n Aggregate: groupBy=[[#test.a]], aggr=[[SUM(#test.b)]]\
\n TableScan: test projection=None";
assert_optimized_plan_eq(&plan, expected);
}
}
| 32.284615 | 80 | 0.57827 |
bff5863616ddb6957ebb1d42ddeab2b253f30ea1 | 608 | use azure_sdk_core::errors::AzureError;
use azure_sdk_cosmos::clients::{Client, ClientBuilder, CosmosUriBuilder};
use azure_sdk_cosmos::{AuthorizationToken, TokenType};
pub fn initialize() -> Result<Client<impl CosmosUriBuilder>, AzureError> {
let account = std::env::var("COSMOS_ACCOUNT").expect("Set env variable COSMOS_ACCOUNT first!");
    let key = std::env::var("COSMOS_MASTER_KEY").expect("Set env variable COSMOS_MASTER_KEY first!");
let authorization_token = AuthorizationToken::new(account, TokenType::Master, &key)?;
let client = ClientBuilder::new(authorization_token)?;
Ok(client)
}
| 43.428571 | 99 | 0.75 |
3ac37b4b3e749c8f23ba90e6e83c4ee28e3573d9 | 2,095 | extern crate rand;
use std::{thread, time};
use rand::{thread_rng, Rng};
trait Observer {
fn update(&self, generator: Box<&NumberGenerator>);
}
struct DigitObserver {}
impl DigitObserver {
fn new() -> DigitObserver {
DigitObserver {}
}
}
impl Observer for DigitObserver {
fn update(&self, generator: Box<&NumberGenerator>) {
println!("DigitObserver: {}", generator.get_number());
thread::sleep(time::Duration::from_millis(100));
}
}
struct GraphObserver {}
impl GraphObserver {
fn new() -> GraphObserver {
GraphObserver {}
}
}
impl Observer for GraphObserver {
fn update(&self, generator: Box<&NumberGenerator>) {
print!("GraphObserver:");
for _ in 0..generator.get_number() {
print!("*");
}
println!("");
thread::sleep(time::Duration::from_millis(100));
}
}
trait NumberGenerator {
fn get_number(&self) -> u32;
fn execute(&mut self);
}
struct RandomNumberGenerator {
observers: Vec<Box<Observer>>,
number: u32,
rng: rand::ThreadRng,
}
impl RandomNumberGenerator {
fn new() -> RandomNumberGenerator {
RandomNumberGenerator {
observers: Vec::new(),
number: 0,
rng: thread_rng(),
}
}
fn add_observer(&mut self, observer: Box<Observer>) {
self.observers.push(observer);
}
fn notify_observers(&self) {
for observer in &self.observers {
observer.update(Box::new(self));
}
}
}
impl NumberGenerator for RandomNumberGenerator {
fn get_number(&self) -> u32 {
self.number
}
fn execute(&mut self) {
for _ in 0..20 {
self.number = self.rng.gen_range(0, 50);
self.notify_observers();
}
}
}
fn main() {
let mut generator = Box::new(RandomNumberGenerator::new());
let observer1 = Box::new(DigitObserver::new());
let observer2 = Box::new(GraphObserver::new());
generator.add_observer(observer1);
generator.add_observer(observer2);
generator.execute();
}
| 21.597938 | 63 | 0.599045 |
1ce5cad7a312f4e40b35c3e54055e211cbf01b6a | 7,619 | use curl;
use curl::easy::{Auth, Easy2, Handler, List, WriteError};
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde_json;
use std::fmt::Display;
struct Collector(Vec<u8>);
impl Handler for Collector {
fn write(&mut self, data: &[u8]) -> Result<usize, WriteError> {
self.0.extend_from_slice(data);
Ok(data.len())
}
}
/// HTTP Method
pub enum Method {
GET,
POST,
DELETE,
}
use self::Method::*;
/// Constructs a new `String` which represents a key-value
/// parameter string from `key` and `value` and returns the
/// result as a form of `Some(String)`.
///
/// Returns `None` if `value` is `None`.
///
/// # Examples
/// ```
/// use livy::http;
///
/// assert_eq!(Some("from=2".to_string()), http::param("from", Some(2)));
/// assert_eq!(None, http::param::<i32>("from", None));
/// ```
pub fn param<T: Display>(key: &str, value: Option<T>) -> Option<String> {
match value {
Some(value) => Some(format!("{}={}", key, value)),
None => None
}
}
/// Constructs a new `String` which represents a key-value parameters
/// string as a form of `"?key1=value1&key2=value2&..."`.
///
/// Returns an empty string if there is no `Some(String)` value in `params`.
///
/// # Examples
/// ```
/// use livy::http;
///
/// assert_eq!("".to_string(),
/// http::params(vec![]));
/// assert_eq!("".to_string(),
/// http::params(vec![None]));
/// assert_eq!("?key1=value1",
/// http::params(vec![Some("key1=value1".to_string())]));
/// assert_eq!("?key1=value1",
/// http::params(vec![Some("key1=value1".to_string()),
/// None]));
/// assert_eq!("?key1=value1",
/// http::params(vec![None,
/// Some("key1=value1".to_string())]));
/// assert_eq!("?key1=value1&key2=value2",
/// http::params(vec![Some("key1=value1".to_string()),
/// Some("key2=value2".to_string())]));
/// ```
pub fn params(params: Vec<Option<String>>) -> String {
let mut s = String::new();
for param in params {
match param {
Some(param) => {
if s.is_empty() {
s.push('?');
} else {
s.push('&');
}
s.push_str(param.as_str());
},
None => (),
}
}
s
}
/// Removes the trailing slash of `s` if it exists,
/// constructs a new `String` from the result and
/// returns it.
///
/// # Examples
/// ```
/// use livy::http;
///
/// assert_eq!("http://example.com".to_string(),
/// http::remove_trailing_slash("http://example.com/"));
/// ```
pub fn remove_trailing_slash(s: &str) -> String {
if s.ends_with("/") {
s[..s.len()-1].to_string()
} else {
s.to_string()
}
}
/// Sends an HTTP request, deserializes the response body and
/// returns the result.
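///
/// # Examples
/// ```ignore
/// use livy::http;
///
/// // Sketch only: `Sessions` stands in for a deserializable response type.
/// let sessions = http::send::<Sessions, ()>(
///     http::Method::GET,
///     "http://localhost:8998/sessions",
///     None, // no request body
///     None, // no SPNEGO/Kerberos auth
///     None, // no username
/// )?;
/// ```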
pub fn send<T: DeserializeOwned, U: Serialize>(method: Method, url: &str, data: Option<U>, gssnegotiate: Option<&bool>, username: Option<&str>) -> Result<T, String> {
let mut easy = Easy2::new(Collector(Vec::new()));
let mut auth = Auth::new();
let data = match data {
Some(data) => {
match serde_json::to_string(&data) {
Ok(data) => Some(data),
Err(err) => return Err(format!("{}", err)),
}
},
None => None,
};
if let Err(err) = perform(&mut easy, &mut auth, method, url, data.as_ref().map(String::as_bytes), gssnegotiate, username) {
return Err(format!("{}", err));
}
match easy.response_code() {
Err(err) => return Err(format!("{}", err)),
Ok(status_code) if status_code >= 200 && status_code <= 308 => (),
Ok(status_code) => return Err(format!("invalid status code; code: {}, response: {}",
status_code,
String::from_utf8_lossy(&easy.get_ref().0))),
}
let res = String::from_utf8_lossy(&easy.get_ref().0);
let res = serde_json::from_str(res.as_ref());
match res {
Ok(res) => Ok(res),
Err(err) => Err(format!("{}", err)),
}
}
fn perform(easy: &mut Easy2<Collector>, auth: &mut Auth, method: Method, url: &str, data: Option<&[u8]>, gssnegotiate: Option<&bool>, username: Option<&str>) -> Result<(), curl::Error> {
match method {
GET => easy.get(true)?,
POST => {
easy.post(true)?;
if let Some(data) = data {
easy.post_fields_copy(data)?;
}
},
DELETE => easy.custom_request("DELETE")?,
};
easy.url(url)?;
if let Some(gssnegotiate) = gssnegotiate {
auth.gssnegotiate(*gssnegotiate);
easy.http_auth(&auth)?;
}
if let Some(username) = username {
easy.username(username)?;
}
let mut headers = List::new();
headers.append("Content-Type: application/json")?;
headers.append("X-Requested-By: x")?;
easy.http_headers(headers)?;
easy.perform()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_param() {
struct TestCase {
key: &'static str,
value: Option<i32>,
expected: Option<String>,
}
let test_cases = vec![
TestCase {
key: "from",
value: Some(2),
expected: Some("from=2".to_string()),
},
TestCase {
key: "from",
value: None,
expected: None,
},
];
for test_case in test_cases {
assert_eq!(test_case.expected, param(test_case.key, test_case.value));
}
}
#[test]
fn test_params() {
struct TestCase {
params: Vec<Option<String>>,
expected: String,
}
let test_cases = vec![
TestCase {
params: vec![],
expected: "".to_string(),
},
TestCase {
params: vec![None],
expected: "".to_string(),
},
TestCase {
params: vec![Some("key1=value1".to_string())],
expected: "?key1=value1".to_string(),
},
TestCase {
params: vec![Some("key1=value1".to_string()), None],
expected: "?key1=value1".to_string(),
},
TestCase {
params: vec![None, Some("key1=value1".to_string())],
expected: "?key1=value1".to_string(),
},
TestCase {
params: vec![Some("key1=value1".to_string()), Some("key2=value2".to_string())],
expected: "?key1=value1&key2=value2".to_string(),
},
];
for test_case in test_cases {
assert_eq!(test_case.expected, params(test_case.params));
}
}
#[test]
fn test_remove_trailing_slash() {
struct TestCase {
s: &'static str,
expected: String,
}
let test_cases = vec![
TestCase {
s: "http://example.com/",
expected: "http://example.com".to_string(),
},
TestCase {
s: "http://example.com",
expected: "http://example.com".to_string(),
},
];
for test_case in test_cases {
assert_eq!(test_case.expected, remove_trailing_slash(test_case.s));
}
}
}
| 28.011029 | 186 | 0.499278 |
91e8316e57507f8fcf4b9dd7d54cc22501caa33d | 1,389 | struct Solution {}
impl Solution {
pub fn insert(mut intervals: Vec<Vec<i32>>, mut new_interval: Vec<i32>) -> Vec<Vec<i32>> {
let start = intervals
.iter()
.map(|v| v[0])
.collect::<Vec<i32>>()
.binary_search(&new_interval[0]);
let mut start = match start {
Ok(s) => s,
Err(s) => s,
};
if start > 0 && new_interval[0] <= intervals[start - 1][1] {
new_interval[0] = intervals[start - 1][0];
start -= 1;
}
let end = intervals
.iter()
.map(|v| v[1])
.collect::<Vec<i32>>()
.binary_search(&new_interval[1]);
let mut end = match end {
Ok(e) => e as i32,
Err(e) => e as i32,
};
if (end as usize) < intervals.len() && new_interval[1] >= intervals[end as usize][0] {
new_interval[1] = intervals[end as usize][1]
} else {
end -= 1;
}
let mut i = start as i32;
while i <= end {
intervals.remove(start);
i += 1
}
intervals.insert(start, new_interval);
intervals
}
}
fn main() {
println!(
"{:?}",
Solution::insert(
vec![vec![3, 5], vec![6, 7], vec![8, 10], vec![12, 16]],
vec![1, 2]
)
);
}
| 26.711538 | 94 | 0.432685 |
2fea75c79e6f4743d6a0e8c2f3a48588f3c13f74 | 2,628 | struct Ingredient {
prop: [i32; 5],
}
impl Ingredient {
fn new(s: &str) -> Ingredient {
let mut v = aoc::ints::<i32>(s);
Ingredient {
prop: [
v.next().unwrap(),
v.next().unwrap(),
v.next().unwrap(),
v.next().unwrap(),
v.next().unwrap(),
],
}
}
fn prop(&self, n: usize) -> i32 {
self.prop[n]
}
}
fn score(ingredients: &[Ingredient], amounts: &[usize]) -> (usize, usize) {
let mut prod: usize = 1;
for prop in 0..=3 {
let mut s: i32 = 0;
for (i, ing) in ingredients.iter().enumerate() {
s += ing.prop(prop) * amounts[i] as i32;
}
if s < 0 {
s = 0;
}
prod *= s as usize;
}
if prod == 0 {
return (0, 0);
}
let mut cal: i32 = 0;
for (i, ing) in ingredients.iter().enumerate() {
cal += ing.prop(4) * amounts[i] as i32;
}
(prod, cal as usize)
}
fn best_recipe(ingredients: &[Ingredient]) -> (usize, usize) {
let num = 100;
let types = ingredients.len();
let mut max1 = 0;
let mut max2 = 0;
for var in variations(types, num) {
let (score, cal) = score(ingredients, &var);
if max1 < score {
max1 = score
}
if cal == 500 && max2 < score {
max2 = score
}
}
(max1, max2)
}
fn variations(k: usize, n: usize) -> Vec<Vec<usize>> {
if k == 1 {
return vec![vec![n]];
}
let mut res: Vec<Vec<usize>> = vec![];
for i in 0..=n {
let subres = variations(k - 1, n - i);
for sr in subres {
let mut r = sr.clone();
r.push(i);
res.push(r);
}
}
res
}
fn main() {
let inp = aoc::input_lines();
aoc::benchme(|bench: bool| {
let ingredients: Vec<Ingredient> =
inp.iter().map(|l| Ingredient::new(l)).collect();
let (p1, p2) = best_recipe(&ingredients);
if !bench {
println!("Part 1: {}", p1);
println!("Part 2: {}", p2);
}
});
}
#[test]
fn works() {
let butterscotch = Ingredient::new(
"Butterscotch: capacity -1, durability -2, flavor 6, texture 3, calories 8",
);
let cinnamon = Ingredient::new(
"Cinnamon: capacity 2, durability 3, flavor -2, texture -1, calories 3",
);
let ing: Vec<Ingredient> = vec![butterscotch, cinnamon];
assert_eq!(score(&ing, &vec![44usize, 56]), (62842880, 520), "score");
assert_eq!(best_recipe(&ing), (62842880, 57600000), "best");
}
| 25.514563 | 84 | 0.479833 |
e81855213cd259a411ca1410c1f8a60561f6cd7d | 84,804 | //! # Fingerprints
//!
//! This module implements change-tracking so that Cargo can know whether or
//! not something needs to be recompiled. A Cargo `Unit` can be either "dirty"
//! (needs to be recompiled) or "fresh" (it does not need to be recompiled).
//! There are several mechanisms that influence a Unit's freshness:
//!
//! - The `Fingerprint` is a hash, saved to the filesystem in the
//! `.fingerprint` directory, that tracks information about the Unit. If the
//! fingerprint is missing (such as the first time the unit is being
//! compiled), then the unit is dirty. If any of the fingerprint fields
//! change (like the name of the source file), then the Unit is considered
//! dirty.
//!
//! The `Fingerprint` also tracks the fingerprints of all its dependencies,
//! so a change in a dependency will propagate the "dirty" status up.
//!
//! - Filesystem mtime tracking is also used to check if a unit is dirty.
//! See the section below on "Mtime comparison" for more details. There
//! are essentially two parts to mtime tracking:
//!
//! 1. The mtime of a Unit's output files is compared to the mtime of all
//! its dependencies' output file mtimes (see `check_filesystem`). If any
//! output is missing, or is older than a dependency's output, then the
//! unit is dirty.
//! 2. The mtime of a Unit's source files is compared to the mtime of its
//! dep-info file in the fingerprint directory (see `find_stale_file`).
//! The dep-info file is used as an anchor to know when the last build of
//! the unit was done. See the "dep-info files" section below for more
//! details. If any input files are missing, or are newer than the
//! dep-info, then the unit is dirty.
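//!
//! As a minimal sketch of those two mtime checks (hypothetical helper
//! names; not Cargo's actual code):
//!
//! ```ignore
//! use std::fs;
//! use std::path::{Path, PathBuf};
//! use std::time::SystemTime;
//!
//! fn mtime(path: &Path) -> Option<SystemTime> {
//!     fs::metadata(path).and_then(|m| m.modified()).ok()
//! }
//!
//! // 1. Dirty if any output is missing, or older than a dependency output.
//! fn outputs_dirty(outputs: &[PathBuf], dep_outputs: &[PathBuf]) -> bool {
//!     let newest_dep = dep_outputs.iter().filter_map(|p| mtime(p)).max();
//!     outputs.iter().any(|out| match (mtime(out), newest_dep) {
//!         (None, _) => true,               // missing output
//!         (Some(t), Some(dep)) => t < dep, // older than a dependency
//!         (Some(_), None) => false,
//!     })
//! }
//!
//! // 2. Dirty if any source is missing, or newer than the dep-info file,
//! //    which records when the unit was last built.
//! fn sources_dirty(dep_info: &Path, sources: &[PathBuf]) -> bool {
//!     match mtime(dep_info) {
//!         None => true, // no record of a previous build
//!         Some(anchor) => sources
//!             .iter()
//!             .any(|src| mtime(src).map_or(true, |t| t > anchor)),
//!     }
//! }
//! ```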
//!
//! Note: Fingerprinting is not a perfect solution. Filesystem mtime tracking
//! is notoriously imprecise and problematic. Only a small part of the
//! environment is captured. This is a balance of performance, simplicity, and
//! completeness. Sandboxing, hashing file contents, tracking every file
//! access, environment variable, and network operation would ensure more
//! reliable and reproducible builds at the cost of being complex, slow, and
//! platform-dependent.
//!
//! ## Fingerprints and Metadata
//!
//! The `Metadata` hash is a hash added to the output filenames to isolate
//! each unit. See the documentation in the `compilation_files` module for
//! more details. NOTE: Not all output files are isolated via filename hashes
//! (like dylibs). The fingerprint directory uses a hash, but sometimes units
//! share the same fingerprint directory (when they don't have Metadata) so
//! care should be taken to handle this!
//!
//! Fingerprints and Metadata are similar, and track some of the same things.
//! The Metadata contains information that is required to keep Units separate.
//! The Fingerprint includes additional information that should cause a
//! recompile, but it is desired to reuse the same filenames. A comparison
//! of what is tracked:
//!
//! Value | Fingerprint | Metadata
//! -------------------------------------------|-------------|----------
//! rustc | ✓ | ✓
//! Profile | ✓ | ✓
//! `cargo rustc` extra args | ✓ | ✓
//! CompileMode | ✓ | ✓
//! Target Name | ✓ | ✓
//! Target CompileKind (bin/lib/etc.) | ✓ | ✓
//! Enabled Features | ✓ | ✓
//! Immediate dependency’s hashes | ✓[^1] | ✓
//! Target or Host mode | | ✓
//! __CARGO_DEFAULT_LIB_METADATA[^4] | | ✓
//! package_id | | ✓
//! authors, description, homepage, repo | ✓ |
//! Target src path relative to ws | ✓ |
//! Target flags (test/bench/for_host/edition) | ✓ |
//! -C incremental=… flag | ✓ |
//! mtime of sources | ✓[^3] |
//! RUSTFLAGS/RUSTDOCFLAGS | ✓ |
//! LTO flags | ✓ | ✓
//! config settings[^5] | ✓ |
//! is_std | | ✓
//!
//! [^1]: Build script and bin dependencies are not included.
//!
//! [^3]: See below for details on mtime tracking.
//!
//! [^4]: `__CARGO_DEFAULT_LIB_METADATA` is set by rustbuild to embed the
//! release channel (bootstrap/stable/beta/nightly) in libstd.
//!
//! [^5]: Config settings that are not otherwise captured anywhere else.
//! Currently, this is only `doc.extern-map`.
//!
//! When deciding what should go in the Metadata vs the Fingerprint, consider
//! that some files (like dylibs) do not have a hash in their filename. Thus,
//! if a value changes, only the fingerprint will detect the change (consider,
//! for example, swapping between different features). Fields that are only in
//! Metadata generally aren't relevant to the fingerprint because they
//! fundamentally change the output (like target vs host changes the directory
//! where it is emitted).
//!
//! ## Fingerprint files
//!
//! Fingerprint information is stored in the
//! `target/{debug,release}/.fingerprint/` directory. Each Unit is stored in a
//! separate directory. Each Unit directory contains:
//!
//! - A file with a 16 hex-digit hash. This is the Fingerprint hash, used for
//! quick loading and comparison.
//! - A `.json` file that contains details about the Fingerprint. This is only
//! used to log details about *why* a fingerprint is considered dirty.
//! `CARGO_LOG=cargo::core::compiler::fingerprint=trace cargo build` can be
//! used to display this log information.
//! - A "dep-info" file which is a translation of rustc's `*.d` dep-info files
//! to a Cargo-specific format that tweaks file names and is optimized for
//! reading quickly.
//! - An `invoked.timestamp` file whose filesystem mtime is updated every time
//! the Unit is built. This is used for capturing the time when the build
//! starts, to detect if files are changed in the middle of the build. See
//! below for more details.
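//!
//! Put together, a unit's fingerprint directory might look like this (the
//! directory name and file names here are illustrative):
//!
//! ```text
//! target/debug/.fingerprint/foo-0d7c9f8a6b2e4d13/
//!     lib-foo              # the 16 hex-digit fingerprint hash
//!     lib-foo.json         # details used to log why a fingerprint is dirty
//!     dep-lib-foo          # translated dep-info
//!     invoked.timestamp
//! ```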
//!
//! Note that some units are a little different. A Unit for *running* a build
//! script or for `rustdoc` does not have a dep-info file (it's not
//! applicable). Build script `invoked.timestamp` files are in the build
//! output directory.
//!
//! ## Fingerprint calculation
//!
//! After the list of Units has been calculated, the Units are added to the
//! `JobQueue`. As each one is added, the fingerprint is calculated, and the
//! dirty/fresh status is recorded. A closure is used to update the fingerprint
//! on-disk when the Unit successfully finishes. The closure will recompute the
//! Fingerprint based on the updated information. If the Unit fails to compile,
//! the fingerprint is not updated.
//!
//! Fingerprints are cached in the `Context`. This makes computing
//! Fingerprints faster, but also is necessary for properly updating
//! dependency information. Since a Fingerprint includes the Fingerprints of
//! all dependencies, when it is updated, by using `Arc` clones, it
//! automatically picks up the updates to its dependencies.
//!
//! ### dep-info files
//!
//! Cargo passes the `--emit=dep-info` flag to `rustc` so that `rustc` will
//! generate a "dep info" file (with the `.d` extension). This is a
//! Makefile-like syntax that includes all of the source files used to build
//! the crate. This file is used by Cargo to know which files to check to see
//! if the crate will need to be rebuilt.
//!
//! After `rustc` exits successfully, Cargo will read the dep info file and
//! translate it into a binary format that is stored in the fingerprint
//! directory (`translate_dep_info`). The mtime of the fingerprint dep-info
//! file itself is used as the reference for comparing the source files to
//! determine if any of the source files have been modified (see below for
//! more detail). Note that Cargo parses the special `# env-dep:...` comments in
//! dep-info files to learn about environment variables that the rustc compile
//! depends on. Cargo then later uses this to trigger a recompile if a
//! referenced env var changes (even if the source didn't change).
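//!
//! For illustration only (paths and the env var are made up), a rustc `.d`
//! file is Makefile-like and might look roughly like:
//!
//! ```text
//! target/debug/deps/libfoo-abc123.rmeta: src/lib.rs src/util.rs
//! # env-dep:SOME_VAR=some value
//! ```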
//!
//! There is also a third dep-info file. Cargo will extend the file created by
//! rustc with some additional information and saves this into the output
//! directory. This is intended for build system integration. See the
//! `output_depinfo` module for more detail.
//!
//! #### -Zbinary-dep-depinfo
//!
//! `rustc` has an experimental flag `-Zbinary-dep-depinfo`. This causes
//! `rustc` to include binary files (like rlibs) in the dep-info file. This is
//! primarily to support rustc development, so that Cargo can check the
//! implicit dependency to the standard library (which lives in the sysroot).
//! We want Cargo to recompile whenever the standard library rlib/dylibs
//! change, and this is a generic mechanism to make that work.
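//!
//! With the flag enabled, the dep-info additionally lists binary
//! dependencies; roughly (illustrative paths):
//!
//! ```text
//! target/debug/deps/libfoo-abc123.rmeta: src/lib.rs \
//!   /sysroot/lib/rustlib/x86_64-unknown-linux-gnu/lib/libstd-1234.rlib
//! ```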
//!
//! ### Mtime comparison
//!
//! The use of modification timestamps is the most common way a unit will be
//! determined to be dirty or fresh between builds. There are many subtle
//! issues and edge cases with mtime comparisons. This gives a high-level
//! overview, but you'll need to read the code for the gritty details. Mtime
//! handling is different for different unit kinds. The different styles are
//! driven by the `Fingerprint.local` field, which is set based on the unit
//! kind.
//!
//! The status of whether or not the mtime is "stale" or "up-to-date" is
//! stored in `Fingerprint.fs_status`.
//!
//! All units will compare the mtime of its newest output file with the mtimes
//! of the outputs of all its dependencies. If any output file is missing,
//! then the unit is stale. If any dependency is newer, the unit is stale.
//!
//! #### Normal package mtime handling
//!
//! `LocalFingerprint::CheckDepinfo` is used for checking the mtime of
//! packages. It compares the mtime of the input files (the source files) to
//! the mtime of the dep-info file (which is written last after a build is
//! finished). If the dep-info is missing, the unit is stale (it has never
//! been built). The list of input files comes from the dep-info file. See the
//! section above for details on dep-info files.
//!
//! Also note that although registry and git packages use `CheckDepInfo`, none
//! of their source files are included in the dep-info (see
//! `translate_dep_info`), so for those kinds no mtime checking is done
//! (unless `-Zbinary-dep-depinfo` is used). Registry and git packages are
//! static, so there is no need to check anything.
//!
//! When a build is complete, the mtime of the dep-info file in the
//! fingerprint directory is modified to rewind it to the time when the build
//! started. This is done by creating an `invoked.timestamp` file when the
//! build starts to capture the start time. The mtime is rewound to the start
//! to handle the case where the user modifies a source file while a build is
//! running. Cargo can't know whether or not the file was included in the
//! build, so it takes a conservative approach of assuming the file was *not*
//! included, and it should be rebuilt during the next build.
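//!
//! A rough timeline of the rewinding behavior (times illustrative):
//!
//! ```text
//! t0: build starts; `invoked.timestamp` written (captures t0)
//! t1: rustc reads src/lib.rs
//! t2: user edits src/lib.rs mid-build
//! t3: build finishes; dep-info mtime rewound to t0
//!     next build: src/lib.rs (t2) is newer than dep-info (t0), so dirty
//! ```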
//!
//! #### Rustdoc mtime handling
//!
//! Rustdoc does not emit a dep-info file, so Cargo currently has a relatively
//! simple system for detecting rebuilds. `LocalFingerprint::Precalculated` is
//! used for rustdoc units. For registry packages, this is the package
//! version. For git packages, it is the git hash. For path packages, it is
//! a string of the mtime of the newest file in the package.
//!
//! There are some known bugs with how this works, so it should be improved at
//! some point.
//!
//! #### Build script mtime handling
//!
//! Build script mtime handling runs in different modes. There is the "old
//! style" where the build script does not emit any `rerun-if` directives. In
//! this mode, Cargo will use `LocalFingerprint::Precalculated`. See the
//! "rustdoc" section above how it works.
//!
//! In the new-style, each `rerun-if` directive is translated to the
//! corresponding `LocalFingerprint` variant. The `RerunIfChanged` variant
//! compares the mtime of the given filenames against the mtime of the
//! "output" file.
//!
//! Similar to normal units, the build script "output" file mtime is rewound
//! to the time just before the build script is executed to handle mid-build
//! modifications.
//!
//! ## Considerations for inclusion in a fingerprint
//!
//! Over time we've realized a few items which historically were included in
//! fingerprint hashings should not actually be included. Examples are:
//!
//! * Modification time values. We strive to never include a modification time
//! inside a `Fingerprint` to get hashed into an actual value. While
//! theoretically fine to do, in practice this causes issues with common
//! applications like Docker. Docker, after a layer is built, will zero out
//! the nanosecond part of all filesystem modification times. This means that
//! the actual modification time is different for all build artifacts, which
//! if we tracked the actual values of modification times would cause
//! unnecessary recompiles. To fix this we instead only track paths which are
//! relevant. These paths are checked dynamically to see if they're up to
//! date, and the modification time doesn't make its way into the fingerprint
//! hash.
//!
//! * Absolute path names. We strive to maintain a property where if you rename
//! a project directory Cargo will continue to preserve all build artifacts
//! and reuse the cache. This means that we can't ever hash an absolute path
//! name. Instead we always hash relative path names and the "root" is passed
//! in at runtime dynamically. Some of this is best effort, but the general
//! idea is that we assume all accesses within a crate stay within that
//!   crate. (A short sketch of this idea follows below.)
//!
//! These are pretty tricky to test for unfortunately, but we should have a good
//! test suite nowadays and lord knows Cargo gets enough testing in the wild!
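//!
//! A minimal sketch of the relative-path idea from the second bullet above
//! (a hypothetical helper, not Cargo's actual code):
//!
//! ```ignore
//! // Hash only the workspace-relative path so renaming the project
//! // directory does not change the resulting fingerprint.
//! fn hash_source_path(ws_root: &Path, src: &Path) -> u64 {
//!     let rel = src.strip_prefix(ws_root).unwrap_or(src);
//!     util::hash_u64(rel)
//! }
//! ```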
//!
//! ## Build scripts
//!
//! The *running* of a build script (`CompileMode::RunCustomBuild`) is treated
//! significantly different than all other Unit kinds. It has its own function
//! for calculating the Fingerprint (`calculate_run_custom_build`) and has some
//! unique considerations. It does not track the same information as a normal
//! Unit. The information tracked depends on the `rerun-if-changed` and
//! `rerun-if-env-changed` statements produced by the build script. If the
//! script does not emit either of these statements, the Fingerprint runs in
//! "old style" mode where an mtime change of *any* file in the package will
//! cause the build script to be re-run. Otherwise, the fingerprint *only*
//! tracks the individual "rerun-if" items listed by the build script.
//!
//! The "rerun-if" statements from a *previous* build are stored in the build
//! output directory in a file called `output`. Cargo parses this file when
//! the Unit for that build script is prepared for the `JobQueue`. The
//! Fingerprint code can then use that information to compute the Fingerprint
//! and compare against the old fingerprint hash.
//!
//! Care must be taken with build script Fingerprints because the
//! `Fingerprint::local` value may be changed after the build script runs
//! (such as if the build script adds or removes "rerun-if" items).
//!
//! Another complication is if a build script is overridden. In that case, the
//! fingerprint is the hash of the output of the override.
//!
//! ## Special considerations
//!
//! Registry dependencies do not track the mtime of files. This is because
//! registry dependencies are not expected to change (if a new version is
//! used, the Package ID will change, causing a rebuild). Cargo currently
//! partially works with Docker caching. When a Docker image is built, it has
//! normal mtime information. However, when a step is cached, the nanosecond
//! portions of all files are zeroed out. Currently this works, but care must
//! be taken for situations like these.
//!
//! HFS on macOS only supports 1 second timestamps. This causes a significant
//! number of problems, particularly with Cargo's testsuite which does rapid
//! builds in succession. Other filesystems have various degrees of
//! resolution.
//!
//! Various weird filesystems (such as network filesystems) also can cause
//! complications. Network filesystems may track the time on the server
//! (except when the time is set manually such as with
//! `filetime::set_file_times`). Not all filesystems support modifying the
//! mtime.
//!
//! See the `A-rebuild-detection` flag on the issue tracker for more:
//! <https://github.com/rust-lang/cargo/issues?q=is%3Aissue+is%3Aopen+label%3AA-rebuild-detection>
use std::collections::hash_map::{Entry, HashMap};
use std::convert::TryInto;
use std::env;
use std::hash::{self, Hasher};
use std::path::{Path, PathBuf};
use std::str;
use std::sync::{Arc, Mutex};
use std::time::SystemTime;
use anyhow::{bail, format_err};
use filetime::FileTime;
use log::{debug, info};
use serde::de;
use serde::ser;
use serde::{Deserialize, Serialize};
use crate::core::compiler::unit_graph::UnitDep;
use crate::core::Package;
use crate::util;
use crate::util::errors::{CargoResult, CargoResultExt};
use crate::util::interning::InternedString;
use crate::util::paths;
use crate::util::{internal, profile, ProcessBuilder};
use super::custom_build::BuildDeps;
use super::job::{Job, Work};
use super::{BuildContext, Context, FileFlavor, Unit};
/// Determines if a `unit` is up-to-date, and if not prepares necessary work to
/// update the persisted fingerprint.
///
/// This function will inspect `unit`, calculate a fingerprint for it, and then
/// return an appropriate `Job` to run. The returned `Job` will be a noop if
/// `unit` is considered "fresh", or if it was previously built and cached.
/// Otherwise the `Job` returned will write out the true fingerprint to the
/// filesystem, to be executed after the unit's work has completed.
///
/// The `force` flag is a way to force the `Job` to be "dirty", or always
/// update the fingerprint. **Beware using this flag** because it does not
/// transitively propagate throughout the dependency graph, it only forces this
/// one unit which is very unlikely to be what you want unless you're
/// exclusively talking about top-level units.
pub fn prepare_target(cx: &mut Context<'_, '_>, unit: &Unit, force: bool) -> CargoResult<Job> {
let _p = profile::start(format!(
"fingerprint: {} / {}",
unit.pkg.package_id(),
unit.target.name()
));
let bcx = cx.bcx;
let loc = cx.files().fingerprint_file_path(unit, "");
debug!("fingerprint at: {}", loc.display());
// Figure out if this unit is up to date. After calculating the fingerprint
// compare it to an old version, if any, and attempt to print diagnostic
// information about failed comparisons to aid in debugging.
let fingerprint = calculate(cx, unit)?;
let mtime_on_use = cx.bcx.config.cli_unstable().mtime_on_use;
let compare = compare_old_fingerprint(&loc, &*fingerprint, mtime_on_use);
log_compare(unit, &compare);
// If our comparison failed (e.g., we're going to trigger a rebuild of this
// crate), then we also ensure the source of the crate passes all
// verification checks before we build it.
//
// The `Source::verify` method is intended to allow sources to execute
// pre-build checks to ensure that the relevant source code is all
// up-to-date and as expected. This is currently used primarily for
// directory sources which will use this hook to perform an integrity check
// on all files in the source to ensure they haven't changed. If they have
// changed then an error is issued.
if compare.is_err() {
let source_id = unit.pkg.package_id().source_id();
let sources = bcx.packages.sources();
let source = sources
.get(source_id)
.ok_or_else(|| internal("missing package source"))?;
source.verify(unit.pkg.package_id())?;
}
if compare.is_ok() && !force {
return Ok(Job::new_fresh());
}
// Clear out the old fingerprint file if it exists. This protects when
// compilation is interrupted leaving a corrupt file. For example, a
// project with a lib.rs and integration test (two units):
//
// 1. Build the library and integration test.
// 2. Make a change to lib.rs (NOT the integration test).
// 3. Build the integration test, hit Ctrl-C while linking. With gcc, this
// will leave behind an incomplete executable (zero size, or partially
// written). NOTE: The library builds successfully, it is the linking
// of the integration test that we are interrupting.
// 4. Build the integration test again.
//
// Without the following line, then step 3 will leave a valid fingerprint
// on the disk. Then step 4 will think the integration test is "fresh"
// because:
//
// - There is a valid fingerprint hash on disk (written in step 1).
// - The mtime of the output file (the corrupt integration executable
// written in step 3) is newer than all of its dependencies.
// - The mtime of the integration test fingerprint dep-info file (written
// in step 1) is newer than the integration test's source files, because
// we haven't modified any of its source files.
//
// But the executable is corrupt and needs to be rebuilt. Clearing the
// fingerprint at step 3 ensures that Cargo never mistakes a partially
// written output as up-to-date.
if loc.exists() {
// Truncate instead of delete so that compare_old_fingerprint will
// still log the reason for the fingerprint failure instead of just
// reporting "failed to read fingerprint" during the next build if
// this build fails.
paths::write(&loc, b"")?;
}
let write_fingerprint = if unit.mode.is_run_custom_build() {
// For build scripts the `local` field of the fingerprint may change
// while we're executing it. For example it could be in the legacy
// "consider everything a dependency mode" and then we switch to "deps
// are explicitly specified" mode.
//
// To handle this movement we need to regenerate the `local` field of a
// build script's fingerprint after it's executed. We do this by
// using the `build_script_local_fingerprints` function which returns a
// thunk we can invoke on a foreign thread to calculate this.
let build_script_outputs = Arc::clone(&cx.build_script_outputs);
let pkg_id = unit.pkg.package_id();
let metadata = cx.get_run_build_script_metadata(unit);
let (gen_local, _overridden) = build_script_local_fingerprints(cx, unit);
let output_path = cx.build_explicit_deps[unit].build_script_output.clone();
Work::new(move |_| {
let outputs = build_script_outputs.lock().unwrap();
let output = outputs
.get(pkg_id, metadata)
.expect("output must exist after running");
let deps = BuildDeps::new(&output_path, Some(output));
// FIXME: it's basically buggy that we pass `None` to `call_box`
// here. See documentation on `build_script_local_fingerprints`
// below for more information. Despite this just try to proceed and
// hobble along if it happens to return `Some`.
if let Some(new_local) = (gen_local)(&deps, None)? {
*fingerprint.local.lock().unwrap() = new_local;
}
write_fingerprint(&loc, &fingerprint)
})
} else {
Work::new(move |_| write_fingerprint(&loc, &fingerprint))
};
Ok(Job::new_dirty(write_fingerprint))
}
/// Dependency edge information for fingerprints. This is generated for each
/// dependency and is stored in a `Fingerprint` below.
#[derive(Clone)]
struct DepFingerprint {
/// The hash of the package id that this dependency points to
pkg_id: u64,
/// The crate name we're using for this dependency, which if we change we'll
/// need to recompile!
name: InternedString,
/// Whether or not this dependency is flagged as a public dependency or not.
public: bool,
/// Whether or not this dependency is an rmeta dependency or a "full"
/// dependency. In the case of an rmeta dependency our dependency edge only
/// actually requires the rmeta from what we depend on, so when checking
/// mtime information all files other than the rmeta can be ignored.
only_requires_rmeta: bool,
/// The dependency's fingerprint we recursively point to, containing all the
/// other hash information we'd otherwise need.
fingerprint: Arc<Fingerprint>,
}
/// A fingerprint can be considered to be a "short string" representing the
/// state of a world for a package.
///
/// If a fingerprint ever changes, then the package itself needs to be
/// recompiled. Inputs to the fingerprint include source code modifications,
/// compiler flags, compiler version, etc. This structure is not simply a
/// `String` due to the fact that some fingerprints cannot be calculated lazily.
///
/// Path sources, for example, use the mtime of the corresponding dep-info file
/// as a fingerprint (all source files must be modified *before* this mtime).
/// This dep-info file is not generated, however, until after the crate is
/// compiled. As a result, this structure can be thought of as a fingerprint
/// to-be. The actual value can be calculated via `hash()`, but the operation
/// may fail as some files may not have been generated.
///
/// Note that dependencies are taken into account for fingerprints because rustc
/// requires that whenever an upstream crate is recompiled that all downstream
/// dependents are also recompiled. This is typically tracked through
/// `DependencyQueue`, but it also needs to be retained here because Cargo can
/// be interrupted while executing, losing the state of the `DependencyQueue`
/// graph.
#[derive(Serialize, Deserialize)]
pub struct Fingerprint {
/// Hash of the version of `rustc` used.
rustc: u64,
/// Sorted list of cfg features enabled.
features: String,
/// Hash of the `Target` struct, including the target name,
/// package-relative source path, edition, etc.
target: u64,
/// Hash of the `Profile`, `CompileMode`, and any extra flags passed via
/// `cargo rustc` or `cargo rustdoc`.
profile: u64,
/// Hash of the path to the base source file. This is relative to the
/// workspace root for path members, or absolute for other sources.
path: u64,
/// Fingerprints of dependencies.
deps: Vec<DepFingerprint>,
/// Information about the inputs that affect this Unit (such as source
/// file mtimes or build script environment variables).
local: Mutex<Vec<LocalFingerprint>>,
/// Cached hash of the `Fingerprint` struct. Used to improve performance
/// for hashing.
#[serde(skip)]
memoized_hash: Mutex<Option<u64>>,
/// RUSTFLAGS/RUSTDOCFLAGS environment variable value (or config value).
rustflags: Vec<String>,
/// Hash of some metadata from the manifest, such as "authors", or
/// "description", which are exposed as environment variables during
/// compilation.
metadata: u64,
/// Hash of various config settings that change how things are compiled.
config: u64,
/// Description of whether the filesystem status for this unit is up to date
/// or should be considered stale.
#[serde(skip)]
fs_status: FsStatus,
/// Files, relative to `target_root`, that are produced by the step that
/// this `Fingerprint` represents. This is used to detect when the whole
/// fingerprint is out of date if this is missing, or if previous
/// fingerprints output files are regenerated and look newer than this one.
#[serde(skip)]
outputs: Vec<PathBuf>,
}
/// Indication of the status on the filesystem for a particular unit.
enum FsStatus {
/// This unit is to be considered stale, even if hash information all
/// matches. The filesystem inputs have changed (or are missing) and the
/// unit needs to subsequently be recompiled.
Stale,
/// This unit is up-to-date. All outputs and their corresponding mtime are
/// listed in the payload here for other dependencies to compare against.
UpToDate { mtimes: HashMap<PathBuf, FileTime> },
}
impl FsStatus {
fn up_to_date(&self) -> bool {
match self {
FsStatus::UpToDate { .. } => true,
FsStatus::Stale => false,
}
}
}
impl Default for FsStatus {
fn default() -> FsStatus {
FsStatus::Stale
}
}
impl Serialize for DepFingerprint {
fn serialize<S>(&self, ser: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
(
&self.pkg_id,
&self.name,
&self.public,
&self.fingerprint.hash(),
)
.serialize(ser)
}
}
impl<'de> Deserialize<'de> for DepFingerprint {
fn deserialize<D>(d: D) -> Result<DepFingerprint, D::Error>
where
D: de::Deserializer<'de>,
{
let (pkg_id, name, public, hash) = <(u64, String, bool, u64)>::deserialize(d)?;
Ok(DepFingerprint {
pkg_id,
name: InternedString::new(&name),
public,
fingerprint: Arc::new(Fingerprint {
memoized_hash: Mutex::new(Some(hash)),
..Fingerprint::new()
}),
// This field is never read since it's only used in
// `check_filesystem` which isn't used by fingerprints loaded from
// disk.
only_requires_rmeta: false,
})
}
}
/// A `LocalFingerprint` represents something that we use to detect direct
/// changes to a `Fingerprint`.
///
/// This is where we track file information, env vars, etc. This
/// `LocalFingerprint` struct is hashed and if the hash changes will force a
/// recompile of any fingerprint it's included into. Note that the "local"
/// terminology comes from the fact that it only has to do with one crate, and
/// `Fingerprint` tracks the transitive propagation of fingerprint changes.
///
/// Note that because this is hashed its contents are carefully managed. Like
/// mentioned in the above module docs, we don't want to hash absolute paths or
/// mtime information.
///
/// Also note that a `LocalFingerprint` is used in `check_filesystem` to detect
/// when the filesystem contains stale information (based on mtime currently).
/// The paths here don't change much between compilations but they're used as
/// inputs when we probe the filesystem looking at information.
#[derive(Debug, Serialize, Deserialize, Hash)]
enum LocalFingerprint {
/// This is a precalculated fingerprint which has an opaque string we just
/// hash as usual. This variant is primarily used for rustdoc where we
/// don't have a dep-info file to compare against.
///
/// This is also used for build scripts with no `rerun-if-*` statements, but
/// that's overall a mistake and causes bugs in Cargo. We shouldn't use this
/// for build scripts.
Precalculated(String),
/// This is used for crate compilations. The `dep_info` file is a relative
/// path anchored at `target_root(...)` to the dep-info file that Cargo
/// generates (which is a custom serialization after parsing rustc's own
/// `dep-info` output).
///
/// The `dep_info` file, when present, also lists a number of other files
/// for us to look at. If any of those files are newer than this file then
/// we need to recompile.
CheckDepInfo { dep_info: PathBuf },
/// This represents a nonempty set of `rerun-if-changed` annotations printed
/// out by a build script. The `output` file is a relative file anchored at
/// `target_root(...)` which is the actual output of the build script. That
/// output has already been parsed and the paths printed out via
/// `rerun-if-changed` are listed in `paths`. The `paths` field is relative
/// to `pkg.root()`
///
/// This is considered up-to-date if all of the `paths` are older than
/// `output`, otherwise we need to recompile.
RerunIfChanged {
output: PathBuf,
paths: Vec<PathBuf>,
},
/// This represents a single `rerun-if-env-changed` annotation printed by a
/// build script. The exact env var and value are hashed here. There's no
/// filesystem dependence here, and if the values are changed the hash will
/// change forcing a recompile.
RerunIfEnvChanged { var: String, val: Option<String> },
}
enum StaleItem {
MissingFile(PathBuf),
ChangedFile {
reference: PathBuf,
reference_mtime: FileTime,
stale: PathBuf,
stale_mtime: FileTime,
},
ChangedEnv {
var: String,
previous: Option<String>,
current: Option<String>,
},
}
impl LocalFingerprint {
/// Checks dynamically at runtime if this `LocalFingerprint` has a stale
/// item inside of it.
///
/// The main purpose of this function is to handle two different ways
/// fingerprints can be invalidated:
///
/// * One is a dependency listed in rustc's dep-info files is invalid. Note
/// that these could either be env vars or files. We check both here.
///
/// * Another is the `rerun-if-changed` directive from build scripts. This
/// is where we'll find whether files have actually changed
fn find_stale_item(
&self,
mtime_cache: &mut HashMap<PathBuf, FileTime>,
pkg_root: &Path,
target_root: &Path,
) -> CargoResult<Option<StaleItem>> {
match self {
            // We need to parse `dep_info` to learn about the crate's dependencies.
//
// For each env var we see if our current process's env var still
// matches, and for each file we see if any of them are newer than
// the `dep_info` file itself whose mtime represents the start of
// rustc.
LocalFingerprint::CheckDepInfo { dep_info } => {
let dep_info = target_root.join(dep_info);
let info = match parse_dep_info(pkg_root, target_root, &dep_info)? {
Some(info) => info,
None => return Ok(Some(StaleItem::MissingFile(dep_info))),
};
for (key, previous) in info.env.iter() {
let current = env::var(key).ok();
if current == *previous {
continue;
}
return Ok(Some(StaleItem::ChangedEnv {
var: key.clone(),
previous: previous.clone(),
current,
}));
}
Ok(find_stale_file(mtime_cache, &dep_info, info.files.iter()))
}
// We need to verify that no paths listed in `paths` are newer than
// the `output` path itself, or the last time the build script ran.
LocalFingerprint::RerunIfChanged { output, paths } => Ok(find_stale_file(
mtime_cache,
&target_root.join(output),
paths.iter().map(|p| pkg_root.join(p)),
)),
// These have no dependencies on the filesystem, and their values
// are included natively in the `Fingerprint` hash so nothing
            // to check for here.
LocalFingerprint::RerunIfEnvChanged { .. } => Ok(None),
LocalFingerprint::Precalculated(..) => Ok(None),
}
}
fn kind(&self) -> &'static str {
match self {
LocalFingerprint::Precalculated(..) => "precalculated",
LocalFingerprint::CheckDepInfo { .. } => "dep-info",
LocalFingerprint::RerunIfChanged { .. } => "rerun-if-changed",
LocalFingerprint::RerunIfEnvChanged { .. } => "rerun-if-env-changed",
}
}
}
#[derive(Debug)]
struct MtimeSlot(Mutex<Option<FileTime>>);
impl Fingerprint {
fn new() -> Fingerprint {
Fingerprint {
rustc: 0,
target: 0,
profile: 0,
path: 0,
features: String::new(),
deps: Vec::new(),
local: Mutex::new(Vec::new()),
memoized_hash: Mutex::new(None),
rustflags: Vec::new(),
metadata: 0,
config: 0,
fs_status: FsStatus::Stale,
outputs: Vec::new(),
}
}
/// For performance reasons fingerprints will memoize their own hash, but
/// there's also internal mutability with its `local` field which can
/// change, for example with build scripts, during a build.
///
/// This method can be used to bust all memoized hashes just before a build
/// to ensure that after a build completes everything is up-to-date.
pub fn clear_memoized(&self) {
*self.memoized_hash.lock().unwrap() = None;
}
fn hash(&self) -> u64 {
if let Some(s) = *self.memoized_hash.lock().unwrap() {
return s;
}
let ret = util::hash_u64(self);
*self.memoized_hash.lock().unwrap() = Some(ret);
ret
}
/// Compares this fingerprint with an old version which was previously
/// serialized to filesystem.
///
/// The purpose of this is exclusively to produce a diagnostic message
/// indicating why we're recompiling something. This function always returns
/// an error, it will never return success.
fn compare(&self, old: &Fingerprint) -> CargoResult<()> {
if self.rustc != old.rustc {
bail!("rust compiler has changed")
}
if self.features != old.features {
bail!(
"features have changed: {} != {}",
self.features,
old.features
)
}
if self.target != old.target {
bail!("target configuration has changed")
}
if self.path != old.path {
bail!("path to the source has changed")
}
if self.profile != old.profile {
bail!("profile configuration has changed")
}
if self.rustflags != old.rustflags {
bail!(
"RUSTFLAGS has changed: {:?} != {:?}",
self.rustflags,
old.rustflags
)
}
if self.metadata != old.metadata {
bail!("metadata changed")
}
if self.config != old.config {
bail!("configuration settings have changed")
}
let my_local = self.local.lock().unwrap();
let old_local = old.local.lock().unwrap();
if my_local.len() != old_local.len() {
bail!("local lens changed");
}
for (new, old) in my_local.iter().zip(old_local.iter()) {
match (new, old) {
(LocalFingerprint::Precalculated(a), LocalFingerprint::Precalculated(b)) => {
if a != b {
bail!("precalculated components have changed: {} != {}", a, b)
}
}
(
LocalFingerprint::CheckDepInfo { dep_info: adep },
LocalFingerprint::CheckDepInfo { dep_info: bdep },
) => {
if adep != bdep {
bail!("dep info output changed: {:?} != {:?}", adep, bdep)
}
}
(
LocalFingerprint::RerunIfChanged {
output: aout,
paths: apaths,
},
LocalFingerprint::RerunIfChanged {
output: bout,
paths: bpaths,
},
) => {
if aout != bout {
bail!("rerun-if-changed output changed: {:?} != {:?}", aout, bout)
}
if apaths != bpaths {
bail!(
"rerun-if-changed output changed: {:?} != {:?}",
apaths,
bpaths,
)
}
}
(
LocalFingerprint::RerunIfEnvChanged {
var: akey,
val: avalue,
},
LocalFingerprint::RerunIfEnvChanged {
var: bkey,
val: bvalue,
},
) => {
if *akey != *bkey {
bail!("env vars changed: {} != {}", akey, bkey);
}
if *avalue != *bvalue {
bail!(
"env var `{}` changed: previously {:?} now {:?}",
akey,
bvalue,
avalue
)
}
}
(a, b) => bail!(
"local fingerprint type has changed ({} => {})",
b.kind(),
a.kind()
),
}
}
if self.deps.len() != old.deps.len() {
bail!("number of dependencies has changed")
}
for (a, b) in self.deps.iter().zip(old.deps.iter()) {
if a.name != b.name {
let e = format_err!("`{}` != `{}`", a.name, b.name)
.context("unit dependency name changed");
return Err(e);
}
if a.fingerprint.hash() != b.fingerprint.hash() {
let e = format_err!(
"new ({}/{:x}) != old ({}/{:x})",
a.name,
a.fingerprint.hash(),
b.name,
b.fingerprint.hash()
)
.context("unit dependency information changed");
return Err(e);
}
}
if !self.fs_status.up_to_date() {
bail!("current filesystem status shows we're outdated");
}
// This typically means some filesystem modifications happened or
// something transitive was odd. In general we should strive to provide
// a better error message than this, so if you see this message a lot it
// likely means this method needs to be updated!
bail!("two fingerprint comparison turned up nothing obvious");
}
/// Dynamically inspect the local filesystem to update the `fs_status` field
/// of this `Fingerprint`.
///
/// This function is used just after a `Fingerprint` is constructed to check
/// the local state of the filesystem and propagate any dirtiness from
/// dependencies up to this unit as well. This function assumes that the
/// unit starts out as `FsStatus::Stale` and then it will optionally switch
/// it to `UpToDate` if it can.
fn check_filesystem(
&mut self,
mtime_cache: &mut HashMap<PathBuf, FileTime>,
pkg_root: &Path,
target_root: &Path,
) -> CargoResult<()> {
assert!(!self.fs_status.up_to_date());
let mut mtimes = HashMap::new();
// Get the `mtime` of all outputs. Optionally update their mtime
// afterwards based on the `mtime_on_use` flag. Afterwards we want the
// minimum mtime as it's the one we'll be comparing to inputs and
// dependencies.
for output in self.outputs.iter() {
let mtime = match paths::mtime(output) {
Ok(mtime) => mtime,
// This path failed to report its `mtime`. It probably doesn't
                // exist, so leave ourselves as stale and bail out.
Err(e) => {
debug!("failed to get mtime of {:?}: {}", output, e);
return Ok(());
}
};
assert!(mtimes.insert(output.clone(), mtime).is_none());
}
let opt_max = mtimes.iter().max_by_key(|kv| kv.1);
let (max_path, max_mtime) = match opt_max {
Some(mtime) => mtime,
// We had no output files. This means we're an overridden build
// script and we're just always up to date because we aren't
// watching the filesystem.
None => {
self.fs_status = FsStatus::UpToDate { mtimes };
return Ok(());
}
};
debug!(
"max output mtime for {:?} is {:?} {}",
pkg_root, max_path, max_mtime
);
for dep in self.deps.iter() {
let dep_mtimes = match &dep.fingerprint.fs_status {
FsStatus::UpToDate { mtimes } => mtimes,
// If our dependency is stale, so are we, so bail out.
FsStatus::Stale => return Ok(()),
};
// If our dependency edge only requires the rmeta file to be present
// then we only need to look at that one output file, otherwise we
// need to consider all output files to see if we're out of date.
let (dep_path, dep_mtime) = if dep.only_requires_rmeta {
dep_mtimes
.iter()
.find(|(path, _mtime)| {
path.extension().and_then(|s| s.to_str()) == Some("rmeta")
})
.expect("failed to find rmeta")
} else {
match dep_mtimes.iter().max_by_key(|kv| kv.1) {
Some(dep_mtime) => dep_mtime,
                // If our dependency is up to date and has no filesystem
// interactions, then we can move on to the next dependency.
None => continue,
}
};
debug!(
"max dep mtime for {:?} is {:?} {}",
pkg_root, dep_path, dep_mtime
);
// If the dependency is newer than our own output then it was
// recompiled previously. We transitively become stale ourselves in
// that case, so bail out.
//
// Note that this comparison should probably be `>=`, not `>`, but
// for a discussion of why it's `>` see the discussion about #5918
// below in `find_stale`.
if dep_mtime > max_mtime {
info!(
"dependency on `{}` is newer than we are {} > {} {:?}",
dep.name, dep_mtime, max_mtime, pkg_root
);
return Ok(());
}
}
// If we reached this far then all dependencies are up to date. Check
// all our `LocalFingerprint` information to see if we have any stale
// files for this package itself. If we do find something log a helpful
// message and bail out so we stay stale.
for local in self.local.get_mut().unwrap().iter() {
if let Some(item) = local.find_stale_item(mtime_cache, pkg_root, target_root)? {
item.log();
return Ok(());
}
}
// Everything was up to date! Record such.
self.fs_status = FsStatus::UpToDate { mtimes };
debug!("filesystem up-to-date {:?}", pkg_root);
Ok(())
}
}
impl hash::Hash for Fingerprint {
fn hash<H: Hasher>(&self, h: &mut H) {
let Fingerprint {
rustc,
ref features,
target,
path,
profile,
ref deps,
ref local,
metadata,
config,
ref rustflags,
..
} = *self;
let local = local.lock().unwrap();
(
rustc, features, target, path, profile, &*local, metadata, config, rustflags,
)
.hash(h);
h.write_usize(deps.len());
for DepFingerprint {
pkg_id,
name,
public,
fingerprint,
only_requires_rmeta: _, // static property, no need to hash
} in deps
{
pkg_id.hash(h);
name.hash(h);
public.hash(h);
// use memoized dep hashes to avoid exponential blowup
h.write_u64(Fingerprint::hash(fingerprint));
}
}
}
impl hash::Hash for MtimeSlot {
fn hash<H: Hasher>(&self, h: &mut H) {
self.0.lock().unwrap().hash(h)
}
}
impl ser::Serialize for MtimeSlot {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
self.0
.lock()
.unwrap()
.map(|ft| (ft.unix_seconds(), ft.nanoseconds()))
.serialize(s)
}
}
impl<'de> de::Deserialize<'de> for MtimeSlot {
fn deserialize<D>(d: D) -> Result<MtimeSlot, D::Error>
where
D: de::Deserializer<'de>,
{
let kind: Option<(i64, u32)> = de::Deserialize::deserialize(d)?;
Ok(MtimeSlot(Mutex::new(
kind.map(|(s, n)| FileTime::from_unix_time(s, n)),
)))
}
}
impl DepFingerprint {
fn new(cx: &mut Context<'_, '_>, parent: &Unit, dep: &UnitDep) -> CargoResult<DepFingerprint> {
let fingerprint = calculate(cx, &dep.unit)?;
// We need to be careful about what we hash here. We have a goal of
// supporting renaming a project directory and not rebuilding
// everything. To do that, however, we need to make sure that the cwd
// doesn't make its way into any hashes, and one source of that is the
// `SourceId` for `path` packages.
//
// We already have a requirement that `path` packages all have unique
// names (sort of for this same reason), so if the package source is a
// `path` then we just hash the name, but otherwise we hash the full
// id as it won't change when the directory is renamed.
let pkg_id = if dep.unit.pkg.package_id().source_id().is_path() {
util::hash_u64(dep.unit.pkg.package_id().name())
} else {
util::hash_u64(dep.unit.pkg.package_id())
};
Ok(DepFingerprint {
pkg_id,
name: dep.extern_crate_name,
public: dep.public,
fingerprint,
only_requires_rmeta: cx.only_requires_rmeta(parent, &dep.unit),
})
}
}
impl StaleItem {
/// Use the `log` crate to log a hopefully helpful message in diagnosing
/// what file is considered stale and why. This is intended to be used in
/// conjunction with `CARGO_LOG` to determine why Cargo is recompiling
/// something. Currently there's no user-facing usage of this other than
/// that.
fn log(&self) {
match self {
StaleItem::MissingFile(path) => {
info!("stale: missing {:?}", path);
}
StaleItem::ChangedFile {
reference,
reference_mtime,
stale,
stale_mtime,
} => {
info!("stale: changed {:?}", stale);
info!(" (vs) {:?}", reference);
info!(" {:?} != {:?}", reference_mtime, stale_mtime);
}
StaleItem::ChangedEnv {
var,
previous,
current,
} => {
info!("stale: changed env {:?}", var);
info!(" {:?} != {:?}", previous, current);
}
}
}
}
/// Calculates the fingerprint for a `unit`.
///
/// This fingerprint is used by Cargo to learn about when information such as:
///
/// * A non-path package changes (changes version, changes revision, etc).
/// * Any dependency changes
/// * The compiler changes
/// * The set of features a package is built with changes
/// * The profile a target is compiled with changes (e.g., opt-level changes)
/// * Any other compiler flags change that will affect the result
///
/// Information like file modification time is only calculated for path
/// dependencies.
fn calculate(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<Arc<Fingerprint>> {
// This function is slammed quite a lot, so the result is memoized.
if let Some(s) = cx.fingerprints.get(unit) {
return Ok(Arc::clone(s));
}
let mut fingerprint = if unit.mode.is_run_custom_build() {
calculate_run_custom_build(cx, unit)?
} else if unit.mode.is_doc_test() {
panic!("doc tests do not fingerprint");
} else {
calculate_normal(cx, unit)?
};
// After we built the initial `Fingerprint` be sure to update the
// `fs_status` field of it.
let target_root = target_root(cx);
fingerprint.check_filesystem(&mut cx.mtime_cache, unit.pkg.root(), &target_root)?;
let fingerprint = Arc::new(fingerprint);
cx.fingerprints
.insert(unit.clone(), Arc::clone(&fingerprint));
Ok(fingerprint)
}
/// Calculate a fingerprint for a "normal" unit, or anything that's not a build
/// script. This is an internal helper of `calculate`, don't call directly.
fn calculate_normal(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<Fingerprint> {
// Recursively calculate the fingerprint for all of our dependencies.
//
// Skip fingerprints of binaries because they don't actually induce a
// recompile, they're just dependencies in the sense that they need to be
// built.
//
// Create Vec since mutable cx is needed in closure.
let deps = Vec::from(cx.unit_deps(unit));
let mut deps = deps
.into_iter()
.filter(|dep| !dep.unit.target.is_bin())
.map(|dep| DepFingerprint::new(cx, unit, &dep))
.collect::<CargoResult<Vec<_>>>()?;
deps.sort_by(|a, b| a.pkg_id.cmp(&b.pkg_id));
// Afterwards calculate our own fingerprint information.
let target_root = target_root(cx);
let local = if unit.mode.is_doc() {
// rustdoc does not have dep-info files.
let fingerprint = pkg_fingerprint(cx.bcx, &unit.pkg).chain_err(|| {
format!(
"failed to determine package fingerprint for documenting {}",
unit.pkg
)
})?;
vec![LocalFingerprint::Precalculated(fingerprint)]
} else {
let dep_info = dep_info_loc(cx, unit);
let dep_info = dep_info.strip_prefix(&target_root).unwrap().to_path_buf();
vec![LocalFingerprint::CheckDepInfo { dep_info }]
};
// Figure out what the outputs of our unit is, and we'll be storing them
// into the fingerprint as well.
let outputs = cx
.outputs(unit)?
.iter()
.filter(|output| !matches!(output.flavor, FileFlavor::DebugInfo | FileFlavor::Auxiliary))
.map(|output| output.path.clone())
.collect();
    // Fill out a bunch more information that we'll be tracking, typically
    // hashed to take up less space on disk since we just need to know when
    // things change.
let extra_flags = if unit.mode.is_doc() {
cx.bcx.rustdocflags_args(unit)
} else {
cx.bcx.rustflags_args(unit)
}
.to_vec();
let profile_hash = util::hash_u64((
&unit.profile,
unit.mode,
cx.bcx.extra_args_for(unit),
cx.lto[unit],
));
// Include metadata since it is exposed as environment variables.
let m = unit.pkg.manifest().metadata();
let metadata = util::hash_u64((&m.authors, &m.description, &m.homepage, &m.repository));
let config = if unit.mode.is_doc() && cx.bcx.config.cli_unstable().rustdoc_map {
cx.bcx
.config
.doc_extern_map()
.map_or(0, |map| util::hash_u64(map))
} else {
0
};
Ok(Fingerprint {
rustc: util::hash_u64(&cx.bcx.rustc().verbose_version),
target: util::hash_u64(&unit.target),
profile: profile_hash,
// Note that .0 is hashed here, not .1 which is the cwd. That doesn't
// actually affect the output artifact so there's no need to hash it.
path: util::hash_u64(super::path_args(cx.bcx, unit).0),
features: format!("{:?}", unit.features),
deps,
local: Mutex::new(local),
memoized_hash: Mutex::new(None),
metadata,
config,
rustflags: extra_flags,
fs_status: FsStatus::Stale,
outputs,
})
}
/// Calculate a fingerprint for an "execute a build script" unit. This is an
/// internal helper of `calculate`, don't call directly.
fn calculate_run_custom_build(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<Fingerprint> {
assert!(unit.mode.is_run_custom_build());
    // Using the `BuildDeps` information we previously parsed and inserted
    // into `build_explicit_deps`, build an initial snapshot of the
// `LocalFingerprint` list for this build script. If we previously executed
// the build script this means we'll be watching files and env vars.
// Otherwise if we haven't previously executed it we'll just start watching
// the whole crate.
let (gen_local, overridden) = build_script_local_fingerprints(cx, unit);
let deps = &cx.build_explicit_deps[unit];
let local = (gen_local)(
deps,
Some(&|| {
pkg_fingerprint(cx.bcx, &unit.pkg).chain_err(|| {
format!(
"failed to determine package fingerprint for build script for {}",
unit.pkg
)
})
}),
)?
.unwrap();
let output = deps.build_script_output.clone();
// Include any dependencies of our execution, which is typically just the
    // compilation of the build script itself (if the build script changes, we
    // should be rerun!). Note though that if we're an overridden build script
// we have no dependencies so no need to recurse in that case.
let deps = if overridden {
// Overridden build scripts don't need to track deps.
vec![]
} else {
// Create Vec since mutable cx is needed in closure.
let deps = Vec::from(cx.unit_deps(unit));
deps.into_iter()
.map(|dep| DepFingerprint::new(cx, unit, &dep))
.collect::<CargoResult<Vec<_>>>()?
};
Ok(Fingerprint {
local: Mutex::new(local),
rustc: util::hash_u64(&cx.bcx.rustc().verbose_version),
deps,
outputs: if overridden { Vec::new() } else { vec![output] },
// Most of the other info is blank here as we don't really include it
// in the execution of the build script, but... this may be a latent
// bug in Cargo.
..Fingerprint::new()
})
}
/// Get ready to compute the `LocalFingerprint` values for a `RunCustomBuild`
/// unit.
///
/// This function has, what's on the surface, a seriously wonky interface.
/// You'll call this function and it'll return a closure and a boolean. The
/// boolean is pretty simple in that it indicates whether the `unit` has been
/// overridden via `.cargo/config`. The closure is much more complicated.
///
/// This closure is intended to capture any local state necessary to compute
/// the `LocalFingerprint` values for this unit. It is `Send` and `'static` to
/// be sent to other threads as well (such as when we're executing build
/// scripts). That deduplication is the rationale for the closure at least.
///
/// The arguments to the closure are a bit weirder, though, and I'll apologize
/// in advance for the weirdness too. The first argument to the closure is a
/// `&BuildDeps`. This is the parsed version of a build script, and when Cargo
/// starts up this is cached from previous runs of a build script. After a
/// build script executes the output file is reparsed and passed in here.
///
/// The second argument is the weirdest, it's *optionally* a closure to
/// call `pkg_fingerprint` below. The `pkg_fingerprint` below requires access
/// to "source map" located in `Context`. That's very non-`'static` and
/// non-`Send`, so it can't be used on other threads, such as when we invoke
/// this after a build script has finished. The `Option` allows us to for sure
/// calculate it on the main thread at the beginning, and then swallow the bug
/// for now where a worker thread after a build script has finished doesn't
/// have access. Ideally there would be no second argument or it would be more
/// "first class" and not an `Option` but something that can be sent between
/// threads. In any case, it's a bug for now.
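///
/// A rough usage sketch, mirroring `calculate_run_custom_build` (not a
/// doctest; variables abbreviated):
///
/// ```ignore
/// let (gen_local, _overridden) = build_script_local_fingerprints(cx, unit);
/// // On the main thread we can supply the package-fingerprint closure:
/// let local = gen_local(&deps, Some(&|| pkg_fingerprint(cx.bcx, &unit.pkg)))?;
/// // On a worker thread, after the script has run, the second argument
/// // must be `None` (the bug described above).
/// ```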
///
/// This isn't the greatest of interfaces, and if there's suggestions to
/// improve please do so!
///
/// FIXME(#6779) - see all the words above
fn build_script_local_fingerprints(
cx: &mut Context<'_, '_>,
unit: &Unit,
) -> (
Box<
dyn FnOnce(
&BuildDeps,
Option<&dyn Fn() -> CargoResult<String>>,
) -> CargoResult<Option<Vec<LocalFingerprint>>>
+ Send,
>,
bool,
) {
assert!(unit.mode.is_run_custom_build());
// First up, if this build script is entirely overridden, then we just
// return the hash of what we overrode it with. This is the easy case!
if let Some(fingerprint) = build_script_override_fingerprint(cx, unit) {
debug!("override local fingerprints deps {}", unit.pkg);
return (
Box::new(
move |_: &BuildDeps, _: Option<&dyn Fn() -> CargoResult<String>>| {
Ok(Some(vec![fingerprint]))
},
),
true, // this is an overridden build script
);
}
// ... Otherwise this is a "real" build script and we need to return a real
// closure. Our returned closure classifies the build script based on
// whether it prints `rerun-if-*`. If it *doesn't* print this it's where the
// magical second argument comes into play, which fingerprints a whole
// package. Remember that the fact that this is an `Option` is a bug, but a
// longstanding bug, in Cargo. Recent refactorings just made it painfully
// obvious.
let pkg_root = unit.pkg.root().to_path_buf();
let target_dir = target_root(cx);
let calculate =
move |deps: &BuildDeps, pkg_fingerprint: Option<&dyn Fn() -> CargoResult<String>>| {
if deps.rerun_if_changed.is_empty() && deps.rerun_if_env_changed.is_empty() {
match pkg_fingerprint {
// FIXME: this is somewhat buggy with respect to docker and
// weird filesystems. The `Precalculated` variant
// constructed below will, for `path` dependencies, contain
// a stringified version of the mtime for the local crate.
// This violates one of the things we describe in this
// module's doc comment, never hashing mtimes. We should
// figure out a better scheme where a package fingerprint
// may be a string (like for a registry) or a list of files
                // (like for a path dependency). That list of files would
                // be stored here rather than their mtimes.
Some(f) => {
let s = f()?;
debug!(
"old local fingerprints deps {:?} precalculated={:?}",
pkg_root, s
);
return Ok(Some(vec![LocalFingerprint::Precalculated(s)]));
}
None => return Ok(None),
}
}
// Ok so now we're in "new mode" where we can have files listed as
// dependencies as well as env vars listed as dependencies. Process
// them all here.
Ok(Some(local_fingerprints_deps(deps, &target_dir, &pkg_root)))
};
// Note that `false` == "not overridden"
(Box::new(calculate), false)
}
/// Create a `LocalFingerprint` for an overridden build script.
/// Returns None if it is not overridden.
fn build_script_override_fingerprint(
cx: &mut Context<'_, '_>,
unit: &Unit,
) -> Option<LocalFingerprint> {
// Build script output is only populated at this stage when it is
// overridden.
let build_script_outputs = cx.build_script_outputs.lock().unwrap();
let metadata = cx.get_run_build_script_metadata(unit);
// Returns None if it is not overridden.
let output = build_script_outputs.get(unit.pkg.package_id(), metadata)?;
let s = format!(
"overridden build state with hash: {}",
util::hash_u64(output)
);
Some(LocalFingerprint::Precalculated(s))
}
/// Compute the `LocalFingerprint` values for a `RunCustomBuild` unit for
/// non-overridden new-style build scripts only. This is only used when `deps`
/// is already known to have a nonempty `rerun-if-*` somewhere.
fn local_fingerprints_deps(
deps: &BuildDeps,
target_root: &Path,
pkg_root: &Path,
) -> Vec<LocalFingerprint> {
debug!("new local fingerprints deps {:?}", pkg_root);
let mut local = Vec::new();
if !deps.rerun_if_changed.is_empty() {
// Note that like the module comment above says we are careful to never
// store an absolute path in `LocalFingerprint`, so ensure that we strip
// absolute prefixes from them.
let output = deps
.build_script_output
.strip_prefix(target_root)
.unwrap()
.to_path_buf();
let paths = deps
.rerun_if_changed
.iter()
.map(|p| p.strip_prefix(pkg_root).unwrap_or(p).to_path_buf())
.collect();
local.push(LocalFingerprint::RerunIfChanged { output, paths });
}
for var in deps.rerun_if_env_changed.iter() {
let val = env::var(var).ok();
local.push(LocalFingerprint::RerunIfEnvChanged {
var: var.clone(),
val,
});
}
local
}
fn write_fingerprint(loc: &Path, fingerprint: &Fingerprint) -> CargoResult<()> {
debug_assert_ne!(fingerprint.rustc, 0);
// fingerprint::new().rustc == 0, make sure it doesn't make it to the file system.
// This is mostly so outside tools can reliably find out what rust version this file is for,
// as we can use the full hash.
let hash = fingerprint.hash();
debug!("write fingerprint ({:x}) : {}", hash, loc.display());
paths::write(loc, util::to_hex(hash).as_bytes())?;
let json = serde_json::to_string(fingerprint).unwrap();
if cfg!(debug_assertions) {
let f: Fingerprint = serde_json::from_str(&json).unwrap();
assert_eq!(f.hash(), hash);
}
paths::write(&loc.with_extension("json"), json.as_bytes())?;
Ok(())
}
/// Prepare for work when a package starts to build
pub fn prepare_init(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<()> {
let new1 = cx.files().fingerprint_dir(unit);
// Doc tests have no output, thus no fingerprint.
if !new1.exists() && !unit.mode.is_doc_test() {
paths::create_dir_all(&new1)?;
}
Ok(())
}
/// Returns the location that the dep-info file will show up at for the `unit`
/// specified.
pub fn dep_info_loc(cx: &mut Context<'_, '_>, unit: &Unit) -> PathBuf {
cx.files().fingerprint_file_path(unit, "dep-")
}
/// Returns the absolute path to the target directory.
/// All paths are rewritten to be relative to this.
fn target_root(cx: &Context<'_, '_>) -> PathBuf {
cx.bcx.ws.target_dir().into_path_unlocked()
}
fn compare_old_fingerprint(
loc: &Path,
new_fingerprint: &Fingerprint,
mtime_on_use: bool,
) -> CargoResult<()> {
let old_fingerprint_short = paths::read(loc)?;
if mtime_on_use {
// update the mtime so other cleaners know we used it
let t = FileTime::from_system_time(SystemTime::now());
debug!("mtime-on-use forcing {:?} to {}", loc, t);
paths::set_file_time_no_err(loc, t);
}
let new_hash = new_fingerprint.hash();
if util::to_hex(new_hash) == old_fingerprint_short && new_fingerprint.fs_status.up_to_date() {
return Ok(());
}
let old_fingerprint_json = paths::read(&loc.with_extension("json"))?;
let old_fingerprint: Fingerprint = serde_json::from_str(&old_fingerprint_json)
.chain_err(|| internal("failed to deserialize json"))?;
// Fingerprint can be empty after a failed rebuild (see comment in prepare_target).
if !old_fingerprint_short.is_empty() {
debug_assert_eq!(util::to_hex(old_fingerprint.hash()), old_fingerprint_short);
}
let result = new_fingerprint.compare(&old_fingerprint);
assert!(result.is_err());
result
}
fn log_compare(unit: &Unit, compare: &CargoResult<()>) {
let ce = match compare {
Ok(..) => return,
Err(e) => e,
};
info!(
"fingerprint error for {}/{:?}/{:?}",
unit.pkg, unit.mode, unit.target,
);
info!(" err: {:?}", ce);
}
/// Parses Cargo's internal `EncodedDepInfo` structure that was previously
/// serialized to disk.
///
/// Note that these are not rustc's `*.d` files.
///
/// Also note that rustc's `*.d` files are translated to Cargo-specific
/// `EncodedDepInfo` files after compilations have finished in
/// `translate_dep_info`.
///
/// Returns `None` if the file is corrupt or couldn't be read from disk. This
/// indicates that the crate should likely be rebuilt.
pub fn parse_dep_info(
pkg_root: &Path,
target_root: &Path,
dep_info: &Path,
) -> CargoResult<Option<RustcDepInfo>> {
let data = match paths::read_bytes(dep_info) {
Ok(data) => data,
Err(_) => return Ok(None),
};
let info = match EncodedDepInfo::parse(&data) {
Some(info) => info,
None => {
log::warn!("failed to parse cargo's dep-info at {:?}", dep_info);
return Ok(None);
}
};
let mut ret = RustcDepInfo::default();
ret.env = info.env;
for (ty, path) in info.files {
let path = match ty {
DepInfoPathType::PackageRootRelative => pkg_root.join(path),
// N.B. path might be absolute here in which case the join will have no effect
DepInfoPathType::TargetRootRelative => target_root.join(path),
};
ret.files.push(path);
}
Ok(Some(ret))
}
fn pkg_fingerprint(bcx: &BuildContext<'_, '_>, pkg: &Package) -> CargoResult<String> {
let source_id = pkg.package_id().source_id();
let sources = bcx.packages.sources();
let source = sources
.get(source_id)
.ok_or_else(|| internal("missing package source"))?;
source.fingerprint(pkg)
}
fn find_stale_file<I>(
mtime_cache: &mut HashMap<PathBuf, FileTime>,
reference: &Path,
paths: I,
) -> Option<StaleItem>
where
I: IntoIterator,
I::Item: AsRef<Path>,
{
let reference_mtime = match paths::mtime(reference) {
Ok(mtime) => mtime,
Err(..) => return Some(StaleItem::MissingFile(reference.to_path_buf())),
};
for path in paths {
let path = path.as_ref();
let path_mtime = match mtime_cache.entry(path.to_path_buf()) {
Entry::Occupied(o) => *o.get(),
Entry::Vacant(v) => {
let mtime = match paths::mtime(path) {
Ok(mtime) => mtime,
Err(..) => return Some(StaleItem::MissingFile(path.to_path_buf())),
};
*v.insert(mtime)
}
};
        // TODO: fix #5918.
        // Note that equal mtimes should be considered "stale". For filesystems with
        // low timestamp precision, such as 1s, this would be a conservative
        // approximation to handle the case where a file is modified within the same
        // second after a build starts. We want to make sure that incremental
        // rebuilds pick that up!
        //
        // For filesystems with nanosecond precision it's been seen in the wild that
        // the "nanosecond precision" isn't really nanosecond-accurate. It turns out
        // that kernels may cache the current time, so files created at different
        // times actually end up with the same nanosecond timestamp. Some digging on
        // #5919 picked up that the kernel caches the current time between timer
        // ticks, which could mean that if a file is updated at most 10ms after a
        // build starts then Cargo may not pick up the build changes.
        //
        // All in all, an equality check here would be a conservative assumption
        // that, if equal, files were changed just after a previous build finished.
        // Unfortunately this became problematic when (in #6484) Cargo switched to
        // more accurately measuring the start time of builds.
if path_mtime <= reference_mtime {
continue;
}
return Some(StaleItem::ChangedFile {
reference: reference.to_path_buf(),
reference_mtime,
stale: path.to_path_buf(),
stale_mtime: path_mtime,
});
}
debug!(
"all paths up-to-date relative to {:?} mtime={}",
reference, reference_mtime
);
None
}
enum DepInfoPathType {
// src/, e.g. src/lib.rs
PackageRootRelative,
// target/debug/deps/lib...
// or an absolute path /.../sysroot/...
TargetRootRelative,
}
/// Parses the dep-info file coming out of rustc into a Cargo-specific format.
///
/// This function will parse `rustc_dep_info` as a makefile-style dep info to
/// learn about all the files which a crate depends on. This is then
/// re-serialized into the `cargo_dep_info` path in a Cargo-specific format.
///
/// The `pkg_root` argument here is the absolute path to the directory
/// containing `Cargo.toml` for this crate that was compiled. The paths listed
/// in the rustc dep-info file may or may not be absolute but we'll want to
/// consider all of them relative to the `root` specified.
///
/// The `rustc_cwd` argument is the absolute path to the cwd of the compiler
/// when it was invoked.
///
/// If the `allow_package` argument is true, then package-relative paths are
/// included. If it is false, then package-relative paths are skipped and
/// ignored (typically used for registry or git dependencies where we assume
/// the source never changes, and we don't want the cost of running `stat` on
/// all those files). See the module-level docs for the note about
/// `-Zbinary-dep-depinfo` for more details on why this is done.
///
/// The serialized Cargo format will contain a list of files, all of which are
/// relative if they're under `root`, or absolute if they're elsewhere.
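///
/// For example (hypothetical paths): a dependency under the target directory
/// such as `target/debug/deps/libfoo.rmeta` is stored as `TargetRootRelative`,
/// a source file under the package such as `src/lib.rs` is stored as
/// `PackageRootRelative` (only when `allow_package` is true), and anything
/// outside both roots is kept absolute under the `TargetRootRelative` tag, so
/// re-joining it to the target root later is a no-op.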
pub fn translate_dep_info(
rustc_dep_info: &Path,
cargo_dep_info: &Path,
rustc_cwd: &Path,
pkg_root: &Path,
target_root: &Path,
rustc_cmd: &ProcessBuilder,
allow_package: bool,
) -> CargoResult<()> {
let depinfo = parse_rustc_dep_info(rustc_dep_info)?;
let target_root = target_root.canonicalize()?;
let pkg_root = pkg_root.canonicalize()?;
let mut on_disk_info = EncodedDepInfo::default();
on_disk_info.env = depinfo.env;
// This is a bit of a tricky statement, but here we're *removing* the
// dependency on environment variables that were defined specifically for
    // the command itself. Environment variables returned by `get_envs` include
// environment variables like:
//
// * `OUT_DIR` if applicable
// * env vars added by a build script, if any
//
// The general idea here is that the dep info file tells us what, when
// changed, should cause us to rebuild the crate. These environment
// variables are synthesized by Cargo and/or the build script, and the
// intention is that their values are tracked elsewhere for whether the
// crate needs to be rebuilt.
//
// For example a build script says when it needs to be rerun and otherwise
// it's assumed to produce the same output, so we're guaranteed that env
// vars defined by the build script will always be the same unless the build
// script itself reruns, in which case the crate will rerun anyway.
//
// For things like `OUT_DIR` it's a bit sketchy for now. Most of the time
    // that's used for code generation, but this is technically buggy: if
// you write a binary that does `println!("{}", env!("OUT_DIR"))` we won't
// recompile that if you move the target directory. Hopefully that's not too
// bad of an issue for now...
on_disk_info
.env
.retain(|(key, _)| !rustc_cmd.get_envs().contains_key(key));
for file in depinfo.files {
// The path may be absolute or relative, canonical or not. Make sure
// it is canonicalized so we are comparing the same kinds of paths.
let abs_file = rustc_cwd.join(file);
// If canonicalization fails, just use the abs path. There is currently
// a bug where --remap-path-prefix is affecting .d files, causing them
// to point to non-existent paths.
let canon_file = abs_file.canonicalize().unwrap_or_else(|_| abs_file.clone());
let (ty, path) = if let Ok(stripped) = canon_file.strip_prefix(&target_root) {
(DepInfoPathType::TargetRootRelative, stripped)
} else if let Ok(stripped) = canon_file.strip_prefix(&pkg_root) {
if !allow_package {
continue;
}
(DepInfoPathType::PackageRootRelative, stripped)
} else {
// It's definitely not target root relative, but this is an absolute path (since it was
// joined to rustc_cwd) and as such re-joining it later to the target root will have no
// effect.
(DepInfoPathType::TargetRootRelative, &*abs_file)
};
on_disk_info.files.push((ty, path.to_owned()));
}
paths::write(cargo_dep_info, on_disk_info.serialize()?)?;
Ok(())
}
#[derive(Default)]
pub struct RustcDepInfo {
/// The list of files that the main target in the dep-info file depends on.
pub files: Vec<PathBuf>,
/// The list of environment variables we found that the rustc compilation
/// depends on.
///
/// The first element of the pair is the name of the env var and the second
/// item is the value. `Some` means that the env var was set, and `None`
/// means that the env var wasn't actually set and the compilation depends
/// on it not being set.
pub env: Vec<(String, Option<String>)>,
}
// Same as `RustcDepInfo` except avoids absolute paths as much as possible to
// allow moving around the target directory.
//
// This is also stored in an optimized format to make parsing it fast because
// Cargo will read it for crates on all future compilations.
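//
// On-disk layout, as a sketch reconstructed from the `parse`/`serialize`
// methods below (not a normative spec):
//
//   u32-le  number of files
//   per file:
//     u8      path-type tag (0 = PackageRootRelative, 1 = TargetRootRelative)
//     u32-le  path length, followed by the path bytes
//   u32-le  number of env vars
//   per var:
//     u32-le  name length, followed by the name bytes
//     u8      tag (0 = var was unset, 1 = set)
//     if set: u32-le value length, followed by the value bytes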
#[derive(Default)]
struct EncodedDepInfo {
files: Vec<(DepInfoPathType, PathBuf)>,
env: Vec<(String, Option<String>)>,
}
impl EncodedDepInfo {
fn parse(mut bytes: &[u8]) -> Option<EncodedDepInfo> {
let bytes = &mut bytes;
let nfiles = read_usize(bytes)?;
        let mut files = Vec::with_capacity(nfiles);
for _ in 0..nfiles {
let ty = match read_u8(bytes)? {
0 => DepInfoPathType::PackageRootRelative,
1 => DepInfoPathType::TargetRootRelative,
_ => return None,
};
let bytes = read_bytes(bytes)?;
files.push((ty, util::bytes2path(bytes).ok()?));
}
let nenv = read_usize(bytes)?;
        let mut env = Vec::with_capacity(nenv);
for _ in 0..nenv {
let key = str::from_utf8(read_bytes(bytes)?).ok()?.to_string();
let val = match read_u8(bytes)? {
0 => None,
1 => Some(str::from_utf8(read_bytes(bytes)?).ok()?.to_string()),
_ => return None,
};
env.push((key, val));
}
return Some(EncodedDepInfo { files, env });
fn read_usize(bytes: &mut &[u8]) -> Option<usize> {
let ret = bytes.get(..4)?;
*bytes = &bytes[4..];
Some(u32::from_le_bytes(ret.try_into().unwrap()) as usize)
}
fn read_u8(bytes: &mut &[u8]) -> Option<u8> {
            let ret = *bytes.first()?;
*bytes = &bytes[1..];
Some(ret)
}
fn read_bytes<'a>(bytes: &mut &'a [u8]) -> Option<&'a [u8]> {
            let n = read_usize(bytes)?;
let ret = bytes.get(..n)?;
*bytes = &bytes[n..];
Some(ret)
}
}
fn serialize(&self) -> CargoResult<Vec<u8>> {
let mut ret = Vec::new();
let dst = &mut ret;
write_usize(dst, self.files.len());
for (ty, file) in self.files.iter() {
match ty {
DepInfoPathType::PackageRootRelative => dst.push(0),
DepInfoPathType::TargetRootRelative => dst.push(1),
}
write_bytes(dst, util::path2bytes(file)?);
}
write_usize(dst, self.env.len());
for (key, val) in self.env.iter() {
write_bytes(dst, key);
match val {
None => dst.push(0),
Some(val) => {
dst.push(1);
write_bytes(dst, val);
}
}
}
return Ok(ret);
fn write_bytes(dst: &mut Vec<u8>, val: impl AsRef<[u8]>) {
let val = val.as_ref();
write_usize(dst, val.len());
dst.extend_from_slice(val);
}
fn write_usize(dst: &mut Vec<u8>, val: usize) {
dst.extend(&u32::to_le_bytes(val as u32));
}
}
}
/// Parse the `.d` dep-info file generated by rustc.
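///
/// A typical input (hypothetical contents) looks like:
///
/// ```text
/// # env-dep:CARGO_PKG_NAME=foo
/// /tmp/out/libfoo.rmeta: src/lib.rs src/some\ file.rs
/// ```
///
/// Spaces inside paths are escaped with a trailing `\`, which is why the loop
/// below re-joins tokens that end with a backslash.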
pub fn parse_rustc_dep_info(rustc_dep_info: &Path) -> CargoResult<RustcDepInfo> {
let contents = paths::read(rustc_dep_info)?;
let mut ret = RustcDepInfo::default();
let mut found_deps = false;
for line in contents.lines() {
if let Some(rest) = line.strip_prefix("# env-dep:") {
let mut parts = rest.splitn(2, '=');
let env_var = match parts.next() {
Some(s) => s,
None => continue,
};
let env_val = match parts.next() {
Some(s) => Some(unescape_env(s)?),
None => None,
};
ret.env.push((unescape_env(env_var)?, env_val));
} else if let Some(pos) = line.find(": ") {
if found_deps {
continue;
}
found_deps = true;
let mut deps = line[pos + 2..].split_whitespace();
while let Some(s) = deps.next() {
let mut file = s.to_string();
while file.ends_with('\\') {
file.pop();
file.push(' ');
file.push_str(deps.next().ok_or_else(|| {
internal("malformed dep-info format, trailing \\".to_string())
})?);
}
ret.files.push(file.into());
}
}
}
return Ok(ret);
    // rustc tries to fit env var names and values all on a single line, which
    // means it needs to escape `\r` and `\n`. They are written as the literal
    // two-character sequences `\r` and `\n`, which means that a real `\` also
    // needs to be escaped (as `\\`).
fn unescape_env(s: &str) -> CargoResult<String> {
let mut ret = String::with_capacity(s.len());
let mut chars = s.chars();
while let Some(c) = chars.next() {
if c != '\\' {
ret.push(c);
continue;
}
match chars.next() {
Some('\\') => ret.push('\\'),
Some('n') => ret.push('\n'),
Some('r') => ret.push('\r'),
Some(c) => bail!("unknown escape character `{}`", c),
None => bail!("unterminated escape character"),
}
}
Ok(ret)
}
}
| 41.919921 | 99 | 0.610926 |
8a7ef3f2808b77bc92b94f42fa5832d198d55e55 | 4,709 | use std::sync::{Arc, Mutex};
use simple_parallel;
use state::LSystem;
use super::{LProcessor, SimpleProcessor};
/// Parallel processor dividing a state into chunks to be individually iterated
/// within a pool of threads.
pub struct ChunksProcessor {
/// The number of symbols per full chunk.
chunk_size: usize,
/// The thread pool.
pool: simple_parallel::Pool,
}
impl ChunksProcessor {
    /// Try to create a new `ChunksProcessor` instance with the given parameters.
/// Typical values:
/// - max_tasks : number of CPU logical cores
/// - chunks_size : between 100_000 and 1_000_000 symbols per chunk
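    ///
    /// A minimal usage sketch (assuming a 4-core machine; the
    /// `chunks_processing` test below exercises the full flow):
    ///
    /// ```ignore
    /// let mut processor = ChunksProcessor::new(4, 100_000).unwrap();
    /// ```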
pub fn new(max_tasks: usize, chunks_size: usize) -> Result<ChunksProcessor, String> {
if max_tasks == 0 {
Err(format!("ChunksProcessor::new : invalid maximum tasks number ({})",
max_tasks))
} else if chunks_size == 0 {
Err(format!("ChunksProcessor::new : invalid chunks size ({})",
chunks_size))
} else {
Ok(ChunksProcessor {
chunk_size: chunks_size,
pool: simple_parallel::Pool::new(max_tasks),
})
}
}
}
impl<S> LProcessor<S> for ChunksProcessor
where S: Clone + Eq + Send + Sync
{
// TODO : better error handling...
fn iterate<'a>(&mut self, lsystem: &LSystem<'a, S>) -> Result<LSystem<'a, S>, String> {
// Set-up
let mut vec: Vec<Vec<S>> = Vec::new();
let state_len = lsystem.state().len();
if state_len == 0 {
            return Err("cannot iterate an empty state".to_string());
}
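        // Ceiling division: compute how many chunks are needed to cover the state.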
let rem = state_len % self.chunk_size;
let chunks_number = state_len / self.chunk_size +
match rem {
0 => 0,
_ => 1,
};
for _ in 0..chunks_number {
vec.push(Vec::new());
}
let sub_states = Arc::new(Mutex::new(vec));
// Chunks processing
let rules = lsystem.rules().clone();
let errors = Mutex::new(String::new());
let chunks_iter = lsystem.state().chunks(self.chunk_size);
self.pool
.for_(chunks_iter.enumerate(), |(n, chunk)| {
let result: Vec<S> = match SimpleProcessor::iterate_slice(chunk, &rules) {
Ok(v) => v,
Err(why) => {
let mut error_lock = errors.lock().unwrap();
*error_lock = format!("{}\n{}", *error_lock, why);
Vec::new()
}
};
let mut chunk_data = sub_states.lock().unwrap();
chunk_data[n] = result;
});
// Error handling
let error_lock = errors.lock().unwrap();
if !error_lock.is_empty() {
return Err(format!("ChunksProcessor : iteration error(s):\n{}", *error_lock));
}
// Final assembling
let mut new_state_size = 0usize;
let mut new_state: Vec<S> = Vec::new();
let data = sub_states.lock().unwrap();
for n in 0..chunks_number {
let chunk_iterated = &data[n];
new_state_size = match new_state_size.checked_add(chunk_iterated.len()) {
Some(v) => v,
None => {
return Err(format!("ChunksProcessor::iterate : usize overflow, state too big \
                                        for Vec"))
}
};
new_state.extend(chunk_iterated.iter().cloned());
}
Ok(LSystem::<S>::new(new_state, rules, Some(lsystem.iteration() + 1)))
}
}
#[cfg(test)]
mod test {
use rules::HashMapRules;
use state::{LSystem, new_rules_value};
use interpret::TurtleCommand;
use process::{LProcessor, ChunksProcessor};
#[test]
fn chunks_processing() {
let mut rules = HashMapRules::new(); // algae rules
rules.set_str('A', "AB", TurtleCommand::None);
rules.set_str('B', "A", TurtleCommand::None);
let expected_sizes = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597,
2584, 4181, 6765, 10946, 17711, 28657, 46368];
let mut lsystem = LSystem::new_with_char("A", new_rules_value(rules));
let mut processor = ChunksProcessor::new(4, 10_000).ok().unwrap();
for n in 0..expected_sizes.len() {
assert_eq!(lsystem.iteration(), n as u64);
assert_eq!(lsystem.state().len(), expected_sizes[n]);
lsystem = processor.iterate(&lsystem).ok().unwrap();
}
}
}
| 37.373016 | 98 | 0.531323 |
61092fc9b7343cf350b1854ffda46167c9f4d375 | 2,709 | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use std::collections::HashMap;
use common_arrow::parquet::statistics::Statistics;
use common_datavalues::DataSchema;
use common_datavalues::DataValue;
use uuid::Uuid;
pub type SnapshotId = Uuid;
pub type ColumnId = u32;
pub type Location = String;
#[derive(serde::Serialize, serde::Deserialize, Debug)]
pub struct TableSnapshot {
pub snapshot_id: SnapshotId,
pub prev_snapshot_id: Option<SnapshotId>,
/// For each snapshot, we keep a schema for it (in case of schema evolution)
pub schema: DataSchema,
/// Summary Statistics
pub summary: Stats,
/// Pointers to SegmentInfos
///
/// We rely on background merge tasks to keep merging segments, so that
    /// the size of this vector can be kept reasonable
pub segments: Vec<Location>,
}
impl TableSnapshot {
pub fn new() -> Self {
todo!()
}
}
/// A segment comprised of one or more blocks
#[derive(serde::Serialize, serde::Deserialize, Debug)]
pub struct SegmentInfo {
pub blocks: Vec<BlockMeta>,
pub summary: Stats,
}
#[derive(serde::Serialize, serde::Deserialize, Debug)]
pub struct Stats {
pub row_count: u64,
pub block_count: u64,
pub uncompressed_byte_size: u64,
pub compressed_byte_size: u64,
pub col_stats: HashMap<ColumnId, ColStats>,
}
/// Meta information of a block (currently, the parquet file)
#[derive(serde::Serialize, serde::Deserialize, Debug)]
pub struct BlockMeta {
/// Pointer of the data Block
pub row_count: u64,
pub block_size: u64,
pub col_stats: HashMap<ColumnId, ColStats>,
pub location: BlockLocation,
}
#[derive(serde::Serialize, serde::Deserialize, Debug)]
pub struct BlockLocation {
pub location: Location,
    // for parquet, this field can be used to fetch the metadata without seeking around
pub meta_size: u64,
}
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
pub struct ColStats {
pub min: DataValue,
pub max: DataValue,
pub null_count: usize,
pub row_count: usize,
}
#[allow(dead_code)]
pub type RawBlockStats = HashMap<u32, std::sync::Arc<dyn Statistics>>;
| 29.769231 | 88 | 0.709856 |
e95a4626c4c04551fec06ebe5561c49fadfeb592 | 1,090 | pub mod rome;
use std::io;
fn read_eval(input: String, env: &mut rome::Model) -> Result<rome::Oexp, rome::RomeError> {
let (parsed_exp, _) = rome::parse(&rome::tokenise(input))?;
let evaluated_exp = rome::eval(&parsed_exp, env)?;
Ok(evaluated_exp)
}
fn slurp_input() -> String {
let mut input = String::new();
io::stdin().read_line(&mut input)
.expect("Failed to read line");
input
}
fn main() {
let env = &mut rome::new_core_model();
println!("-- Ask in Roman, when O. prompts --");
loop {
println!("O. ");
let input = slurp_input();
match read_eval(input, env) {
Ok(res) => println!(" : {}", res),
Err(e) => match e {
rome::RomeError::ReaderError(msg) => println!(" ~ {}", msg),
rome::RomeError::OperatorError(msg) => println!(" ~ {}", msg),
rome::RomeError::ModelingError(msg) => println!(" ~ {}", msg),
rome::RomeError::EffectorError(msg) => println!(" ~ {}", msg),
}
}
}
}
| 27.25 | 91 | 0.511927 |
08472400cf1d3498aa53709efd8501c40f9107e1 | 31,596 | // This file is part of Substrate.
// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A slashing implementation for NPoS systems.
//!
//! For the purposes of the economic model, it is easiest to think of each validator as a nominator
//! which nominates only its own identity.
//!
//! The act of nomination signals intent to unify economic identity with the validator - to take
//! part in the rewards of a job well done, and to take part in the punishment of a job done badly.
//!
//! There are 3 main difficulties to account for with slashing in NPoS:
//! - A nominator can nominate multiple validators and be slashed via any of them.
//! - Until slashed, stake is reused from era to era. Nominating with N coins for E eras in a row
//! does not mean you have N*E coins to be slashed - you've only ever had N.
//! - Slashable offences can be found after the fact and out of order.
//!
//! The algorithm implemented in this module tries to balance these 3 difficulties.
//!
//! First, we only slash participants for the _maximum_ slash they receive in some time period,
//! rather than the sum. This protects against overslashing.
//!
//! Second, we do not want the time period (or "span") that the maximum is computed
//! over to last indefinitely. That would allow participants to begin acting with
//! impunity after some point, fearing no further repercussions. For that reason, we
//! automatically "chill" validators and withdraw a nominator's nomination after a slashing event,
//! requiring them to re-enlist voluntarily (acknowledging the slash) and begin a new
//! slashing span.
//!
//! Typically, you will have a single slashing event per slashing span. Only in the case
//! where a validator releases many misbehaviors at once, or goes "back in time" to misbehave in
//! eras that have already passed, would you encounter situations where a slashing span
//! has multiple misbehaviors. However, accounting for such cases is necessary
//! to deter a class of "rage-quit" attacks.
//!
//! Based on research at https://research.web3.foundation/en/latest/polkadot/slashing/npos/
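//!
//! # Worked example (illustrative, not from the original documentation)
//!
//! Suppose a validator is slashed 10% in some era, and a later report for the
//! same span demands 25%. Because only the maximum slash per span is applied,
//! the second report slashes only the 15% difference rather than a further
//! 25%, and the span is then ended so that future misbehavior starts a fresh
//! slashing span.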
use super::{
BalanceOf, EraIndex, Error, Exposure, Module, NegativeImbalanceOf, Perbill, SessionInterface,
Store, Trait, UnappliedSlash,
};
use codec::{Decode, Encode};
use frame_support::{
ensure,
traits::{Currency, Imbalance, OnUnbalanced},
StorageDoubleMap, StorageMap,
};
use sp_runtime::{
traits::{Saturating, Zero},
DispatchResult, RuntimeDebug,
};
use sp_std::vec::Vec;
/// The proportion of the slashing reward to be paid out on the first slashing detection.
/// This is f_1 in the paper.
const REWARD_F1: Perbill = Perbill::from_percent(50);
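// As used in `compare_and_update_span_slash` below, the payout for a new
// maximum span slash works out to
// REWARD_F1 * (reward_proportion * slash - already_paid_out).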
/// The index of a slashing span - unique to each stash.
pub type SpanIndex = u32;
// A range of start..end eras for a slashing span.
#[derive(Encode, Decode)]
#[cfg_attr(test, derive(Debug, PartialEq))]
pub(crate) struct SlashingSpan {
pub(crate) index: SpanIndex,
pub(crate) start: EraIndex,
pub(crate) length: Option<EraIndex>, // the ongoing slashing span has indeterminate length.
}
impl SlashingSpan {
fn contains_era(&self, era: EraIndex) -> bool {
self.start <= era && self.length.map_or(true, |l| self.start + l > era)
}
}
/// An encoding of all of a nominator's slashing spans.
#[derive(Encode, Decode, RuntimeDebug)]
pub struct SlashingSpans {
// the index of the current slashing span of the nominator. different for
// every stash, resets when the account hits free balance 0.
span_index: SpanIndex,
// the start era of the most recent (ongoing) slashing span.
last_start: EraIndex,
// the last era at which a non-zero slash occurred.
last_nonzero_slash: EraIndex,
// all prior slashing spans' start indices, in reverse order (most recent first)
// encoded as offsets relative to the slashing span after it.
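    // e.g. `prior = vec![10, 9]` with `last_start = 1000` encodes spans
    // starting at era 990 (length 10) and era 981 (length 9); see the
    // `many_prior_spans` test below.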
prior: Vec<EraIndex>,
}
impl SlashingSpans {
// creates a new record of slashing spans for a stash, starting at the beginning
// of the bonding period, relative to now.
pub(crate) fn new(window_start: EraIndex) -> Self {
SlashingSpans {
span_index: 0,
last_start: window_start,
            // initialize to zero, as this structure is created lazily, only when
            // the first slash is applied. Setting it equal to `window_start` would
            // put a time limit on nominations.
last_nonzero_slash: 0,
prior: Vec::new(),
}
}
// update the slashing spans to reflect the start of a new span at the era after `now`
// returns `true` if a new span was started, `false` otherwise. `false` indicates
// that internal state is unchanged.
pub(crate) fn end_span(&mut self, now: EraIndex) -> bool {
let next_start = now + 1;
if next_start <= self.last_start {
return false;
}
let last_length = next_start - self.last_start;
self.prior.insert(0, last_length);
self.last_start = next_start;
self.span_index += 1;
true
}
// an iterator over all slashing spans in _reverse_ order - most recent first.
pub(crate) fn iter(&'_ self) -> impl Iterator<Item = SlashingSpan> + '_ {
let mut last_start = self.last_start;
let mut index = self.span_index;
let last = SlashingSpan {
index,
start: last_start,
length: None,
};
let prior = self.prior.iter().cloned().map(move |length| {
let start = last_start - length;
last_start = start;
index -= 1;
SlashingSpan {
index,
start,
length: Some(length),
}
});
sp_std::iter::once(last).chain(prior)
}
/// Yields the era index where the most recent non-zero slash occurred.
pub fn last_nonzero_slash(&self) -> EraIndex {
self.last_nonzero_slash
}
// prune the slashing spans against a window, whose start era index is given.
//
// If this returns `Some`, then it includes a range start..end of all the span
// indices which were pruned.
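    // e.g. with spans starting at eras 1000 (ongoing), 990, 981, 973 and 963,
    // pruning against a window starting at era 981 drops the two oldest spans
    // and returns `Some((6, 8))`; see the `pruning_spans` test below.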
fn prune(&mut self, window_start: EraIndex) -> Option<(SpanIndex, SpanIndex)> {
let old_idx = self
.iter()
.skip(1) // skip ongoing span.
.position(|span| {
span.length
.map_or(false, |len| span.start + len <= window_start)
});
let earliest_span_index = self.span_index - self.prior.len() as SpanIndex;
let pruned = match old_idx {
Some(o) => {
self.prior.truncate(o);
let new_earliest = self.span_index - self.prior.len() as SpanIndex;
Some((earliest_span_index, new_earliest))
}
None => None,
};
// readjust the ongoing span, if it started before the beginning of the window.
self.last_start = sp_std::cmp::max(self.last_start, window_start);
pruned
}
}
/// A slashing-span record for a particular stash.
#[derive(Encode, Decode, Default)]
pub(crate) struct SpanRecord<Balance> {
slashed: Balance,
paid_out: Balance,
}
impl<Balance> SpanRecord<Balance> {
/// The value of stash balance slashed in this span.
#[cfg(test)]
pub(crate) fn amount_slashed(&self) -> &Balance {
&self.slashed
}
}
/// Parameters for performing a slash.
#[derive(Clone)]
pub(crate) struct SlashParams<'a, T: 'a + Trait> {
/// The stash account being slashed.
pub(crate) stash: &'a T::AccountId,
/// The proportion of the slash.
pub(crate) slash: Perbill,
/// The exposure of the stash and all nominators.
pub(crate) exposure: &'a Exposure<T::AccountId, BalanceOf<T>>,
/// The era where the offence occurred.
pub(crate) slash_era: EraIndex,
/// The first era in the current bonding period.
pub(crate) window_start: EraIndex,
/// The current era.
pub(crate) now: EraIndex,
/// The maximum percentage of a slash that ever gets paid out.
/// This is f_inf in the paper.
pub(crate) reward_proportion: Perbill,
}
/// Computes a slash of a validator and nominators. It returns an unapplied
/// record to be applied at some later point. Slashing metadata is updated in storage,
/// since unapplied records are only rarely intended to be dropped.
///
/// The pending slash record returned does not have initialized reporters. Those have
/// to be set at a higher level, if any.
pub(crate) fn compute_slash<T: Trait>(
params: SlashParams<T>,
) -> Option<UnappliedSlash<T::AccountId, BalanceOf<T>>> {
let SlashParams {
stash,
slash,
exposure,
slash_era,
window_start,
now,
reward_proportion,
} = params.clone();
let mut reward_payout = Zero::zero();
let mut val_slashed = Zero::zero();
// is the slash amount here a maximum for the era?
let own_slash = slash * exposure.own;
if slash * exposure.total == Zero::zero() {
// kick out the validator even if they won't be slashed,
// as long as the misbehavior is from their most recent slashing span.
kick_out_if_recent::<T>(params);
return None;
}
let (prior_slash_p, _era_slash) =
<Module<T> as Store>::ValidatorSlashInEra::get(&slash_era, stash)
.unwrap_or((Perbill::zero(), Zero::zero()));
// compare slash proportions rather than slash values to avoid issues due to rounding
// error.
if slash.deconstruct() > prior_slash_p.deconstruct() {
<Module<T> as Store>::ValidatorSlashInEra::insert(&slash_era, stash, &(slash, own_slash));
} else {
// we slash based on the max in era - this new event is not the max,
        // so neither the validator nor any nominators will need an update.
//
// this does lead to a divergence of our system from the paper, which
// pays out some reward even if the latest report is not max-in-era.
// we opt to avoid the nominator lookups and edits and leave more rewards
// for more drastic misbehavior.
return None;
}
// apply slash to validator.
{
let mut spans = fetch_spans::<T>(
stash,
window_start,
&mut reward_payout,
&mut val_slashed,
reward_proportion,
);
let target_span = spans.compare_and_update_span_slash(slash_era, own_slash);
if target_span == Some(spans.span_index()) {
// misbehavior occurred within the current slashing span - take appropriate
// actions.
// chill the validator - it misbehaved in the current span and should
// not continue in the next election. also end the slashing span.
spans.end_span(now);
<Module<T>>::chill_stash(stash);
// make sure to disable validator till the end of this session
if T::SessionInterface::disable_validator(stash).unwrap_or(false) {
// force a new era, to select a new validator set
<Module<T>>::ensure_new_era()
}
}
}
let mut nominators_slashed = Vec::new();
reward_payout += slash_nominators::<T>(params, prior_slash_p, &mut nominators_slashed);
Some(UnappliedSlash {
validator: stash.clone(),
own: val_slashed,
others: nominators_slashed,
reporters: Vec::new(),
payout: reward_payout,
})
}
// doesn't apply any slash, but kicks out the validator if the misbehavior is from
// the most recent slashing span.
fn kick_out_if_recent<T: Trait>(params: SlashParams<T>) {
// these are not updated by era-span or end-span.
let mut reward_payout = Zero::zero();
let mut val_slashed = Zero::zero();
let mut spans = fetch_spans::<T>(
params.stash,
params.window_start,
&mut reward_payout,
&mut val_slashed,
params.reward_proportion,
);
if spans.era_span(params.slash_era).map(|s| s.index) == Some(spans.span_index()) {
spans.end_span(params.now);
<Module<T>>::chill_stash(params.stash);
// make sure to disable validator till the end of this session
if T::SessionInterface::disable_validator(params.stash).unwrap_or(false) {
// force a new era, to select a new validator set
<Module<T>>::ensure_new_era()
}
}
}
/// Slash nominators. Accepts general parameters and the prior slash percentage of the validator.
///
/// Returns the amount of reward to pay out.
fn slash_nominators<T: Trait>(
params: SlashParams<T>,
prior_slash_p: Perbill,
nominators_slashed: &mut Vec<(T::AccountId, BalanceOf<T>)>,
) -> BalanceOf<T> {
let SlashParams {
stash: _,
slash,
exposure,
slash_era,
window_start,
now,
reward_proportion,
} = params;
let mut reward_payout = Zero::zero();
nominators_slashed.reserve(exposure.others.len());
for nominator in &exposure.others {
let stash = &nominator.who;
let mut nom_slashed = Zero::zero();
// the era slash of a nominator always grows, if the validator
// had a new max slash for the era.
let era_slash = {
let own_slash_prior = prior_slash_p * nominator.value;
let own_slash_by_validator = slash * nominator.value;
let own_slash_difference = own_slash_by_validator.saturating_sub(own_slash_prior);
let mut era_slash = <Module<T> as Store>::NominatorSlashInEra::get(&slash_era, stash)
.unwrap_or_else(|| Zero::zero());
era_slash += own_slash_difference;
<Module<T> as Store>::NominatorSlashInEra::insert(&slash_era, stash, &era_slash);
era_slash
};
// compare the era slash against other eras in the same span.
{
let mut spans = fetch_spans::<T>(
stash,
window_start,
&mut reward_payout,
&mut nom_slashed,
reward_proportion,
);
let target_span = spans.compare_and_update_span_slash(slash_era, era_slash);
if target_span == Some(spans.span_index()) {
// End the span, but don't chill the nominator. its nomination
// on this validator will be ignored in the future.
spans.end_span(now);
}
}
nominators_slashed.push((stash.clone(), nom_slashed));
}
reward_payout
}
// helper struct for managing a set of spans we are currently inspecting.
// writes alterations to disk on drop, but only if a slash has been carried out.
//
// NOTE: alterations to slashing metadata should not be done after this is dropped.
// dropping this struct applies any necessary slashes, which can lead to free balance
// being 0, and the account being garbage-collected -- a dead account should get no new
// metadata.
struct InspectingSpans<'a, T: Trait + 'a> {
dirty: bool,
window_start: EraIndex,
stash: &'a T::AccountId,
spans: SlashingSpans,
paid_out: &'a mut BalanceOf<T>,
slash_of: &'a mut BalanceOf<T>,
reward_proportion: Perbill,
_marker: sp_std::marker::PhantomData<T>,
}
// fetches the slashing spans record for a stash account, initializing it if necessary.
fn fetch_spans<'a, T: Trait + 'a>(
stash: &'a T::AccountId,
window_start: EraIndex,
paid_out: &'a mut BalanceOf<T>,
slash_of: &'a mut BalanceOf<T>,
reward_proportion: Perbill,
) -> InspectingSpans<'a, T> {
let spans = <Module<T> as Store>::SlashingSpans::get(stash).unwrap_or_else(|| {
let spans = SlashingSpans::new(window_start);
<Module<T> as Store>::SlashingSpans::insert(stash, &spans);
spans
});
InspectingSpans {
dirty: false,
window_start,
stash,
spans,
slash_of,
paid_out,
reward_proportion,
_marker: sp_std::marker::PhantomData,
}
}
impl<'a, T: 'a + Trait> InspectingSpans<'a, T> {
fn span_index(&self) -> SpanIndex {
self.spans.span_index
}
fn end_span(&mut self, now: EraIndex) {
self.dirty = self.spans.end_span(now) || self.dirty;
}
// add some value to the slash of the staker.
// invariant: the staker is being slashed for non-zero value here
// although `amount` may be zero, as it is only a difference.
fn add_slash(&mut self, amount: BalanceOf<T>, slash_era: EraIndex) {
*self.slash_of += amount;
self.spans.last_nonzero_slash = sp_std::cmp::max(self.spans.last_nonzero_slash, slash_era);
}
// find the span index of the given era, if covered.
fn era_span(&self, era: EraIndex) -> Option<SlashingSpan> {
self.spans.iter().find(|span| span.contains_era(era))
}
// compares the slash in an era to the overall current span slash.
// if it's higher, applies the difference of the slashes and then updates the span on disk.
//
// returns the span index of the era where the slash occurred, if any.
fn compare_and_update_span_slash(
&mut self,
slash_era: EraIndex,
slash: BalanceOf<T>,
) -> Option<SpanIndex> {
let target_span = self.era_span(slash_era)?;
let span_slash_key = (self.stash.clone(), target_span.index);
let mut span_record = <Module<T> as Store>::SpanSlash::get(&span_slash_key);
let mut changed = false;
let reward = if span_record.slashed < slash {
// new maximum span slash. apply the difference.
let difference = slash - span_record.slashed;
span_record.slashed = slash;
// compute reward.
let reward =
REWARD_F1 * (self.reward_proportion * slash).saturating_sub(span_record.paid_out);
self.add_slash(difference, slash_era);
changed = true;
reward
} else if span_record.slashed == slash {
// compute reward. no slash difference to apply.
REWARD_F1 * (self.reward_proportion * slash).saturating_sub(span_record.paid_out)
} else {
Zero::zero()
};
if !reward.is_zero() {
changed = true;
span_record.paid_out += reward;
*self.paid_out += reward;
}
if changed {
self.dirty = true;
<Module<T> as Store>::SpanSlash::insert(&span_slash_key, &span_record);
}
Some(target_span.index)
}
}
impl<'a, T: 'a + Trait> Drop for InspectingSpans<'a, T> {
fn drop(&mut self) {
// only update on disk if we slashed this account.
if !self.dirty {
return;
}
if let Some((start, end)) = self.spans.prune(self.window_start) {
for span_index in start..end {
<Module<T> as Store>::SpanSlash::remove(&(self.stash.clone(), span_index));
}
}
<Module<T> as Store>::SlashingSpans::insert(self.stash, &self.spans);
}
}
/// Clear slashing metadata for an obsolete era.
pub(crate) fn clear_era_metadata<T: Trait>(obsolete_era: EraIndex) {
<Module<T> as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era);
<Module<T> as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era);
}
/// Clear slashing metadata for a dead account.
pub(crate) fn clear_stash_metadata<T: Trait>(
stash: &T::AccountId,
num_slashing_spans: u32,
) -> DispatchResult {
let spans = match <Module<T> as Store>::SlashingSpans::get(stash) {
None => return Ok(()),
Some(s) => s,
};
ensure!(
num_slashing_spans as usize >= spans.iter().count(),
Error::<T>::IncorrectSlashingSpans
);
<Module<T> as Store>::SlashingSpans::remove(stash);
// kill slashing-span metadata for account.
//
// this can only happen while the account is staked _if_ they are completely slashed.
// in that case, they may re-bond, but it would count again as span 0. Further ancient
// slashes would slash into this new bond, since metadata has now been cleared.
for span in spans.iter() {
<Module<T> as Store>::SpanSlash::remove(&(stash.clone(), span.index));
}
Ok(())
}
// apply the slash to a stash account, deducting any missing funds from the reward
// payout, saturating at 0. this is mildly unfair but also an edge-case that
// can only occur when overlapping locked funds have been slashed.
pub fn do_slash<T: Trait>(
stash: &T::AccountId,
value: BalanceOf<T>,
reward_payout: &mut BalanceOf<T>,
slashed_imbalance: &mut NegativeImbalanceOf<T>,
) {
let controller = match <Module<T>>::bonded(stash) {
None => return, // defensive: should always exist.
Some(c) => c,
};
let mut ledger = match <Module<T>>::ledger(&controller) {
Some(ledger) => ledger,
None => return, // nothing to do.
};
let value = ledger.slash(value, T::Currency::minimum_balance());
if !value.is_zero() {
let (imbalance, missing) = T::Currency::slash(stash, value);
slashed_imbalance.subsume(imbalance);
if !missing.is_zero() {
// deduct overslash from the reward payout
*reward_payout = reward_payout.saturating_sub(missing);
}
<Module<T>>::update_ledger(&controller, &ledger);
// trigger the event
<Module<T>>::deposit_event(super::RawEvent::Slash(stash.clone(), value));
}
}
/// Apply a previously-unapplied slash.
pub(crate) fn apply_slash<T: Trait>(unapplied_slash: UnappliedSlash<T::AccountId, BalanceOf<T>>) {
let mut slashed_imbalance = NegativeImbalanceOf::<T>::zero();
let mut reward_payout = unapplied_slash.payout;
do_slash::<T>(
&unapplied_slash.validator,
unapplied_slash.own,
&mut reward_payout,
&mut slashed_imbalance,
);
for &(ref nominator, nominator_slash) in &unapplied_slash.others {
do_slash::<T>(
&nominator,
nominator_slash,
&mut reward_payout,
&mut slashed_imbalance,
);
}
pay_reporters::<T>(reward_payout, slashed_imbalance, &unapplied_slash.reporters);
}
/// Apply a reward payout to some reporters, paying the rewards out of the slashed imbalance.
fn pay_reporters<T: Trait>(
reward_payout: BalanceOf<T>,
slashed_imbalance: NegativeImbalanceOf<T>,
reporters: &[T::AccountId],
) {
if reward_payout.is_zero() || reporters.is_empty() {
// nobody to pay out to or nothing to pay;
// just treat the whole value as slashed.
T::Slash::on_unbalanced(slashed_imbalance);
return;
}
// take rewards out of the slashed imbalance.
let reward_payout = reward_payout.min(slashed_imbalance.peek());
let (mut reward_payout, mut value_slashed) = slashed_imbalance.split(reward_payout);
let per_reporter = reward_payout.peek() / (reporters.len() as u32).into();
for reporter in reporters {
let (reporter_reward, rest) = reward_payout.split(per_reporter);
reward_payout = rest;
// this cancels out the reporter reward imbalance internally, leading
// to no change in total issuance.
T::Currency::resolve_creating(reporter, reporter_reward);
}
// the rest goes to the on-slash imbalance handler (e.g. treasury)
value_slashed.subsume(reward_payout); // remainder of reward division remains.
T::Slash::on_unbalanced(value_slashed);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn span_contains_era() {
// unbounded end
let span = SlashingSpan {
index: 0,
start: 1000,
length: None,
};
assert!(!span.contains_era(0));
assert!(!span.contains_era(999));
assert!(span.contains_era(1000));
assert!(span.contains_era(1001));
assert!(span.contains_era(10000));
// bounded end - non-inclusive range.
let span = SlashingSpan {
index: 0,
start: 1000,
length: Some(10),
};
assert!(!span.contains_era(0));
assert!(!span.contains_era(999));
assert!(span.contains_era(1000));
assert!(span.contains_era(1001));
assert!(span.contains_era(1009));
assert!(!span.contains_era(1010));
assert!(!span.contains_era(1011));
}
#[test]
fn single_slashing_span() {
let spans = SlashingSpans {
span_index: 0,
last_start: 1000,
last_nonzero_slash: 0,
prior: Vec::new(),
};
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![SlashingSpan {
index: 0,
start: 1000,
length: None
}],
);
}
#[test]
fn many_prior_spans() {
let spans = SlashingSpans {
span_index: 10,
last_start: 1000,
last_nonzero_slash: 0,
prior: vec![10, 9, 8, 10],
};
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![
SlashingSpan {
index: 10,
start: 1000,
length: None
},
SlashingSpan {
index: 9,
start: 990,
length: Some(10)
},
SlashingSpan {
index: 8,
start: 981,
length: Some(9)
},
SlashingSpan {
index: 7,
start: 973,
length: Some(8)
},
SlashingSpan {
index: 6,
start: 963,
length: Some(10)
},
],
)
}
#[test]
fn pruning_spans() {
let mut spans = SlashingSpans {
span_index: 10,
last_start: 1000,
last_nonzero_slash: 0,
prior: vec![10, 9, 8, 10],
};
assert_eq!(spans.prune(981), Some((6, 8)));
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![
SlashingSpan {
index: 10,
start: 1000,
length: None
},
SlashingSpan {
index: 9,
start: 990,
length: Some(10)
},
SlashingSpan {
index: 8,
start: 981,
length: Some(9)
},
],
);
assert_eq!(spans.prune(982), None);
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![
SlashingSpan {
index: 10,
start: 1000,
length: None
},
SlashingSpan {
index: 9,
start: 990,
length: Some(10)
},
SlashingSpan {
index: 8,
start: 981,
length: Some(9)
},
],
);
assert_eq!(spans.prune(989), None);
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![
SlashingSpan {
index: 10,
start: 1000,
length: None
},
SlashingSpan {
index: 9,
start: 990,
length: Some(10)
},
SlashingSpan {
index: 8,
start: 981,
length: Some(9)
},
],
);
assert_eq!(spans.prune(1000), Some((8, 10)));
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![SlashingSpan {
index: 10,
start: 1000,
length: None
},],
);
assert_eq!(spans.prune(2000), None);
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![SlashingSpan {
index: 10,
start: 2000,
length: None
},],
);
// now all in one shot.
let mut spans = SlashingSpans {
span_index: 10,
last_start: 1000,
last_nonzero_slash: 0,
prior: vec![10, 9, 8, 10],
};
assert_eq!(spans.prune(2000), Some((6, 10)));
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![SlashingSpan {
index: 10,
start: 2000,
length: None
},],
);
}
#[test]
fn ending_span() {
let mut spans = SlashingSpans {
span_index: 1,
last_start: 10,
last_nonzero_slash: 0,
prior: Vec::new(),
};
assert!(spans.end_span(10));
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![
SlashingSpan {
index: 2,
start: 11,
length: None
},
SlashingSpan {
index: 1,
start: 10,
length: Some(1)
},
],
);
assert!(spans.end_span(15));
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![
SlashingSpan {
index: 3,
start: 16,
length: None
},
SlashingSpan {
index: 2,
start: 11,
length: Some(5)
},
SlashingSpan {
index: 1,
start: 10,
length: Some(1)
},
],
);
// does nothing if not a valid end.
assert!(!spans.end_span(15));
assert_eq!(
spans.iter().collect::<Vec<_>>(),
vec![
SlashingSpan {
index: 3,
start: 16,
length: None
},
SlashingSpan {
index: 2,
start: 11,
length: Some(5)
},
SlashingSpan {
index: 1,
start: 10,
length: Some(1)
},
],
);
}
}
| 33.119497 | 99 | 0.572541 |
fbc8ec723c75812d7b22f7a8e86e08e1e8f1a505 | 1,537 | use cosmwasm_std::{Addr, Decimal, Uint128};
use cw_controllers::Claims;
use cw_storage_plus::{Item, Map};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
pub struct Config {
pub owner: Addr,
pub staking_token: Addr,
pub terraland_token: Addr,
pub unbonding_period: u64,
pub burn_address: Addr,
pub instant_claim_percentage_loss: u64,
pub distribution_schedule: Vec<Schedule>,
pub fee_config: Vec<FeeConfig>,
}
#[derive(Default, Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
pub struct Schedule {
pub amount: Uint128,
pub start_time: u64,
pub end_time: u64,
}
#[derive(Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
pub struct FeeConfig {
pub fee: Uint128,
pub operation: String,
pub denom: String,
}
#[derive(Default, Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
pub struct MemberInfo {
pub stake: Uint128,
pub pending_reward: Uint128,
pub reward_index: Decimal,
pub withdrawn: Uint128,
}
#[derive(Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
pub struct State {
pub total_stake: Uint128,
pub last_updated: u64,
pub global_reward_index: Decimal,
pub num_of_members: u64,
}
pub const CONFIG: Item<Config> = Item::new("config");
pub const MEMBERS: Map<&Addr, MemberInfo> = Map::new("members");
pub const STATE: Item<State> = Item::new("state");
pub const CLAIMS: Claims = Claims::new("claims");
| 29 | 79 | 0.716981 |
691459f1220faa4f6501d839b7d4a2e769509553 | 10,280 | use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufWriter;
use std::path::PathBuf;
use std::time::SystemTime;
use crossbeam_channel::Receiver;
use crate::common::Common;
use crate::common_dir_traversal::{Collect, DirTraversalBuilder, DirTraversalResult, ErrorType, FileEntry, ProgressData};
use crate::common_directory::Directories;
use crate::common_extensions::Extensions;
use crate::common_items::ExcludedItems;
use crate::common_messages::Messages;
use crate::common_traits::*;
#[derive(Eq, PartialEq, Clone, Debug)]
pub enum DeleteMethod {
None,
Delete,
}
/// Info struct with helpful information about the results
#[derive(Default)]
pub struct Info {
pub number_of_invalid_symlinks: usize,
}
impl Info {
pub fn new() -> Self {
Default::default()
}
}
/// Struct with the information required to work
pub struct InvalidSymlinks {
text_messages: Messages,
information: Info,
invalid_symlinks: Vec<FileEntry>,
directories: Directories,
allowed_extensions: Extensions,
excluded_items: ExcludedItems,
recursive_search: bool,
delete_method: DeleteMethod,
stopped_search: bool,
}
impl InvalidSymlinks {
pub fn new() -> Self {
Self {
text_messages: Messages::new(),
information: Info::new(),
recursive_search: true,
allowed_extensions: Extensions::new(),
directories: Directories::new(),
excluded_items: ExcludedItems::new(),
invalid_symlinks: vec![],
delete_method: DeleteMethod::None,
stopped_search: false,
}
}
pub fn find_invalid_links(&mut self, stop_receiver: Option<&Receiver<()>>, progress_sender: Option<&futures::channel::mpsc::UnboundedSender<ProgressData>>) {
self.directories.optimize_directories(self.recursive_search, &mut self.text_messages);
if !self.check_files(stop_receiver, progress_sender) {
self.stopped_search = true;
return;
}
self.delete_files();
self.debug_print();
}
pub fn get_stopped_search(&self) -> bool {
self.stopped_search
}
pub const fn get_invalid_symlinks(&self) -> &Vec<FileEntry> {
&self.invalid_symlinks
}
pub const fn get_text_messages(&self) -> &Messages {
&self.text_messages
}
pub const fn get_information(&self) -> &Info {
&self.information
}
pub fn set_delete_method(&mut self, delete_method: DeleteMethod) {
self.delete_method = delete_method;
}
pub fn set_recursive_search(&mut self, recursive_search: bool) {
self.recursive_search = recursive_search;
}
#[cfg(target_family = "unix")]
pub fn set_exclude_other_filesystems(&mut self, exclude_other_filesystems: bool) {
self.directories.set_exclude_other_filesystems(exclude_other_filesystems);
}
pub fn set_included_directory(&mut self, included_directory: Vec<PathBuf>) -> bool {
self.directories.set_included_directory(included_directory, &mut self.text_messages)
}
pub fn set_excluded_directory(&mut self, excluded_directory: Vec<PathBuf>) {
self.directories.set_excluded_directory(excluded_directory, &mut self.text_messages);
}
pub fn set_allowed_extensions(&mut self, allowed_extensions: String) {
self.allowed_extensions.set_allowed_extensions(allowed_extensions, &mut self.text_messages);
}
pub fn set_excluded_items(&mut self, excluded_items: Vec<String>) {
self.excluded_items.set_excluded_items(excluded_items, &mut self.text_messages);
}
    /// Check files for invalid symlinks
fn check_files(&mut self, stop_receiver: Option<&Receiver<()>>, progress_sender: Option<&futures::channel::mpsc::UnboundedSender<ProgressData>>) -> bool {
let result = DirTraversalBuilder::new()
.root_dirs(self.directories.included_directories.clone())
.group_by(|_fe| ())
.stop_receiver(stop_receiver)
.progress_sender(progress_sender)
.collect(Collect::InvalidSymlinks)
.directories(self.directories.clone())
.allowed_extensions(self.allowed_extensions.clone())
.excluded_items(self.excluded_items.clone())
.recursive_search(self.recursive_search)
.build()
.run();
match result {
DirTraversalResult::SuccessFiles {
start_time,
grouped_file_entries,
warnings,
} => {
if let Some(((), invalid_symlinks)) = grouped_file_entries.into_iter().next() {
self.invalid_symlinks = invalid_symlinks;
}
self.information.number_of_invalid_symlinks = self.invalid_symlinks.len();
self.text_messages.warnings.extend(warnings);
Common::print_time(start_time, SystemTime::now(), "check_files_name".to_string());
true
}
DirTraversalResult::SuccessFolders { .. } => unreachable!(),
DirTraversalResult::Stopped => false,
}
}
    /// Function to delete files from the collected invalid symlinks vector
fn delete_files(&mut self) {
let start_time: SystemTime = SystemTime::now();
match self.delete_method {
DeleteMethod::Delete => {
for file_entry in &self.invalid_symlinks {
if fs::remove_file(file_entry.path.clone()).is_err() {
self.text_messages.warnings.push(file_entry.path.display().to_string());
}
}
}
DeleteMethod::None => {
//Just do nothing
}
}
Common::print_time(start_time, SystemTime::now(), "delete_files".to_string());
}
}
impl Default for InvalidSymlinks {
fn default() -> Self {
Self::new()
}
}
impl DebugPrint for InvalidSymlinks {
#[allow(dead_code)]
#[allow(unreachable_code)]
/// Debugging printing - only available on debug build
fn debug_print(&self) {
#[cfg(not(debug_assertions))]
{
return;
}
println!("---------------DEBUG PRINT---------------");
println!("### Information's");
println!("Errors size - {}", self.text_messages.errors.len());
println!("Warnings size - {}", self.text_messages.warnings.len());
println!("Messages size - {}", self.text_messages.messages.len());
println!("### Other");
println!("Invalid symlinks list size - {}", self.invalid_symlinks.len());
println!("Excluded items - {:?}", self.excluded_items.items);
println!("Included directories - {:?}", self.directories.included_directories);
println!("Excluded directories - {:?}", self.directories.excluded_directories);
println!("Recursive search - {}", self.recursive_search);
#[cfg(target_family = "unix")]
println!("Skip other filesystems - {}", self.directories.exclude_other_filesystems());
println!("Delete Method - {:?}", self.delete_method);
println!("-----------------------------------------");
}
}
impl SaveResults for InvalidSymlinks {
fn save_results_to_file(&mut self, file_name: &str) -> bool {
let start_time: SystemTime = SystemTime::now();
let file_name: String = match file_name {
"" => "results.txt".to_string(),
k => k.to_string(),
};
let file_handler = match File::create(&file_name) {
Ok(t) => t,
Err(e) => {
self.text_messages.errors.push(format!("Failed to create file {}, reason {}", file_name, e));
return false;
}
};
let mut writer = BufWriter::new(file_handler);
if let Err(e) = writeln!(
writer,
"Results of searching {:?} with excluded directories {:?} and excluded items {:?}",
self.directories.included_directories, self.directories.excluded_directories, self.excluded_items.items
) {
self.text_messages.errors.push(format!("Failed to save results to file {}, reason {}", file_name, e));
return false;
}
if !self.invalid_symlinks.is_empty() {
writeln!(writer, "Found {} invalid symlinks.", self.information.number_of_invalid_symlinks).unwrap();
for file_entry in self.invalid_symlinks.iter() {
writeln!(
writer,
"{}\t\t{}\t\t{}",
file_entry.path.display(),
file_entry.symlink_info.clone().expect("invalid traversal result").destination_path.display(),
match file_entry.symlink_info.clone().expect("invalid traversal result").type_of_error {
ErrorType::InfiniteRecursion => "Infinite Recursion",
ErrorType::NonExistentFile => "Non Existent File",
}
)
.unwrap();
}
} else {
write!(writer, "Not found any invalid symlinks.").unwrap();
}
Common::print_time(start_time, SystemTime::now(), "save_results_to_file".to_string());
true
}
}
impl PrintResults for InvalidSymlinks {
    /// Print information about the found invalid symlinks
/// Only needed for CLI
fn print_results(&self) {
let start_time: SystemTime = SystemTime::now();
println!("Found {} invalid symlinks.\n", self.information.number_of_invalid_symlinks);
for file_entry in self.invalid_symlinks.iter() {
println!(
"{}\t\t{}\t\t{}",
file_entry.path.display(),
file_entry.symlink_info.clone().expect("invalid traversal result").destination_path.display(),
match file_entry.symlink_info.clone().expect("invalid traversal result").type_of_error {
ErrorType::InfiniteRecursion => "Infinite Recursion",
ErrorType::NonExistentFile => "Non Existent File",
}
);
}
Common::print_time(start_time, SystemTime::now(), "print_entries".to_string());
}
}
| 36.978417 | 161 | 0.606907 |
8a9d9f25c0c034fa9c65f17314f3e972e9b5aee0 | 22,857 | use std::env;
use std::fs::File;
use std::io::prelude::*;
use std::io::{self, BufReader};
use std::path::PathBuf;
const IN: &str = "neon.spec";
const ARM_OUT: &str = "generated.rs";
const AARCH64_OUT: &str = "generated.rs";
const UINT_TYPES: [&str; 6] = [
"uint8x8_t",
"uint8x16_t",
"uint16x4_t",
"uint16x8_t",
"uint32x2_t",
"uint32x4_t",
];
const UINT_TYPES_64: [&str; 2] = ["uint64x1_t", "uint64x2_t"];
const INT_TYPES: [&str; 6] = [
"int8x8_t",
"int8x16_t",
"int16x4_t",
"int16x8_t",
"int32x2_t",
"int32x4_t",
];
const INT_TYPES_64: [&str; 2] = ["int64x1_t", "int64x2_t"];
const FLOAT_TYPES: [&str; 2] = [
//"float8x8_t", not supported by rust
//"float8x16_t", not supported by rust
//"float16x4_t", not supported by rust
//"float16x8_t", not supported by rust
"float32x2_t",
"float32x4_t",
];
const FLOAT_TYPES_64: [&str; 2] = [
//"float8x8_t", not supported by rust
//"float8x16_t", not supported by rust
//"float16x4_t", not supported by rust
//"float16x8_t", not supported by rust
"float64x1_t",
"float64x2_t",
];
fn type_len(t: &str) -> usize {
match t {
"int8x8_t" => 8,
"int8x16_t" => 16,
"int16x4_t" => 4,
"int16x8_t" => 8,
"int32x2_t" => 2,
"int32x4_t" => 4,
"int64x1_t" => 1,
"int64x2_t" => 2,
"uint8x8_t" => 8,
"uint8x16_t" => 16,
"uint16x4_t" => 4,
"uint16x8_t" => 8,
"uint32x2_t" => 2,
"uint32x4_t" => 4,
"uint64x1_t" => 1,
"uint64x2_t" => 2,
"float16x4_t" => 4,
"float16x8_t" => 8,
"float32x2_t" => 2,
"float32x4_t" => 4,
"float64x1_t" => 1,
"float64x2_t" => 2,
"poly64x1_t" => 1,
"poly64x2_t" => 2,
_ => panic!("unknown type: {}", t),
}
}
fn type_to_suffix(t: &str) -> &str {
match t {
"int8x8_t" => "_s8",
"int8x16_t" => "q_s8",
"int16x4_t" => "_s16",
"int16x8_t" => "q_s16",
"int32x2_t" => "_s32",
"int32x4_t" => "q_s32",
"int64x1_t" => "_s64",
"int64x2_t" => "q_s64",
"uint8x8_t" => "_u8",
"uint8x16_t" => "q_u8",
"uint16x4_t" => "_u16",
"uint16x8_t" => "q_u16",
"uint32x2_t" => "_u32",
"uint32x4_t" => "q_u32",
"uint64x1_t" => "_u64",
"uint64x2_t" => "q_u64",
"float16x4_t" => "_f16",
"float16x8_t" => "q_f16",
"float32x2_t" => "_f32",
"float32x4_t" => "q_f32",
"float64x1_t" => "_f64",
"float64x2_t" => "q_f64",
"poly64x1_t" => "_p64",
"poly64x2_t" => "q_p64",
_ => panic!("unknown type: {}", t),
}
}
fn type_to_global_type(t: &str) -> &str {
match t {
"int8x8_t" => "i8x8",
"int8x16_t" => "i8x16",
"int16x4_t" => "i16x4",
"int16x8_t" => "i16x8",
"int32x2_t" => "i32x2",
"int32x4_t" => "i32x4",
"int64x1_t" => "i64x1",
"int64x2_t" => "i64x2",
"uint8x8_t" => "u8x8",
"uint8x16_t" => "u8x16",
"uint16x4_t" => "u16x4",
"uint16x8_t" => "u16x8",
"uint32x2_t" => "u32x2",
"uint32x4_t" => "u32x4",
"uint64x1_t" => "u64x1",
"uint64x2_t" => "u64x2",
"float16x4_t" => "f16x4",
"float16x8_t" => "f16x8",
"float32x2_t" => "f32x2",
"float32x4_t" => "f32x4",
"float64x1_t" => "f64",
"float64x2_t" => "f64x2",
"poly64x1_t" => "i64x1",
"poly64x2_t" => "i64x2",
_ => panic!("unknown type: {}", t),
}
}
// fn type_to_native_type(t: &str) -> &str {
// match t {
// "int8x8_t" => "i8",
// "int8x16_t" => "i8",
// "int16x4_t" => "i16",
// "int16x8_t" => "i16",
// "int32x2_t" => "i32",
// "int32x4_t" => "i32",
// "int64x1_t" => "i64",
// "int64x2_t" => "i64",
// "uint8x8_t" => "u8",
// "uint8x16_t" => "u8",
// "uint16x4_t" => "u16",
// "uint16x8_t" => "u16",
// "uint32x2_t" => "u32",
// "uint32x4_t" => "u32",
// "uint64x1_t" => "u64",
// "uint64x2_t" => "u64",
// "float16x4_t" => "f16",
// "float16x8_t" => "f16",
// "float32x2_t" => "f32",
// "float32x4_t" => "f32",
// "float64x1_t" => "f64",
// "float64x2_t" => "f64",
// "poly64x1_t" => "i64",
// "poly64x2_t" => "i64",
// _ => panic!("unknown type: {}", t),
// }
// }
fn type_to_ext(t: &str) -> &str {
match t {
"int8x8_t" => "v8i8",
"int8x16_t" => "v16i8",
"int16x4_t" => "v4i16",
"int16x8_t" => "v8i16",
"int32x2_t" => "v2i32",
"int32x4_t" => "v4i32",
"int64x1_t" => "v1i64",
"int64x2_t" => "v2i64",
"uint8x8_t" => "v8i8",
"uint8x16_t" => "v16i8",
"uint16x4_t" => "v4i16",
"uint16x8_t" => "v8i16",
"uint32x2_t" => "v2i32",
"uint32x4_t" => "v4i32",
"uint64x1_t" => "v1i64",
"uint64x2_t" => "v2i64",
"float16x4_t" => "v4f16",
"float16x8_t" => "v8f16",
"float32x2_t" => "v2f32",
"float32x4_t" => "v4f32",
"float64x1_t" => "v1f64",
"float64x2_t" => "v2f64",
/*
"poly64x1_t" => "i64x1",
"poly64x2_t" => "i64x2",
*/
_ => panic!("unknown type for extension: {}", t),
}
}
fn values(t: &str, vs: &[String]) -> String {
if vs.len() == 1 && !t.contains('x') {
format!(": {} = {}", t, vs[0])
} else if vs.len() == 1 && type_to_global_type(t) == "f64" {
format!(": {} = {}", type_to_global_type(t), vs[0])
} else {
format!(
": {} = {}::new({})",
type_to_global_type(t),
type_to_global_type(t),
vs.iter()
.map(|v| map_val(type_to_global_type(t), v))
//.map(|v| format!("{}{}", v, type_to_native_type(t)))
.collect::<Vec<_>>()
.join(", ")
)
}
}
fn max_val(t: &str) -> &'static str {
match &t[..3] {
"u8x" => "0xFF",
"u16" => "0xFF_FF",
"u32" => "0xFF_FF_FF_FF",
"u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF",
"i8x" => "0x7F",
"i16" => "0x7F_FF",
"i32" => "0x7F_FF_FF_FF",
"i64" => "0x7F_FF_FF_FF_FF_FF_FF_FF",
"f32" => "3.40282347e+38",
"f64" => "1.7976931348623157e+308",
_ => panic!("No TRUE for type {}", t),
}
}
fn min_val(t: &str) -> &'static str {
match &t[..3] {
"u8x" => "0",
"u16" => "0",
"u32" => "0",
"u64" => "0",
"i8x" => "-128",
"i16" => "-32768",
"i32" => "-2147483648",
"i64" => "-9223372036854775808",
"f32" => "-3.40282347e+38",
"f64" => "-1.7976931348623157e+308",
_ => panic!("No TRUE for type {}", t),
}
}
fn true_val(t: &str) -> &'static str {
match &t[..3] {
"u8x" => "0xFF",
"u16" => "0xFF_FF",
"u32" => "0xFF_FF_FF_FF",
"u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF",
_ => panic!("No TRUE for type {}", t),
}
}
fn ff_val(t: &str) -> &'static str {
match &t[..3] {
"u8x" => "0xFF",
"u16" => "0xFF_FF",
"u32" => "0xFF_FF_FF_FF",
"u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF",
"i8x" => "0xFF",
"i16" => "0xFF_FF",
"i32" => "0xFF_FF_FF_FF",
"i64" => "0xFF_FF_FF_FF_FF_FF_FF_FF",
_ => panic!("No TRUE for type {}", t),
}
}
fn false_val(_t: &str) -> &'static str {
"0"
}
fn map_val<'v>(t: &str, v: &'v str) -> &'v str {
match v {
"FALSE" => false_val(t),
"TRUE" => true_val(t),
"MAX" => min_val(t),
"MIN" => max_val(t),
"FF" => ff_val(t),
o => o,
}
}
#[allow(clippy::too_many_arguments)]
fn gen_aarch64(
current_comment: &str,
current_fn: &Option<String>,
name: &str,
current_aarch64: &Option<String>,
link_aarch64: &Option<String>,
in_t: &str,
out_t: &str,
current_tests: &[(Vec<String>, Vec<String>, Vec<String>)],
) -> (String, String) {
let _global_t = type_to_global_type(in_t);
let _global_ret_t = type_to_global_type(out_t);
let current_fn = if let Some(current_fn) = current_fn.clone() {
if link_aarch64.is_some() {
panic!("[{}] Can't specify link and fn at the same time.", name)
}
current_fn
} else {
if link_aarch64.is_none() {
panic!("[{}] Either fn or link-aarch have to be specified.", name)
}
format!("{}_", name)
};
let current_aarch64 = current_aarch64.clone().unwrap();
let ext_c = if let Some(link_aarch64) = link_aarch64.clone() {
let ext = type_to_ext(in_t);
format!(
r#"
#[allow(improper_ctypes)]
extern "C" {{
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.{}")]
        fn {}(a: {}, b: {}) -> {};
}}
"#,
link_aarch64.replace("_EXT_", ext),
current_fn,
in_t,
in_t,
out_t
)
} else {
String::new()
};
let function = format!(
r#"
{}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr({}))]
pub unsafe fn {}(a: {}, b: {}) -> {} {{
{}{}(a, b)
}}
"#,
current_comment, current_aarch64, name, in_t, in_t, out_t, ext_c, current_fn,
);
let test = gen_test(name, &in_t, &out_t, current_tests, type_len(in_t));
(function, test)
}
fn gen_test(
name: &str,
in_t: &str,
out_t: &str,
current_tests: &[(Vec<String>, Vec<String>, Vec<String>)],
len: usize,
) -> String {
let mut test = format!(
r#"
#[simd_test(enable = "neon")]
unsafe fn test_{}() {{"#,
name,
);
for (a, b, e) in current_tests {
let a: Vec<String> = a.iter().take(len).cloned().collect();
let b: Vec<String> = b.iter().take(len).cloned().collect();
let e: Vec<String> = e.iter().take(len).cloned().collect();
let t = format!(
r#"
let a{};
let b{};
let e{};
let r: {} = transmute({}(transmute(a), transmute(b)));
assert_eq!(r, e);
"#,
values(in_t, &a),
values(in_t, &b),
values(out_t, &e),
type_to_global_type(out_t),
name
);
test.push_str(&t);
}
test.push_str(" }\n");
test
}
#[allow(clippy::too_many_arguments)]
fn gen_arm(
current_comment: &str,
current_fn: &Option<String>,
name: &str,
current_arm: &str,
link_arm: &Option<String>,
current_aarch64: &Option<String>,
link_aarch64: &Option<String>,
in_t: &str,
out_t: &str,
current_tests: &[(Vec<String>, Vec<String>, Vec<String>)],
) -> (String, String) {
let _global_t = type_to_global_type(in_t);
let _global_ret_t = type_to_global_type(out_t);
let current_aarch64 = current_aarch64
.clone()
.unwrap_or_else(|| current_arm.to_string());
let current_fn = if let Some(current_fn) = current_fn.clone() {
if link_aarch64.is_some() || link_arm.is_some() {
panic!(
"[{}] Can't specify link and function at the same time. {} / {:?} / {:?}",
name, current_fn, link_aarch64, link_arm
)
}
current_fn
} else {
if link_aarch64.is_none() || link_arm.is_none() {
panic!(
"[{}] Either fn or link-arm and link-aarch have to be specified.",
name
)
}
format!("{}_", name)
};
let ext_c =
if let (Some(link_arm), Some(link_aarch64)) = (link_arm.clone(), link_aarch64.clone()) {
let ext = type_to_ext(in_t);
format!(
r#"#[allow(improper_ctypes)]
extern "C" {{
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.{}")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.{}")]
fn {}(a: {}, b: {}) -> {};
}}
"#,
link_arm.replace("_EXT_", ext),
link_aarch64.replace("_EXT_", ext),
current_fn,
in_t,
in_t,
out_t
)
} else {
String::new()
};
let function = format!(
r#"
{}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr({}))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({}))]
pub unsafe fn {}(a: {}, b: {}) -> {} {{
{}{}(a, b)
}}
"#,
current_comment,
expand_intrinsic(¤t_arm, in_t),
expand_intrinsic(¤t_aarch64, in_t),
name,
in_t,
in_t,
out_t,
ext_c,
current_fn,
);
let test = gen_test(name, &in_t, &out_t, current_tests, type_len(in_t));
(function, test)
}
fn expand_intrinsic(intr: &str, t: &str) -> String {
if intr.ends_with(".") {
let ext = match t {
"int8x8_t" => "i8",
"int8x16_t" => "i8",
"int16x4_t" => "i16",
"int16x8_t" => "i16",
"int32x2_t" => "i32",
"int32x4_t" => "i32",
"int64x1_t" => "i64",
"int64x2_t" => "i64",
"uint8x8_t" => "i8",
"uint8x16_t" => "i8",
"uint16x4_t" => "i16",
"uint16x8_t" => "i16",
"uint32x2_t" => "i32",
"uint32x4_t" => "i32",
"uint64x1_t" => "i64",
"uint64x2_t" => "i64",
"float16x4_t" => "f16",
"float16x8_t" => "f16",
"float32x2_t" => "f32",
"float32x4_t" => "f32",
"float64x1_t" => "f64",
"float64x2_t" => "f64",
/*
"poly64x1_t" => "i64x1",
"poly64x2_t" => "i64x2",
*/
_ => panic!("unknown type for extension: {}", t),
};
format!(r#""{}{}""#, intr, ext)
} else if intr.ends_with(".s") {
let ext = match t {
"int8x8_t" => "s8",
"int8x16_t" => "s8",
"int16x4_t" => "s16",
"int16x8_t" => "s16",
"int32x2_t" => "s32",
"int32x4_t" => "s32",
"int64x1_t" => "s64",
"int64x2_t" => "s64",
"uint8x8_t" => "u8",
"uint8x16_t" => "u8",
"uint16x4_t" => "u16",
"uint16x8_t" => "u16",
"uint32x2_t" => "u32",
"uint32x4_t" => "u32",
"uint64x1_t" => "u64",
"uint64x2_t" => "u64",
"float16x4_t" => "f16",
"float16x8_t" => "f16",
"float32x2_t" => "f32",
"float32x4_t" => "f32",
"float64x1_t" => "f64",
"float64x2_t" => "f64",
/*
"poly64x1_t" => "i64x1",
"poly64x2_t" => "i64x2",
*/
_ => panic!("unknown type for extension: {}", t),
};
format!(r#""{}{}""#, &intr[..intr.len() - 1], ext)
} else {
intr.to_string()
}
}
fn main() -> io::Result<()> {
let args: Vec<String> = env::args().collect();
let in_file = args.get(1).cloned().unwrap_or_else(|| IN.to_string());
let f = File::open(in_file).expect("Failed to open neon.spec");
let f = BufReader::new(f);
let mut current_comment = String::new();
let mut current_name: Option<String> = None;
let mut current_fn: Option<String> = None;
let mut current_arm: Option<String> = None;
let mut current_aarch64: Option<String> = None;
let mut link_arm: Option<String> = None;
let mut link_aarch64: Option<String> = None;
let mut a: Vec<String> = Vec::new();
let mut b: Vec<String> = Vec::new();
let mut current_tests: Vec<(Vec<String>, Vec<String>, Vec<String>)> = Vec::new();
//
// THIS FILE IS GENERATED FROM neon.spec. DO NOT CHANGE IT MANUALLY.
//
let mut out_arm = String::from(
r#"// This code is automatically generated. DO NOT MODIFY.
//
// Instead, modify `crates/stdarch-gen/neon.spec` and run the following command to re-generate this file:
//
// ```
// OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen -- crates/stdarch-gen/neon.spec
// ```
use super::*;
#[cfg(test)]
use stdarch_test::assert_instr;
"#,
);
let mut tests_arm = String::from(
r#"
#[cfg(test)]
#[allow(overflowing_literals)]
mod test {
use super::*;
use crate::core_arch::simd::*;
use std::mem::transmute;
use stdarch_test::simd_test;
"#,
);
//
// THIS FILE IS GENERATED FROM neon.spec. DO NOT CHANGE IT MANUALLY.
//
let mut out_aarch64 = String::from(
r#"// This code is automatically generated. DO NOT MODIFY.
//
// Instead, modify `crates/stdarch-gen/neon.spec` and run the following command to re-generate this file:
//
// ```
// OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen -- crates/stdarch-gen/neon.spec
// ```
use super::*;
#[cfg(test)]
use stdarch_test::assert_instr;
"#,
);
let mut tests_aarch64 = String::from(
r#"
#[cfg(test)]
mod test {
use super::*;
use crate::core_arch::simd::*;
use std::mem::transmute;
use stdarch_test::simd_test;
"#,
);
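    //
    // Each spec entry starts with a `///` doc comment and is described by
    // the key/value lines that follow it. A hypothetical entry (illustrative
    // only — the real entries live in neon.spec):
    //
    //     /// Vector add
    //     name = vadd
    //     arm = vadd.s
    //     aarch64 = add
    //     a = 1, 2, 3, 4
    //     b = 4, 3, 2, 1
    //     validate 5, 5, 5, 5
    //     generate int32x2_t, uint32x4_t
    //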
for line in f.lines() {
let line = line.unwrap();
if line.is_empty() {
continue;
}
if line.starts_with("/// ") {
current_comment = line;
current_name = None;
current_fn = None;
current_arm = None;
current_aarch64 = None;
link_aarch64 = None;
link_arm = None;
current_tests = Vec::new();
} else if line.starts_with("//") {
} else if line.starts_with("name = ") {
current_name = Some(String::from(&line[7..]));
} else if line.starts_with("fn = ") {
current_fn = Some(String::from(&line[5..]));
} else if line.starts_with("arm = ") {
current_arm = Some(String::from(&line[6..]));
} else if line.starts_with("aarch64 = ") {
current_aarch64 = Some(String::from(&line[10..]));
} else if line.starts_with("a = ") {
a = line[4..].split(',').map(|v| v.trim().to_string()).collect();
} else if line.starts_with("b = ") {
b = line[4..].split(',').map(|v| v.trim().to_string()).collect();
} else if line.starts_with("validate ") {
let e = line[9..].split(',').map(|v| v.trim().to_string()).collect();
current_tests.push((a.clone(), b.clone(), e));
} else if line.starts_with("link-aarch64 = ") {
link_aarch64 = Some(String::from(&line[15..]));
} else if line.starts_with("link-arm = ") {
link_arm = Some(String::from(&line[11..]));
} else if line.starts_with("generate ") {
let line = &line[9..];
let types: Vec<String> = line
.split(',')
.map(|v| v.trim().to_string())
.flat_map(|v| match v.as_str() {
"uint*_t" => UINT_TYPES.iter().map(|v| v.to_string()).collect(),
"uint64x*_t" => UINT_TYPES_64.iter().map(|v| v.to_string()).collect(),
"int*_t" => INT_TYPES.iter().map(|v| v.to_string()).collect(),
"int64x*_t" => INT_TYPES_64.iter().map(|v| v.to_string()).collect(),
"float*_t" => FLOAT_TYPES.iter().map(|v| v.to_string()).collect(),
"float64x*_t" => FLOAT_TYPES_64.iter().map(|v| v.to_string()).collect(),
_ => vec![v],
})
.collect();
for line in types {
let spec: Vec<&str> = line.split(':').map(|e| e.trim()).collect();
let in_t;
let out_t;
if spec.len() == 1 {
in_t = spec[0];
out_t = spec[0];
} else if spec.len() == 2 {
in_t = spec[0];
out_t = spec[1];
} else {
panic!("Bad spec: {}", line)
}
let current_name = current_name.clone().unwrap();
let name = format!("{}{}", current_name, type_to_suffix(in_t),);
if let Some(current_arm) = current_arm.clone() {
let (function, test) = gen_arm(
¤t_comment,
¤t_fn,
&name,
¤t_arm,
&link_arm,
¤t_aarch64,
&link_aarch64,
&in_t,
&out_t,
¤t_tests,
);
out_arm.push_str(&function);
tests_arm.push_str(&test);
} else {
let (function, test) = gen_aarch64(
¤t_comment,
¤t_fn,
&name,
¤t_aarch64,
&link_aarch64,
&in_t,
&out_t,
¤t_tests,
);
out_aarch64.push_str(&function);
tests_aarch64.push_str(&test);
}
}
}
}
tests_arm.push('}');
tests_arm.push('\n');
tests_aarch64.push('}');
tests_aarch64.push('\n');
let arm_out_path: PathBuf = PathBuf::from(env::var("OUT_DIR").unwrap())
.join("src")
.join("arm")
.join("neon");
std::fs::create_dir_all(&arm_out_path)?;
let mut file_arm = File::create(arm_out_path.join(ARM_OUT))?;
file_arm.write_all(out_arm.as_bytes())?;
file_arm.write_all(tests_arm.as_bytes())?;
let aarch64_out_path: PathBuf = PathBuf::from(env::var("OUT_DIR").unwrap())
.join("src")
.join("aarch64")
.join("neon");
std::fs::create_dir_all(&aarch64_out_path)?;
let mut file_aarch = File::create(aarch64_out_path.join(AARCH64_OUT))?;
file_aarch.write_all(out_aarch64.as_bytes())?;
file_aarch.write_all(tests_aarch64.as_bytes())?;
/*
if let Err(e) = Command::new("rustfmt")
.arg(&arm_out_path)
.arg(&aarch64_out_path)
.status() {
eprintln!("Could not format `{}`: {}", arm_out_path.to_str().unwrap(), e);
eprintln!("Could not format `{}`: {}", aarch64_out_path.to_str().unwrap(), e);
};
*/
Ok(())
}
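// Minimal sanity checks for the string-building helpers above; the expected
// values simply restate the tables in `max_val`/`min_val` and the
// constructor syntax emitted by `values`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn map_val_expands_placeholders() {
        assert_eq!(map_val("u8x8", "MAX"), "0xFF");
        assert_eq!(map_val("i16x4", "MIN"), "-32768");
        // Unknown values pass through untouched.
        assert_eq!(map_val("u8x8", "42"), "42");
    }

    #[test]
    fn values_builds_simd_constructor() {
        let vs: Vec<String> = vec!["1".into(), "2".into()];
        assert_eq!(values("float32x2_t", &vs), ": f32x2 = f32x2::new(1, 2)");
    }

    #[test]
    fn expand_intrinsic_appends_type_suffix() {
        // The returned string keeps its quotes for use inside assert_instr.
        assert_eq!(expand_intrinsic("vmax.", "int8x8_t"), r#""vmax.i8""#);
        assert_eq!(expand_intrinsic("vmax.s", "uint8x8_t"), r#""vmax.u8""#);
    }
}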
| 30.435419 | 105 | 0.47854 |
891d6e1060df85cf3446ce270ae1e856c22f4400 | 9,868 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use cursive::event::{Event, EventResult, Key};
use cursive::theme::ColorStyle;
use cursive::vec::Vec2;
use cursive::views::{EditView, NamedView};
use cursive::Cursive;
use cursive::Printer;
use cursive::View;
use std::cell::RefCell;
use std::collections::{HashMap, VecDeque};
use std::rc::Rc;
use crate::controllers::Controllers;
use crate::stats_view::{StatsView, ViewBridge};
const MAX_CMD_HISTORY: usize = 10;
/// Command palette will have different mode:
/// Info is used to show info like full cgroup path.
/// Alert is used to show error messages.
/// Command is used to turn command palette in Command mode.
// TODO: command mode for command palette.
#[derive(PartialEq)]
enum CPMode {
Info,
Alert,
Command,
}
/// TextView that used to display extra information
///
/// Currently, we will use command palette to display extra information like
/// full cgroup name. But the idea for this view is something like vim's command palette
/// that use for input operation command like search, filter, rearrange, apply config, etc.
pub struct CommandPalette {
content: String,
filter: Option<String>,
fold: bool,
mode: CPMode,
cmd_view: RefCell<EditView>,
cmd_controllers: Rc<RefCell<HashMap<&'static str, Controllers>>>,
cmd_history: VecDeque<String>,
cur_cmd_idx: usize,
}
impl View for CommandPalette {
fn draw(&self, printer: &Printer) {
// Right most X position that contains text
let mut max_x = printer.size.x;
printer.print_hline((0, 0), printer.size.x, "─");
if let Some(filter) = &self.filter {
let filter = format!("| Filter: {:>10.10} |", filter);
max_x -= filter.len();
printer.print((max_x, 0), &filter);
}
if self.fold {
let text = "| Fold |";
max_x -= text.len();
printer.print((max_x, 0), text);
}
match self.mode {
CPMode::Command => {
printer.print((0, 1), ":");
let inner_printer = printer.offset((1, 1));
self.cmd_view.borrow_mut().layout(inner_printer.size);
self.cmd_view.borrow().draw(&inner_printer);
}
_ => {
// Message should adapt the screen size
let mut msg_len_left = self.content.len();
let mut idx = 0;
let mut line = 1;
while msg_len_left > printer.size.x {
self.print(printer, (0, line), idx);
msg_len_left -= printer.size.x;
idx += printer.size.x;
line += 1;
}
self.print(printer, (0, line), idx);
}
}
}
fn on_event(&mut self, event: Event) -> EventResult {
match event {
Event::Key(Key::Up) => {
self.prev_cmd();
EventResult::Consumed(None)
}
Event::Key(Key::Down) => {
self.next_cmd();
EventResult::Consumed(None)
}
_ => self.cmd_view.borrow_mut().on_event(event),
}
}
fn required_size(&mut self, constraint: Vec2) -> Vec2 {
Vec2::new(1, self.content.len() / constraint.x + 2)
}
}
impl CommandPalette {
/// Create a new CommandPalette
pub fn new<V: 'static + ViewBridge>(
name: &'static str,
content: &str,
cmd_controllers: Rc<RefCell<HashMap<&'static str, Controllers>>>,
) -> Self {
Self {
content: content.into(),
filter: None,
fold: false,
mode: CPMode::Info,
cmd_view: RefCell::new(
EditView::new()
.on_submit(move |c, cmd| {
Self::handle_cmd_history(name, c, cmd);
Self::run_cmd::<V>(name, c, cmd)
})
.style(ColorStyle::terminal_default()),
),
cmd_controllers,
cmd_history: VecDeque::new(),
cur_cmd_idx: 0,
}
}
fn handle_cmd_history(name: &'static str, c: &mut Cursive, cmd: &str) {
c.call_on_name(
&format!("{}_cmd_palette", name),
|cp: &mut NamedView<CommandPalette>| {
let mut cmd_palette = cp.get_mut();
cmd_palette.cmd_history.push_back(cmd.into());
if cmd_palette.cmd_history.len() > MAX_CMD_HISTORY {
cmd_palette.cmd_history.pop_front();
}
cmd_palette.cur_cmd_idx = cmd_palette.cmd_history.len() - 1;
},
);
}
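    // History navigation sketch: with history ["a", "b", "c"] and
    // `cur_cmd_idx == 2`, pressing Up recalls "c", then "b", then "a";
    // pressing Down walks back through "b" and "c" to an empty prompt.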
fn prev_cmd(&mut self) {
if self.cmd_history.is_empty() {
return;
}
self.cmd_view
.borrow_mut()
.set_content(&self.cmd_history[self.cur_cmd_idx]);
if self.cur_cmd_idx > 0 {
self.cur_cmd_idx -= 1;
}
}
fn next_cmd(&mut self) {
if self.cmd_history.is_empty() {
return;
}
if self.cur_cmd_idx == self.cmd_history.len() - 1 {
self.cmd_view.borrow_mut().set_content("");
} else {
self.cur_cmd_idx += 1;
self.cmd_view
.borrow_mut()
.set_content(&self.cmd_history[self.cur_cmd_idx]);
}
}
/// Run the captured command
// In this function, we should avoid borrowing command palette object, since
// it will cause a double mut borrow in the handler.
pub fn run_cmd<V: 'static + ViewBridge>(name: &'static str, c: &mut Cursive, cmd: &str) {
let cmd_vec = cmd.trim().split(' ').collect::<Vec<&str>>();
let controller = c
.find_name::<Self>(&format!("{}_cmd_palette", name))
.expect("Fail to get cmd_palette")
.cmd_controllers
.borrow()
.get(cmd_vec[0])
.unwrap_or(&Controllers::Unknown)
.clone();
match controller {
Controllers::Unknown => {
let mut cp = c
.find_name::<Self>(&format!("{}_cmd_palette", name))
.expect("Fail to get cmd_palette");
cp.mode = CPMode::Alert;
cp.content = "Unknown Command".into();
cp.cmd_view.borrow_mut().set_content("");
}
_ => {
controller.handle(&mut StatsView::<V>::get_view(c), &cmd_vec);
controller.callback::<V>(c, &cmd_vec);
c.call_on_name(
&format!("{}_cmd_palette", name),
|cp: &mut NamedView<CommandPalette>| {
cp.get_mut().reset_cmd();
},
);
}
}
}
pub fn reset_cmd(&mut self) {
self.mode = CPMode::Info;
self.cmd_view.borrow_mut().set_content("");
}
/// Turn cmd_palette into command input mode
pub fn invoke_cmd(&mut self) {
self.mode = CPMode::Command;
self.content = "".into()
}
/// Check if command palette is in command mode
pub fn is_cmd_mode(&self) -> bool {
self.mode == CPMode::Command
}
/// Set the display info
pub fn set_info<T: Into<String>>(&mut self, content: T) {
self.content = content.into();
if self.mode != CPMode::Command {
self.mode = CPMode::Info;
}
}
/// Set alert
/// This will preempt the command palette mode.
pub fn set_alert<T: Into<String>>(&mut self, content: T) {
if self.mode == CPMode::Alert {
// Attach to current alert if it is not consumed.
self.content = format!("{} |=| {}", self.content, content.into());
} else {
self.content = content.into();
if self.mode != CPMode::Command {
self.mode = CPMode::Alert;
}
}
}
pub fn set_filter(&mut self, filter: Option<String>) {
self.filter = filter;
}
pub fn toggle_fold(&mut self) {
self.fold = !self.fold;
}
fn print_info(&self, printer: &Printer, pos: Vec2, idx: usize) {
if idx + printer.size.x > self.content.len() {
printer.print(pos, &self.content[idx..]);
} else {
printer.print(pos, &self.content[idx..idx + printer.size.x]);
}
}
fn print_alert(&self, printer: &Printer, pos: Vec2, idx: usize) {
printer.with_color(ColorStyle::title_primary(), |printer| {
if idx + printer.size.x > self.content.len() {
printer.print(pos, &self.content[idx..]);
} else {
printer.print(pos, &self.content[idx..idx + printer.size.x]);
}
})
}
fn print<T: Into<Vec2>>(&self, printer: &Printer, pos: T, idx: usize) {
match self.mode {
CPMode::Info => self.print_info(printer, pos.into(), idx),
CPMode::Alert => self.print_alert(printer, pos.into(), idx),
_ => {}
}
}
pub fn is_alerting(&self) -> bool {
self.mode == CPMode::Alert
}
    pub fn get_content(&self) -> &str {
&self.content
}
}
| 32.675497 | 93 | 0.537394 |
919b0f37dbd29b2d3872ffb2ea733d8af686c345 | 7,919 | use super::*;
pub(super) const PATTERN_FIRST: TokenSet = expressions::LITERAL_FIRST
.union(paths::PATH_FIRST)
.union(token_set![BOX_KW, REF_KW, MUT_KW, L_PAREN, L_BRACK, AMP, UNDERSCORE, MINUS, DOT]);
pub(crate) fn pattern(p: &mut Parser) {
pattern_r(p, PAT_RECOVERY_SET);
}
/// Parses a pattern list separated by pipes `|`
pub(super) fn pattern_list(p: &mut Parser) {
pattern_list_r(p, PAT_RECOVERY_SET)
}
/// Parses a pattern list separated by pipes `|`
/// using the given `recovery_set`
pub(super) fn pattern_list_r(p: &mut Parser, recovery_set: TokenSet) {
p.eat(T![|]);
pattern_r(p, recovery_set);
while p.eat(T![|]) {
pattern_r(p, recovery_set);
}
}
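// Note that the leading `|` is optional, so both `A | B(..)` and the
// `match x { | A | B => () }` form are accepted — the first `p.eat(T![|])`
// above swallows it.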
pub(super) fn pattern_r(p: &mut Parser, recovery_set: TokenSet) {
if let Some(lhs) = atom_pat(p, recovery_set) {
// test range_pat
// fn main() {
// match 92 {
// 0 ... 100 => (),
// 101 ..= 200 => (),
// 200 .. 301=> (),
// }
// }
for &range_op in [T![...], T![..=], T![..]].iter() {
if p.at(range_op) {
let m = lhs.precede(p);
p.bump(range_op);
atom_pat(p, recovery_set);
m.complete(p, RANGE_PAT);
return;
}
}
// test marco_pat
// fn main() {
// let m!(x) = 0;
// }
if lhs.kind() == PATH_PAT && p.at(T![!]) {
let m = lhs.precede(p);
items::macro_call_after_excl(p);
m.complete(p, MACRO_CALL);
}
}
}
const PAT_RECOVERY_SET: TokenSet =
token_set![LET_KW, IF_KW, WHILE_KW, LOOP_KW, MATCH_KW, R_PAREN, COMMA];
fn atom_pat(p: &mut Parser, recovery_set: TokenSet) -> Option<CompletedMarker> {
let m = match p.nth(0) {
T![box] => box_pat(p),
T![ref] | T![mut] => bind_pat(p, true),
IDENT => match p.nth(1) {
// Checks the token after an IDENT to see if a pattern is a path (Struct { .. }) or macro
// (T![x]).
T!['('] | T!['{'] | T![!] => path_pat(p),
T![:] if p.nth_at(1, T![::]) => path_pat(p),
_ => bind_pat(p, true),
},
_ if paths::is_use_path_start(p) => path_pat(p),
_ if is_literal_pat_start(p) => literal_pat(p),
T![.] if p.at(T![..]) => dot_dot_pat(p),
T![_] => placeholder_pat(p),
T![&] => ref_pat(p),
T!['('] => tuple_pat(p),
T!['['] => slice_pat(p),
_ => {
p.err_recover("expected pattern", recovery_set);
return None;
}
};
Some(m)
}
fn is_literal_pat_start(p: &Parser) -> bool {
p.at(T![-]) && (p.nth(1) == INT_NUMBER || p.nth(1) == FLOAT_NUMBER)
|| p.at_ts(expressions::LITERAL_FIRST)
}
// test literal_pattern
// fn main() {
// match () {
// -1 => (),
// 92 => (),
// 'c' => (),
// "hello" => (),
// }
// }
fn literal_pat(p: &mut Parser) -> CompletedMarker {
assert!(is_literal_pat_start(p));
let m = p.start();
if p.at(T![-]) {
p.bump_any();
}
expressions::literal(p);
m.complete(p, LITERAL_PAT)
}
// test path_part
// fn foo() {
// let foo::Bar = ();
// let ::Bar = ();
// let Bar { .. } = ();
// let Bar(..) = ();
// }
fn path_pat(p: &mut Parser) -> CompletedMarker {
assert!(paths::is_use_path_start(p));
let m = p.start();
paths::expr_path(p);
let kind = match p.current() {
T!['('] => {
tuple_pat_fields(p);
TUPLE_STRUCT_PAT
}
T!['{'] => {
record_field_pat_list(p);
RECORD_PAT
}
_ => PATH_PAT,
};
m.complete(p, kind)
}
// test tuple_pat_fields
// fn foo() {
// let S() = ();
// let S(_) = ();
// let S(_,) = ();
// let S(_, .. , x) = ();
// }
fn tuple_pat_fields(p: &mut Parser) {
assert!(p.at(T!['(']));
p.bump_any();
pat_list(p, T![')']);
p.expect(T![')']);
}
// test record_field_pat_list
// fn foo() {
// let S {} = ();
// let S { f, ref mut g } = ();
// let S { h: _, ..} = ();
// let S { h: _, } = ();
// }
fn record_field_pat_list(p: &mut Parser) {
assert!(p.at(T!['{']));
let m = p.start();
p.bump_any();
while !p.at(EOF) && !p.at(T!['}']) {
match p.current() {
// A trailing `..` is *not* treated as a DOT_DOT_PAT.
T![.] if p.at(T![..]) => p.bump(T![..]),
IDENT if p.nth(1) == T![:] => record_field_pat(p),
T!['{'] => error_block(p, "expected ident"),
T![box] => {
box_pat(p);
}
_ => {
bind_pat(p, false);
}
}
if !p.at(T!['}']) {
p.expect(T![,]);
}
}
p.expect(T!['}']);
m.complete(p, RECORD_FIELD_PAT_LIST);
}
fn record_field_pat(p: &mut Parser) {
assert!(p.at(IDENT));
assert!(p.nth(1) == T![:]);
let m = p.start();
name(p);
p.bump_any();
pattern(p);
m.complete(p, RECORD_FIELD_PAT);
}
// test placeholder_pat
// fn main() { let _ = (); }
fn placeholder_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T![_]));
let m = p.start();
p.bump_any();
m.complete(p, PLACEHOLDER_PAT)
}
// test dot_dot_pat
// fn main() {
// let .. = ();
// //
// // Tuples
// //
// let (a, ..) = ();
// let (a, ..,) = ();
// let Tuple(a, ..) = ();
// let Tuple(a, ..,) = ();
// let (.., ..) = ();
// let Tuple(.., ..) = ();
// let (.., a, ..) = ();
// let Tuple(.., a, ..) = ();
// //
// // Slices
// //
// let [..] = ();
// let [head, ..] = ();
// let [head, tail @ ..] = ();
// let [head, .., cons] = ();
// let [head, mid @ .., cons] = ();
// let [head, .., .., cons] = ();
// let [head, .., mid, tail @ ..] = ();
// let [head, .., mid, .., cons] = ();
// }
fn dot_dot_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T![..]));
let m = p.start();
p.bump(T![..]);
m.complete(p, DOT_DOT_PAT)
}
// test ref_pat
// fn main() {
// let &a = ();
// let &mut b = ();
// }
fn ref_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T![&]));
let m = p.start();
p.bump_any();
p.eat(T![mut]);
pattern(p);
m.complete(p, REF_PAT)
}
// test tuple_pat
// fn main() {
// let (a, b, ..) = ();
// }
fn tuple_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T!['(']));
let m = p.start();
tuple_pat_fields(p);
m.complete(p, TUPLE_PAT)
}
// test slice_pat
// fn main() {
// let [a, b, ..] = [];
// }
fn slice_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T!['[']));
let m = p.start();
p.bump_any();
pat_list(p, T![']']);
p.expect(T![']']);
m.complete(p, SLICE_PAT)
}
fn pat_list(p: &mut Parser, ket: SyntaxKind) {
while !p.at(EOF) && !p.at(ket) {
if !p.at_ts(PATTERN_FIRST) {
p.error("expected a pattern");
break;
}
pattern(p);
if !p.at(ket) {
p.expect(T![,]);
}
}
}
// test bind_pat
// fn main() {
// let a = ();
// let mut b = ();
// let ref c = ();
// let ref mut d = ();
// let e @ _ = ();
// let ref mut f @ g @ _ = ();
// }
fn bind_pat(p: &mut Parser, with_at: bool) -> CompletedMarker {
let m = p.start();
p.eat(T![ref]);
p.eat(T![mut]);
name(p);
if with_at && p.eat(T![@]) {
pattern(p);
}
m.complete(p, BIND_PAT)
}
// test box_pat
// fn main() {
// let box i = ();
// let box Outer { box i, j: box Inner(box &x) } = ();
// let box ref mut i = ();
// }
fn box_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T![box]));
let m = p.start();
p.bump_any();
pattern(p);
m.complete(p, BOX_PAT)
}
| 24.366154 | 101 | 0.454224 |
c1480d3d149eadfe8d92cdbf3019ee4d19961fb5 | 9,514 | use specs::prelude::*;
use super::*;
use crate::components::{Pools, Player, Attributes, Confusion, SerializeMe, Duration, StatusEffect,
Name, EquipmentChanged, Slow, DamageOverTime, Skills};
use crate::map::Map;
use crate::gamesystem::{player_hp_at_level, mana_at_level};
use crate::gamelog::GameLog;
use specs::saveload::{MarkedBuilder, SimpleMarker};
pub fn inflict_damage(ecs: &mut World, damage: &EffectSpawner, target: Entity) {
let mut pools = ecs.write_storage::<Pools>();
if let Some(pool) = pools.get_mut(target) {
if !pool.god_mode {
if let Some(creator) = damage.creator {
if creator == target {
return;
}
}
if let EffectType::Damage{amount} = damage.effect_type {
pool.hit_points.current -= amount;
add_effect(None, EffectType::Bloodstain, Targets::Single{target});
add_effect(None,
EffectType::Particle{
glyph: rltk::to_cp437('‼'),
fg : rltk::RGB::named(rltk::ORANGE),
bg : rltk::RGB::named(rltk::BLACK),
lifespan: 200.0
},
Targets::Single{target}
);
if pool.hit_points.current < 1 {
add_effect(damage.creator, EffectType::EntityDeath, Targets::Single{target});
}
}
}
}
}
pub fn bloodstain(ecs: &mut World, tile_idx : i32) {
let mut map = ecs.fetch_mut::<Map>();
map.bloodstains.insert(tile_idx as usize);
}
pub fn death(ecs: &mut World, effect: &EffectSpawner, target : Entity) {
let mut xp_gain = 0;
let mut gold_gain = 0.0f32;
let mut pools = ecs.write_storage::<Pools>();
let mut attributes = ecs.write_storage::<Attributes>();
if let Some(pos) = entity_position(ecs, target) {
let mut map_mut = ecs.fetch_mut::<Map>();
map_mut.blocked[pos as usize] = false;
std::mem::drop(map_mut);
}
if let Some(source) = effect.creator {
if ecs.read_storage::<Player>().get(source).is_some() {
if let Some(stats) = pools.get(target) {
xp_gain += stats.level * 100;
gold_gain += stats.gold;
}
if xp_gain != 0 || gold_gain != 0.0 {
let mut log = ecs.fetch_mut::<GameLog>();
let mut player_stats = pools.get_mut(source).unwrap();
let mut player_attributes = attributes.get_mut(source).unwrap();
player_stats.xp += xp_gain;
player_stats.gold += gold_gain;
if player_stats.xp >= player_stats.level * 1000 {
// We've gone up a level!
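                    // e.g. a level-1 player levels up at 1000 xp, a level-2 player at 2000, and so on.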
player_stats.level += 1;
log.entries.push(format!("Congratulations, you are now level {}", player_stats.level));
// Improve a random attribute
let mut rng = ecs.fetch_mut::<rltk::RandomNumberGenerator>();
let attr_to_boost = rng.roll_dice(1, 4);
match attr_to_boost {
1 => {
player_attributes.might.base += 1;
log.entries.push("You feel stronger!".to_string());
}
2 => {
player_attributes.fitness.base += 1;
log.entries.push("You feel healthier!".to_string());
}
3 => {
player_attributes.quickness.base += 1;
log.entries.push("You feel quicker!".to_string());
}
_ => {
player_attributes.intelligence.base += 1;
log.entries.push("You feel smarter!".to_string());
}
}
// Improve all skills
let mut skills = ecs.write_storage::<Skills>();
let player_skills = skills.get_mut(*ecs.fetch::<Entity>()).unwrap();
for sk in player_skills.skills.iter_mut() {
*sk.1 += 1;
}
ecs.write_storage::<EquipmentChanged>()
.insert(
*ecs.fetch::<Entity>(),
EquipmentChanged{})
.expect("Insert Failed");
player_stats.hit_points.max = player_hp_at_level(
player_attributes.fitness.base + player_attributes.fitness.modifiers,
player_stats.level
);
player_stats.hit_points.current = player_stats.hit_points.max;
player_stats.mana.max = mana_at_level(
player_attributes.intelligence.base + player_attributes.intelligence.modifiers,
player_stats.level
);
player_stats.mana.current = player_stats.mana.max;
let player_pos = ecs.fetch::<rltk::Point>();
let map = ecs.fetch::<Map>();
for i in 0..10 {
if player_pos.y - i > 1 {
add_effect(None,
EffectType::Particle{
glyph: rltk::to_cp437('░'),
fg : rltk::RGB::named(rltk::GOLD),
bg : rltk::RGB::named(rltk::BLACK),
lifespan: 400.0
},
Targets::Tile{ tile_idx : map.xy_idx(player_pos.x, player_pos.y - i) as i32 }
);
}
}
}
}
}
}
}
pub fn heal_damage(ecs: &mut World, heal: &EffectSpawner, target: Entity) {
let mut pools = ecs.write_storage::<Pools>();
if let Some(pool) = pools.get_mut(target) {
if let EffectType::Healing{amount} = heal.effect_type {
pool.hit_points.current = i32::min(pool.hit_points.max, pool.hit_points.current + amount);
add_effect(None,
EffectType::Particle{
glyph: rltk::to_cp437('‼'),
fg : rltk::RGB::named(rltk::GREEN),
bg : rltk::RGB::named(rltk::BLACK),
lifespan: 200.0
},
Targets::Single{target}
);
}
}
}
pub fn restore_mana(ecs: &mut World, mana: &EffectSpawner, target: Entity) {
let mut pools = ecs.write_storage::<Pools>();
if let Some(pool) = pools.get_mut(target) {
if let EffectType::Mana{amount} = mana.effect_type {
pool.mana.current = i32::min(pool.mana.max, pool.mana.current + amount);
add_effect(None,
EffectType::Particle{
glyph: rltk::to_cp437('‼'),
fg : rltk::RGB::named(rltk::BLUE),
bg : rltk::RGB::named(rltk::BLACK),
lifespan: 200.0
},
Targets::Single{target}
);
}
}
}
pub fn add_confusion(ecs: &mut World, effect: &EffectSpawner, target: Entity) {
if let EffectType::Confusion{turns} = &effect.effect_type {
ecs.create_entity()
.with(StatusEffect{ target })
.with(Confusion{})
.with(Duration{ turns : *turns})
.with(Name{ name : "Confusion".to_string() })
.marked::<SimpleMarker<SerializeMe>>()
.build();
}
}
pub fn attribute_effect(ecs: &mut World, effect: &EffectSpawner, target: Entity) {
if let EffectType::AttributeEffect{bonus, name, duration} = &effect.effect_type {
ecs.create_entity()
.with(StatusEffect{ target })
.with(bonus.clone())
.with(Duration { turns : *duration })
.with(Name { name : name.clone() })
.marked::<SimpleMarker<SerializeMe>>()
.build();
ecs.write_storage::<EquipmentChanged>().insert(target, EquipmentChanged{}).expect("Insert failed");
}
}
pub fn slow(ecs: &mut World, effect: &EffectSpawner, target: Entity) {
if let EffectType::Slow{initiative_penalty} = &effect.effect_type {
ecs.create_entity()
.with(StatusEffect{ target })
.with(Slow{ initiative_penalty : *initiative_penalty })
.with(Duration{ turns : 5})
.with(
if *initiative_penalty > 0.0 {
Name{ name : "Slowed".to_string() }
} else {
Name{ name : "Hasted".to_string() }
}
)
.marked::<SimpleMarker<SerializeMe>>()
.build();
}
}
pub fn damage_over_time(ecs: &mut World, effect: &EffectSpawner, target: Entity) {
if let EffectType::DamageOverTime{damage} = &effect.effect_type {
ecs.create_entity()
.with(StatusEffect{ target })
.with(DamageOverTime{ damage : *damage })
.with(Duration{ turns : 5})
.with(Name{ name : "Damage Over Time".to_string() })
.marked::<SimpleMarker<SerializeMe>>()
.build();
}
}
| 40.832618 | 109 | 0.490961 |
e2d7da45c81a92dc731fbb0e6849492cf545ab01 | 10,680 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::FsHcint6 {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
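    // Usage sketch (illustrative; the field name on the peripheral struct is
    // assumed): clear the "transfer completed" flag while leaving the other
    // bits untouched:
    //
    //     periph.fshcint6.modify(|_, w| unsafe { w.xfrc().bits(1) });
    //
    // The field writers are `unsafe` because the PAC does not enumerate
    // their valid bit patterns.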
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
}
#[doc = r" Value of the field"]
pub struct XfrcR {
bits: u8,
}
impl XfrcR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct ChhR {
bits: u8,
}
impl ChhR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct StallR {
bits: u8,
}
impl StallR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct NakR {
bits: u8,
}
impl NakR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct AckR {
bits: u8,
}
impl AckR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct TxerrR {
bits: u8,
}
impl TxerrR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct BberrR {
bits: u8,
}
impl BberrR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct FrmorR {
bits: u8,
}
impl FrmorR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct DterrR {
bits: u8,
}
impl DterrR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _XfrcW<'a> {
w: &'a mut W,
}
impl<'a> _XfrcW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _ChhW<'a> {
w: &'a mut W,
}
impl<'a> _ChhW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _StallW<'a> {
w: &'a mut W,
}
impl<'a> _StallW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _NakW<'a> {
w: &'a mut W,
}
impl<'a> _NakW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 4;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _AckW<'a> {
w: &'a mut W,
}
impl<'a> _AckW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 5;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _TxerrW<'a> {
w: &'a mut W,
}
impl<'a> _TxerrW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 7;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _BberrW<'a> {
w: &'a mut W,
}
impl<'a> _BberrW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _FrmorW<'a> {
w: &'a mut W,
}
impl<'a> _FrmorW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 9;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _DterrW<'a> {
w: &'a mut W,
}
impl<'a> _DterrW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 10;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 0 - Transfer completed"]
#[inline(always)]
pub fn xfrc(&self) -> XfrcR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u8
};
XfrcR { bits }
}
#[doc = "Bit 1 - Channel halted"]
#[inline(always)]
pub fn chh(&self) -> ChhR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) as u8
};
ChhR { bits }
}
#[doc = "Bit 3 - STALL response received interrupt"]
#[inline(always)]
pub fn stall(&self) -> StallR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) as u8
};
StallR { bits }
}
#[doc = "Bit 4 - NAK response received interrupt"]
#[inline(always)]
pub fn nak(&self) -> NakR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 4;
((self.bits >> OFFSET) & MASK as u32) as u8
};
NakR { bits }
}
#[doc = "Bit 5 - ACK response received/transmitted interrupt"]
#[inline(always)]
pub fn ack(&self) -> AckR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 5;
((self.bits >> OFFSET) & MASK as u32) as u8
};
AckR { bits }
}
#[doc = "Bit 7 - Transaction error"]
#[inline(always)]
pub fn txerr(&self) -> TxerrR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 7;
((self.bits >> OFFSET) & MASK as u32) as u8
};
TxerrR { bits }
}
#[doc = "Bit 8 - Babble error"]
#[inline(always)]
pub fn bberr(&self) -> BberrR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) as u8
};
BberrR { bits }
}
#[doc = "Bit 9 - Frame overrun"]
#[inline(always)]
pub fn frmor(&self) -> FrmorR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 9;
((self.bits >> OFFSET) & MASK as u32) as u8
};
FrmorR { bits }
}
#[doc = "Bit 10 - Data toggle error"]
#[inline(always)]
pub fn dterr(&self) -> DterrR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 10;
((self.bits >> OFFSET) & MASK as u32) as u8
};
DterrR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 0 - Transfer completed"]
#[inline(always)]
pub fn xfrc(&mut self) -> _XfrcW {
_XfrcW { w: self }
}
#[doc = "Bit 1 - Channel halted"]
#[inline(always)]
pub fn chh(&mut self) -> _ChhW {
_ChhW { w: self }
}
#[doc = "Bit 3 - STALL response received interrupt"]
#[inline(always)]
pub fn stall(&mut self) -> _StallW {
_StallW { w: self }
}
#[doc = "Bit 4 - NAK response received interrupt"]
#[inline(always)]
pub fn nak(&mut self) -> _NakW {
_NakW { w: self }
}
#[doc = "Bit 5 - ACK response received/transmitted interrupt"]
#[inline(always)]
pub fn ack(&mut self) -> _AckW {
_AckW { w: self }
}
#[doc = "Bit 7 - Transaction error"]
#[inline(always)]
pub fn txerr(&mut self) -> _TxerrW {
_TxerrW { w: self }
}
#[doc = "Bit 8 - Babble error"]
#[inline(always)]
pub fn bberr(&mut self) -> _BberrW {
_BberrW { w: self }
}
#[doc = "Bit 9 - Frame overrun"]
#[inline(always)]
pub fn frmor(&mut self) -> _FrmorW {
_FrmorW { w: self }
}
#[doc = "Bit 10 - Data toggle error"]
#[inline(always)]
pub fn dterr(&mut self) -> _DterrW {
_DterrW { w: self }
}
}
| 24.895105 | 66 | 0.493258 |
4a8e66c042d9fcb0d491a76b79fba520a596e5cb | 1,896 |
pub struct IconVisibilityOff {
props: crate::Props,
}
impl yew::Component for IconVisibilityOff {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0V0zm0 0h24v24H0V0zm0 0h24v24H0V0zm0 0h24v24H0V0z" fill="none"/><path d="M12 6.5c2.76 0 5 2.24 5 5 0 .51-.1 1-.24 1.46l3.06 3.06c1.39-1.23 2.49-2.77 3.18-4.53C21.27 7.11 17 4 12 4c-1.27 0-2.49.2-3.64.57l2.17 2.17c.47-.14.96-.24 1.47-.24zM3.42 2.45L2.01 3.87l2.68 2.68C3.06 7.83 1.77 9.53 1 11.5 2.73 15.89 7 19 12 19c1.52 0 2.97-.3 4.31-.82l3.43 3.43 1.41-1.41L3.42 2.45zM12 16.5c-2.76 0-5-2.24-5-5 0-.77.18-1.5.49-2.14l1.57 1.57c-.03.18-.06.37-.06.57 0 1.66 1.34 3 3 3 .2 0 .38-.03.57-.07L14.14 16c-.65.32-1.37.5-2.14.5zm2.97-5.33c-.15-1.4-1.25-2.49-2.64-2.64l2.64 2.64z"/></svg>
</svg>
}
}
}
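// Usage sketch (illustrative; prop names follow `crate::Props` as consumed
// above):
//
//     html! { <IconVisibilityOff size=16 color="currentColor" /> }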
| 41.217391 | 695 | 0.59019 |
69e55c972785bff3743400e3ad9aa4b1d5a98b9c | 1,962 | #[macro_use]
extern crate lua;
mod lib;
use lua::{Error, Index};
use lib::{example, vector};
use std::{env, fs, io};
fn main() {
let mut state = lua::State::new();
state.open_libs();
vector::load_lib("vec", &mut state).unwrap();
example::load_lib("rust", &mut state).unwrap();
// evaluate file from first argument
if let Some(file) = env::args().nth(1) {
state.eval(fs::read(file).unwrap()).unwrap();
}
splash();
let mut stdin = io::stdin();
let mut source = String::new();
loop {
let bytes_read = read_line(&mut stdin, &mut source);
match (bytes_read, state.eval(&source)) {
(_, Ok(_)) => source.clear(),
(b, Err(Error::Syntax)) if b > 0 => state.pop(1),
(_, Err(Error::Runtime)) | (_, Err(Error::Syntax)) => {
{
let error: &str = state.get(Index::TOP).unwrap();
eprintln!("ERROR: {:?}", error);
}
state.pop(1);
source.clear();
}
_ => panic!(),
}
}
}
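/// Prompts for and reads one line, appending it to `source`. Returns the
/// number of bytes read excluding the trailing newline — e.g. entering
/// `print("hi")` plus Enter reads 12 bytes and returns 11. A non-zero
/// return combined with a syntax error is what makes the REPL keep reading
/// an incomplete chunk instead of reporting a failure.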
fn read_line(stdin: &mut io::Stdin, source: &mut String) -> usize {
if source.is_empty() {
eprint!("lua> ");
} else {
eprint!(".... ");
}
let mut line = String::new();
if let Ok(b) = stdin.read_line(&mut line) {
source.push_str(line.as_str());
        // `read_line` returns Ok(0) at EOF; saturate so subtracting the
        // trailing newline cannot underflow.
        b.saturating_sub(1)
} else {
0
}
}
fn splash() {
eprintln!("# Welcome to the Lua shell! (written in Rust)");
eprintln!();
eprintln!("The following Rust functions can be called from the shell:");
eprintln!(
" * rust.error() - Raises a runtime error. The error message is also formatted in rust"
);
eprintln!(" * rust.add(a, b) - Returns the sum of `a` and `b`");
eprintln!(" * rust.len(c) - Returns the length of the string `c`");
eprintln!(" * vec.new(c) - Creates a vector of capacity c");
eprintln!();
}
| 25.815789 | 96 | 0.514781 |
0eb604a783acde7c24780b46f83673cb0493912e | 8,089 | use crate::search::{Search, SearchCommand, SearchInfo, SearchResult, MAX_SEARCH_DEPTH};
use crossbeam_channel::{Receiver, Sender};
use eval::eval::{Eval, Score, CHECKMATE_BLACK, CHECKMATE_WHITE, EQUAL_POSITION, NEGATIVE_INF};
use movegen::move_generator::MoveGenerator;
use movegen::position_history::PositionHistory;
use movegen::r#move::{Move, MoveList};
use movegen::side::Side;
use movegen::transposition_table::TranspositionTable;
use movegen::zobrist::Zobrist;
use std::cmp;
use std::ops::Neg;
#[derive(Clone, Copy, Debug)]
struct NegamaxTableEntry {
depth: u8,
score: Score,
best_move: Move,
}
impl NegamaxTableEntry {
fn new(depth: usize, score: Score, best_move: Move) -> Self {
debug_assert!(depth <= MAX_SEARCH_DEPTH);
Self {
depth: depth as u8,
score,
best_move,
}
}
fn depth(&self) -> usize {
self.depth as usize
}
fn score(&self) -> Score {
self.score
}
fn best_move(&self) -> Move {
self.best_move
}
}
impl Neg for NegamaxTableEntry {
type Output = Self;
// Changes the sign of the score and leaves the rest unchanged
fn neg(self) -> Self::Output {
Self::new(self.depth(), -self.score(), self.best_move())
}
}
pub struct Negamax {
transpos_table: TranspositionTable<Zobrist, NegamaxTableEntry>,
}
impl Search for Negamax {
fn search(
&mut self,
pos_history: &mut PositionHistory,
depth: usize,
command_receiver: &mut Receiver<SearchCommand>,
info_sender: &mut Sender<SearchInfo>,
) {
for depth in 0..=depth {
if let Ok(SearchCommand::Stop) = command_receiver.try_recv() {
break;
}
match self.search_recursive(pos_history, depth, command_receiver, info_sender) {
Some(rel_negamax_res) => {
let abs_negamax_res = match pos_history.current_pos().side_to_move() {
Side::White => rel_negamax_res,
Side::Black => -rel_negamax_res,
};
let search_res = SearchResult::new(
depth,
abs_negamax_res.score(),
abs_negamax_res.best_move(),
self.principal_variation(pos_history, depth),
);
info_sender
.send(SearchInfo::DepthFinished(search_res))
.expect("Error sending SearchInfo");
if let CHECKMATE_WHITE | CHECKMATE_BLACK = abs_negamax_res.score() {
break;
}
}
None => break,
}
}
info_sender
.send(SearchInfo::Stopped)
.expect("Error sending SearchInfo");
}
}
impl Negamax {
pub fn new(table_idx_bits: usize) -> Self {
assert!(table_idx_bits > 0);
Self {
transpos_table: TranspositionTable::new(table_idx_bits),
}
}
fn principal_variation(&self, pos_history: &mut PositionHistory, depth: usize) -> MoveList {
let mut move_list = MoveList::with_capacity(depth);
let mut d = depth;
while d > 0 {
match self.transpos_table.get(&pos_history.current_pos_hash()) {
Some(entry) if entry.depth() == d && entry.best_move != Move::NULL => {
move_list.push(entry.best_move());
debug_assert!(move_list.len() <= depth);
pos_history.do_move(entry.best_move());
d -= 1;
}
_ => break,
}
}
while d < depth {
d += 1;
pos_history.undo_last_move();
}
move_list
}
fn search_recursive(
&mut self,
pos_history: &mut PositionHistory,
depth: usize,
command_receiver: &mut Receiver<SearchCommand>,
info_sender: &mut Sender<SearchInfo>,
) -> Option<NegamaxTableEntry> {
if let Ok(SearchCommand::Stop) = command_receiver.try_recv() {
return None;
}
let pos = pos_history.current_pos();
let pos_hash = pos_history.current_pos_hash();
if let Some(entry) = self.lookup_table_entry(pos_hash, depth) {
return Some(*entry);
}
let mut best_score = NEGATIVE_INF;
let mut best_move = Move::NULL;
match depth {
0 => Some(self.search_quiescence(pos_history)),
_ => {
let mut move_list = MoveList::new();
MoveGenerator::generate_moves(&mut move_list, pos);
if move_list.is_empty() {
let score = if pos.is_in_check(pos.side_to_move()) {
CHECKMATE_WHITE
} else {
EQUAL_POSITION
};
let node = NegamaxTableEntry::new(depth, score, Move::NULL);
self.update_table(pos_hash, node);
best_score = cmp::max(best_score, score);
} else {
for m in move_list.iter() {
pos_history.do_move(*m);
let opt_neg_res = self.search_recursive(
pos_history,
depth - 1,
command_receiver,
info_sender,
);
pos_history.undo_last_move();
match opt_neg_res {
Some(neg_search_res) => {
let search_result = -neg_search_res;
let score = search_result.score();
if score > best_score {
best_score = score;
best_move = *m;
}
}
None => return None,
}
let node = NegamaxTableEntry::new(depth, best_score, best_move);
self.update_table(pos_hash, node);
}
}
debug_assert!(self.transpos_table.get(&pos_hash).is_some());
let node = NegamaxTableEntry::new(depth, best_score, best_move);
self.update_table(pos_hash, node);
Some(node)
}
}
}
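    // Quiescence search: only capture moves are expanded below, so the
    // static evaluation is taken at a "quiet" position and horizon effects
    // are reduced.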
fn search_quiescence(&mut self, pos_history: &mut PositionHistory) -> NegamaxTableEntry {
let depth = 0;
let pos = pos_history.current_pos();
let pos_hash = pos_history.current_pos_hash();
if let Some(entry) = self.lookup_table_entry(pos_hash, depth) {
return *entry;
}
let mut score = Eval::eval_relative(pos);
let mut best_score = score;
let mut best_move = Move::NULL;
let mut move_list = MoveList::new();
MoveGenerator::generate_moves(&mut move_list, pos);
for m in move_list.iter().filter(|m| m.is_capture()) {
pos_history.do_move(*m);
let search_result = -self.search_quiescence(pos_history);
score = search_result.score();
pos_history.undo_last_move();
if score > best_score {
best_score = score;
best_move = *m;
}
}
let node = NegamaxTableEntry::new(depth, best_score, best_move);
self.update_table(pos_hash, node);
node
}
fn update_table(&mut self, pos_hash: Zobrist, node: NegamaxTableEntry) {
self.transpos_table.insert(pos_hash, node);
}
fn lookup_table_entry(&self, pos_hash: Zobrist, depth: usize) -> Option<&NegamaxTableEntry> {
match self.transpos_table.get(&pos_hash) {
Some(entry) if entry.depth() == depth => Some(entry),
_ => None,
}
}
}
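// Minimal sanity check for the entry negation the recursion relies on when
// switching sides. `assert!` is used instead of `assert_eq!` for the score
// and move so this sketch does not assume `Score`/`Move` implement `Debug`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn negating_an_entry_flips_only_the_score() {
        let entry = NegamaxTableEntry::new(3, CHECKMATE_WHITE, Move::NULL);
        let neg = -entry;
        assert_eq!(neg.depth(), 3);
        assert!(neg.score() == -CHECKMATE_WHITE);
        assert!(neg.best_move() == Move::NULL);
    }
}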
| 33.987395 | 97 | 0.518853 |
2311e13cb74a41ff530acc96afb1709d96e3b897 | 13,930 | //! Peripheral Reset and Enable Control (REC)
//!
//! This module contains safe accessors to the RCC functionality for each
//! periperal.
//!
//! At a minimum each peripheral implements
//! [ResetEnable](trait.ResetEnable.html). Peripherals that have an
//! individual clock multiplexer in the PKSU also have methods
//! `kernel_clk_mux` and `get_kernel_clk_mux`. These set and get the state
//! of the kernel clock multiplexer respectively.
//!
//! Peripherals that share a clock multiplexer in the PKSU with other
//! peripherals implement a trait with a `get_kernel_clk_mux` method that
//! returns the current kernel clock state. There is currently no safe API
//! for setting these shared multiplexers.
//!
//! # Reset/Enable Example
//!
//! ```
//! // Constrain and Freeze power
//! ...
//! let rcc = dp.RCC.constrain();
//! let ccdr = rcc.sys_ck(100.mhz()).freeze(vos, &dp.SYSCFG);
//!
//! // Enable the clock to a peripheral and reset it
//! ccdr.peripheral.FDCAN.enable().reset();
//! ```
//!
//! # Kernel Clock Example
//! ```
//! let ccdr = ...; // Returned by `freeze()`, see example above
//!
//! let cec_mux_state = ccdr.peripheral.CEC.kernel_clk_mux(CecClkSel::LSI).get_kernel_clk_mux();
//!
//! assert_eq!(cec_mux_state, CecClkSel::LSI);
//!
//! // Can't set this mux because it would also affect I2C2 and I2C3
//! let i2c_mux_state = ccdr.peripheral.I2C1.get_kernel_clk_mux();
//! ```
#![deny(missing_docs)]
use core::marker::PhantomData;
use super::Rcc;
use crate::stm32::{rcc, RCC};
use cortex_m::interrupt;
/// A trait for Resetting, Enabling and Disabling a single peripheral
pub trait ResetEnable {
/// Enable this peripheral
fn enable(self) -> Self;
/// Disable this peripheral
fn disable(self) -> Self;
/// Reset this peripheral
fn reset(self) -> Self;
}
impl Rcc {
    /// Returns all the peripheral resets / enables / kernel clocks.
///
/// # Use case
///
    /// Allows peripherals to be reset / enabled before calling
    /// `freeze`. For example, the internal watchdog could be enabled to
    /// issue a reset if the call to `freeze` hangs waiting for an external
    /// clock that is stopped.
///
/// # Safety
///
/// If this method is called multiple times, or is called before the
/// [freeze](struct.Rcc.html#freeze), then multiple accesses to the
/// same memory exist.
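    ///
    /// # Example (sketch)
    ///
    /// ```ignore
    /// // Illustrative: `rcc` as returned by `dp.RCC.constrain()`.
    /// let rec = unsafe { rcc.steal_peripheral_rec() };
    /// rec.DMA1.enable().reset();
    /// ```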
#[inline]
pub unsafe fn steal_peripheral_rec(&self) -> PeripheralREC {
PeripheralREC::new_singleton()
}
}
macro_rules! peripheral_reset_and_enable_control {
($($AXBn:ident, $axb_doc:expr => [
$(
$( #[ $pmeta:meta ] )*
$p:ident
$([ kernel $clk:ident: $pk:ident $(($Variant:ident))* $ccip:ident $clk_doc:expr ])*
$([ group clk: $pk_g:ident $( $(($Variant_g:ident))* $ccip_g:ident $clk_doc_g:expr )* ])*
),*
];)+) => {
paste::item! {
/// Peripheral Reset and Enable Control
#[allow(non_snake_case)]
#[non_exhaustive]
pub struct PeripheralREC {
$(
$(
#[allow(missing_docs)]
$( #[ $pmeta ] )*
pub [< $p:upper >]: $p,
)*
)+
}
impl PeripheralREC {
/// Return a new instance of the peripheral resets /
/// enables / kernel clocks
///
/// # Safety
///
/// If this method is called multiple times, then multiple
/// accesses to the same memory exist.
pub(super) unsafe fn new_singleton() -> PeripheralREC {
PeripheralREC {
$(
$(
$( #[ $pmeta ] )*
[< $p:upper >]: $p {
_marker: PhantomData,
},
)*
)+
}
}
}
$(
$(
/// Owned ability to Reset, Enable and Disable peripheral
$( #[ $pmeta ] )*
pub struct $p {
pub(crate) _marker: PhantomData<*const ()>,
}
$( #[ $pmeta ] )*
unsafe impl Send for $p {}
$( #[ $pmeta ] )*
impl ResetEnable for $p {
#[inline(always)]
fn enable(self) -> Self {
// unsafe: Owned exclusive access to this bitfield
interrupt::free(|_| {
let enr = unsafe {
&(*RCC::ptr()).[< $AXBn:lower enr >]
};
enr.modify(|_, w| w.
[< $p:lower en >]().set_bit());
});
self
}
#[inline(always)]
fn disable(self) -> Self {
// unsafe: Owned exclusive access to this bitfield
interrupt::free(|_| {
let enr = unsafe {
&(*RCC::ptr()).[< $AXBn:lower enr >]
};
enr.modify(|_, w| w.
[< $p:lower en >]().clear_bit());
});
self
}
#[inline(always)]
fn reset(self) -> Self {
// unsafe: Owned exclusive access to this bitfield
interrupt::free(|_| {
let rstr = unsafe {
&(*RCC::ptr()).[< $AXBn:lower rstr >]
};
rstr.modify(|_, w| w.
[< $p:lower rst >]().set_bit());
rstr.modify(|_, w| w.
[< $p:lower rst >]().clear_bit());
});
self
}
}
$( #[ $pmeta ] )*
impl $p {
$( // Individual kernel clocks
#[inline(always)]
#[allow(unused)]
/// Modify a kernel clock for this
/// peripheral. See RM0433 Section 8.5.8.
///
/// It is possible to switch this clock
/// dynamically without generating spurs or
/// timing violations. However, the user must
/// ensure that both clocks are running. See
/// RM0433 Section 8.5.10
pub fn [< kernel_ $clk _mux >](self, sel: [< $pk ClkSel >]) -> Self {
// unsafe: Owned exclusive access to this bitfield
interrupt::free(|_| {
let ccip = unsafe {
&(*RCC::ptr()).[< $ccip r >]
};
ccip.modify(|_, w| w.
[< $pk:lower sel >]().variant(sel));
});
self
}
#[inline(always)]
#[allow(unused)]
/// Return the current kernel clock selection
pub fn [< get_kernel_ $clk _mux>](&self) ->
variant_return_type!([< $pk ClkSel >] $(, $Variant)*)
{
// unsafe: We only read from this bitfield
let ccip = unsafe {
&(*RCC::ptr()).[< $ccip r >]
};
ccip.read().[< $pk:lower sel >]().variant()
}
)*
}
$( // Group kernel clocks
impl [< $pk_g ClkSelGetter >] for $p {}
)*
$( // Individual kernel clocks
#[doc=$clk_doc]
/// kernel clock source selection
pub type [< $pk ClkSel >] =
rcc::[< $ccip r >]::[< $pk:upper SEL_A >];
)*
$( // Group kernel clocks
$(
#[doc=$clk_doc_g]
/// kernel clock source selection
pub type [< $pk_g ClkSel >] =
rcc::[< $ccip_g r >]::[< $pk_g:upper SEL_A >];
/// Can return
#[doc=$clk_doc_g]
/// kernel clock source selection
pub trait [< $pk_g ClkSelGetter >] {
#[inline(always)]
#[allow(unused)]
/// Return the
#[doc=$clk_doc_g]
/// kernel clock selection
fn get_kernel_clk_mux(&self) ->
variant_return_type!([< $pk_g ClkSel >] $(, $Variant_g)*)
{
// unsafe: We only read from this bitfield
let ccip = unsafe {
&(*RCC::ptr()).[< $ccip_g r >]
};
ccip.read().[< $pk_g:lower sel >]().variant()
}
}
)*
)*
)*
)+
}
}
}
// If the PAC does not fully specify a CCIP field (perhaps because one or
// more values are reserved), then we use a different return type
macro_rules! variant_return_type {
($t:ty) => { $t };
($t:ty, $Variant: ident) => {
stm32h7::Variant<u8, $t>
};
}
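// For example (illustrative): `variant_return_type!(CecClkSel, Variant)`
// expands to `stm32h7::Variant<u8, CecClkSel>`, whereas
// `variant_return_type!(RngClkSel)` expands to plain `RngClkSel`.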
// Enumerate all peripherals and optional clock multiplexers
peripheral_reset_and_enable_control! {
AHB1, "AMBA High-performance Bus (AHB1) peripherals" => [
Eth1Mac, Dma2, Dma1,
#[cfg(any(feature = "dualcore"))] Art,
Adc12
];
AHB2, "AMBA High-performance Bus (AHB2) peripherals" => [
Hash, Crypt,
Rng [kernel clk: Rng d2ccip2 "RNG"],
Sdmmc2 [group clk: Sdmmc]
];
AHB3, "AMBA High-performance Bus (AHB3) peripherals" => [
Sdmmc1 [group clk: Sdmmc d1ccip "SDMMC"],
Qspi [kernel clk: Qspi d1ccip "QUADSPI"],
Fmc [kernel clk: Fmc d1ccip "FMC"],
Jpgdec, Dma2d, Mdma
];
AHB4, "AMBA High-performance Bus (AHB4) peripherals" => [
Hsem, Bdma, Crc, Adc3,
Gpioa, Gpiob, Gpioc, Gpiod, Gpioe, Gpiof, Gpiog, Gpioh, Gpioi, Gpioj, Gpiok
];
APB1L, "Advanced Peripheral Bus 1L (APB1L) peripherals" => [
Dac12,
I2c1 [group clk: I2c123 d2ccip2 "I2C1/2/3"],
I2c2 [group clk: I2c123],
I2c3 [group clk: I2c123],
Cec [kernel clk: Cec(Variant) d2ccip2 "CEC"],
Lptim1 [kernel clk: Lptim1(Variant) d2ccip2 "LPTIM1"],
Spi2 [group clk: Spi123],
Spi3 [group clk: Spi123],
Tim2, Tim3, Tim4, Tim5, Tim6, Tim7, Tim12, Tim13, Tim14,
Usart2 [group clk: Usart234578(Variant) d2ccip2 "USART2/3/4/5/7/8"],
Usart3 [group clk: Usart234578],
Uart4 [group clk: Usart234578],
Uart5 [group clk: Usart234578],
Uart7 [group clk: Usart234578],
Uart8 [group clk: Usart234578]
];
APB1H, "Advanced Peripheral Bus 1H (APB1H) peripherals" => [
Fdcan [kernel clk: Fdcan(Variant) d2ccip1 "FDCAN"],
Swp [kernel clk: Swp d2ccip1 "SWPMI"],
Crs, Mdios, Opamp
];
APB2, "Advanced Peripheral Bus 2 (APB2) peripherals" => [
Hrtim,
Dfsdm1 [kernel clk: Dfsdm1 d2ccip1 "DFSDM1"],
Sai1 [kernel clk: Sai1(Variant) d2ccip1 "SAI1"],
Sai2 [group clk: Sai23(Variant) d2ccip1 "SAI2/3"],
Sai3 [group clk: Sai23],
Spi1 [group clk: Spi123(Variant) d2ccip1 "SPI1/2/3"],
Spi4 [group clk: Spi45(Variant) d2ccip1 "SPI4/5"],
Spi5 [group clk: Spi45],
Tim1, Tim8, Tim15, Tim16, Tim17,
Usart1 [group clk: Usart16(Variant) d2ccip2 "USART1/6"],
Usart6 [group clk: Usart16]
];
APB3, "Advanced Peripheral Bus 3 (APB3) peripherals" => [
Ltdc,
#[cfg(any(feature = "dsi"))] Dsi
];
APB4, "Advanced Peripheral Bus 4 (APB4) peripherals" => [
Vref, Comp12,
Lptim2 [kernel clk: Lptim2(Variant) d3ccip "LPTIM2"],
Lptim3 [group clk: Lptim345(Variant) d3ccip "LPTIM3/4/5"],
Lptim4 [group clk: Lptim345],
Lptim5 [group clk: Lptim345],
I2c4 [kernel clk: I2c4 d3ccip "I2C4"],
Spi6 [kernel clk: Spi6(Variant) d3ccip "SPI6"],
Sai4 [kernel clk_a: Sai4A(Variant) d3ccip
"Sub-Block A of SAI4"]
[kernel clk_b: Sai4B(Variant) d3ccip
"Sub-Block B of SAI4"]
];
}
| 39.129213 | 105 | 0.427207 |
1e20dca86c6f1be4b79b6af9f06e291aed1c49ab | 290 | // tests2.rs
// This test has a problem with it -- make the test compile! Make the test
// pass! Make the test fail! Execute `rustlings hint tests2` for hints :)
// I AM NOT DONE
#[cfg(test)]
mod tests {
#[test]
fn you_can_assert_eq() {
assert_eq!("test","test");
}
}
| 20.714286 | 74 | 0.62069 |
bf50c2d29095be04ba8e633b396a7f67158760eb | 6,473 | use crate::{
physics::camera::default_camera,
physics::player::PhysicsPlayer,
physics::BlockContainer,
player::{PlayerId, PlayerInput},
};
use nalgebra::Point3;
use std::{
collections::HashMap,
time::{Duration, Instant},
};
/// Input of the whole simulation.
#[derive(Debug, Clone, Default)]
pub struct Input {
pub(self) player_inputs: HashMap<PlayerId, PlayerInput>,
}
/// Physics state of the whole simulation.
#[derive(Debug, Clone, Default)]
pub struct PhysicsState {
pub players: HashMap<PlayerId, PhysicsPlayer>,
}
impl PhysicsState {
/// Step the full physics simulation.
/// For now, it just moves all connected players.
pub fn step_simulation<BC: BlockContainer>(&mut self, input: &Input, dt: Duration, world: &BC) {
let seconds_delta = dt.as_secs_f64();
for (&id, input) in input.player_inputs.iter() {
default_camera(self.get_player_mut(id), *input, seconds_delta, world);
}
// Remove players that don't exist anymore
self.players
.retain(|id, _| input.player_inputs.contains_key(id));
}
pub fn get_player(&mut self, id: PlayerId) -> &PhysicsPlayer {
        self.players.entry(id).or_default()
}
pub fn get_player_mut(&mut self, id: PlayerId) -> &mut PhysicsPlayer {
        self.players.entry(id).or_default()
}
}
/// A physics state sent by the server.
#[derive(Debug, Clone)]
pub struct ServerState {
pub physics_state: PhysicsState,
pub server_time: Instant,
pub input: Input,
}
/// The client's physics simulation
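///
/// A minimal usage sketch (the `world`, `server_updates` and
/// `sample_player_input` names are assumptions, not part of this module):
///
/// ```ignore
/// let mut sim = ClientPhysicsSimulation::new(initial_server_state, my_id);
/// loop {
///     // Reconcile with any authoritative states received from the server
///     for state in server_updates.try_iter() {
///         sim.receive_server_update(state);
///     }
///     // Predict locally using the latest input
///     sim.step_simulation(sample_player_input(), Instant::now(), &world);
///     render(sim.get_camera_position());
/// }
/// ```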
pub struct ClientPhysicsSimulation {
/// Previous client inputs
client_inputs: Vec<(Instant, PlayerInput)>,
/// Last state validated by the server
last_server_state: ServerState,
/// Current simulation state
current_state: PhysicsState,
/// Dirty flag: whether the physics need to be computed again starting from the last server state.
needs_recomputing: bool,
/// Id of the current player
player_id: PlayerId,
}
impl ClientPhysicsSimulation {
/// Create a new simulation from some `ServerState` and the client's id
pub fn new(server_state: ServerState, player_id: PlayerId) -> Self {
Self {
client_inputs: Vec::new(),
last_server_state: server_state.clone(),
current_state: server_state.physics_state,
needs_recomputing: false,
player_id,
}
}
/// Process a server update
pub fn receive_server_update(&mut self, state: ServerState) {
// Save state
self.last_server_state = state;
// Drop inputs anterior to this server state
let last_server_time = self.last_server_state.server_time;
self.client_inputs
.retain(|(time, _)| *time > last_server_time);
// Mark dirty
self.needs_recomputing = true;
}
/// Get the camera position of the client
pub fn get_camera_position(&self) -> Point3<f64> {
self.current_state
.players
.get(&self.player_id)
.unwrap()
.get_camera_position()
}
/// Get the client player
pub fn get_player(&mut self) -> &PhysicsPlayer {
self.current_state.get_player(self.player_id)
}
/// Step the simulation according to the current input and time
pub fn step_simulation<BC: BlockContainer>(
&mut self,
input: PlayerInput,
time: Instant,
world: &BC,
) {
// Recompute simulation if necessary
if self.needs_recomputing {
self.needs_recomputing = false;
self.current_state = self.last_server_state.physics_state.clone();
let mut previous_time = self.last_server_state.server_time;
for &(time, player_input) in self.client_inputs.iter() {
// First, we have to apply the current client input to the server's input
self.last_server_state
.input
.player_inputs
.insert(self.player_id, player_input);
// Only then can we step the simulation
self.current_state.step_simulation(
&self.last_server_state.input,
time - previous_time,
world,
);
previous_time = time;
}
}
let previous_instant = match self.client_inputs.last() {
Some((time, _)) => *time,
None => self.last_server_state.server_time,
};
// Store input for future processing
self.client_inputs.push((time, input));
self.last_server_state
.input
.player_inputs
.insert(self.player_id, input);
// Step local simulation
self.current_state.step_simulation(
&self.last_server_state.input,
time - previous_instant,
world,
);
}
}
/// The server's physics simulation
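///
/// Sketch of a server tick loop (the input source and `broadcast` function
/// are assumptions, not part of this module):
///
/// ```ignore
/// let mut sim = ServerPhysicsSimulation::new();
/// loop {
///     for (id, input) in poll_client_inputs() {
///         sim.set_player_input(id, input);
///     }
///     sim.step_simulation(Instant::now(), &world);
///     broadcast(sim.get_state());
/// }
/// ```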
pub struct ServerPhysicsSimulation {
/// The current state of the simulation
server_state: ServerState,
}
impl ServerPhysicsSimulation {
/// Create a new simulation with no connected players starting at the current time
pub fn new() -> Self {
Self {
server_state: ServerState {
physics_state: PhysicsState::default(),
server_time: Instant::now(),
input: Default::default(),
},
}
}
/// Update the input of a player
pub fn set_player_input(&mut self, player_id: PlayerId, input: PlayerInput) {
self.server_state
.input
.player_inputs
.insert(player_id, input);
}
/// Remove a player from the simulation
pub fn remove(&mut self, player_id: PlayerId) {
self.server_state.input.player_inputs.remove(&player_id);
}
/// Step the simulation according to the current input and time
pub fn step_simulation<BC: BlockContainer>(&mut self, time: Instant, world: &BC) {
self.server_state.physics_state.step_simulation(
&self.server_state.input,
time - self.server_state.server_time,
world,
);
self.server_state.server_time = time;
}
/// Get a reference to the current state of the simulation
pub fn get_state(&self) -> &ServerState {
&self.server_state
}
}
| 31.8867 | 102 | 0.615634 |
79a307f88aae83497b6d3bd2c819df4d1d4b1721 | 2,055 | use crate::{cmd::*, result::Result, wallet::Wallet};
use helium_api::accounts::{self, Account};
use prettytable::Table;
use qr2term::print_qr;
use serde_json::json;
/// Get wallet information
#[derive(Debug, StructOpt)]
pub struct Cmd {
/// Display QR code for a given single wallet.
#[structopt(long = "qr")]
qr_code: bool,
}
impl Cmd {
pub async fn run(&self, opts: Opts) -> Result {
let wallet = load_wallet(opts.files)?;
if self.qr_code {
let address = wallet.address()?;
print_qr(&address)?;
Ok(())
} else {
let client = Client::new_with_base_url(api_url(wallet.public_key.network));
let account = accounts::get(&client, &wallet.address()?).await?;
print_wallet(&wallet, &account, opts.format)
}
}
}
fn print_wallet(wallet: &Wallet, account: &Account, format: OutputFormat) -> Result {
match format {
OutputFormat::Table => {
let mut table = Table::new();
table.add_row(row!["Key", "Value"]);
table.add_row(row!["Address", account.address]);
table.add_row(row!["Network", wallet.public_key.tag().network]);
table.add_row(row!["Type", wallet.public_key.tag().key_type]);
table.add_row(row!["Sharded", wallet.is_sharded()]);
table.add_row(row!["PwHash", wallet.pwhash()]);
table.add_row(row!["Balance", account.balance]);
table.add_row(row!["DC Balance", account.dc_balance]);
table.add_row(row!["Securities Balance", account.sec_balance]);
print_table(&table)
}
OutputFormat::Json => {
let table = json!({
"sharded": wallet.is_sharded(),
"network": wallet.public_key.tag().network.to_string(),
"type": wallet.public_key.tag().key_type.to_string(),
"pwhash": wallet.pwhash().to_string(),
"account": account,
});
print_json(&table)
}
}
}
| 36.052632 | 87 | 0.567397 |
713e97e5fae808a9db6d2f03d37a1b0ffaa8024a | 903 | //! This crate contains computational geometry algorithms for [Rust CV](https://github.com/rust-cv/).
//!
//! ## Triangulation
//!
//! In this problem we know the relative pose of the cameras and the [`Bearing`] of the
//! same feature observed in each camera frame. We want to find the point where the
//! rays from all cameras intersect.
//!
//! - `p` the point we are trying to triangulate
//! - `a` the normalized keypoint on camera A
//! - `b` the normalized keypoint on camera B
//! - `O` the optical center of a camera
//! - `@` the virtual image plane
//!
//! ```text
//! @
//! @
//! p--------b--------O
//! / @
//! / @
//! / @
//! / @
//! @@@@@@@a@@@@@
//! /
//! /
//! /
//! O
//! ```
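//!
//! With noisy observations the back-projected rays generally do not meet at a
//! single point. A common formulation (a sketch of the classic "midpoint"
//! method; the algorithms in this crate may differ) picks the `p` that
//! minimizes the summed squared distance to every ray. For camera centers
//! `O_i` and unit bearings `b_i`, `p` is the least-squares solution of
//!
//! ```text
//! sum_i (I - b_i b_i^T) p = sum_i (I - b_i b_i^T) O_i
//! ```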
#![no_std]
pub mod epipolar;
pub mod triangulation;
| 27.363636 | 101 | 0.469546 |
91813e1db5209ba3554cfa211e9316f2c5405a05 | 36 |
mod serde;
mod display;
mod debug;
| 7.2 | 12 | 0.722222 |
1d3576244c4afc0d597389a18b5231e0a32fbf6d | 94,933 | //! This query borrow-checks the MIR to (further) ensure it is not broken.
use crate::borrow_check::nll::region_infer::RegionInferenceContext;
use rustc::hir::{self, HirId};
use rustc::hir::Node;
use rustc::hir::def_id::DefId;
use rustc::infer::InferCtxt;
use rustc::lint::builtin::UNUSED_MUT;
use rustc::lint::builtin::{MUTABLE_BORROW_RESERVATION_CONFLICT};
use rustc::middle::borrowck::SignalledError;
use rustc::mir::{AggregateKind, BasicBlock, BorrowCheckResult, BorrowKind};
use rustc::mir::{
ClearCrossCrate, Local, Location, Body, Mutability, Operand, Place, PlaceBase, PlaceElem,
PlaceRef, Static, StaticKind
};
use rustc::mir::{Field, ProjectionElem, Promoted, Rvalue, Statement, StatementKind};
use rustc::mir::{Terminator, TerminatorKind};
use rustc::ty::query::Providers;
use rustc::ty::{self, TyCtxt};
use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, Level};
use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::graph::dominators::Dominators;
use rustc_data_structures::indexed_vec::IndexVec;
use smallvec::SmallVec;
use std::collections::BTreeMap;
use std::mem;
use std::rc::Rc;
use syntax::ast::Name;
use syntax_pos::{Span, DUMMY_SP};
use crate::dataflow::indexes::{BorrowIndex, InitIndex, MoveOutIndex, MovePathIndex};
use crate::dataflow::move_paths::{HasMoveData, InitLocation, LookupResult, MoveData, MoveError};
use crate::dataflow::Borrows;
use crate::dataflow::DataflowResultsConsumer;
use crate::dataflow::FlowAtLocation;
use crate::dataflow::MoveDataParamEnv;
use crate::dataflow::{do_dataflow, DebugFormatted};
use crate::dataflow::EverInitializedPlaces;
use crate::dataflow::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
use self::borrow_set::{BorrowData, BorrowSet};
use self::flows::Flows;
use self::location::LocationTable;
use self::prefixes::PrefixSet;
use self::MutateMode::{JustWrite, WriteAndRead};
use self::mutability_errors::AccessKind;
use self::path_utils::*;
crate mod borrow_set;
mod error_reporting;
mod flows;
mod location;
mod conflict_errors;
mod move_errors;
mod mutability_errors;
mod path_utils;
crate mod place_ext;
crate mod places_conflict;
mod prefixes;
mod used_muts;
pub(crate) mod nll;
// FIXME(eddyb) perhaps move this somewhere more centrally.
#[derive(Debug)]
crate struct Upvar {
name: Name,
var_hir_id: HirId,
/// If true, the capture is behind a reference.
by_ref: bool,
mutability: Mutability,
}
pub fn provide(providers: &mut Providers<'_>) {
*providers = Providers {
mir_borrowck,
..*providers
};
}
fn mir_borrowck(tcx: TyCtxt<'_>, def_id: DefId) -> BorrowCheckResult<'_> {
let (input_body, promoted) = tcx.mir_validated(def_id);
debug!("run query mir_borrowck: {}", tcx.def_path_str(def_id));
let opt_closure_req = tcx.infer_ctxt().enter(|infcx| {
let input_body: &Body<'_> = &input_body.borrow();
let promoted: &IndexVec<_, _> = &promoted.borrow();
do_mir_borrowck(&infcx, input_body, promoted, def_id)
});
debug!("mir_borrowck done");
opt_closure_req
}
fn do_mir_borrowck<'a, 'tcx>(
infcx: &InferCtxt<'a, 'tcx>,
input_body: &Body<'tcx>,
input_promoted: &IndexVec<Promoted, Body<'tcx>>,
def_id: DefId,
) -> BorrowCheckResult<'tcx> {
debug!("do_mir_borrowck(def_id = {:?})", def_id);
let tcx = infcx.tcx;
let attributes = tcx.get_attrs(def_id);
let param_env = tcx.param_env(def_id);
let id = tcx
.hir()
.as_local_hir_id(def_id)
.expect("do_mir_borrowck: non-local DefId");
// Gather the upvars of a closure, if any.
let tables = tcx.typeck_tables_of(def_id);
let upvars: Vec<_> = tables
.upvar_list
.get(&def_id)
.into_iter()
.flat_map(|v| v.values())
.map(|upvar_id| {
let var_hir_id = upvar_id.var_path.hir_id;
let capture = tables.upvar_capture(*upvar_id);
let by_ref = match capture {
ty::UpvarCapture::ByValue => false,
ty::UpvarCapture::ByRef(..) => true,
};
let mut upvar = Upvar {
name: tcx.hir().name(var_hir_id),
var_hir_id,
by_ref,
mutability: Mutability::Not,
};
let bm = *tables.pat_binding_modes().get(var_hir_id)
.expect("missing binding mode");
if bm == ty::BindByValue(hir::MutMutable) {
upvar.mutability = Mutability::Mut;
}
upvar
})
.collect();
// Replace all regions with fresh inference variables. This
// requires first making our own copy of the MIR. This copy will
// be modified (in place) to contain non-lexical lifetimes. It
// will have a lifetime tied to the inference context.
let mut body: Body<'tcx> = input_body.clone();
let mut promoted: IndexVec<Promoted, Body<'tcx>> = input_promoted.clone();
let free_regions =
nll::replace_regions_in_mir(infcx, def_id, param_env, &mut body, &mut promoted);
let body = &body; // no further changes
let location_table = &LocationTable::new(body);
let mut errors_buffer = Vec::new();
let (move_data, move_errors): (MoveData<'tcx>, Option<Vec<(Place<'tcx>, MoveError<'tcx>)>>) =
match MoveData::gather_moves(body, tcx) {
Ok(move_data) => (move_data, None),
Err((move_data, move_errors)) => (move_data, Some(move_errors)),
};
let mdpe = MoveDataParamEnv {
move_data,
param_env,
};
let dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
let mut flow_inits = FlowAtLocation::new(do_dataflow(
tcx,
body,
def_id,
&attributes,
&dead_unwinds,
MaybeInitializedPlaces::new(tcx, body, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
));
let locals_are_invalidated_at_exit = tcx.hir().body_owner_kind(id).is_fn_or_closure();
let borrow_set = Rc::new(BorrowSet::build(
tcx, body, locals_are_invalidated_at_exit, &mdpe.move_data));
// If we are in non-lexical mode, compute the non-lexical lifetimes.
let (regioncx, polonius_output, opt_closure_req) = nll::compute_regions(
infcx,
def_id,
free_regions,
body,
&promoted,
&upvars,
location_table,
param_env,
&mut flow_inits,
&mdpe.move_data,
&borrow_set,
&mut errors_buffer,
);
// The various `flow_*` structures can be large. We drop `flow_inits` here
// so it doesn't overlap with the others below. This reduces peak memory
// usage significantly on some benchmarks.
drop(flow_inits);
let regioncx = Rc::new(regioncx);
let flow_borrows = FlowAtLocation::new(do_dataflow(
tcx,
body,
def_id,
&attributes,
&dead_unwinds,
Borrows::new(tcx, body, param_env, regioncx.clone(), &borrow_set),
|rs, i| DebugFormatted::new(&rs.location(i)),
));
let flow_uninits = FlowAtLocation::new(do_dataflow(
tcx,
body,
def_id,
&attributes,
&dead_unwinds,
MaybeUninitializedPlaces::new(tcx, body, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
));
let flow_ever_inits = FlowAtLocation::new(do_dataflow(
tcx,
body,
def_id,
&attributes,
&dead_unwinds,
EverInitializedPlaces::new(tcx, body, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().inits[i]),
));
let movable_generator = match tcx.hir().get(id) {
Node::Expr(&hir::Expr {
node: hir::ExprKind::Closure(.., Some(hir::GeneratorMovability::Static)),
..
}) => false,
_ => true,
};
let dominators = body.dominators();
let mut mbcx = MirBorrowckCtxt {
infcx,
body,
mir_def_id: def_id,
param_env,
move_data: &mdpe.move_data,
location_table,
movable_generator,
locals_are_invalidated_at_exit,
access_place_error_reported: Default::default(),
reservation_error_reported: Default::default(),
reservation_warnings: Default::default(),
move_error_reported: BTreeMap::new(),
uninitialized_error_reported: Default::default(),
errors_buffer,
// Only downgrade errors on Rust 2015 and refuse to do so on Rust 2018.
// FIXME(Centril): In Rust 1.40.0, refuse doing so on 2015 as well and
// proceed to throwing out the migration infrastructure.
disable_error_downgrading: body.span.rust_2018(),
nonlexical_regioncx: regioncx,
used_mut: Default::default(),
used_mut_upvars: SmallVec::new(),
borrow_set,
dominators,
upvars,
};
let mut state = Flows::new(
flow_borrows,
flow_uninits,
flow_ever_inits,
polonius_output,
);
if let Some(errors) = move_errors {
mbcx.report_move_errors(errors);
}
mbcx.analyze_results(&mut state); // entry point for DataflowResultsConsumer
// Convert any reservation warnings into lints.
let reservation_warnings = mem::take(&mut mbcx.reservation_warnings);
for (_, (place, span, location, bk, borrow)) in reservation_warnings {
let mut initial_diag =
mbcx.report_conflicting_borrow(location, (&place, span), bk, &borrow);
let lint_root = if let ClearCrossCrate::Set(ref vsi) = mbcx.body.source_scope_local_data {
let scope = mbcx.body.source_info(location).scope;
vsi[scope].lint_root
} else {
id
};
// Span and message don't matter; we overwrite them below anyway
let mut diag = mbcx.infcx.tcx.struct_span_lint_hir(
MUTABLE_BORROW_RESERVATION_CONFLICT, lint_root, DUMMY_SP, "");
diag.message = initial_diag.styled_message().clone();
diag.span = initial_diag.span.clone();
initial_diag.cancel();
diag.buffer(&mut mbcx.errors_buffer);
}
// For each non-user used mutable variable, check if it's been assigned from
// a user-declared local. If so, then put that local into the used_mut set.
// Note that this set is expected to be small - only upvars from closures
// would have a chance of erroneously adding non-user-defined mutable vars
// to the set.
let temporary_used_locals: FxHashSet<Local> = mbcx.used_mut.iter()
.filter(|&local| mbcx.body.local_decls[*local].is_user_variable.is_none())
.cloned()
.collect();
// For the remaining unused locals that are marked as mutable, we avoid linting any that
    // were never initialized. These locals may have been removed as unreachable code, or will be
    // linted as unused variables.
let unused_mut_locals = mbcx.body.mut_vars_iter()
.filter(|local| !mbcx.used_mut.contains(local))
.collect();
mbcx.gather_used_muts(temporary_used_locals, unused_mut_locals);
debug!("mbcx.used_mut: {:?}", mbcx.used_mut);
let used_mut = mbcx.used_mut;
for local in mbcx.body.mut_vars_and_args_iter().filter(|local| !used_mut.contains(local)) {
if let ClearCrossCrate::Set(ref vsi) = mbcx.body.source_scope_local_data {
let local_decl = &mbcx.body.local_decls[local];
// Skip implicit `self` argument for closures
if local.index() == 1 && tcx.is_closure(mbcx.mir_def_id) {
continue;
}
// Skip over locals that begin with an underscore or have no name
match local_decl.name {
Some(name) => if name.as_str().starts_with("_") {
continue;
},
None => continue,
}
let span = local_decl.source_info.span;
if span.desugaring_kind().is_some() {
// If the `mut` arises as part of a desugaring, we should ignore it.
continue;
}
let mut_span = tcx.sess.source_map().span_until_non_whitespace(span);
tcx.struct_span_lint_hir(
UNUSED_MUT,
vsi[local_decl.source_info.scope].lint_root,
span,
"variable does not need to be mutable",
)
.span_suggestion_short(
mut_span,
"remove this `mut`",
String::new(),
Applicability::MachineApplicable,
)
.emit();
}
}
// Buffer any move errors that we collected and de-duplicated.
for (_, (_, diag)) in mbcx.move_error_reported {
diag.buffer(&mut mbcx.errors_buffer);
}
if !mbcx.errors_buffer.is_empty() {
mbcx.errors_buffer.sort_by_key(|diag| diag.span.primary_span());
if !mbcx.disable_error_downgrading && tcx.migrate_borrowck() {
// When borrowck=migrate, check if AST-borrowck would
// error on the given code.
// rust-lang/rust#55492, rust-lang/rust#58776 check the base def id
// for errors. AST borrowck is responsible for aggregating
// `signalled_any_error` from all of the nested closures here.
let base_def_id = tcx.closure_base_def_id(def_id);
match tcx.borrowck(base_def_id).signalled_any_error {
SignalledError::NoErrorsSeen => {
// if AST-borrowck signalled no errors, then
// downgrade all the buffered MIR-borrowck errors
// to warnings.
for err in mbcx.errors_buffer.iter_mut() {
downgrade_if_error(err);
}
}
SignalledError::SawSomeError => {
// if AST-borrowck signalled a (cancelled) error,
// then we will just emit the buffered
// MIR-borrowck errors as normal.
}
}
}
for diag in mbcx.errors_buffer.drain(..) {
DiagnosticBuilder::new_diagnostic(mbcx.infcx.tcx.sess.diagnostic(), diag).emit();
}
}
let result = BorrowCheckResult {
closure_requirements: opt_closure_req,
used_mut_upvars: mbcx.used_mut_upvars,
};
debug!("do_mir_borrowck: result = {:#?}", result);
result
}
fn downgrade_if_error(diag: &mut Diagnostic) {
if diag.is_error() {
diag.level = Level::Warning;
diag.warn(
"this error has been downgraded to a warning for backwards \
compatibility with previous releases",
).warn(
"this represents potential undefined behavior in your code and \
this warning will become a hard error in the future",
).note(
"for more information, try `rustc --explain E0729`"
);
}
}
crate struct MirBorrowckCtxt<'cx, 'tcx> {
crate infcx: &'cx InferCtxt<'cx, 'tcx>,
body: &'cx Body<'tcx>,
mir_def_id: DefId,
param_env: ty::ParamEnv<'tcx>,
move_data: &'cx MoveData<'tcx>,
/// Map from MIR `Location` to `LocationIndex`; created
/// when MIR borrowck begins.
location_table: &'cx LocationTable,
movable_generator: bool,
    /// This keeps track of whether local variables are freed when the function
/// exits even without a `StorageDead`, which appears to be the case for
/// constants.
///
/// I'm not sure this is the right approach - @eddyb could you try and
/// figure this out?
locals_are_invalidated_at_exit: bool,
/// This field keeps track of when borrow errors are reported in the access_place function
/// so that there is no duplicate reporting. This field cannot also be used for the conflicting
    /// borrow errors that are handled by the `reservation_error_reported` field, as the inclusion
/// of the `Span` type (while required to mute some errors) stops the muting of the reservation
/// errors.
access_place_error_reported: FxHashSet<(Place<'tcx>, Span)>,
/// This field keeps track of when borrow conflict errors are reported
/// for reservations, so that we don't report seemingly duplicate
/// errors for corresponding activations.
//
// FIXME: ideally this would be a set of `BorrowIndex`, not `Place`s,
// but it is currently inconvenient to track down the `BorrowIndex`
// at the time we detect and report a reservation error.
reservation_error_reported: FxHashSet<Place<'tcx>>,
/// Migration warnings to be reported for #56254. We delay reporting these
/// so that we can suppress the warning if there's a corresponding error
/// for the activation of the borrow.
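    ///
    /// An illustrative pattern that triggers such a warning (the canonical
    /// example for rust-lang/rust#56254):
    ///
    /// ```ignore
    /// let mut v = vec![0, 1];
    /// let shared = &v;
    /// v.push(shared.len()); // reservation of `&mut v` conflicts with `shared`
    /// ```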
reservation_warnings: FxHashMap<
BorrowIndex,
(Place<'tcx>, Span, Location, BorrowKind, BorrowData<'tcx>)
>,
    /// This field keeps track of move errors that are to be reported for given move indices.
///
/// There are situations where many errors can be reported for a single move out (see #53807)
/// and we want only the best of those errors.
///
/// The `report_use_of_moved_or_uninitialized` function checks this map and replaces the
/// diagnostic (if there is one) if the `Place` of the error being reported is a prefix of the
    /// `Place` of the previously reported diagnostic. This happens instead of buffering the error. Once
/// all move errors have been reported, any diagnostics in this map are added to the buffer
/// to be emitted.
///
/// `BTreeMap` is used to preserve the order of insertions when iterating. This is necessary
/// when errors in the map are being re-added to the error buffer so that errors with the
/// same primary span come out in a consistent order.
move_error_reported: BTreeMap<Vec<MoveOutIndex>, (PlaceRef<'cx, 'tcx>, DiagnosticBuilder<'cx>)>,
/// This field keeps track of errors reported in the checking of uninitialized variables,
/// so that we don't report seemingly duplicate errors.
uninitialized_error_reported: FxHashSet<PlaceRef<'cx, 'tcx>>,
/// Errors to be reported buffer
errors_buffer: Vec<Diagnostic>,
/// If there are no errors reported by the HIR borrow checker, we downgrade
/// all NLL errors to warnings. Setting this flag disables downgrading.
disable_error_downgrading: bool,
/// This field keeps track of all the local variables that are declared mut and are mutated.
/// Used for the warning issued by an unused mutable local variable.
used_mut: FxHashSet<Local>,
/// If the function we're checking is a closure, then we'll need to report back the list of
/// mutable upvars that have been used. This field keeps track of them.
used_mut_upvars: SmallVec<[Field; 8]>,
/// Non-lexical region inference context, if NLL is enabled. This
/// contains the results from region inference and lets us e.g.
/// find out which CFG points are contained in each borrow region.
nonlexical_regioncx: Rc<RegionInferenceContext<'tcx>>,
/// The set of borrows extracted from the MIR
borrow_set: Rc<BorrowSet<'tcx>>,
/// Dominators for MIR
dominators: Dominators<BasicBlock>,
/// Information about upvars not necessarily preserved in types or MIR
upvars: Vec<Upvar>,
}
// Check that:
// 1. assignments are always made to mutable locations (FIXME: does that still really go here?)
// 2. loans made in overlapping scopes do not conflict
// 3. assignments do not affect things loaned out as immutable
// 4. moves do not affect things loaned out in any way
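//
// For instance, each of these is rejected (illustrative snippets):
//
//     let x = 0; x = 1;                          // violates (1)
//     let a = &mut x; let b = &mut x; *a += 1;   // violates (2), given `mut x`
//     let r = &x; x = 1; let _ = *r;             // violates (3), given `mut x`
//     let r = &x; drop(x); let _ = *r;           // violates (4), for non-`Copy` `x`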
impl<'cx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx, 'tcx> {
type FlowState = Flows<'cx, 'tcx>;
fn body(&self) -> &'cx Body<'tcx> {
self.body
}
fn visit_block_entry(&mut self, bb: BasicBlock, flow_state: &Self::FlowState) {
debug!("MirBorrowckCtxt::process_block({:?}): {}", bb, flow_state);
}
fn visit_statement_entry(
&mut self,
location: Location,
stmt: &'cx Statement<'tcx>,
flow_state: &Self::FlowState,
) {
debug!(
"MirBorrowckCtxt::process_statement({:?}, {:?}): {}",
location, stmt, flow_state
);
let span = stmt.source_info.span;
self.check_activations(location, span, flow_state);
match stmt.kind {
StatementKind::Assign(box(ref lhs, ref rhs)) => {
self.consume_rvalue(
location,
(rhs, span),
flow_state,
);
self.mutate_place(
location,
(lhs, span),
Shallow(None),
JustWrite,
flow_state,
);
}
StatementKind::FakeRead(_, box ref place) => {
// Read for match doesn't access any memory and is used to
// assert that a place is safe and live. So we don't have to
// do any checks here.
//
// FIXME: Remove check that the place is initialized. This is
// needed for now because matches don't have never patterns yet.
// So this is the only place we prevent
// let x: !;
// match x {};
// from compiling.
self.check_if_path_or_subpath_is_moved(
location,
InitializationRequiringAction::Use,
(place.as_ref(), span),
flow_state,
);
}
StatementKind::SetDiscriminant {
ref place,
variant_index: _,
} => {
self.mutate_place(
location,
(place, span),
Shallow(None),
JustWrite,
flow_state,
);
}
StatementKind::InlineAsm(ref asm) => {
for (o, output) in asm.asm.outputs.iter().zip(asm.outputs.iter()) {
if o.is_indirect {
// FIXME(eddyb) indirect inline asm outputs should
// be encoded through MIR place derefs instead.
self.access_place(
location,
(output, o.span),
(Deep, Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
flow_state,
);
self.check_if_path_or_subpath_is_moved(
location,
InitializationRequiringAction::Use,
(output.as_ref(), o.span),
flow_state,
);
} else {
self.mutate_place(
location,
(output, o.span),
if o.is_rw { Deep } else { Shallow(None) },
if o.is_rw { WriteAndRead } else { JustWrite },
flow_state,
);
}
}
for (_, input) in asm.inputs.iter() {
self.consume_operand(location, (input, span), flow_state);
}
}
StatementKind::Nop
| StatementKind::AscribeUserType(..)
| StatementKind::Retag { .. }
| StatementKind::StorageLive(..) => {
// `Nop`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant
// to borrow check.
}
StatementKind::StorageDead(local) => {
self.access_place(
location,
(&Place::from(local), span),
(Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
flow_state,
);
}
}
}
fn visit_terminator_entry(
&mut self,
location: Location,
term: &'cx Terminator<'tcx>,
flow_state: &Self::FlowState,
) {
let loc = location;
debug!(
"MirBorrowckCtxt::process_terminator({:?}, {:?}): {}",
location, term, flow_state
);
let span = term.source_info.span;
self.check_activations(location, span, flow_state);
match term.kind {
TerminatorKind::SwitchInt {
ref discr,
switch_ty: _,
values: _,
targets: _,
} => {
self.consume_operand(loc, (discr, span), flow_state);
}
TerminatorKind::Drop {
location: ref drop_place,
target: _,
unwind: _,
} => {
let gcx = self.infcx.tcx.global_tcx();
// Compute the type with accurate region information.
let drop_place_ty = drop_place.ty(self.body, self.infcx.tcx);
// Erase the regions.
let drop_place_ty = self.infcx.tcx.erase_regions(&drop_place_ty).ty;
// "Lift" into the gcx -- once regions are erased, this type should be in the
// global arenas; this "lift" operation basically just asserts that is true, but
// that is useful later.
gcx.lift_to_global(&drop_place_ty).unwrap();
debug!("visit_terminator_drop \
loc: {:?} term: {:?} drop_place: {:?} drop_place_ty: {:?} span: {:?}",
loc, term, drop_place, drop_place_ty, span);
self.access_place(
loc,
(drop_place, span),
(AccessDepth::Drop, Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
flow_state,
);
}
TerminatorKind::DropAndReplace {
location: ref drop_place,
value: ref new_value,
target: _,
unwind: _,
} => {
self.mutate_place(
loc,
(drop_place, span),
Deep,
JustWrite,
flow_state,
);
self.consume_operand(
loc,
(new_value, span),
flow_state,
);
}
TerminatorKind::Call {
ref func,
ref args,
ref destination,
cleanup: _,
from_hir_call: _,
} => {
self.consume_operand(loc, (func, span), flow_state);
for arg in args {
self.consume_operand(
loc,
(arg, span),
flow_state,
);
}
if let Some((ref dest, _ /*bb*/)) = *destination {
self.mutate_place(
loc,
(dest, span),
Deep,
JustWrite,
flow_state,
);
}
}
TerminatorKind::Assert {
ref cond,
expected: _,
ref msg,
target: _,
cleanup: _,
} => {
self.consume_operand(loc, (cond, span), flow_state);
use rustc::mir::interpret::PanicInfo;
if let PanicInfo::BoundsCheck { ref len, ref index } = *msg {
self.consume_operand(loc, (len, span), flow_state);
self.consume_operand(loc, (index, span), flow_state);
}
}
TerminatorKind::Yield {
ref value,
resume: _,
drop: _,
} => {
self.consume_operand(loc, (value, span), flow_state);
if self.movable_generator {
// Look for any active borrows to locals
let borrow_set = self.borrow_set.clone();
flow_state.with_outgoing_borrows(|borrows| {
for i in borrows {
let borrow = &borrow_set[i];
self.check_for_local_borrow(borrow, span);
}
});
}
}
TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
// Returning from the function implicitly kills storage for all locals and statics.
// Often, the storage will already have been killed by an explicit
// StorageDead, but we don't always emit those (notably on unwind paths),
// so this "extra check" serves as a kind of backup.
let borrow_set = self.borrow_set.clone();
flow_state.with_outgoing_borrows(|borrows| {
for i in borrows {
let borrow = &borrow_set[i];
self.check_for_invalidation_at_exit(loc, borrow, span);
}
});
}
TerminatorKind::Goto { target: _ }
| TerminatorKind::Abort
| TerminatorKind::Unreachable
| TerminatorKind::FalseEdges {
real_target: _,
imaginary_target: _,
}
| TerminatorKind::FalseUnwind {
real_target: _,
unwind: _,
} => {
// no data used, thus irrelevant to borrowck
}
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum MutateMode {
JustWrite,
WriteAndRead,
}
use self::ReadOrWrite::{Activation, Read, Reservation, Write};
use self::AccessDepth::{Deep, Shallow};
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ArtificialField {
ArrayLength,
ShallowBorrow,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum AccessDepth {
/// From the RFC: "A *shallow* access means that the immediate
/// fields reached at P are accessed, but references or pointers
/// found within are not dereferenced. Right now, the only access
/// that is shallow is an assignment like `x = ...;`, which would
/// be a *shallow write* of `x`."
Shallow(Option<ArtificialField>),
/// From the RFC: "A *deep* access means that all data reachable
/// through the given place may be invalidated or accesses by
/// this action."
Deep,
/// Access is Deep only when there is a Drop implementation that
/// can reach the data behind the reference.
Drop,
}
/// Kind of access to a value: read or write
/// (For informational purposes only)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ReadOrWrite {
/// From the RFC: "A *read* means that the existing data may be
/// read, but will not be changed."
Read(ReadKind),
/// From the RFC: "A *write* means that the data may be mutated to
/// new values or otherwise invalidated (for example, it could be
/// de-initialized, as in a move operation).
Write(WriteKind),
/// For two-phase borrows, we distinguish a reservation (which is treated
/// like a Read) from an activation (which is treated like a write), and
/// each of those is furthermore distinguished from Reads/Writes above.
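    ///
    /// A classic two-phase borrow at the source level (illustrative):
    ///
    /// ```ignore
    /// let mut v = vec![1];
    /// v.push(v.len()); // `&mut v` is reserved, `v.len()` then reads `v`,
    ///                  // and the borrow is only activated at the call
    /// ```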
Reservation(WriteKind),
Activation(WriteKind, BorrowIndex),
}
/// Kind of read access to a value
/// (For informational purposes only)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ReadKind {
Borrow(BorrowKind),
Copy,
}
/// Kind of write access to a value
/// (For informational purposes only)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum WriteKind {
StorageDeadOrDrop,
MutableBorrow(BorrowKind),
Mutate,
Move,
}
/// When checking permissions for a place access, this flag is used to indicate that an immutable
/// local place can be mutated.
//
// FIXME: @nikomatsakis suggested that this flag could be removed with the following modifications:
// - Merge `check_access_permissions()` and `check_if_reassignment_to_immutable_state()`.
// - Split `is_mutable()` into `is_assignable()` (can be directly assigned) and
// `is_declared_mutable()`.
// - Take flow state into consideration in `is_assignable()` for local variables.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum LocalMutationIsAllowed {
Yes,
/// We want use of immutable upvars to cause a "write to immutable upvar"
/// error, not an "reassignment" error.
ExceptUpvars,
No,
}
#[derive(Copy, Clone, Debug)]
enum InitializationRequiringAction {
Update,
Borrow,
MatchOn,
Use,
Assignment,
PartialAssignment,
}
struct RootPlace<'d, 'tcx> {
place_base: &'d PlaceBase<'tcx>,
place_projection: &'d [PlaceElem<'tcx>],
is_local_mutation_allowed: LocalMutationIsAllowed,
}
impl InitializationRequiringAction {
fn as_noun(self) -> &'static str {
match self {
InitializationRequiringAction::Update => "update",
InitializationRequiringAction::Borrow => "borrow",
InitializationRequiringAction::MatchOn => "use", // no good noun
InitializationRequiringAction::Use => "use",
InitializationRequiringAction::Assignment => "assign",
InitializationRequiringAction::PartialAssignment => "assign to part",
}
}
fn as_verb_in_past_tense(self) -> &'static str {
match self {
InitializationRequiringAction::Update => "updated",
InitializationRequiringAction::Borrow => "borrowed",
InitializationRequiringAction::MatchOn => "matched on",
InitializationRequiringAction::Use => "used",
InitializationRequiringAction::Assignment => "assigned",
InitializationRequiringAction::PartialAssignment => "partially assigned",
}
}
}
impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
/// If there are no errors reported by the HIR borrow checker, we downgrade
/// all NLL errors to warnings. Calling this disables downgrading.
crate fn disable_error_downgrading(&mut self) {
self.disable_error_downgrading = true;
}
/// Checks an access to the given place to see if it is allowed. Examines the set of borrows
/// that are in scope, as well as which paths have been initialized, to ensure that (a) the
/// place is initialized and (b) it is not borrowed in some way that would prevent this
/// access.
///
    /// Any error found is buffered rather than returned.
fn access_place(
&mut self,
location: Location,
place_span: (&Place<'tcx>, Span),
kind: (AccessDepth, ReadOrWrite),
is_local_mutation_allowed: LocalMutationIsAllowed,
flow_state: &Flows<'cx, 'tcx>,
) {
let (sd, rw) = kind;
if let Activation(_, borrow_index) = rw {
if self.reservation_error_reported.contains(&place_span.0) {
debug!(
"skipping access_place for activation of invalid reservation \
place: {:?} borrow_index: {:?}",
place_span.0, borrow_index
);
return;
}
}
        // Check is_empty() first because it's the common case, and checking it
        // first lets us avoid the clone() call.
if !self.access_place_error_reported.is_empty() &&
self
.access_place_error_reported
.contains(&(place_span.0.clone(), place_span.1))
{
debug!(
"access_place: suppressing error place_span=`{:?}` kind=`{:?}`",
place_span, kind
);
return;
}
let mutability_error =
self.check_access_permissions(
place_span,
rw,
is_local_mutation_allowed,
flow_state,
location,
);
let conflict_error =
self.check_access_for_conflict(location, place_span, sd, rw, flow_state);
if let (Activation(_, borrow_idx), true) = (kind.1, conflict_error) {
            // Suppress this warning when there's an error being emitted for the
// same borrow: fixing the error is likely to fix the warning.
self.reservation_warnings.remove(&borrow_idx);
}
if conflict_error || mutability_error {
debug!(
"access_place: logging error place_span=`{:?}` kind=`{:?}`",
place_span, kind
);
self.access_place_error_reported
.insert((place_span.0.clone(), place_span.1));
}
}
fn check_access_for_conflict(
&mut self,
location: Location,
place_span: (&Place<'tcx>, Span),
sd: AccessDepth,
rw: ReadOrWrite,
flow_state: &Flows<'cx, 'tcx>,
) -> bool {
debug!(
"check_access_for_conflict(location={:?}, place_span={:?}, sd={:?}, rw={:?})",
location, place_span, sd, rw,
);
let mut error_reported = false;
let tcx = self.infcx.tcx;
let body = self.body;
let param_env = self.param_env;
let location_table = self.location_table.start_index(location);
let borrow_set = self.borrow_set.clone();
each_borrow_involving_path(
self,
tcx,
param_env,
body,
location,
(sd, place_span.0),
&borrow_set,
flow_state.borrows_in_scope(location_table),
|this, borrow_index, borrow| match (rw, borrow.kind) {
// Obviously an activation is compatible with its own
// reservation (or even prior activating uses of same
// borrow); so don't check if they interfere.
//
                // NOTE: *reservations* do conflict with themselves,
                // so we aren't injecting unsoundness with this check.
(Activation(_, activating), _) if activating == borrow_index => {
debug!(
"check_access_for_conflict place_span: {:?} sd: {:?} rw: {:?} \
skipping {:?} b/c activation of same borrow_index",
place_span,
sd,
rw,
(borrow_index, borrow),
);
Control::Continue
}
(Read(_), BorrowKind::Shared)
| (Read(_), BorrowKind::Shallow)
| (Read(ReadKind::Borrow(BorrowKind::Shallow)), BorrowKind::Unique)
| (Read(ReadKind::Borrow(BorrowKind::Shallow)), BorrowKind::Mut { .. }) => {
Control::Continue
}
(Write(WriteKind::Move), BorrowKind::Shallow) => {
// Handled by initialization checks.
Control::Continue
}
(Read(kind), BorrowKind::Unique) | (Read(kind), BorrowKind::Mut { .. }) => {
// Reading from mere reservations of mutable-borrows is OK.
if !is_active(&this.dominators, borrow, location) {
assert!(allow_two_phase_borrow(borrow.kind));
return Control::Continue;
}
error_reported = true;
match kind {
ReadKind::Copy => {
this.report_use_while_mutably_borrowed(location, place_span, borrow)
.buffer(&mut this.errors_buffer);
}
ReadKind::Borrow(bk) => {
this.report_conflicting_borrow(location, place_span, bk, borrow)
.buffer(&mut this.errors_buffer);
}
}
Control::Break
}
(Reservation(WriteKind::MutableBorrow(bk)), BorrowKind::Shallow)
| (Reservation(WriteKind::MutableBorrow(bk)), BorrowKind::Shared) if {
tcx.migrate_borrowck() && this.borrow_set.location_map.contains_key(&location)
} => {
let bi = this.borrow_set.location_map[&location];
debug!(
"recording invalid reservation of place: {:?} with \
borrow index {:?} as warning",
place_span.0,
bi,
);
// rust-lang/rust#56254 - This was previously permitted on
// the 2018 edition so we emit it as a warning. We buffer
                    // these separately so that we only emit a warning if borrow
// checking was otherwise successful.
this.reservation_warnings.insert(
bi,
(place_span.0.clone(), place_span.1, location, bk, borrow.clone()),
);
// Don't suppress actual errors.
Control::Continue
}
(Reservation(kind), _)
| (Activation(kind, _), _)
| (Write(kind), _) => {
match rw {
Reservation(..) => {
debug!(
"recording invalid reservation of \
place: {:?}",
place_span.0
);
this.reservation_error_reported.insert(place_span.0.clone());
}
Activation(_, activating) => {
debug!(
"observing check_place for activation of \
borrow_index: {:?}",
activating
);
}
Read(..) | Write(..) => {}
}
error_reported = true;
match kind {
WriteKind::MutableBorrow(bk) => {
this.report_conflicting_borrow(location, place_span, bk, borrow)
.buffer(&mut this.errors_buffer);
}
WriteKind::StorageDeadOrDrop => {
this.report_borrowed_value_does_not_live_long_enough(
location,
borrow,
place_span,
Some(kind))
}
WriteKind::Mutate => {
this.report_illegal_mutation_of_borrowed(location, place_span, borrow)
}
WriteKind::Move => {
this.report_move_out_while_borrowed(location, place_span, borrow)
}
}
Control::Break
}
},
);
error_reported
}
fn mutate_place(
&mut self,
location: Location,
place_span: (&'cx Place<'tcx>, Span),
kind: AccessDepth,
mode: MutateMode,
flow_state: &Flows<'cx, 'tcx>,
) {
// Write of P[i] or *P, or WriteAndRead of any P, requires P init'd.
match mode {
MutateMode::WriteAndRead => {
self.check_if_path_or_subpath_is_moved(
location,
InitializationRequiringAction::Update,
(place_span.0.as_ref(), place_span.1),
flow_state,
);
}
MutateMode::JustWrite => {
self.check_if_assigned_path_is_moved(location, place_span, flow_state);
}
}
        // Special case: you can assign an immutable local variable
// (e.g., `x = ...`) so long as it has never been initialized
// before (at this point in the flow).
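        //
        // For example (illustrative):
        //
        //     let x;
        //     x = 1; // OK: first assignment initializes `x`
        //     x = 2; // ERROR: cannot assign twice to immutable variable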
if let Place {
base: PlaceBase::Local(local),
projection: box [],
} = place_span.0 {
if let Mutability::Not = self.body.local_decls[*local].mutability {
// check for reassignments to immutable local variables
self.check_if_reassignment_to_immutable_state(
location,
*local,
place_span,
flow_state,
);
return;
}
}
// Otherwise, use the normal access permission rules.
self.access_place(
location,
place_span,
(kind, Write(WriteKind::Mutate)),
LocalMutationIsAllowed::No,
flow_state,
);
}
fn consume_rvalue(
&mut self,
location: Location,
(rvalue, span): (&'cx Rvalue<'tcx>, Span),
flow_state: &Flows<'cx, 'tcx>,
) {
match *rvalue {
Rvalue::Ref(_ /*rgn*/, bk, ref place) => {
let access_kind = match bk {
BorrowKind::Shallow => {
(Shallow(Some(ArtificialField::ShallowBorrow)), Read(ReadKind::Borrow(bk)))
},
BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
BorrowKind::Unique | BorrowKind::Mut { .. } => {
let wk = WriteKind::MutableBorrow(bk);
if allow_two_phase_borrow(bk) {
(Deep, Reservation(wk))
} else {
(Deep, Write(wk))
}
}
};
self.access_place(
location,
(place, span),
access_kind,
LocalMutationIsAllowed::No,
flow_state,
);
let action = if bk == BorrowKind::Shallow {
InitializationRequiringAction::MatchOn
} else {
InitializationRequiringAction::Borrow
};
self.check_if_path_or_subpath_is_moved(
location,
action,
(place.as_ref(), span),
flow_state,
);
}
Rvalue::Use(ref operand)
| Rvalue::Repeat(ref operand, _)
| Rvalue::UnaryOp(_ /*un_op*/, ref operand)
| Rvalue::Cast(_ /*cast_kind*/, ref operand, _ /*ty*/) => {
self.consume_operand(location, (operand, span), flow_state)
}
Rvalue::Len(ref place) | Rvalue::Discriminant(ref place) => {
let af = match *rvalue {
Rvalue::Len(..) => Some(ArtificialField::ArrayLength),
Rvalue::Discriminant(..) => None,
_ => unreachable!(),
};
self.access_place(
location,
(place, span),
(Shallow(af), Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
flow_state,
);
self.check_if_path_or_subpath_is_moved(
location,
InitializationRequiringAction::Use,
(place.as_ref(), span),
flow_state,
);
}
Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2)
| Rvalue::CheckedBinaryOp(_bin_op, ref operand1, ref operand2) => {
self.consume_operand(location, (operand1, span), flow_state);
self.consume_operand(location, (operand2, span), flow_state);
}
Rvalue::NullaryOp(_op, _ty) => {
// nullary ops take no dynamic input; no borrowck effect.
//
// FIXME: is above actually true? Do we want to track
// the fact that uninitialized data can be created via
// `NullOp::Box`?
}
Rvalue::Aggregate(ref aggregate_kind, ref operands) => {
// We need to report back the list of mutable upvars that were
// moved into the closure and subsequently used by the closure,
// in order to populate our used_mut set.
match **aggregate_kind {
AggregateKind::Closure(def_id, _)
| AggregateKind::Generator(def_id, _, _) => {
let BorrowCheckResult {
used_mut_upvars, ..
} = self.infcx.tcx.mir_borrowck(def_id);
debug!("{:?} used_mut_upvars={:?}", def_id, used_mut_upvars);
for field in used_mut_upvars {
self.propagate_closure_used_mut_upvar(&operands[field.index()]);
}
}
AggregateKind::Adt(..)
| AggregateKind::Array(..)
| AggregateKind::Tuple { .. } => (),
}
for operand in operands {
self.consume_operand(location, (operand, span), flow_state);
}
}
}
}
fn propagate_closure_used_mut_upvar(&mut self, operand: &Operand<'tcx>) {
let propagate_closure_used_mut_place = |this: &mut Self, place: &Place<'tcx>| {
if !place.projection.is_empty() {
if let Some(field) = this.is_upvar_field_projection(place.as_ref()) {
this.used_mut_upvars.push(field);
}
} else if let PlaceBase::Local(local) = place.base {
this.used_mut.insert(local);
}
};
// This relies on the current way that by-value
// captures of a closure are copied/moved directly
// when generating MIR.
match *operand {
Operand::Move(Place {
base: PlaceBase::Local(local),
projection: box [],
}) |
Operand::Copy(Place {
base: PlaceBase::Local(local),
projection: box [],
}) if self.body.local_decls[local].is_user_variable.is_none() => {
if self.body.local_decls[local].ty.is_mutable_ptr() {
// The variable will be marked as mutable by the borrow.
return;
}
// This is an edge case where we have a `move` closure
// inside a non-move closure, and the inner closure
// contains a mutation:
//
// let mut i = 0;
// || { move || { i += 1; }; };
//
// In this case our usual strategy of assuming that the
// variable will be captured by mutable reference is
// wrong, since `i` can be copied into the inner
// closure from a shared reference.
//
// As such we have to search for the local that this
// capture comes from and mark it as being used as mut.
let temp_mpi = self.move_data.rev_lookup.find_local(local);
let init = if let [init_index] = *self.move_data.init_path_map[temp_mpi] {
&self.move_data.inits[init_index]
} else {
bug!("temporary should be initialized exactly once")
};
let loc = match init.location {
InitLocation::Statement(stmt) => stmt,
_ => bug!("temporary initialized in arguments"),
};
let bbd = &self.body[loc.block];
let stmt = &bbd.statements[loc.statement_index];
debug!("temporary assigned in: stmt={:?}", stmt);
if let StatementKind::Assign(box(_, Rvalue::Ref(_, _, ref source))) = stmt.kind {
propagate_closure_used_mut_place(self, source);
} else {
bug!("closures should only capture user variables \
or references to user variables");
}
}
Operand::Move(ref place)
| Operand::Copy(ref place) => {
propagate_closure_used_mut_place(self, place);
}
Operand::Constant(..) => {}
}
}
fn consume_operand(
&mut self,
location: Location,
(operand, span): (&'cx Operand<'tcx>, Span),
flow_state: &Flows<'cx, 'tcx>,
) {
match *operand {
Operand::Copy(ref place) => {
// copy of place: check if this is "copy of frozen path"
// (FIXME: see check_loans.rs)
self.access_place(
location,
(place, span),
(Deep, Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
flow_state,
);
// Finally, check if path was already moved.
self.check_if_path_or_subpath_is_moved(
location,
InitializationRequiringAction::Use,
(place.as_ref(), span),
flow_state,
);
}
Operand::Move(ref place) => {
// move of place: check if this is move of already borrowed path
self.access_place(
location,
(place, span),
(Deep, Write(WriteKind::Move)),
LocalMutationIsAllowed::Yes,
flow_state,
);
// Finally, check if path was already moved.
self.check_if_path_or_subpath_is_moved(
location,
InitializationRequiringAction::Use,
(place.as_ref(), span),
flow_state,
);
}
Operand::Constant(_) => {}
}
}
/// Checks whether a borrow of this place is invalidated when the function
/// exits
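    ///
    /// For example (illustrative):
    ///
    /// ```ignore
    /// fn f() -> &'static u32 { let x = 0; &x }
    /// ```
    ///
    /// Storage for `x` dies when `f` returns while the borrow is still live,
    /// so the borrowed value "does not live long enough".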
fn check_for_invalidation_at_exit(
&mut self,
location: Location,
borrow: &BorrowData<'tcx>,
span: Span,
) {
debug!("check_for_invalidation_at_exit({:?})", borrow);
let place = &borrow.borrowed_place;
let root_place = self.prefixes(place.as_ref(), PrefixSet::All).last().unwrap();
// FIXME(nll-rfc#40): do more precise destructor tracking here. For now
// we just know that all locals are dropped at function exit (otherwise
// we'll have a memory leak) and assume that all statics have a destructor.
//
// FIXME: allow thread-locals to borrow other thread locals?
assert!(root_place.projection.is_empty());
let (might_be_alive, will_be_dropped) = match root_place.base {
PlaceBase::Static(box Static {
kind: StaticKind::Promoted(..),
..
}) => {
(true, false)
}
PlaceBase::Static(box Static {
kind: StaticKind::Static,
..
}) => {
// Thread-locals might be dropped after the function exits, but
// "true" statics will never be.
(true, self.is_place_thread_local(root_place))
}
PlaceBase::Local(_) => {
// Locals are always dropped at function exit, and if they
// have a destructor it would've been called already.
(false, self.locals_are_invalidated_at_exit)
}
};
if !will_be_dropped {
debug!(
"place_is_invalidated_at_exit({:?}) - won't be dropped",
place
);
return;
}
let sd = if might_be_alive { Deep } else { Shallow(None) };
if places_conflict::borrow_conflicts_with_place(
self.infcx.tcx,
self.param_env,
self.body,
place,
borrow.kind,
root_place,
sd,
places_conflict::PlaceConflictBias::Overlap,
) {
debug!("check_for_invalidation_at_exit({:?}): INVALID", place);
// FIXME: should be talking about the region lifetime instead
// of just a span here.
let span = self.infcx.tcx.sess.source_map().end_point(span);
self.report_borrowed_value_does_not_live_long_enough(
location,
borrow,
(place, span),
None,
)
}
}
/// Reports an error if this is a borrow of local data.
/// This is called for all Yield statements on movable generators
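    ///
    /// For example (illustrative), a movable generator like
    ///
    /// ```ignore
    /// || {
    ///     let x = 0;
    ///     let r = &x;
    ///     yield; // `r` borrows generator-local data across this yield
    ///     drop(r);
    /// }
    /// ```
    ///
    /// is rejected, since moving the generator would invalidate `r`.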
fn check_for_local_borrow(&mut self, borrow: &BorrowData<'tcx>, yield_span: Span) {
debug!("check_for_local_borrow({:?})", borrow);
if borrow_of_local_data(&borrow.borrowed_place) {
let err = self.cannot_borrow_across_generator_yield(
self.retrieve_borrow_spans(borrow).var_or_use(),
yield_span,
);
err.buffer(&mut self.errors_buffer);
}
}
fn check_activations(&mut self, location: Location, span: Span, flow_state: &Flows<'cx, 'tcx>) {
// Two-phase borrow support: For each activation that is newly
// generated at this statement, check if it interferes with
// another borrow.
let borrow_set = self.borrow_set.clone();
for &borrow_index in borrow_set.activations_at_location(location) {
let borrow = &borrow_set[borrow_index];
// only mutable borrows should be 2-phase
assert!(match borrow.kind {
BorrowKind::Shared | BorrowKind::Shallow => false,
BorrowKind::Unique | BorrowKind::Mut { .. } => true,
});
self.access_place(
location,
(&borrow.borrowed_place, span),
(
Deep,
Activation(WriteKind::MutableBorrow(borrow.kind), borrow_index),
),
LocalMutationIsAllowed::No,
flow_state,
);
// We do not need to call `check_if_path_or_subpath_is_moved`
// again, as we already called it when we made the
// initial reservation.
}
}
}
impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
fn check_if_reassignment_to_immutable_state(
&mut self,
location: Location,
local: Local,
place_span: (&Place<'tcx>, Span),
flow_state: &Flows<'cx, 'tcx>,
) {
debug!("check_if_reassignment_to_immutable_state({:?})", local);
        // Check if any of the initializations of `local` have happened yet:
if let Some(init_index) = self.is_local_ever_initialized(local, flow_state) {
// And, if so, report an error.
let init = &self.move_data.inits[init_index];
let span = init.span(&self.body);
self.report_illegal_reassignment(
location, place_span, span, place_span.0
);
}
}
fn check_if_full_path_is_moved(
&mut self,
location: Location,
desired_action: InitializationRequiringAction,
place_span: (PlaceRef<'cx, 'tcx>, Span),
flow_state: &Flows<'cx, 'tcx>,
) {
let maybe_uninits = &flow_state.uninits;
// Bad scenarios:
//
// 1. Move of `a.b.c`, use of `a.b.c`
// 2. Move of `a.b.c`, use of `a.b.c.d` (without first reinitializing `a.b.c.d`)
// 3. Uninitialized `(a.b.c: &_)`, use of `*a.b.c`; note that with
// partial initialization support, one might have `a.x`
// initialized but not `a.b`.
//
// OK scenarios:
//
// 4. Move of `a.b.c`, use of `a.b.d`
// 5. Uninitialized `a.x`, initialized `a.b`, use of `a.b`
// 6. Copied `(a.b: &_)`, use of `*(a.b).c`; note that `a.b`
// must have been initialized for the use to be sound.
// 7. Move of `a.b.c` then reinit of `a.b.c.d`, use of `a.b.c.d`
// The dataflow tracks shallow prefixes distinctly (that is,
// field-accesses on P distinctly from P itself), in order to
// track substructure initialization separately from the whole
// structure.
//
// E.g., when looking at (*a.b.c).d, if the closest prefix for
// which we have a MovePath is `a.b`, then that means that the
// initialization state of `a.b` is all we need to inspect to
// know if `a.b.c` is valid (and from that we infer that the
// dereference and `.d` access is also valid, since we assume
        // `a.b.c` is assigned a reference to an initialized and
// well-formed record structure.)
// Therefore, if we seek out the *closest* prefix for which we
// have a MovePath, that should capture the initialization
// state for the place scenario.
//
// This code covers scenarios 1, 2, and 3.
debug!("check_if_full_path_is_moved place: {:?}", place_span.0);
match self.move_path_closest_to(place_span.0) {
Ok((prefix, mpi)) => {
if maybe_uninits.contains(mpi) {
self.report_use_of_moved_or_uninitialized(
location,
desired_action,
(prefix, place_span.0, place_span.1),
mpi,
);
return; // don't bother finding other problems.
}
}
Err(NoMovePathFound::ReachedStatic) => {
// Okay: we do not build MoveData for static variables
} // Only query longest prefix with a MovePath, not further
// ancestors; dataflow recurs on children when parents
// move (to support partial (re)inits).
//
// (I.e., querying parents breaks scenario 7; but may want
// to do such a query based on partial-init feature-gate.)
}
}
fn check_if_path_or_subpath_is_moved(
&mut self,
location: Location,
desired_action: InitializationRequiringAction,
place_span: (PlaceRef<'cx, 'tcx>, Span),
flow_state: &Flows<'cx, 'tcx>,
) {
let maybe_uninits = &flow_state.uninits;
// Bad scenarios:
//
// 1. Move of `a.b.c`, use of `a` or `a.b`
// 2. All bad scenarios from `check_if_full_path_is_moved`
//
// OK scenarios:
//
// 3. Move of `a.b.c`, use of `a.b.d`
// 4. Uninitialized `a.x`, initialized `a.b`, use of `a.b`
// 5. Copied `(a.b: &_)`, use of `*(a.b).c`; note that `a.b`
// must have been initialized for the use to be sound.
// 6. Move of `a.b.c` then reinit of `a.b.c.d`, use of `a.b.c.d`
self.check_if_full_path_is_moved(location, desired_action, place_span, flow_state);
        // A move of any shallow suffix of `place` also interferes
        // with an attempt to use `place`. This is scenario 1 above.
        //
        // (Distinct from scenario 2, which the `check_if_full_path_is_moved`
        // call above already handles. Note also that `place` does not
        // interfere with suffixes of its prefixes; e.g., `a.b.c` does not
        // interfere with `a.b.d`, which is OK scenario 3.)
        //
        // This code covers scenario 1.
debug!("check_if_path_or_subpath_is_moved place: {:?}", place_span.0);
if let Some(mpi) = self.move_path_for_place(place_span.0) {
if let Some(child_mpi) = maybe_uninits.has_any_child_of(mpi) {
self.report_use_of_moved_or_uninitialized(
location,
desired_action,
(place_span.0, place_span.0, place_span.1),
child_mpi,
);
return; // don't bother finding other problems.
}
}
}
/// Currently MoveData does not store entries for all places in
/// the input MIR. For example it will currently filter out
/// places that are Copy; thus we do not track places of shared
/// reference type. This routine will walk up a place along its
/// prefixes, searching for a foundational place that *is*
/// tracked in the MoveData.
///
    /// An Err result includes a tag indicating why the search failed.
/// Currently this can only occur if the place is built off of a
/// static variable, as we do not track those in the MoveData.
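    ///
    /// For example (an illustrative sketch of the strategy described above):
    /// when looking up `(*a.b).c` and the closest prefix with a `MovePath`
    /// is `a.b`, the returned pair is `a.b` together with its
    /// `MovePathIndex`.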
fn move_path_closest_to(
&mut self,
place: PlaceRef<'cx, 'tcx>,
) -> Result<(PlaceRef<'cx, 'tcx>, MovePathIndex), NoMovePathFound> {
let mut last_prefix = place.base;
for prefix in self.prefixes(place, PrefixSet::All) {
if let Some(mpi) = self.move_path_for_place(prefix) {
return Ok((prefix, mpi));
}
last_prefix = prefix.base;
}
match last_prefix {
PlaceBase::Local(_) => panic!("should have move path for every Local"),
PlaceBase::Static(_) => Err(NoMovePathFound::ReachedStatic),
}
}
fn move_path_for_place(&mut self, place: PlaceRef<'cx, 'tcx>) -> Option<MovePathIndex> {
// If returns None, then there is no move path corresponding
// to a direct owner of `place` (which means there is nothing
// that borrowck tracks for its analysis).
match self.move_data.rev_lookup.find(place) {
LookupResult::Parent(_) => None,
LookupResult::Exact(mpi) => Some(mpi),
}
}
fn check_if_assigned_path_is_moved(
&mut self,
location: Location,
(place, span): (&'cx Place<'tcx>, Span),
flow_state: &Flows<'cx, 'tcx>,
) {
debug!("check_if_assigned_path_is_moved place: {:?}", place);
// None case => assigning to `x` does not require `x` be initialized.
let mut cursor = &*place.projection;
while let [proj_base @ .., elem] = cursor {
cursor = proj_base;
match elem {
ProjectionElem::Index(_/*operand*/) |
ProjectionElem::ConstantIndex { .. } |
// assigning to P[i] requires P to be valid.
ProjectionElem::Downcast(_/*adt_def*/, _/*variant_idx*/) =>
// assigning to (P->variant) is okay if assigning to `P` is okay
//
                // FIXME: is this true even if P is an ADT with a dtor?
{ }
// assigning to (*P) requires P to be initialized
ProjectionElem::Deref => {
self.check_if_full_path_is_moved(
location, InitializationRequiringAction::Use,
(PlaceRef {
base: &place.base,
projection: proj_base,
}, span), flow_state);
// (base initialized; no need to
// recur further)
break;
}
ProjectionElem::Subslice { .. } => {
panic!("we don't allow assignments to subslices, location: {:?}",
location);
}
ProjectionElem::Field(..) => {
// if type of `P` has a dtor, then
// assigning to `P.f` requires `P` itself
// be already initialized
let tcx = self.infcx.tcx;
let base_ty = Place::ty_from(&place.base, proj_base, self.body, tcx).ty;
match base_ty.sty {
ty::Adt(def, _) if def.has_dtor(tcx) => {
self.check_if_path_or_subpath_is_moved(
location, InitializationRequiringAction::Assignment,
(PlaceRef {
base: &place.base,
projection: proj_base,
}, span), flow_state);
// (base initialized; no need to
// recur further)
break;
}
                        // Once `let s; s.x = V; read(s.x);` is allowed,
                        // remove this match arm.
ty::Adt(..) | ty::Tuple(..) => {
check_parent_of_field(self, location, PlaceRef {
base: &place.base,
projection: proj_base,
}, span, flow_state);
if let PlaceBase::Local(local) = place.base {
// rust-lang/rust#21232,
// #54499, #54986: during
// period where we reject
// partial initialization, do
// not complain about
// unnecessary `mut` on an
// attempt to do a partial
// initialization.
self.used_mut.insert(local);
}
}
_ => {}
}
}
}
}
fn check_parent_of_field<'cx, 'tcx>(
this: &mut MirBorrowckCtxt<'cx, 'tcx>,
location: Location,
base: PlaceRef<'cx, 'tcx>,
span: Span,
flow_state: &Flows<'cx, 'tcx>,
) {
// rust-lang/rust#21232: Until Rust allows reads from the
// initialized parts of partially initialized structs, we
// will, starting with the 2018 edition, reject attempts
// to write to structs that are not fully initialized.
//
// In other words, *until* we allow this:
//
// 1. `let mut s; s.x = Val; read(s.x);`
//
// we will for now disallow this:
//
// 2. `let mut s; s.x = Val;`
//
// and also this:
//
// 3. `let mut s = ...; drop(s); s.x=Val;`
//
// This does not use check_if_path_or_subpath_is_moved,
// because we want to *allow* reinitializations of fields:
// e.g., want to allow
//
// `let mut s = ...; drop(s.x); s.x=Val;`
//
// This does not use check_if_full_path_is_moved on
// `base`, because that would report an error about the
// `base` as a whole, but in this scenario we *really*
// want to report an error about the actual thing that was
// moved, which may be some prefix of `base`.
// Shallow so that we'll stop at any dereference; we'll
// report errors about issues with such bases elsewhere.
let maybe_uninits = &flow_state.uninits;
// Find the shortest uninitialized prefix you can reach
// without going over a Deref.
let mut shortest_uninit_seen = None;
for prefix in this.prefixes(base, PrefixSet::Shallow) {
let mpi = match this.move_path_for_place(prefix) {
Some(mpi) => mpi, None => continue,
};
if maybe_uninits.contains(mpi) {
debug!("check_parent_of_field updating shortest_uninit_seen from {:?} to {:?}",
shortest_uninit_seen, Some((prefix, mpi)));
shortest_uninit_seen = Some((prefix, mpi));
} else {
debug!("check_parent_of_field {:?} is definitely initialized", (prefix, mpi));
}
}
if let Some((prefix, mpi)) = shortest_uninit_seen {
            // Check for a reassignment into an uninitialized field of a union (for example,
            // after a move out). In this case, do not report an error here. There is an
            // exception: if this is the first assignment into the union (that is, there is
            // no move out from an earlier location), then this is an attempt at initialization
            // of the union, and we should error in that case.
let tcx = this.infcx.tcx;
if let ty::Adt(def, _) =
Place::ty_from(base.base, base.projection, this.body, tcx).ty.sty
{
if def.is_union() {
if this.move_data.path_map[mpi].iter().any(|moi| {
this.move_data.moves[*moi].source.is_predecessor_of(
location, this.body,
)
}) {
return;
}
}
}
this.report_use_of_moved_or_uninitialized(
location,
InitializationRequiringAction::PartialAssignment,
(prefix, base, span),
mpi,
);
}
}
}
/// Checks the permissions for the given place and read or write kind
///
/// Returns `true` if an error is reported.
fn check_access_permissions(
&mut self,
(place, span): (&Place<'tcx>, Span),
kind: ReadOrWrite,
is_local_mutation_allowed: LocalMutationIsAllowed,
flow_state: &Flows<'cx, 'tcx>,
location: Location,
) -> bool {
debug!(
"check_access_permissions({:?}, {:?}, is_local_mutation_allowed: {:?})",
place, kind, is_local_mutation_allowed
);
let error_access;
let the_place_err;
// rust-lang/rust#21232, #54986: during period where we reject
// partial initialization, do not complain about mutability
// errors except for actual mutation (as opposed to an attempt
// to do a partial initialization).
let previously_initialized = if let PlaceBase::Local(local) = place.base {
self.is_local_ever_initialized(local, flow_state).is_some()
} else {
true
};
match kind {
Reservation(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Unique))
| Reservation(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Mut { .. }))
| Write(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Unique))
| Write(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Mut { .. })) => {
let is_local_mutation_allowed = match borrow_kind {
BorrowKind::Unique => LocalMutationIsAllowed::Yes,
BorrowKind::Mut { .. } => is_local_mutation_allowed,
BorrowKind::Shared | BorrowKind::Shallow => unreachable!(),
};
match self.is_mutable(place.as_ref(), is_local_mutation_allowed) {
Ok(root_place) => {
self.add_used_mut(root_place, flow_state);
return false;
}
Err(place_err) => {
error_access = AccessKind::MutableBorrow;
the_place_err = place_err;
}
}
}
Reservation(WriteKind::Mutate) | Write(WriteKind::Mutate) => {
match self.is_mutable(place.as_ref(), is_local_mutation_allowed) {
Ok(root_place) => {
self.add_used_mut(root_place, flow_state);
return false;
}
Err(place_err) => {
error_access = AccessKind::Mutate;
the_place_err = place_err;
}
}
}
Reservation(wk @ WriteKind::Move)
| Write(wk @ WriteKind::Move)
| Reservation(wk @ WriteKind::StorageDeadOrDrop)
| Reservation(wk @ WriteKind::MutableBorrow(BorrowKind::Shared))
| Reservation(wk @ WriteKind::MutableBorrow(BorrowKind::Shallow))
| Write(wk @ WriteKind::StorageDeadOrDrop)
| Write(wk @ WriteKind::MutableBorrow(BorrowKind::Shared))
| Write(wk @ WriteKind::MutableBorrow(BorrowKind::Shallow)) => {
if let (Err(place_err), true) = (
self.is_mutable(place.as_ref(), is_local_mutation_allowed),
self.errors_buffer.is_empty()
) {
if self.infcx.tcx.migrate_borrowck() {
// rust-lang/rust#46908: In pure NLL mode this
// code path should be unreachable (and thus
// we signal an ICE in the else branch
// here). But we can legitimately get here
// under borrowck=migrate mode, so instead of
// ICE'ing we instead report a legitimate
// error (which will then be downgraded to a
// warning by the migrate machinery).
error_access = match wk {
WriteKind::MutableBorrow(_) => AccessKind::MutableBorrow,
WriteKind::Move => AccessKind::Move,
WriteKind::StorageDeadOrDrop |
WriteKind::Mutate => AccessKind::Mutate,
};
self.report_mutability_error(
place,
span,
place_err,
error_access,
location,
);
} else {
span_bug!(
span,
"Accessing `{:?}` with the kind `{:?}` shouldn't be possible",
place,
kind,
);
}
}
return false;
}
Activation(..) => {
// permission checks are done at Reservation point.
return false;
}
Read(ReadKind::Borrow(BorrowKind::Unique))
| Read(ReadKind::Borrow(BorrowKind::Mut { .. }))
| Read(ReadKind::Borrow(BorrowKind::Shared))
| Read(ReadKind::Borrow(BorrowKind::Shallow))
| Read(ReadKind::Copy) => {
// Access authorized
return false;
}
}
// at this point, we have set up the error reporting state.
return if previously_initialized {
self.report_mutability_error(
place,
span,
the_place_err,
error_access,
location,
);
true
} else {
false
};
}
fn is_local_ever_initialized(
&self,
local: Local,
flow_state: &Flows<'cx, 'tcx>,
) -> Option<InitIndex> {
let mpi = self.move_data.rev_lookup.find_local(local);
let ii = &self.move_data.init_path_map[mpi];
for &index in ii {
if flow_state.ever_inits.contains(index) {
return Some(index);
}
}
None
}
/// Adds the place into the used mutable variables set
fn add_used_mut<'d>(&mut self, root_place: RootPlace<'d, 'tcx>, flow_state: &Flows<'cx, 'tcx>) {
match root_place {
RootPlace {
place_base: PlaceBase::Local(local),
place_projection: [],
is_local_mutation_allowed,
} => {
// If the local may have been initialized, and it is now currently being
// mutated, then it is justified to be annotated with the `mut`
// keyword, since the mutation may be a possible reassignment.
if is_local_mutation_allowed != LocalMutationIsAllowed::Yes &&
self.is_local_ever_initialized(*local, flow_state).is_some()
{
self.used_mut.insert(*local);
}
}
RootPlace {
place_base: _,
place_projection: _,
is_local_mutation_allowed: LocalMutationIsAllowed::Yes,
} => {}
RootPlace {
place_base,
place_projection: place_projection @ [.., _],
is_local_mutation_allowed: _,
} => {
if let Some(field) = self.is_upvar_field_projection(PlaceRef {
base: &place_base,
projection: &place_projection,
}) {
self.used_mut_upvars.push(field);
}
}
RootPlace {
place_base: PlaceBase::Static(..),
place_projection: [],
is_local_mutation_allowed: _,
} => {}
}
}
/// Whether this value can be written or borrowed mutably.
/// Returns the root place if the place passed in is a projection.
fn is_mutable<'d>(
&self,
place: PlaceRef<'d, 'tcx>,
is_local_mutation_allowed: LocalMutationIsAllowed,
) -> Result<RootPlace<'d, 'tcx>, PlaceRef<'d, 'tcx>> {
match place {
PlaceRef {
base: PlaceBase::Local(local),
projection: [],
} => {
let local = &self.body.local_decls[*local];
match local.mutability {
Mutability::Not => match is_local_mutation_allowed {
LocalMutationIsAllowed::Yes => Ok(RootPlace {
place_base: place.base,
place_projection: place.projection,
is_local_mutation_allowed: LocalMutationIsAllowed::Yes,
}),
LocalMutationIsAllowed::ExceptUpvars => Ok(RootPlace {
place_base: place.base,
place_projection: place.projection,
is_local_mutation_allowed: LocalMutationIsAllowed::ExceptUpvars,
}),
LocalMutationIsAllowed::No => Err(place),
},
Mutability::Mut => Ok(RootPlace {
place_base: place.base,
place_projection: place.projection,
is_local_mutation_allowed,
}),
}
}
            // The rules for promotion are made by `qualify_consts`; there wouldn't even be a
            // `Place::Promoted` if the promotion weren't 100% legal. So we just forward this.
PlaceRef {
base: PlaceBase::Static(box Static {
kind: StaticKind::Promoted(..),
..
}),
projection: [],
} =>
Ok(RootPlace {
place_base: place.base,
place_projection: place.projection,
is_local_mutation_allowed,
}),
PlaceRef {
base: PlaceBase::Static(box Static {
kind: StaticKind::Static,
def_id,
..
}),
projection: [],
} => {
if !self.infcx.tcx.is_mutable_static(*def_id) {
Err(place)
} else {
Ok(RootPlace {
place_base: place.base,
place_projection: place.projection,
is_local_mutation_allowed,
})
}
}
PlaceRef {
base: _,
projection: [proj_base @ .., elem],
} => {
match elem {
ProjectionElem::Deref => {
let base_ty =
Place::ty_from(place.base, proj_base, self.body, self.infcx.tcx).ty;
// Check the kind of deref to decide
match base_ty.sty {
ty::Ref(_, _, mutbl) => {
match mutbl {
// Shared borrowed data is never mutable
hir::MutImmutable => Err(place),
// Mutably borrowed data is mutable, but only if we have a
// unique path to the `&mut`
hir::MutMutable => {
let mode = match self.is_upvar_field_projection(place) {
Some(field)
if self.upvars[field.index()].by_ref =>
{
is_local_mutation_allowed
}
_ => LocalMutationIsAllowed::Yes,
};
self.is_mutable(PlaceRef {
base: place.base,
projection: proj_base,
}, mode)
}
}
}
ty::RawPtr(tnm) => {
match tnm.mutbl {
// `*const` raw pointers are not mutable
hir::MutImmutable => Err(place),
// `*mut` raw pointers are always mutable, regardless of
// context. The users have to check by themselves.
hir::MutMutable => {
Ok(RootPlace {
place_base: place.base,
place_projection: place.projection,
is_local_mutation_allowed,
})
}
}
}
// `Box<T>` owns its content, so mutable if its location is mutable
_ if base_ty.is_box() => {
self.is_mutable(PlaceRef {
base: place.base,
projection: proj_base,
}, is_local_mutation_allowed)
}
// Deref should only be for reference, pointers or boxes
_ => bug!("Deref of unexpected type: {:?}", base_ty),
}
}
// All other projections are owned by their base path, so mutable if
// base path is mutable
ProjectionElem::Field(..)
| ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. }
| ProjectionElem::Downcast(..) => {
let upvar_field_projection = self.is_upvar_field_projection(place);
if let Some(field) = upvar_field_projection {
let upvar = &self.upvars[field.index()];
debug!(
"upvar.mutability={:?} local_mutation_is_allowed={:?} \
place={:?}",
upvar, is_local_mutation_allowed, place
);
match (upvar.mutability, is_local_mutation_allowed) {
(Mutability::Not, LocalMutationIsAllowed::No)
| (Mutability::Not, LocalMutationIsAllowed::ExceptUpvars) => {
Err(place)
}
(Mutability::Not, LocalMutationIsAllowed::Yes)
| (Mutability::Mut, _) => {
// Subtle: this is an upvar
// reference, so it looks like
// `self.foo` -- we want to double
// check that the location `*self`
// is mutable (i.e., this is not a
// `Fn` closure). But if that
// check succeeds, we want to
// *blame* the mutability on
// `place` (that is,
// `self.foo`). This is used to
// propagate the info about
// whether mutability declarations
// are used outwards, so that we register
// the outer variable as mutable. Otherwise a
// test like this fails to record the `mut`
// as needed:
//
// ```
// fn foo<F: FnOnce()>(_f: F) { }
// fn main() {
// let var = Vec::new();
// foo(move || {
// var.push(1);
// });
// }
// ```
let _ = self.is_mutable(PlaceRef {
base: place.base,
projection: proj_base,
}, is_local_mutation_allowed)?;
Ok(RootPlace {
place_base: place.base,
place_projection: place.projection,
is_local_mutation_allowed,
})
}
}
} else {
self.is_mutable(PlaceRef {
base: place.base,
projection: proj_base,
}, is_local_mutation_allowed)
}
}
}
}
}
}
/// If `place` is a field projection, and the field is being projected from a closure type,
/// then returns the index of the field being projected. Note that this closure will always
/// be `self` in the current MIR, because that is the only time we directly access the fields
/// of a closure type.
pub fn is_upvar_field_projection(&self, place_ref: PlaceRef<'cx, 'tcx>) -> Option<Field> {
let mut place_projection = place_ref.projection;
let mut by_ref = false;
if let [proj_base @ .., ProjectionElem::Deref] = place_projection {
place_projection = proj_base;
by_ref = true;
}
match place_projection {
[base @ .., ProjectionElem::Field(field, _ty)] => {
let tcx = self.infcx.tcx;
let base_ty = Place::ty_from(place_ref.base, base, self.body, tcx).ty;
if (base_ty.is_closure() || base_ty.is_generator()) &&
(!by_ref || self.upvars[field.index()].by_ref) {
Some(*field)
} else {
None
}
}
_ => None,
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum NoMovePathFound {
ReachedStatic,
}
/// The degree of overlap between 2 places for borrow-checking.
enum Overlap {
/// The places might partially overlap - in this case, we give
/// up and say that they might conflict. This occurs when
/// different fields of a union are borrowed. For example,
/// if `u` is a union, we have no way of telling how disjoint
    /// `u.a.x` and `u.b.y` are.
Arbitrary,
/// The places have the same type, and are either completely disjoint
/// or equal - i.e., they can't "partially" overlap as can occur with
/// unions. This is the "base case" on which we recur for extensions
/// of the place.
EqualOrDisjoint,
/// The places are disjoint, so we know all extensions of them
/// will also be disjoint.
Disjoint,
}
| 40.022344 | 100 | 0.507758 |
f8aba7a0f97fa77cd88449fb84770decd44854cf | 3,762 | /**
* [0648] Replace Words
*
* In English, we have a concept called root, which can be followed by some other word to form another longer word - let's call this word successor. For example, when the root "an" is followed by the successor word "other", we can form a new word "another".
* Given a dictionary consisting of many roots and a sentence consisting of words separated by spaces, replace all the successors in the sentence with the root forming it. If a successor can be replaced by more than one root, replace it with the root that has the shortest length.
* Return the sentence after the replacement.
*
* Example 1:
*
* Input: dictionary = ["cat","bat","rat"], sentence = "the cattle was rattled by the battery"
* Output: "the cat was rat by the bat"
*
* Example 2:
*
* Input: dictionary = ["a","b","c"], sentence = "aadsfasf absbs bbab cadsfafs"
* Output: "a a b c"
*
*
* Constraints:
*
* 1 <= dictionary.length <= 1000
* 1 <= dictionary[i].length <= 100
* dictionary[i] consists of only lower-case letters.
* 1 <= sentence.length <= 10^6
* sentence consists of only lower-case letters and spaces.
* The number of words in sentence is in the range [1, 1000]
* The length of each word in sentence is in the range [1, 1000]
* Every two consecutive words in sentence will be separated by exactly one space.
* sentence does not have leading or trailing spaces.
*
*/
pub struct Solution {}
// problem: https://leetcode.com/problems/replace-words/
// discuss: https://leetcode.com/problems/replace-words/discuss/?currentPage=1&orderBy=most_votes&query=
// submission codes start here
// Credit: https://leetcode.com/problems/replace-words/discuss/942708/Rust-4ms
#[derive(Default)]
pub struct Trie {
pub children: [Option<Box<Trie>>; 26],
pub is_word: bool,
}
impl Trie {
pub fn new() -> Self {
Default::default()
}
pub fn insert(&mut self, word: String) {
let mut cur = self;
for c in word.chars() {
let idx = (c as i8 - 'a' as i8) as usize;
cur = cur.children[idx].get_or_insert_with(|| Box::new(Trie::new()));
}
cur.is_word = true;
}
pub fn get_root(&self, word: &str) -> Option<String> {
let mut cur = self;
let mut chars: Vec<char> = vec![];
for c in word.chars() {
let idx = (c as i8 - 'a' as i8) as usize;
match cur.children[idx].as_ref() {
Some(a) => cur = a,
None => return None,
}
chars.push(c);
if cur.is_word {
return Some(chars.into_iter().collect());
}
}
return None;
}
}
impl Solution {
pub fn replace_words(dictionary: Vec<String>, sentence: String) -> String {
let mut trie = Trie::new();
for word in dictionary {
trie.insert(word)
}
sentence
.split(' ')
            .map(|w| trie.get_root(w).unwrap_or_else(|| w.to_string()))
.collect::<Vec<String>>()
.join(" ")
}
}
// submission codes end
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_0648_example_1() {
let dictionary = vec_string!["cat", "bat", "rat"];
let sentence = "the cattle was rattled by the battery".to_string();
let result = "the cat was rat by the bat".to_string();
assert_eq!(Solution::replace_words(dictionary, sentence), result);
}
#[test]
fn test_0648_example_2() {
let dictionary = vec_string!["a", "b", "c"];
let sentence = "aadsfasf absbs bbab cadsfafs".to_string();
let result = "a a b c".to_string();
assert_eq!(Solution::replace_words(dictionary, sentence), result);
}
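    #[test]
    fn test_0648_trie_sketch() {
        // Illustrative sketch exercising the `Trie` helper directly: one
        // inserted root, one matching successor, one miss.
        let mut trie = Trie::new();
        trie.insert("cat".to_string());
        assert_eq!(trie.get_root("cattle"), Some("cat".to_string()));
        assert_eq!(trie.get_root("dog"), None);
    }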
}
| 32.153846 | 280 | 0.607124 |
feb8026f501a871c113cd32b46038b2be9df8550 | 1,461 | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// revisions: rpass1 rpass2
// compile-flags: -Zquery-dep-graph
#![feature(rustc_attrs)]
#![allow(private_no_mangle_fns)]
#![rustc_partition_codegened(module="change_symbol_export_status-mod1", cfg="rpass2")]
#![rustc_partition_reused(module="change_symbol_export_status-mod2", cfg="rpass2")]
// This test case makes sure that a change in symbol visibility is detected by
// our dependency tracking. We do this by changing a module's visibility to
// `private` in rpass2, causing the contained function to go from `default` to
// `hidden` visibility.
// The function is marked with #[no_mangle] so it is considered for exporting
// even from an executable. Plain Rust functions are only exported from Rust
// libraries, which our test infrastructure does not support.
#[cfg(rpass1)]
pub mod mod1 {
#[no_mangle]
pub fn foo() {}
}
#[cfg(rpass2)]
mod mod1 {
#[no_mangle]
pub fn foo() {}
}
pub mod mod2 {
#[no_mangle]
pub fn bar() {}
}
fn main() {
mod1::foo();
}
| 30.4375 | 86 | 0.720739 |
235563adc0980fd443a02ff0ebb55209bea73eaf | 884 | // Take a look at the license at the top of the repository in the LICENSE file.
use crate::EventType;
use glib::translate::*;
use std::fmt;
define_event! {
TouchEvent,
ffi::GdkTouchEvent,
ffi::gdk_touch_event_get_type,
&[EventType::TouchBegin, EventType::TouchUpdate, EventType::TouchEnd, EventType::TouchCancel]
}
impl TouchEvent {
#[doc(alias = "gdk_touch_event_get_emulating_pointer")]
#[doc(alias = "get_emulating_pointer")]
pub fn emulates_pointer(&self) -> bool {
unsafe {
from_glib(ffi::gdk_touch_event_get_emulating_pointer(
self.to_glib_none().0,
))
}
}
}
impl fmt::Display for TouchEvent {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("TouchEvent")
.field("emulating_pointer", &self.emulates_pointer())
.finish()
}
}
| 26.787879 | 97 | 0.638009 |
2fc07e2cdd855b5ef03b9a508c9166707112916e | 46,568 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::io::{ErrorKind, Read, Write};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::os::unix::net::{UnixListener, UnixStream};
use std::path::Path;
use mio::unix::SourceFd;
use mio::{Interest, Poll, Token};
use crate::common::sock_ctrl_msg::ScmSocket;
use crate::common::{Body, Version};
pub use crate::common::{ConnectionError, RequestError, ServerError};
use crate::connection::HttpConnection;
use crate::request::Request;
use crate::response::{Response, StatusCode};
static SERVER_FULL_ERROR_MESSAGE: &[u8] = b"HTTP/1.1 503\r\n\
Server: Firecracker API\r\n\
Connection: close\r\n\
Content-Length: 40\r\n\r\n{ \"error\": \"Too many open connections\" }";
const MAX_CONNECTIONS: usize = 10;
/// Payload max size
pub(crate) const MAX_PAYLOAD_SIZE: usize = 51200;
type Result<T> = std::result::Result<T, ServerError>;
/// Wrapper over `Request` which adds an identification token.
#[derive(Debug)]
pub struct ServerRequest {
/// Inner request.
pub request: Request,
/// Identification token.
id: mio::Token,
}
impl ServerRequest {
/// Creates a new `ServerRequest` object from an existing `Request`,
/// adding an identification token.
pub fn new(request: Request, id: mio::Token) -> Self {
Self { request, id }
}
/// Returns a reference to the inner request.
pub fn inner(&self) -> &Request {
&self.request
}
/// Calls the function provided on the inner request to obtain the response.
/// The response is then wrapped in a `ServerResponse`.
///
    /// Returns a `ServerResponse` ready for yielding to the server.
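    ///
    /// # Example
    ///
    /// ```ignore
    /// // Sketch: `server_request` is assumed to come from
    /// // `HttpServer::requests()`.
    /// let response = server_request.process(|request| {
    ///     Response::new(request.http_version(), StatusCode::NoContent)
    /// });
    /// ```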
pub fn process<F>(&self, mut callable: F) -> ServerResponse
where
F: FnMut(&Request) -> Response,
{
let http_response = callable(self.inner());
ServerResponse::new(http_response, self.id)
}
}
/// Wrapper over `Response` which adds an identification token.
#[derive(Debug)]
pub struct ServerResponse {
/// Inner response.
response: Response,
/// Identification token.
id: mio::Token,
}
impl ServerResponse {
fn new(response: Response, id: mio::Token) -> Self {
Self { response, id }
}
}
/// Describes the state of the connection as far as data exchange
/// on the stream is concerned.
#[derive(PartialOrd, PartialEq)]
enum ClientConnectionState {
AwaitingIncoming,
AwaitingOutgoing,
Closed,
}
/// Wrapper over `HttpConnection` which keeps track of yielded
/// requests and absorbed responses.
struct ClientConnection<T> {
/// The `HttpConnection` object which handles data exchange.
connection: HttpConnection<T>,
/// The state of the connection in the `epoll` structure.
state: ClientConnectionState,
/// Represents the difference between yielded requests and
/// absorbed responses.
/// This has to be `0` if we want to drop the connection.
in_flight_response_count: u32,
}
impl<T: Read + Write + ScmSocket> ClientConnection<T> {
fn new(connection: HttpConnection<T>) -> Self {
Self {
connection,
state: ClientConnectionState::AwaitingIncoming,
in_flight_response_count: 0,
}
}
fn read(&mut self) -> Result<Vec<Request>> {
// Data came into the connection.
let mut parsed_requests = vec![];
'out: loop {
match self.connection.try_read() {
Err(ConnectionError::ConnectionClosed) => {
// Connection timeout.
self.state = ClientConnectionState::Closed;
// We don't want to propagate this to the server and we will
// return no requests and wait for the connection to become
// safe to drop.
return Ok(vec![]);
}
Err(ConnectionError::StreamReadError(inner)) => {
// Reading from the connection failed.
// We should try to write an error message regardless.
let mut internal_error_response =
Response::new(Version::Http11, StatusCode::InternalServerError);
internal_error_response.set_body(Body::new(inner.to_string()));
self.connection.enqueue_response(internal_error_response);
break;
}
Err(ConnectionError::ParseError(inner)) => {
// An error occurred while parsing the read bytes.
// Check if there are any valid parsed requests in the queue.
while let Some(_discarded_request) = self.connection.pop_parsed_request() {}
// Send an error response for the request that gave us the error.
let mut error_response = Response::new(Version::Http11, StatusCode::BadRequest);
error_response.set_body(Body::new(format!(
"{{ \"error\": \"{}\nAll previous unanswered requests will be dropped.\" }}",
inner
)));
self.connection.enqueue_response(error_response);
break;
}
Err(ConnectionError::InvalidWrite) | Err(ConnectionError::StreamWriteError(_)) => {
// This is unreachable because `HttpConnection::try_read()` cannot return this error variant.
unreachable!();
}
Ok(()) => {
if self.connection.has_parsed_requests() {
while let Some(request) = self.connection.pop_parsed_request() {
// Add all valid requests to `parsed_requests`.
parsed_requests.push(request);
}
break 'out;
}
}
}
}
self.in_flight_response_count = self
.in_flight_response_count
.checked_add(parsed_requests.len() as u32)
.ok_or(ServerError::Overflow)?;
// If the state of the connection has changed, we need to update
// the event set in the `epoll` structure.
if self.connection.pending_write() {
self.state = ClientConnectionState::AwaitingOutgoing;
}
Ok(parsed_requests)
}
fn write(&mut self) -> Result<()> {
// The stream is available for writing.
match self.connection.try_write() {
Err(ConnectionError::ConnectionClosed) | Err(ConnectionError::StreamWriteError(_)) => {
// Writing to the stream failed so it will be removed.
self.state = ClientConnectionState::Closed;
}
Err(ConnectionError::InvalidWrite) => {
// A `try_write` call was performed on a connection that has nothing
// to write.
return Err(ServerError::ConnectionError(ConnectionError::InvalidWrite));
}
_ => {
// Check if we still have bytes to write for this connection.
if !self.connection.pending_write() {
self.state = ClientConnectionState::AwaitingIncoming;
}
}
}
Ok(())
}
fn enqueue_response(&mut self, response: Response) -> Result<()> {
if self.state != ClientConnectionState::Closed {
self.connection.enqueue_response(response);
}
self.in_flight_response_count = self
.in_flight_response_count
.checked_sub(1)
.ok_or(ServerError::Underflow)?;
Ok(())
}
/// Discards all pending writes from the inner connection.
fn clear_write_buffer(&mut self) {
self.connection.clear_write_buffer();
}
// Returns `true` if the connection is closed and safe to drop.
fn is_done(&self) -> bool {
self.state == ClientConnectionState::Closed
&& !self.connection.pending_write()
&& self.in_flight_response_count == 0
}
    // Closes the connection and clears the in-flight response count so that
    // the client can be removed.
fn close(&mut self) {
self.clear_write_buffer();
self.state = ClientConnectionState::Closed;
self.in_flight_response_count = 0;
}
}
/// HTTP Server implementation using Unix Domain Sockets and `EPOLL` to
/// handle multiple connections on the same thread.
///
/// The function that handles incoming connections, parses incoming
/// requests and sends responses for awaiting requests is `requests`.
/// It can be called in a loop, which will render the thread that the
/// server runs on incapable of performing other operations, or it can
/// be used in another `EPOLL` structure, as it provides its `epoll`,
/// which is a wrapper over the file descriptor of the epoll structure
/// used within the server, and it can be added to another one using
/// the `EPOLLIN` flag. Whenever there is a notification on that fd,
/// `requests` should be called once.
///
/// # Example
///
/// ## Starting and running the server
///
/// ```
/// use dbs_uhttp::{HttpServer, Response, StatusCode};
///
/// let path_to_socket = "/tmp/example.sock";
/// std::fs::remove_file(path_to_socket).unwrap_or_default();
///
/// // Start the server.
/// let mut server = HttpServer::new(path_to_socket).unwrap();
/// server.start_server().unwrap();
///
/// // Connect a client to the server so it doesn't block in our example.
/// let mut socket = std::os::unix::net::UnixStream::connect(path_to_socket).unwrap();
///
/// // Server loop processing requests.
/// loop {
/// for request in server.requests().unwrap() {
/// let response = request.process(|request| {
/// // Your code here.
/// Response::new(request.http_version(), StatusCode::NoContent)
/// });
/// server.respond(response);
/// }
/// // Break this example loop.
/// break;
/// }
/// ```
pub struct HttpServer {
/// Socket on which we listen for new connections.
socket: UnixListener,
    /// Server's `mio::Poll` instance, which multiplexes the listener socket
    /// and all client connections.
    poll: Poll,
/// Holds the token-connection pairs of the server.
/// Each connection has an associated identification token, which is
/// the file descriptor of the underlying stream.
/// We use the file descriptor of the stream as the key for mapping
/// connections because the 1-to-1 relation is guaranteed by the OS.
connections: HashMap<mio::Token, ClientConnection<UnixStream>>,
/// Payload max size
payload_max_size: usize,
}
impl HttpServer {
/// Constructor for `HttpServer`.
///
/// Returns the newly formed `HttpServer`.
///
/// # Errors
/// Returns an `IOError` when binding or `epoll::create` fails.
pub fn new<P: AsRef<Path>>(path_to_socket: P) -> Result<Self> {
let socket = UnixListener::bind(path_to_socket).map_err(ServerError::IOError)?;
Self::new_from_socket(socket)
}
/// Constructor for `HttpServer`.
///
/// Note that this function requires the socket_fd to be solely owned
/// and not be associated with another File in the caller as it uses
    /// the unsafe `UnixListener::from_raw_fd` method.
///
/// Returns the newly formed `HttpServer`.
///
/// # Errors
/// Returns an `IOError` when `epoll::create` fails.
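    ///
    /// ```ignore
    /// // Sketch: `fd` is assumed to be a listener fd that this process
    /// // solely owns (e.g. one inherited from a supervisor at spawn time).
    /// let mut server = HttpServer::new_from_fd(fd).unwrap();
    /// server.start_server().unwrap();
    /// ```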
pub fn new_from_fd(socket_fd: RawFd) -> Result<Self> {
let socket = unsafe { UnixListener::from_raw_fd(socket_fd) };
Self::new_from_socket(socket)
}
fn new_from_socket(socket: UnixListener) -> Result<Self> {
        // As mio uses edge-triggered epoll under the hood, we must set the
        // socket to nonblocking mode; otherwise we could miss events in some
        // cases.
socket.set_nonblocking(true).map_err(ServerError::IOError)?;
let poll = Poll::new().map_err(ServerError::IOError)?;
Ok(HttpServer {
socket,
poll,
connections: HashMap::new(),
payload_max_size: MAX_PAYLOAD_SIZE,
})
}
    /// Sets the size limit for PUT/PATCH request payloads, overriding the
    /// default limit of 50 KiB (51200 bytes).
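    ///
    /// ```ignore
    /// // Sketch: allow request payloads of up to 1 MiB instead of the
    /// // 50 KiB default.
    /// server.set_payload_max_size(1024 * 1024);
    /// ```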
pub fn set_payload_max_size(&mut self, request_payload_max_size: usize) {
self.payload_max_size = request_payload_max_size;
}
/// Starts the HTTP Server.
pub fn start_server(&mut self) -> Result<()> {
// Add the socket on which we listen for new connections to the
// `epoll` structure.
Self::epoll_add(
&self.poll,
Token(self.socket.as_raw_fd() as usize),
self.socket.as_raw_fd(),
)
}
    /// Polls for events using mio's `Poll`, retrying explicitly when the call
    /// is interrupted or would block.
fn poll_events(&mut self, events: &mut mio::Events) -> Result<()> {
loop {
if let Err(e) = self.poll.poll(events, None) {
if e.kind() == ErrorKind::Interrupted || e.kind() == ErrorKind::WouldBlock {
continue;
}
return Err(ServerError::IOError(e));
}
return Ok(());
}
}
/// This function is responsible for the data exchange with the clients and should
/// be called when we are either notified through `epoll` that we need to exchange
    /// data with at least one client, or when we don't need to perform any other operations
/// on this thread and we can afford to call it in a loop.
///
/// Note that this function will block the current thread if there are no notifications
/// to be handled by the server.
///
/// Returns a collection of complete and valid requests to be processed by the user
/// of the server. Once processed, responses should be sent using `enqueue_responses()`.
///
/// # Errors
/// `IOError` is returned when `read`, `write` or `epoll::ctl` operations fail.
/// `ServerFull` is returned when a client is trying to connect to the server, but
/// full capacity has already been reached.
/// `InvalidWrite` is returned when the server attempted to perform a write operation
/// on a connection on which it is not possible.
pub fn requests(&mut self) -> Result<Vec<ServerRequest>> {
let mut parsed_requests: Vec<ServerRequest> = vec![];
let mut events = mio::Events::with_capacity(MAX_CONNECTIONS);
        // This wraps the `epoll_wait` syscall and blocks the current thread
        // until at least one event is received. The received notifications
        // populate `events` with up to MAX_CONNECTIONS elements.
self.poll_events(&mut events)?;
        // `events.iter()` only yields the notifications that were actually
        // received, so no trimming of the buffer is needed here.
for e in events.iter() {
// Check the file descriptor which produced the notification `e`.
// It could be that we have a new connection, or one of our open
// connections is ready to exchange data with a client.
match e.token() {
Token(fd) if fd == self.socket.as_raw_fd() as usize => {
match self.handle_new_connection() {
Err(ServerError::ServerFull) => {
self.socket
.accept()
.map_err(ServerError::IOError)
.and_then(move |(mut stream, _)| {
stream
.write(SERVER_FULL_ERROR_MESSAGE)
.map_err(ServerError::IOError)
})?;
}
// An internal error will compromise any in-flight requests.
Err(error) => return Err(error),
Ok(()) => {}
}
}
t => {
let client_connection = self.connections.get_mut(&t).unwrap();
// If we receive a hang up on a connection, we clear the write buffer and set
// the connection state to closed to mark it ready for removal from the
// connections map, which will gracefully close the socket.
// The connection is also marked for removal when encountering `EPOLLERR`,
// since this is an "error condition happened on the associated file
// descriptor", according to the `epoll_ctl` man page.
if e.is_error() || e.is_read_closed() || e.is_write_closed() {
client_connection.close();
continue;
}
if e.is_readable() {
// We have bytes to read from this connection.
// If our `read` yields `Request` objects, we wrap them with an ID before
// handing them to the user.
parsed_requests.append(
&mut client_connection
.read()?
.into_iter()
.map(|request| ServerRequest::new(request, e.token()))
.collect(),
);
// If the connection was incoming before we read and we now have to write
// either an error message or an `expect` response, we change its `epoll`
// event set to notify us when the stream is ready for writing.
if client_connection.state == ClientConnectionState::AwaitingOutgoing {
Self::epoll_mod(
&self.poll,
client_connection.connection.as_raw_fd(),
t,
Interest::WRITABLE.add(Interest::READABLE),
)?;
}
} else if e.is_writable() {
// We have bytes to write on this connection.
client_connection.write()?;
// If the connection was outgoing before we tried to write the responses
// and we don't have any more responses to write, we change the `epoll`
// event set to notify us when we have bytes to read from the stream.
if client_connection.state == ClientConnectionState::AwaitingIncoming {
Self::epoll_mod(
&self.poll,
client_connection.connection.as_raw_fd(),
t,
Interest::READABLE,
)?;
}
}
}
}
}
// Remove dead connections.
let epoll = &self.poll;
self.connections.retain(|_token, client_connection| {
if client_connection.is_done() {
// The rawfd should have been registered to the epoll fd.
Self::epoll_del(epoll, client_connection.connection.as_raw_fd()).unwrap();
false
} else {
true
}
});
Ok(parsed_requests)
}
    /// This function is responsible for flushing any remaining outgoing
    /// responses on the server.
///
/// Note that this function can block the thread on write, since the
/// operation is blocking.
pub fn flush_outgoing_writes(&mut self) {
for (_, connection) in self.connections.iter_mut() {
while connection.state == ClientConnectionState::AwaitingOutgoing {
if let Err(e) = connection.write() {
if let ServerError::ConnectionError(ConnectionError::InvalidWrite) = e {
// Nothing is logged since an InvalidWrite means we have successfully
// flushed the connection
}
break;
}
}
}
}
    /// The file descriptor of the internal polling structure can enable the
    /// server to be driven as a non-blocking component of an application.
    ///
    /// Returns a reference to the server's internal `Poll` instance.
///
/// # Example
///
/// ## Non-blocking server
/// ```
/// use std::os::unix::io::AsRawFd;
///
/// use dbs_uhttp::{HttpServer, Response, StatusCode};
/// use vmm_sys_util::epoll;
///
/// // Create our epoll manager.
/// let epoll = epoll::Epoll::new().unwrap();
///
/// let path_to_socket = "/tmp/epoll_example.sock";
/// std::fs::remove_file(path_to_socket).unwrap_or_default();
///
/// // Start the server.
/// let mut server = HttpServer::new(path_to_socket).unwrap();
/// server.start_server().unwrap();
///
/// // Add our server to the `epoll` manager.
/// epoll.ctl(
/// epoll::ControlOperation::Add,
/// server.epoll().as_raw_fd(),
/// epoll::EpollEvent::new(epoll::EventSet::IN, 1234u64),
/// )
/// .unwrap();
///
/// // Connect a client to the server so it doesn't block in our example.
/// let mut socket = std::os::unix::net::UnixStream::connect(path_to_socket).unwrap();
///
/// // Control loop of the application.
/// let mut events = Vec::with_capacity(10);
/// loop {
/// let num_ev = epoll.wait(-1, events.as_mut_slice());
/// for event in events {
/// match event.data() {
/// // The server notification.
/// 1234 => {
/// let request = server.requests();
/// // Process...
/// }
/// // Other `epoll` notifications.
/// _ => {
/// // Do other computation.
/// }
/// }
/// }
/// // Break this example loop.
/// break;
/// }
/// ```
pub fn epoll(&self) -> &Poll {
&self.poll
}
/// Enqueues the provided responses in the outgoing connection.
///
/// # Errors
/// `IOError` is returned when an `epoll::ctl` operation fails.
pub fn enqueue_responses(&mut self, responses: Vec<ServerResponse>) -> Result<()> {
for response in responses {
self.respond(response)?;
}
Ok(())
}
/// Adds the provided response to the outgoing buffer in the corresponding connection.
///
/// # Errors
/// `IOError` is returned when an `epoll::ctl` operation fails.
/// `Underflow` is returned when `enqueue_response` fails.
pub fn respond(&mut self, response: ServerResponse) -> Result<()> {
if let Some(client_connection) = self.connections.get_mut(&response.id) {
// If the connection was incoming before we enqueue the response, we change its
// `epoll` event set to notify us when the stream is ready for writing.
if let ClientConnectionState::AwaitingIncoming = client_connection.state {
client_connection.state = ClientConnectionState::AwaitingOutgoing;
Self::epoll_mod(
&self.poll,
client_connection.connection.as_raw_fd(),
response.id,
Interest::WRITABLE,
)?;
}
client_connection.enqueue_response(response.response)?;
}
Ok(())
}
/// Accepts a new incoming connection and adds it to the `epoll` notification structure.
///
/// # Errors
/// `IOError` is returned when socket or epoll operations fail.
/// `ServerFull` is returned if server full capacity has been reached.
fn handle_new_connection(&mut self) -> Result<()> {
if self.connections.len() == MAX_CONNECTIONS {
// If we want a replacement policy for connections
// this is where we will have it.
return Err(ServerError::ServerFull);
}
loop {
if let Err(e) = self
.socket
.accept()
.and_then(|(stream, _)| stream.set_nonblocking(true).map(|_| stream))
.and_then(|stream| {
let raw_fd = stream.as_raw_fd();
let token = mio::Token(raw_fd as usize);
self.poll.registry().register(
&mut SourceFd(&raw_fd),
token,
Interest::READABLE,
)?;
let mut conn = HttpConnection::new(stream);
conn.set_payload_max_size(self.payload_max_size);
self.connections.insert(token, ClientConnection::new(conn));
Ok(())
})
{
if e.kind() == ErrorKind::Interrupted {
continue;
}
if e.kind() == ErrorKind::WouldBlock {
break;
}
return Err(ServerError::IOError(e));
}
}
Ok(())
}
/// Changes the event type for a connection to either listen for incoming bytes
/// or for when the stream is ready for writing.
///
/// # Errors
/// `IOError` is returned when an `EPOLL_CTL_MOD` control operation fails.
fn epoll_mod(
epoll: &Poll,
stream_fd: RawFd,
token: mio::Token,
evset: mio::Interest,
) -> Result<()> {
epoll
.registry()
.reregister(&mut SourceFd(&stream_fd), token, evset)
.map_err(ServerError::IOError)
}
/// Adds a stream to the `epoll` notification structure with the `EPOLLIN` event set.
///
/// # Errors
/// `IOError` is returned when an `EPOLL_CTL_ADD` control operation fails.
fn epoll_add(poll: &Poll, token: mio::Token, stream_fd: RawFd) -> Result<()> {
poll.registry()
.register(&mut SourceFd(&stream_fd), token, Interest::READABLE)
.map_err(ServerError::IOError)
}
    /// Removes a stream from the `epoll` notification structure.
fn epoll_del(poll: &Poll, stream_fd: RawFd) -> Result<()> {
poll.registry()
.deregister(&mut SourceFd(&stream_fd))
.map_err(ServerError::IOError)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::{Read, Write};
use std::net::Shutdown;
use std::os::unix::net::UnixStream;
use crate::common::Body;
use vmm_sys_util::tempfile::TempFile;
fn get_temp_socket_file() -> TempFile {
let mut path_to_socket = TempFile::new().unwrap();
path_to_socket.remove().unwrap();
path_to_socket
}
#[test]
fn test_wait_one_connection() {
let path_to_socket = get_temp_socket_file();
let mut server = HttpServer::new(path_to_socket.as_path()).unwrap();
server.start_server().unwrap();
// Test one incoming connection.
let mut socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
assert!(server.requests().unwrap().is_empty());
socket
.write_all(
b"PATCH /machine-config HTTP/1.1\r\n\
Content-Length: 13\r\n\
Content-Type: application/json\r\n\r\nwhatever body",
)
.unwrap();
let mut req_vec = server.requests().unwrap();
let server_request = req_vec.remove(0);
server
.respond(server_request.process(|_request| {
let mut response = Response::new(Version::Http11, StatusCode::OK);
let response_body = b"response body";
response.set_body(Body::new(response_body.to_vec()));
response
}))
.unwrap();
assert!(server.requests().unwrap().is_empty());
let mut buf: [u8; 1024] = [0; 1024];
assert!(socket.read(&mut buf[..]).unwrap() > 0);
}
#[test]
fn test_large_payload() {
let path_to_socket = get_temp_socket_file();
let mut server = HttpServer::new(path_to_socket.as_path()).unwrap();
server.start_server().unwrap();
// Test one incoming connection.
let mut socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
assert!(server.requests().unwrap().is_empty());
let mut packets = String::from(
"PATCH /machine-config HTTP/1.1\r\n\
Content-Length: 1028\r\n\
Content-Type: application/json\r\n\r\n",
);
for i in 0..1028 {
packets.push_str(&i.to_string());
}
socket.write_all(packets.as_bytes()).unwrap();
let mut req_vec = server.requests().unwrap();
let server_request = req_vec.remove(0);
server
.respond(server_request.process(|_request| {
let mut response = Response::new(Version::Http11, StatusCode::OK);
let response_body = b"response body";
response.set_body(Body::new(response_body.to_vec()));
response
}))
.unwrap();
assert!(server.requests().unwrap().is_empty());
let mut buf: [u8; 1024] = [0; 1024];
assert!(socket.read(&mut buf[..]).unwrap() > 0);
}
#[test]
fn test_connection_size_limit_exceeded() {
let path_to_socket = get_temp_socket_file();
let mut server = HttpServer::new(path_to_socket.as_path()).unwrap();
server.start_server().unwrap();
// Test one incoming connection.
let mut socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
assert!(server.requests().unwrap().is_empty());
socket
.write_all(
b"PATCH /machine-config HTTP/1.1\r\n\
Content-Length: 51201\r\n\
Content-Type: application/json\r\n\r\naaaaa",
)
.unwrap();
assert!(server.requests().unwrap().is_empty());
assert!(server.requests().unwrap().is_empty());
let mut buf: [u8; 265] = [0; 265];
assert!(socket.read(&mut buf[..]).unwrap() > 0);
let error_message = b"HTTP/1.1 400 \r\n\
Server: Firecracker API\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: 149\r\n\r\n{ \"error\": \"\
Request payload with size 51201 is larger than \
the limit of 51200 allowed by server.\nAll \
previous unanswered requests will be dropped.";
assert_eq!(&buf[..], &error_message[..]);
}
#[test]
fn test_set_payload_size() {
let path_to_socket = get_temp_socket_file();
let mut server = HttpServer::new(path_to_socket.as_path()).unwrap();
server.start_server().unwrap();
server.set_payload_max_size(4);
// Test one incoming connection.
let mut socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
assert!(server.requests().unwrap().is_empty());
socket
.write_all(
b"PATCH /machine-config HTTP/1.1\r\n\
Content-Length: 5\r\n\
Content-Type: application/json\r\n\r\naaaaa",
)
.unwrap();
assert!(server.requests().unwrap().is_empty());
assert!(server.requests().unwrap().is_empty());
let mut buf: [u8; 260] = [0; 260];
assert!(socket.read(&mut buf[..]).unwrap() > 0);
let error_message = b"HTTP/1.1 400 \r\n\
Server: Firecracker API\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: 141\r\n\r\n{ \"error\": \"\
Request payload with size 5 is larger than the \
limit of 4 allowed by server.\nAll previous \
unanswered requests will be dropped.\" }";
assert_eq!(&buf[..], &error_message[..]);
}
#[test]
fn test_wait_one_fd_connection() {
use std::os::unix::io::IntoRawFd;
let path_to_socket = get_temp_socket_file();
let socket_listener = UnixListener::bind(path_to_socket.as_path()).unwrap();
let socket_fd = socket_listener.into_raw_fd();
let mut server = HttpServer::new_from_fd(socket_fd).unwrap();
server.start_server().unwrap();
// Test one incoming connection.
let mut socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
assert!(server.requests().unwrap().is_empty());
socket
.write_all(
b"PATCH /machine-config HTTP/1.1\r\n\
Content-Length: 13\r\n\
Content-Type: application/json\r\n\r\nwhatever body",
)
.unwrap();
let mut req_vec = server.requests().unwrap();
let server_request = req_vec.remove(0);
server
.respond(server_request.process(|request| {
assert_eq!(
std::str::from_utf8(&request.body.as_ref().unwrap().body).unwrap(),
"whatever body"
);
let mut response = Response::new(Version::Http11, StatusCode::OK);
let response_body = b"response body";
response.set_body(Body::new(response_body.to_vec()));
response
}))
.unwrap();
assert!(server.requests().unwrap().is_empty());
let mut buf: [u8; 1024] = [0; 1024];
assert!(socket.read(&mut buf[..]).unwrap() > 0);
assert!(String::from_utf8_lossy(&buf).contains("response body"));
}
#[test]
fn test_wait_concurrent_connections() {
let path_to_socket = get_temp_socket_file();
let mut server = HttpServer::new(path_to_socket.as_path()).unwrap();
server.start_server().unwrap();
// Test two concurrent connections.
let mut first_socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
assert!(server.requests().unwrap().is_empty());
first_socket
.write_all(
b"PATCH /machine-config HTTP/1.1\r\n\
Content-Length: 13\r\n\
Content-Type: application/json\r\n\r\nwhatever body",
)
.unwrap();
let mut second_socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
let mut req_vec = server.requests().unwrap();
let server_request = req_vec.remove(0);
server
.respond(server_request.process(|_request| {
let mut response = Response::new(Version::Http11, StatusCode::OK);
let response_body = b"response body";
response.set_body(Body::new(response_body.to_vec()));
response
}))
.unwrap();
second_socket
.write_all(
b"GET /machine-config HTTP/1.1\r\n\
Content-Type: application/json\r\n\r\n",
)
.unwrap();
let mut req_vec = server.requests().unwrap();
let second_server_request = req_vec.remove(0);
assert_eq!(
second_server_request.request,
Request::try_from(
b"GET /machine-config HTTP/1.1\r\n\
Content-Type: application/json\r\n\r\n",
None
)
.unwrap()
);
let mut buf: [u8; 1024] = [0; 1024];
assert!(first_socket.read(&mut buf[..]).unwrap() > 0);
first_socket.shutdown(std::net::Shutdown::Both).unwrap();
server
.respond(second_server_request.process(|_request| {
let mut response = Response::new(Version::Http11, StatusCode::OK);
let response_body = b"response second body";
response.set_body(Body::new(response_body.to_vec()));
response
}))
.unwrap();
assert!(server.requests().unwrap().is_empty());
let mut buf: [u8; 1024] = [0; 1024];
assert!(second_socket.read(&mut buf[..]).unwrap() > 0);
second_socket.shutdown(std::net::Shutdown::Both).unwrap();
assert!(server.requests().unwrap().is_empty());
}
#[test]
fn test_wait_expect_connection() {
let path_to_socket = get_temp_socket_file();
let mut server = HttpServer::new(path_to_socket.as_path()).unwrap();
server.start_server().unwrap();
// Test one incoming connection with `Expect: 100-continue`.
let mut socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
assert!(server.requests().unwrap().is_empty());
socket
.write_all(
b"PATCH /machine-config HTTP/1.1\r\n\
Content-Length: 13\r\n\
Expect: 100-continue\r\n\r\n",
)
.unwrap();
// `wait` on the server to receive what the client sent on the socket.
// This will set the stream direction to `Outgoing`, as we need to send a `100 CONTINUE` response.
let req_vec = server.requests().unwrap();
assert!(req_vec.is_empty());
// Another `wait`, this time to send the response.
// Will be called because of an `EPOLLOUT` notification.
let req_vec = server.requests().unwrap();
assert!(req_vec.is_empty());
let mut buf: [u8; 1024] = [0; 1024];
assert!(socket.read(&mut buf[..]).unwrap() > 0);
socket.write_all(b"whatever body").unwrap();
let mut req_vec = server.requests().unwrap();
let server_request = req_vec.remove(0);
server
.respond(server_request.process(|_request| {
let mut response = Response::new(Version::Http11, StatusCode::OK);
let response_body = b"response body";
response.set_body(Body::new(response_body.to_vec()));
response
}))
.unwrap();
let req_vec = server.requests().unwrap();
assert!(req_vec.is_empty());
let mut buf: [u8; 1024] = [0; 1024];
assert!(socket.read(&mut buf[..]).unwrap() > 0);
}
#[test]
fn test_wait_many_connections() {
let path_to_socket = get_temp_socket_file();
let mut server = HttpServer::new(path_to_socket.as_path()).unwrap();
server.start_server().unwrap();
let mut sockets: Vec<UnixStream> = Vec::with_capacity(MAX_CONNECTIONS + 1);
for _ in 0..MAX_CONNECTIONS {
sockets.push(UnixStream::connect(path_to_socket.as_path()).unwrap());
assert!(server.requests().unwrap().is_empty());
}
sockets.push(UnixStream::connect(path_to_socket.as_path()).unwrap());
assert!(server.requests().unwrap().is_empty());
let mut buf: [u8; 120] = [0; 120];
sockets[MAX_CONNECTIONS].read_exact(&mut buf).unwrap();
assert_eq!(&buf[..], SERVER_FULL_ERROR_MESSAGE);
assert_eq!(server.connections.len(), 10);
{
// Drop this stream.
let _refused_stream = sockets.pop().unwrap();
}
assert_eq!(server.connections.len(), 10);
// Check that the server detects a connection shutdown.
let sock: &UnixStream = sockets.get(0).unwrap();
sock.shutdown(Shutdown::Both).unwrap();
assert!(server.requests().unwrap().is_empty());
// Server should drop a closed connection.
assert_eq!(server.connections.len(), 9);
// Close the backing FD of this connection by dropping
// it out of scope.
{
// Enforce the drop call on the stream
let _sock = sockets.pop().unwrap();
}
assert!(server.requests().unwrap().is_empty());
// Server should drop a closed connection.
assert_eq!(server.connections.len(), 8);
let sock: &UnixStream = sockets.get(1).unwrap();
// Close both the read and write sides of the socket
// separately and check that the server detects it.
sock.shutdown(Shutdown::Read).unwrap();
sock.shutdown(Shutdown::Write).unwrap();
assert!(server.requests().unwrap().is_empty());
// Server should drop a closed connection.
assert_eq!(server.connections.len(), 7);
}
#[test]
fn test_wait_parse_error() {
let path_to_socket = get_temp_socket_file();
let mut server = HttpServer::new(path_to_socket.as_path()).unwrap();
server.start_server().unwrap();
// Test one incoming connection.
let mut socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
socket.set_nonblocking(true).unwrap();
assert!(server.requests().unwrap().is_empty());
socket
.write_all(
b"PATCH /machine-config HTTP/1.1\r\n\
Content-Length: alpha\r\n\
Content-Type: application/json\r\n\r\nwhatever body",
)
.unwrap();
assert!(server.requests().unwrap().is_empty());
assert!(server.requests().unwrap().is_empty());
let mut buf: [u8; 255] = [0; 255];
assert!(socket.read(&mut buf[..]).unwrap() > 0);
let error_message = b"HTTP/1.1 400 \r\n\
Server: Firecracker API\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: 136\r\n\r\n{ \"error\": \"Invalid header. \
Reason: Invalid value. Key:Content-Length; Value: alpha\nAll previous unanswered requests will be dropped.\" }";
assert_eq!(&buf[..], &error_message[..]);
socket
.write_all(
b"PATCH /machine-config HTTP/1.1\r\n\
Content-Length: alpha\r\n\
Content-Type: application/json\r\n\r\nwhatever body",
)
.unwrap();
}
#[test]
fn test_wait_in_flight_responses() {
let path_to_socket = get_temp_socket_file();
let mut server = HttpServer::new(path_to_socket.as_path()).unwrap();
server.start_server().unwrap();
// Test a connection dropped and then a new one appearing
// before the user had a chance to send the response to the
// first one.
let mut first_socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
assert!(server.requests().unwrap().is_empty());
first_socket
.write_all(
b"PATCH /machine-config HTTP/1.1\r\n\
Content-Length: 13\r\n\
Content-Type: application/json\r\n\r\nwhatever body",
)
.unwrap();
let mut req_vec = server.requests().unwrap();
let server_request = req_vec.remove(0);
first_socket.shutdown(std::net::Shutdown::Both).unwrap();
assert!(server.requests().unwrap().is_empty());
let mut second_socket = UnixStream::connect(path_to_socket.as_path()).unwrap();
second_socket.set_nonblocking(true).unwrap();
assert!(server.requests().unwrap().is_empty());
server
.enqueue_responses(vec![server_request.process(|_request| {
let mut response = Response::new(Version::Http11, StatusCode::OK);
let response_body = b"response body";
response.set_body(Body::new(response_body.to_vec()));
response
})])
.unwrap();
// assert!(server.requests().unwrap().is_empty());
assert_eq!(server.connections.len(), 1);
let mut buf: [u8; 1024] = [0; 1024];
assert!(second_socket.read(&mut buf[..]).is_err());
second_socket
.write_all(
b"GET /machine-config HTTP/1.1\r\n\
Content-Type: application/json\r\n\r\n",
)
.unwrap();
let mut req_vec = server.requests().unwrap();
let second_server_request = req_vec.remove(0);
assert_eq!(
second_server_request.request,
Request::try_from(
b"GET /machine-config HTTP/1.1\r\n\
Content-Type: application/json\r\n\r\n",
None
)
.unwrap()
);
server
.respond(second_server_request.process(|_request| {
let mut response = Response::new(Version::Http11, StatusCode::OK);
let response_body = b"response second body";
response.set_body(Body::new(response_body.to_vec()));
response
}))
.unwrap();
assert!(server.requests().unwrap().is_empty());
let mut buf: [u8; 1024] = [0; 1024];
assert!(second_socket.read(&mut buf[..]).unwrap() > 0);
second_socket.shutdown(std::net::Shutdown::Both).unwrap();
assert!(server.requests().is_ok());
}
}
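// Server lifecycle exercised by the tests above (sketch, using only calls
// that appear in this module):
//
//     let mut server = HttpServer::new(path_to_socket.as_path()).unwrap();
//     server.start_server().unwrap();            // begin accepting clients
//     let mut reqs = server.requests().unwrap(); // non-blocking poll
//     let response = reqs.remove(0).process(|_req| { /* build a Response */ });
//     server.respond(response).unwrap();         // queue and flush the reply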
| 39.904027 | 142 | 0.554071 |
4ab35e74d575502ae591f8bf71930cccf7d42203 | 5,663 | //! A module that contains all the actions related to reading input from the terminal.
//! Like reading a line, reading a character and reading asynchronously.
mod input;
#[cfg(unix)]
mod unix_input;
#[cfg(windows)]
mod windows_input;
#[cfg(unix)]
pub use self::unix_input::SyncReader;
#[cfg(unix)]
use self::unix_input::UnixInput;
#[cfg(windows)]
pub use self::windows_input::SyncReader;
#[cfg(windows)]
use self::windows_input::WindowsInput;
use self::input::parse_event;
pub use self::input::{input, TerminalInput};
use crossterm_utils::{ErrorKind, Result};
use std::io;
use std::sync::{mpsc, Arc};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, Sender};
use std::thread;
/// This trait defines the actions that can be performed with the terminal input.
/// It can be implemented so that a concrete `ITerminalInput` implementation
/// fulfills the input requirements of a specific platform.
///
/// ## For example:
///
/// This trait is implemented for Windows and UNIX systems.
/// Unix is using the 'TTY' and windows is using 'libc' C functions to read the input.
trait ITerminalInput {
/// Read one character from the user input
fn read_char(&self) -> io::Result<char>;
/// Read the input asynchronously from the user.
fn read_async(&self) -> AsyncReader;
/// Read the input asynchronously until a certain character is hit.
fn read_until_async(&self, delimiter: u8) -> AsyncReader;
/// Read the input synchronously from the user.
fn read_sync(&self) -> SyncReader;
fn enable_mouse_mode(&self) -> Result<()>;
fn disable_mouse_mode(&self) -> Result<()>;
}
/// Enum to specify which input event has occurred.
#[derive(Debug, PartialOrd, PartialEq)]
pub enum InputEvent {
/// A single key or a combination is pressed.
Keyboard(KeyEvent),
/// A mouse event occurred.
Mouse(MouseEvent),
/// An unsupported event has occurred.
Unsupported(Vec<u8>),
/// An unknown event has occurred.
Unknown,
}
/// Enum to specify which mouse event has occurred.
#[derive(Debug, PartialOrd, PartialEq)]
pub enum MouseEvent {
/// A mouse press has occurred; this contains the pressed button and the position of the press.
Press(MouseButton, u16, u16),
/// A mouse button was released.
Release(u16, u16),
/// A mouse button was held.
Hold(u16, u16),
/// An unknown mouse event has occurred.
Unknown,
}
/// Enum to define mouse buttons.
#[derive(Debug, PartialOrd, PartialEq)]
pub enum MouseButton {
/// Left mouse button
Left,
/// Right mouse button
Right,
/// Middle mouse button
Middle,
/// Scroll up
WheelUp,
/// Scroll down
WheelDown,
}
/// Enum with different key or key combinations.
#[derive(Debug, PartialOrd, PartialEq)]
pub enum KeyEvent {
Backspace,
Left,
Right,
Up,
Down,
Home,
End,
PageUp,
PageDown,
Delete,
Insert,
F(u8),
Char(char),
Alt(char),
Ctrl(char),
Null,
Esc,
}
/// This type allows you to read input asynchronously: input events are gathered on a background thread and queued for you to read.
///
/// **[SyncReader](./LINK)**
/// If you want a blocking, less resource-consuming read, use `SyncReader` instead; it skips the background thread and queueing entirely and performs a blocking read.
///
/// This type is an iterator and can be used to iterate over input events.
///
/// # Remarks
/// - Threads spawned will be disposed of as soon as the `AsyncReader` goes out of scope.
/// - MPSC channels are used to queue input events; this type implements an iterator over the receiving (rx) side of the queue.
pub struct AsyncReader {
event_rx: Receiver<u8>,
shutdown: Arc<AtomicBool>,
}
impl AsyncReader {
/// Construct a new instance of the `AsyncReader`.
/// Reading starts immediately when this function is called.
pub fn new(function: Box<Fn(&Sender<u8>, &Arc<AtomicBool>) + Send>) -> AsyncReader {
let shutdown_handle = Arc::new(AtomicBool::new(false));
let (event_tx, event_rx) = mpsc::channel();
let thread_shutdown = shutdown_handle.clone();
thread::spawn(move || loop {
function(&event_tx, &thread_shutdown);
});
AsyncReader {
event_rx,
shutdown: shutdown_handle,
}
}
/// Stop the input event reading.
///
/// You don't necessarily have to call this function because it will automatically be called when this reader goes out of scope.
///
/// # Remarks
/// - Background thread will be closed.
/// - Reading cannot be restarted with this handle afterwards; create a new `AsyncReader` instead.
pub fn stop_reading(&mut self) {
self.shutdown.store(true, Ordering::SeqCst);
}
}
impl Iterator for AsyncReader {
type Item = InputEvent;
/// Check if there are input events to read.
///
/// It returns `None` when there is nothing to read, and `Some(InputEvent)` when events are available.
///
/// # Remark
/// - This is **not** a blocking call.
fn next(&mut self) -> Option<Self::Item> {
let mut iterator = self.event_rx.try_iter();
match iterator.next() {
Some(char_value) => {
if let Ok(char_value) = parse_event(char_value, &mut iterator) {
Some(char_value)
} else {
None
}
}
None => None,
}
}
}
impl Drop for AsyncReader {
fn drop(&mut self) {
self.stop_reading();
}
}
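// Illustrative usage sketch (not part of the original module): poll an
// `AsyncReader` for queued input events until `Esc` is pressed. This assumes
// the platform-specific `TerminalInput` returned by `input()` exposes the
// `read_async()` declared in `ITerminalInput` above.
#[allow(dead_code)]
fn poll_until_esc_example() {
    let mut reader = input().read_async();
    loop {
        if let Some(InputEvent::Keyboard(KeyEvent::Esc)) = reader.next() {
            break;
        }
        // Avoid spinning a full core while waiting for the next event.
        thread::sleep(std::time::Duration::from_millis(10));
    }
}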
| 29.649215 | 166 | 0.648596 |
db40898f27a09110d950e800a689ae372238ffd1 | 26,593 | use crate::error::{ErrorResponse, RestApiResult};
use async_std::sync::{Arc, RwLock};
use factorio_bot_core::process::process_control::InstanceState;
use factorio_bot_core::types::{AreaFilter, FactorioEntity};
use rocket::serde::json::Json;
use rocket::State;
#[derive(FromForm, Debug, JsonSchema)]
pub struct FindEntitiesQueryParams {
/// w/h e.g. 2,3
area: Option<String>,
/// x/y e.g. 2,3
position: Option<String>,
/// radius to search in
radius: Option<f64>,
/// name to search for
name: Option<String>,
/// entity type to search for, see http://...
entity_type: Option<String>,
}
/// Finds entities in given area
#[openapi(tag = "Query")]
#[get("/testFindEntities?<info..>")]
pub async fn test(info: FindEntitiesQueryParams) -> RestApiResult<String> {
info!("find entities called with {:?}", info);
Ok(Json("test".into()))
}
/// Finds entities in given area
#[openapi(tag = "Query")]
#[get("/findEntities?<area>&<position>&<radius>&<name>&<entity_type>")]
pub async fn find_entities(
area: Option<String>,
position: Option<String>,
radius: Option<f64>,
name: Option<String>,
entity_type: Option<String>,
instance_state: &State<Arc<RwLock<Option<InstanceState>>>>,
) -> RestApiResult<Vec<FactorioEntity>> {
let area_filter = match &area {
Some(area) => AreaFilter::Rect(area.parse().unwrap()),
None => {
if let Some(position) = &position {
AreaFilter::PositionRadius((position.parse().unwrap(), radius))
} else {
return Err(ErrorResponse::new(
"area or position + optional radius needed".into(),
1,
));
}
}
};
let instance_state = instance_state.read().await;
if let Some(instance_state) = &*instance_state {
Ok(Json(
instance_state
.rcon
.find_entities_filtered(&area_filter, name.clone(), entity_type.clone())
.await
.unwrap(),
))
} else {
Err(ErrorResponse::new("not started".into(), 2))
}
}
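// Illustrative request sketch (not part of the original module). The route
// accepts either an `area` rectangle or a `position` plus an optional
// `radius`; `name` and `entity_type` further narrow the results, e.g.:
//
//     GET /findEntities?position=12.5,8.0&radius=16&name=iron-ore
//
// Which coordinate string formats parse successfully depends on the
// `FromStr` impls in `factorio_bot_core::types` backing the `.parse()`
// calls above.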
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct PlanPathQueryParams {
// entity_name: String,
// entity_type: String,
// underground_entity_name: String,
// underground_entity_type: String,
// underground_max: u8,
// from_position: String,
// to_position: String,
// to_direction: u8,
// }
//
// pub async fn plan_path(
// rcon: web::Data<Arc<FactorioRcon>>,
// world: web::Data<Arc<FactorioWorld>>,
// info: actix_web::web::Query<PlanPathQueryParams>,
// ) -> Result<Json<Vec<FactorioEntity>>, ActixAnyhowError> {
// Ok(Json(
// rcon.plan_path(
// &world,
// &info.entity_name.clone(),
// &info.entity_type.clone(),
// &info.underground_entity_name.clone(),
// &info.underground_entity_type.clone(),
// info.underground_max,
// &info.from_position.parse()?,
// &info.to_position.parse()?,
// Direction::from_u8(info.to_direction).unwrap(),
// )
// .await?,
// ))
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct FindTilesQueryParams {
// area: Option<String>,
// position: Option<String>,
// radius: Option<f64>,
// name: Option<String>,
// }
// // #[get("/findTiles?<area>&<position>&<radius>&<name>")]
// pub async fn find_tiles(
// rcon: web::Data<Arc<FactorioRcon>>,
// info: actix_web::web::Query<FindTilesQueryParams>,
// ) -> Result<Json<Vec<FactorioTile>>, ActixAnyhowError> {
// let area_filter = match &info.area {
// Some(area) => AreaFilter::Rect(area.parse()?),
// None => {
// if let Some(position) = &info.position {
// AreaFilter::PositionRadius((position.parse()?, info.radius))
// } else {
// return Err(ActixAnyhowError::from(anyhow!(
// "area or position + optional radius needed"
// )));
// }
// }
// };
// Ok(Json(
// rcon.find_tiles_filtered(&area_filter, info.name.clone())
// .await?,
// ))
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct InventoryContentsAtQueryParams {
// query: String,
// }
// // #[get("/inventoryContentsAt?<query>")]
// pub async fn inventory_contents_at(
// rcon: web::Data<Arc<FactorioRcon>>,
// info: actix_web::web::Query<InventoryContentsAtQueryParams>,
// ) -> Result<Json<Vec<Option<InventoryResponse>>>, ActixAnyhowError> {
// let parts: Vec<&str> = info.query.split(';').collect();
// let entities: Vec<RequestEntity> = parts
// .iter()
// .map(|part| {
// let parts: Vec<&str> = part.split('@').collect();
// RequestEntity {
// name: String::from(parts[0]),
// position: parts[1].parse().unwrap(),
// }
// })
// .collect();
// Ok(Json(rcon.inventory_contents_at(entities).await?))
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct MovePlayerQueryParams {
// goal: String,
// radius: Option<f64>,
// }
// // #[get("/<player_id>/move?<goal>&<radius>")]
// pub async fn move_player(
// info: actix_web::web::Query<MovePlayerQueryParams>,
// path: PathInfo<u32>,
// rcon: web::Data<Arc<FactorioRcon>>,
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<FactorioPlayer>, ActixAnyhowError> {
// let player_id = *path;
// let goal: Position = info.goal.parse()?;
// rcon.move_player(&world, player_id, &goal, info.radius)
// .await?;
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(player.clone())),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// // #[get("/<player_id>/playerInfo")]
// pub async fn player_info(
// path: PathInfo<u32>,
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<FactorioPlayer>, ActixAnyhowError> {
// let player_id = *path;
//
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(player.clone())),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct PlaceEntityQueryParams {
// item: String,
// position: String,
// direction: u8,
// }
//
// // #[get("/<player_id>/placeEntity?<item>&<position>&<direction>")]
// pub async fn place_entity(
// path: PathInfo<u32>,
// info: actix_web::web::Query<PlaceEntityQueryParams>,
// rcon: web::Data<Arc<FactorioRcon>>,
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<PlaceEntityResult>, ActixAnyhowError> {
// let player_id = *path;
// let entity = rcon
// .place_entity(
// player_id,
// info.item.clone(),
// info.position.parse()?,
// info.direction,
// &world,
// )
// .await?;
// async_std::task::sleep(Duration::from_millis(50)).await;
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(PlaceEntityResult {
// entity,
// player: player.clone(),
// })),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct CheatItemQueryParams {
// name: String,
// count: u32,
// }
// // #[get("/<player_id>/cheatItem?<name>&<count>")]
// #[allow(clippy::too_many_arguments)]
// pub async fn cheat_item(
// path: PathInfo<u32>,
// info: actix_web::web::Query<CheatItemQueryParams>,
// world: web::Data<Arc<FactorioWorld>>,
// rcon: web::Data<Arc<FactorioRcon>>,
// ) -> Result<Json<FactorioPlayer>, ActixAnyhowError> {
// let player_id = *path;
// rcon.cheat_item(player_id, &info.name, info.count).await?;
// async_std::task::sleep(Duration::from_millis(50)).await;
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(player.clone())),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct CheatTechnologyQueryParams {
// tech: String,
// }
//
// // #[get("/cheatTechnology?<tech>")]
// pub async fn cheat_technology(
// info: actix_web::web::Query<CheatTechnologyQueryParams>,
// rcon: web::Data<Arc<FactorioRcon>>,
// ) -> Result<Json<Value>, ActixAnyhowError> {
// rcon.cheat_technology(&info.tech).await?;
// Ok(Json(json!({"status": "ok"})))
// }
//
// // #[get("/cheatAllTechnologies")]
// pub async fn cheat_all_technologies(
// rcon: web::Data<Arc<FactorioRcon>>,
// ) -> Result<Json<Value>, ActixAnyhowError> {
// rcon.cheat_all_technologies().await?;
// Ok(Json(json!({"status": "ok"})))
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct InsertToInventoryQueryParams {
// entity_name: String,
// entity_position: String,
// inventory_type: u32,
// item_name: String,
// item_count: u32,
// }
// // #[get("/<player_id>/insertToInventory?<entity_name>&<entity_position>&<inventory_type>&<item_name>&<item_count>")]
// #[allow(clippy::too_many_arguments)]
// pub async fn insert_to_inventory(
// info: actix_web::web::Query<InsertToInventoryQueryParams>,
// path: PathInfo<u32>,
// world: web::Data<Arc<FactorioWorld>>,
// rcon: web::Data<Arc<FactorioRcon>>,
// ) -> Result<Json<FactorioPlayer>, ActixAnyhowError> {
// let player_id = *path;
// rcon.insert_to_inventory(
// player_id,
// info.entity_name.clone(),
// info.entity_position.parse()?,
// info.inventory_type,
// info.item_name.clone(),
// info.item_count,
// &world,
// )
// .await?;
// async_std::task::sleep(Duration::from_millis(50)).await;
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(player.clone())),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct RemoveFromInventoryQueryParams {
// entity_name: String,
// entity_position: String,
// inventory_type: u32,
// item_name: String,
// item_count: u32,
// }
//
// // #[get(
// // "/<player_id>/removeFromInventory?<entity_name>&<entity_position>&<inventory_type>&<item_name>&<item_count>"
// // )]
// // #[allow(clippy::too_many_arguments)]
// pub async fn remove_from_inventory(
// path: PathInfo<u32>,
// info: actix_web::web::Query<RemoveFromInventoryQueryParams>,
// rcon: web::Data<Arc<FactorioRcon>>,
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<FactorioPlayer>, ActixAnyhowError> {
// let player_id = *path;
// rcon.remove_from_inventory(
// player_id,
// info.entity_name.clone(),
// info.entity_position.parse()?,
// info.inventory_type,
// info.item_name.clone(),
// info.item_count,
// &world,
// )
// .await?;
// async_std::task::sleep(Duration::from_millis(50)).await;
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(player.clone())),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// // #[get("/players")]
// pub async fn all_players(
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<Vec<FactorioPlayer>>, ActixAnyhowError> {
// let mut all_players: Vec<FactorioPlayer> = Vec::new();
// for player in world.players.iter() {
// all_players.push(player.clone());
// }
// Ok(Json(all_players))
// }
//
// // #[get("/itemPrototypes")]
// pub async fn item_prototypes(
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<HashMap<String, FactorioItemPrototype>>, ActixAnyhowError> {
// let mut data: HashMap<String, FactorioItemPrototype> = HashMap::new();
// for item_prototype in world.item_prototypes.iter() {
// data.insert(item_prototype.name.clone(), item_prototype.clone());
// }
// Ok(Json(data))
// }
//
// // #[get("/entityPrototypes")]
// pub async fn entity_prototypes(
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<HashMap<String, FactorioEntityPrototype>>, ActixAnyhowError> {
// let mut data: HashMap<String, FactorioEntityPrototype> = HashMap::new();
// for prototype in world.entity_prototypes.iter() {
// data.insert(prototype.name.clone(), prototype.clone());
// }
// Ok(Json(data))
// }
//
// // #[get("/serverSave")]
// pub async fn server_save(
// rcon: web::Data<Arc<FactorioRcon>>,
// ) -> Result<Json<Value>, ActixAnyhowError> {
// rcon.server_save().await?;
// Ok(Json(json!({"status": "ok"})))
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct AddResearchQueryParams {
// tech: String,
// }
// // #[get("/addResearch?<tech>")]
// pub async fn add_research(
// info: actix_web::web::Query<AddResearchQueryParams>,
// rcon: web::Data<Arc<FactorioRcon>>,
// ) -> Result<Json<Value>, ActixAnyhowError> {
// rcon.add_research(&info.tech).await?;
// Ok(Json(json!({"status": "ok"})))
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct StoreMapDataQueryParams {
// key: String,
// }
//
// // #[post("/storeMapData?<key>", format = "application/json", data = "<value>")]
// pub async fn store_map_data(
// rcon: web::Data<Arc<FactorioRcon>>,
// data: Json<Value>,
// info: actix_web::web::Query<StoreMapDataQueryParams>,
// ) -> Result<Json<Value>, ActixAnyhowError> {
// rcon.store_map_data(&info.key, data.into_inner()).await?;
// Ok(Json(json!({"status": "ok"})))
// }
// // #[get("/retrieveMapData?<key>")]
// pub async fn retrieve_map_data(
// rcon: web::Data<Arc<FactorioRcon>>,
// info: actix_web::web::Query<StoreMapDataQueryParams>,
// ) -> Result<Json<Value>, ActixAnyhowError> {
// let res = rcon.retrieve_map_data(&info.key).await?;
// match res {
// Some(result) => Ok(Json(result)),
// None => Ok(Json(json!(null))),
// }
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct PlaceBlueprintQueryParams {
// blueprint: String,
// position: String,
// direction: Option<u8>,
// force_build: Option<bool>,
// only_ghosts: Option<bool>,
// inventory_player_ids: Option<String>,
// }
// // #[get("/<player_id>/placeBlueprint?<position>&<direction>&<force_build>&<blueprint>&<only_ghosts>")]
// // #[allow(clippy::too_many_arguments)]
// pub async fn place_blueprint(
// world: web::Data<Arc<FactorioWorld>>,
// rcon: web::Data<Arc<FactorioRcon>>,
// path: PathInfo<u32>,
// info: actix_web::web::Query<PlaceBlueprintQueryParams>,
// ) -> Result<Json<PlaceEntitiesResult>, ActixAnyhowError> {
// let player_id = *path;
// let inventory_player_ids: Vec<u32> = match info.inventory_player_ids.as_ref() {
// Some(inventory_player_ids) => inventory_player_ids
// .split(',')
// .map(|id| id.parse().unwrap())
// .collect(),
// None => vec![],
// };
// let entities = rcon
// .place_blueprint(
// player_id,
// info.blueprint.clone(),
// &info.position.parse()?,
// info.direction.unwrap_or(0),
// info.force_build.unwrap_or(false),
// info.only_ghosts.unwrap_or(false),
// inventory_player_ids,
// &world,
// )
// .await?;
// async_std::task::sleep(Duration::from_millis(50)).await;
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(PlaceEntitiesResult {
// player: player.clone(),
// entities,
// })),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct ReviveGhostQueryParams {
// name: String,
// position: String,
// }
// // #[get("/<player_id>/reviveGhost?<position>&<name>")]
// // #[allow(clippy::too_many_arguments)]
// pub async fn revive_ghost(
// info: actix_web::web::Query<ReviveGhostQueryParams>,
// path: PathInfo<u32>,
// world: web::Data<Arc<FactorioWorld>>,
// rcon: web::Data<Arc<FactorioRcon>>,
// ) -> Result<Json<PlaceEntityResult>, ActixAnyhowError> {
// let player_id = *path;
// let entity = rcon
// .revive_ghost(player_id, &info.name, &info.position.parse()?, &world)
// .await?;
// async_std::task::sleep(Duration::from_millis(50)).await;
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(PlaceEntityResult {
// player: player.clone(),
// entity,
// })),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct CheatBlueprintQueryParams {
// blueprint: String,
// position: String,
// direction: Option<u8>,
// force_build: Option<bool>,
// }
// // #[get("/<player_id>/cheatBlueprint?<position>&<direction>&<force_build>&<blueprint>")]
// pub async fn cheat_blueprint(
// world: web::Data<Arc<FactorioWorld>>,
// rcon: web::Data<Arc<FactorioRcon>>,
// info: actix_web::web::Query<CheatBlueprintQueryParams>,
// path: PathInfo<u32>,
// ) -> Result<Json<PlaceEntitiesResult>, ActixAnyhowError> {
// let player_id = *path;
// let entities = rcon
// .cheat_blueprint(
// player_id,
// info.blueprint.clone(),
// &info.position.parse()?,
// info.direction.unwrap_or(0),
// info.force_build.unwrap_or(false),
// )
// .await?;
// async_std::task::sleep(Duration::from_millis(50)).await;
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(PlaceEntitiesResult {
// player: player.clone(),
// entities,
// })),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct ParseBlueprintQueryParams {
// label: String,
// blueprint: String,
// }
//
// // #[get("/parseBlueprint?<blueprint>")]
// pub async fn parse_blueprint(
// world: web::Data<Arc<FactorioWorld>>,
// info: actix_web::web::Query<ParseBlueprintQueryParams>,
// ) -> Result<Json<FactorioBlueprintInfo>, ActixAnyhowError> {
// let decoded =
// BlueprintCodec::decode_string(&info.blueprint).expect("failed to parse blueprint");
// let rect = blueprint_build_area(world.entity_prototypes.clone(), &info.blueprint);
// let response = FactorioBlueprintInfo {
// rect: rect.clone(),
// label: info.label.clone(),
// blueprint: info.blueprint.clone(),
// width: rect.width() as u16,
// height: rect.height() as u16,
// data: serde_json::to_value(decoded).unwrap(),
// };
// Ok(Json(response))
// }
//
// // #[get("/recipes")]
// pub async fn all_recipes(
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<HashMap<String, FactorioRecipe>>, ActixAnyhowError> {
// let mut map: HashMap<String, FactorioRecipe> = HashMap::new();
// for recipe in world.recipes.iter() {
// map.insert(recipe.name.clone(), recipe.clone());
// }
// Ok(Json(map))
// }
// // #[get("/playerForce")]
// pub async fn player_force(
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<FactorioForce>, ActixAnyhowError> {
// Ok(Json(
// world
// .forces
// .get("player")
// .expect("player force not found")
// .clone(),
// ))
// }
// pub async fn all_forces(
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<Vec<FactorioForce>>, ActixAnyhowError> {
// let mut forces: Vec<FactorioForce> = vec![];
// for force in world.forces.iter() {
// forces.push(force.clone());
// }
// Ok(Json(forces))
// }
//
// // #[get("/<player_id>/mine?<name>&<position>&<count>")]
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct MineQueryParams {
// name: String,
// position: String,
// count: u32,
// }
// pub async fn mine(
// info: actix_web::web::Query<MineQueryParams>,
// path: PathInfo<u32>,
// rcon: web::Data<Arc<FactorioRcon>>,
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<FactorioPlayer>, ActixAnyhowError> {
// let player_id = *path;
// rcon.player_mine(
// &world,
// player_id,
// &info.name,
// &info.position.parse()?,
// info.count,
// )
// .await?;
// async_std::task::sleep(Duration::from_millis(50)).await;
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(player.clone())),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// // #[get("/<player_id>/craft?<recipe>&<count>")]
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct CraftQueryParams {
// recipe: String,
// count: u32,
// }
// pub async fn craft(
// info: actix_web::web::Query<CraftQueryParams>,
// path: PathInfo<u32>,
// rcon: web::Data<Arc<FactorioRcon>>,
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<FactorioPlayer>, ActixAnyhowError> {
// let player_id = *path;
// rcon.player_craft(&world, player_id, &info.recipe, info.count)
// .await?;
// async_std::task::sleep(Duration::from_millis(50)).await;
// let player = world.players.get(&player_id);
// match player {
// Some(player) => Ok(Json(player.clone())),
// None => Err(ActixAnyhowError::from(anyhow!("player not found"))),
// }
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct FindOffshorePumpPlacementOptionsQueryParams {
// search_center: String,
// pump_direction: u8,
// }
// pub async fn find_offshore_pump_placement_options(
// info: actix_web::web::Query<FindOffshorePumpPlacementOptionsQueryParams>,
// rcon: web::Data<Arc<FactorioRcon>>,
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<Json<Vec<Position>>, ActixAnyhowError> {
// Ok(Json(
// rcon.find_offshore_pump_placement_options(
// &world,
// info.search_center.parse()?,
// Direction::from_u8(info.pump_direction).expect("invalid direction"),
// )
// .await?
// .iter()
// .map(|pos| pos.into())
// .collect(),
// ))
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct PlanQueryParams {
// name: String,
// bot_count: u32,
// }
// pub async fn run_plan(
// info: actix_web::web::Query<PlanQueryParams>,
// planner: web::Data<Arc<RwLock<Planner>>>,
// ) -> Result<String, ActixAnyhowError> {
// let lua_path_str = format!("plans/{}.lua", info.name);
// let lua_path = Path::new(&lua_path_str);
// let lua_path = std::fs::canonicalize(lua_path).unwrap();
// if !lua_path.exists() {
// return Err(anyhow!("plan {} not found at {}", info.name, lua_path_str));
// }
// let lua_code = read_to_string(lua_path).unwrap();
// let graph = std::thread::spawn(move || {
// let mut planner = planner.write();
// planner.reset();
// planner.plan(lua_code, info.bot_count).unwrap();
// planner.graph()
// })
// .join()?;
// Ok(graph.graphviz_dot())
// }
//
// #[derive(Deserialize)]
// #[serde(rename_all = "camelCase")]
// pub struct ExecuteTaskGraphQueryParams {
// name: String,
// }
// pub async fn execute_taskgraph(
// info: actix_web::web::Query<ExecuteTaskGraphQueryParams>,
// planner: web::Data<Arc<RwLock<Planner>>>,
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<String, ActixAnyhowError> {
// let lua_path_str = format!("plans/{}.lua", info.name);
// let lua_path = Path::new(&lua_path_str);
// let lua_path = std::fs::canonicalize(lua_path).unwrap();
// if !lua_path.exists() {
// panic!("plan {} not found at {}", info.name, lua_path_str);
// }
// let lua_code = read_to_string(lua_path).unwrap();
// let graph = std::thread::spawn(move || {
// let mut planner = planner.write();
// planner.plan(lua_code, world.players.len() as u32).unwrap();
// planner.graph()
// })
// .join()
// .unwrap();
// let dot = graph.graphviz_dot();
// Ok(dot)
// }
//
// pub async fn plans() -> Result<Json<Vec<String>>, ActixAnyhowError> {
// let entries: Vec<String> = read_dir("plans/")
// .unwrap()
// .map(|res| res.map(|e| e.path()).unwrap())
// .filter(|p| p.extension().is_some() && p.extension().unwrap() == "lua")
// .map(|p| p.with_extension(""))
// .map(|p| p.file_name().unwrap().to_str().unwrap().into())
// .collect();
// Ok(Json(entries))
// }
// pub async fn web_entity_graph(
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<String, ActixAnyhowError> {
// world.entity_graph.connect()?;
// let dot = world.entity_graph.graphviz_dot_condensed();
// Ok(dot)
// }
// pub async fn web_task_graph(
// planner: web::Data<Arc<RwLock<Planner>>>,
// ) -> Result<String, ActixAnyhowError> {
// let planner = planner.read();
// let dot = planner.graph().graphviz_dot();
// Ok(dot)
// }
// pub async fn web_flow_graph(
// world: web::Data<Arc<FactorioWorld>>,
// ) -> Result<String, ActixAnyhowError> {
// world.entity_graph.connect()?;
// world.flow_graph.update()?;
// let dot = world.flow_graph.graphviz_dot_condensed();
// Ok(dot)
// }
| 34.225225 | 120 | 0.585116 |
fe5925f26f0ac206507734f27f46353339bd3818 | 9,153 | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use driver::session::Session;
use middle::resolve;
use middle::ty;
use middle::typeck;
use util::ppaux;
use syntax::ast::*;
use syntax::codemap;
use syntax::{ast_util, ast_map};
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor {
sess: Session,
ast_map: ast_map::map,
def_map: resolve::DefMap,
method_map: typeck::method_map,
tcx: ty::ctxt,
}
impl Visitor<bool> for CheckCrateVisitor {
fn visit_item(&mut self, i:@item, env:bool) {
check_item(self, self.sess, self.ast_map, self.def_map, i, env);
}
fn visit_pat(&mut self, p:@Pat, env:bool) {
check_pat(self, p, env);
}
fn visit_expr(&mut self, ex:@Expr, env:bool) {
check_expr(self, self.sess, self.def_map, self.method_map,
self.tcx, ex, env);
}
}
pub fn check_crate(sess: Session,
crate: &Crate,
ast_map: ast_map::map,
def_map: resolve::DefMap,
method_map: typeck::method_map,
tcx: ty::ctxt) {
let mut v = CheckCrateVisitor {
sess: sess,
ast_map: ast_map,
def_map: def_map,
method_map: method_map,
tcx: tcx,
};
visit::walk_crate(&mut v, crate, false);
sess.abort_if_errors();
}
pub fn check_item(v: &mut CheckCrateVisitor,
sess: Session,
ast_map: ast_map::map,
def_map: resolve::DefMap,
it: @item,
_is_const: bool) {
match it.node {
item_static(_, _, ex) => {
v.visit_expr(ex, true);
check_item_recursion(sess, ast_map, def_map, it);
}
item_enum(ref enum_definition, _) => {
for var in (*enum_definition).variants.iter() {
for ex in var.node.disr_expr.iter() {
v.visit_expr(*ex, true);
}
}
}
_ => visit::walk_item(v, it, false)
}
}
pub fn check_pat(v: &mut CheckCrateVisitor, p: @Pat, _is_const: bool) {
fn is_str(e: @Expr) -> bool {
match e.node {
ExprVstore(
@Expr { node: ExprLit(@codemap::Spanned {
node: lit_str(_),
_}),
_ },
ExprVstoreUniq
) => true,
_ => false
}
}
match p.node {
// Let through plain ~-string literals here
PatLit(a) => if !is_str(a) { v.visit_expr(a, true); },
PatRange(a, b) => {
if !is_str(a) { v.visit_expr(a, true); }
if !is_str(b) { v.visit_expr(b, true); }
}
_ => visit::walk_pat(v, p, false)
}
}
pub fn check_expr(v: &mut CheckCrateVisitor,
sess: Session,
def_map: resolve::DefMap,
method_map: typeck::method_map,
tcx: ty::ctxt,
e: @Expr,
is_const: bool) {
if is_const {
match e.node {
ExprUnary(_, UnDeref, _) => { }
ExprUnary(_, UnBox(_), _) | ExprUnary(_, UnUniq, _) => {
sess.span_err(e.span,
"disallowed operator in constant expression");
return;
}
ExprLit(@codemap::Spanned {node: lit_str(_), _}) => { }
ExprBinary(*) | ExprUnary(*) => {
if method_map.contains_key(&e.id) {
sess.span_err(e.span, "user-defined operators are not \
allowed in constant expressions");
}
}
ExprLit(_) => (),
ExprCast(_, _) => {
let ety = ty::expr_ty(tcx, e);
if !ty::type_is_numeric(ety) && !ty::type_is_unsafe_ptr(ety) {
sess.span_err(e.span, ~"can not cast to `" +
ppaux::ty_to_str(tcx, ety) +
"` in a constant expression");
}
}
ExprPath(ref pth) => {
// NB: In the future you might wish to relax this slightly
// to handle on-demand instantiation of functions via
// foo::<bar> in a const. Currently that is only done on
// a path in trans::callee that only works in block contexts.
if !pth.segments.iter().all(|segment| segment.types.is_empty()) {
sess.span_err(
e.span, "paths in constants may only refer to \
items without type parameters");
}
match def_map.find(&e.id) {
Some(&DefStatic(*)) |
Some(&DefFn(_, _)) |
Some(&DefVariant(_, _, _)) |
Some(&DefStruct(_)) => { }
Some(&def) => {
debug!("(checking const) found bad def: %?", def);
sess.span_err(
e.span,
"paths in constants may only refer to \
constants or functions");
}
None => {
sess.span_bug(e.span, "unbound path in const?!");
}
}
}
ExprCall(callee, _, NoSugar) => {
match def_map.find(&callee.id) {
Some(&DefStruct(*)) => {} // OK.
Some(&DefVariant(*)) => {} // OK.
_ => {
sess.span_err(
e.span,
"function calls in constants are limited to \
struct and enum constructors");
}
}
}
ExprParen(e) => { check_expr(v, sess, def_map, method_map,
tcx, e, is_const); }
ExprVstore(_, ExprVstoreSlice) |
ExprVec(_, MutImmutable) |
ExprAddrOf(MutImmutable, _) |
ExprField(*) |
ExprIndex(*) |
ExprTup(*) |
ExprRepeat(*) |
ExprStruct(*) => { }
ExprAddrOf(*) => {
sess.span_err(
e.span,
"borrowed pointers in constants may only refer to \
immutable values");
}
_ => {
sess.span_err(e.span,
"constant contains unimplemented expression type");
return;
}
}
}
match e.node {
ExprLit(@codemap::Spanned {node: lit_int(v, t), _}) => {
if (v as u64) > ast_util::int_ty_max(
if t == ty_i { sess.targ_cfg.int_type } else { t }) {
sess.span_err(e.span, "literal out of range for its type");
}
}
ExprLit(@codemap::Spanned {node: lit_uint(v, t), _}) => {
if v > ast_util::uint_ty_max(
if t == ty_u { sess.targ_cfg.uint_type } else { t }) {
sess.span_err(e.span, "literal out of range for its type");
}
}
_ => ()
}
visit::walk_expr(v, e, is_const);
}
#[deriving(Clone)]
struct env {
root_it: @item,
sess: Session,
ast_map: ast_map::map,
def_map: resolve::DefMap,
idstack: @mut ~[NodeId]
}
struct CheckItemRecursionVisitor;
// Make sure a const item doesn't recursively refer to itself
// FIXME: Should use the dependency graph when it's available (#1356)
pub fn check_item_recursion(sess: Session,
ast_map: ast_map::map,
def_map: resolve::DefMap,
it: @item) {
let env = env {
root_it: it,
sess: sess,
ast_map: ast_map,
def_map: def_map,
idstack: @mut ~[]
};
let mut visitor = CheckItemRecursionVisitor;
visitor.visit_item(it, env);
}
impl Visitor<env> for CheckItemRecursionVisitor {
fn visit_item(&mut self, it: @item, env: env) {
if env.idstack.iter().any(|x| x == &(it.id)) {
env.sess.span_fatal(env.root_it.span, "recursive constant");
}
env.idstack.push(it.id);
visit::walk_item(self, it, env);
env.idstack.pop();
}
fn visit_expr(&mut self, e: @Expr, env: env) {
match e.node {
ExprPath(*) => match env.def_map.find(&e.id) {
Some(&DefStatic(def_id, _)) if ast_util::is_local(def_id) =>
match env.ast_map.get_copy(&def_id.node) {
ast_map::node_item(it, _) => {
self.visit_item(it, env);
}
_ => fail!("const not bound to an item")
},
_ => ()
},
_ => ()
}
visit::walk_expr(self, e, env);
}
}
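// Illustrative examples (not part of the original pass) of expressions this
// check accepts or rejects inside constants, following the rules encoded in
// `check_expr` above (written in the same 2013-era syntax as this file):
//
//     static OK: int = 1 + 2;             // built-in operator: allowed
//     static OK_STR: &'static str = "hi"; // string literal: allowed
//     static BAD_BOX: @int = @3;          // box expression: "disallowed operator"
//     static BAD_CALL: int = foo();       // plain fn call: rejected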
| 33.163043 | 77 | 0.488474 |
23175127f08d9bca29e4cffe7d9fb33a6aaa3986 | 7,768 | //! # WASM Encrypt/Decrypt Functions
//!
//! The library contains the `encrypt` and `decrypt` functions that are
//! exported for use in javascript via the WebAssembly interface (wasm).
//!
//! # Example Javascript Import
//! ```js
//! // Import the encrypt and decrypt functions.
//! // Must be in a module.
//! import { encrypt, decrypt, default as init } from './crypt.js';
//! async function load_wasm() {
//! await init('./crypt_bg.wasm');
//! window.encrypt = encrypt;
//! window.decrypt = decrypt;
//! }
//! ```
//!
//! # Example Javascript Encrypt Usage
//! ```js
//! function encrypt(password, plaintext) {
//! var result = window.encrypt("crypt-aes-256-gcm", pass, plaintext);
//! return result;
//! }
//! ```
//!
//! # Example Javascript Decrypt Usage
//! ```js
//! function decrypt(password, ciphertext) {
//! var result = window.decrypt("crypt-aes-256-gcm", pass, ciphertext);
//! return result;
//! }
//! ```
extern crate wasm_bindgen;
use wasm_bindgen::prelude::*;
mod shared;
use shared::ALGORITHMS;
mod aes_256_gcm;
mod aes_256_gcm_siv;
/// Return the module name.
#[wasm_bindgen]
pub fn get_name() -> String {
"crypt".to_string()
}
/// Return the number of algorithms available.
#[wasm_bindgen]
pub fn get_num_algorithms() -> usize {
ALGORITHMS.len()
}
/// Returns the n-th algorithm (zero-based index).
#[wasm_bindgen]
pub fn get_algorithm(i: usize) -> String {
if i < ALGORITHMS.len() {
return ALGORITHMS[i].to_string();
}
format!("error:algorithms:invalid-index:{}", i)
}
/// Return the header prefix.
///
/// # Arguments
/// * `algorithm`: The algorithm identifier.
///
/// # Returns
/// The header prefix.
#[wasm_bindgen]
pub fn header_prefix(algorithm: String) -> String {
shared::header_prefix(algorithm)
}
/// Return the header suffix.
///
/// # Arguments
/// * `algorithm`: The algorithm identifier.
///
/// # Returns
/// The header suffix.
#[wasm_bindgen]
pub fn header_suffix(algorithm: String) -> String {
shared::header_suffix(algorithm)
}
/// Encrypts a string coming from Javascript using the specified algorithm.
///
/// It accepts a plaintext string and converts it to a MIME-encoded block
/// with a prefix and suffix.
///
/// # Arguments
/// * `algorithm`: The algorithm identifier.
/// * `password`: Used to encrypt the plaintext.
/// * `plaintext`: The string to encrypt.
///
/// # Returns
/// The encrypted, mime encoded ciphertext.
#[wasm_bindgen]
pub fn encrypt(algorithm: String, password: String, plaintext: String) -> String {
if !shared::is_valid_algorithm(algorithm.to_string()) {
return format!("error:encrypt:invalid:{}", algorithm);
}
if algorithm == "crypt-aes-256-gcm" {
return aes_256_gcm::encrypt(password, plaintext);
}
if algorithm == "crypt-aes-256-gcm-siv" {
return aes_256_gcm_siv::encrypt(password, plaintext);
}
format!("error:encrypt:not-implemented:{}", algorithm)
}
/// Decrypt a string.
///
/// It accepts a ciphertext string created by the `encrypt` function
/// and converts it back to a plaintext string.
///
/// # Arguments
/// * `algorithm`: The algorithm identifier.
/// * `password`: Used to decrypt the ciphertext.
/// * `ciphertext`: The encrypted, MIME-encoded ciphertext.
///
/// # Returns
/// The decrypted plaintext.
#[wasm_bindgen]
pub fn decrypt(algorithm: String, password: String, ciphertext: String) -> String {
if !shared::is_valid_algorithm(algorithm.to_string()) {
return format!("error:decrypt:invalid:{}", algorithm);
}
if algorithm == "crypt-aes-256-gcm" {
return aes_256_gcm::decrypt(password, ciphertext);
}
if algorithm == "crypt-aes-256-gcm-siv" {
return aes_256_gcm_siv::decrypt(password, ciphertext);
}
format!("error:decrypt:not-implemented:{}", algorithm)
}
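// Illustrative Rust round-trip sketch (not part of the original module);
// it mirrors what the wasm-bindgen tests below exercise for both algorithms:
//
//     let ct = encrypt("crypt-aes-256-gcm".into(), "secret".into(), "hi".into());
//     let pt = decrypt("crypt-aes-256-gcm".into(), "secret".into(), ct);
//     assert_eq!(pt, "hi");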
#[cfg(test)]
mod tests {
use wasm_bindgen_test::*;
extern crate wasm_bindgen;
use crate::decrypt;
use crate::encrypt;
use crate::get_algorithm;
use crate::get_num_algorithms;
use crate::header_prefix;
#[wasm_bindgen_test]
pub fn test01() {
// Verify that bad algorithms are caught.
println!("test01: start");
let algorithm = "bad-bad-bad";
println!("test01: algorithm: {}", algorithm.to_string());
let prefix = header_prefix(algorithm.to_string());
println!("test02: prefix: {}", prefix.to_string());
assert!(prefix.starts_with("error:header:invalid-algorithm"));
let password = "secret";
let plaintext = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.";
println!("test01: encrypting");
let ciphertext = encrypt(
algorithm.to_string(),
password.to_string(),
plaintext.to_string(),
);
println!("test01: ciphertext={}", ciphertext.to_string());
assert!(ciphertext.starts_with("error:encrypt:invalid:"));
println!("test01: done");
}
#[wasm_bindgen_test]
pub fn test02() {
// Verify the algorithms interface.
println!("test02: start");
let num = get_num_algorithms();
println!("test02: num={}", num);
assert!(num > 0);
assert!(num == 2);
let al0 = get_algorithm(0);
println!("test02: al0={}", al0);
assert!(!al0.starts_with("error:"));
let aln = get_algorithm(num + 1);
println!("test02: aln={}", aln);
assert!(aln.starts_with("error:"));
println!("test02: done");
}
#[wasm_bindgen_test]
pub fn test03() {
// Verify that the aes-256-gcm encryption works.
println!("test03: start");
let algorithm = "crypt-aes-256-gcm";
println!("test03: algorithm: {}", algorithm.to_string());
let prefix = header_prefix(algorithm.to_string());
println!("test03: prefix: {}", prefix.to_string());
let password = "secret";
let plaintext = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.";
println!("test03: encrypting");
let ciphertext = encrypt(
algorithm.to_string(),
password.to_string(),
plaintext.to_string(),
);
println!("test03: decrypting");
let testtext = decrypt(
algorithm.to_string(),
password.to_string(),
ciphertext.to_string(),
);
println!("test03: '{}' ==? {}", &plaintext, &testtext);
assert_eq!(&plaintext, &testtext);
println!("test03: done");
}
#[wasm_bindgen_test]
pub fn test04() {
// Verify that the aes-256-gcm-siv encryption works.
println!("test04: start");
let algorithm = "crypt-aes-256-gcm-siv";
println!("test04: algorithm: {}", algorithm.to_string());
let prefix = header_prefix(algorithm.to_string());
println!("test04: prefix: {}", prefix.to_string());
let password = "secret";
let plaintext = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.";
println!("test04: encrypting");
let ciphertext = encrypt(
algorithm.to_string(),
password.to_string(),
plaintext.to_string(),
);
println!("test04: decrypting");
let testtext = decrypt(
algorithm.to_string(),
password.to_string(),
ciphertext.to_string(),
);
println!("test04: '{}' ==? {}", &plaintext, &testtext);
assert_eq!(&plaintext, &testtext);
println!("test04: done");
}
}
| 30.825397 | 150 | 0.619851 |
d7fca6a8c8d2cd45cb8854b84331c1f9339d1876 | 11,548 | use serde::{Deserialize, Serialize};
use serde_json::json;
use serde_json::Value as Json;
use std::convert::TryFrom;
use svm_abi_decoder::CallData;
use svm_abi_encoder::{ByteSize, Encoder};
use svm_sdk_types::value::{Composite, Primitive, Value as SdkValue};
use svm_sdk_types::{Address, Amount};
use super::get_field;
use super::parse_json;
use super::serde_types::{AddressWrapper, HexBlob};
use crate::api::json::JsonError;
/// Given an `Input Data` JSON, encodes it into a binary `Input Data`
/// and returns the result wrapped with a JSON.
///
/// ```json
/// {
/// "data": "FFC103..."
/// }
/// ```
pub fn encode_inputdata(json: &str) -> Result<Json, JsonError> {
let decoded = DecodedInputData::new(json)?;
let calldata = HexBlob(decoded.encode().unwrap());
Ok(json!({ "data": calldata }))
}
pub fn decode_raw_input(data: &[u8]) -> Result<Json, JsonError> {
let calldata = CallData::new(data);
Ok(calldata_to_json(calldata))
}
/// Given a binary `Calldata` (wrapped within a JSON), decodes it into a JSON
pub fn decode_inputdata(json: &str) -> Result<Json, JsonError> {
let json = &mut parse_json(json)?;
let encoded_data = get_field::<HexBlob<Vec<u8>>>(json, "data")?.0;
Ok(calldata_to_json(CallData::new(&encoded_data)))
}
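// Illustrative round-trip sketch (not part of the original module): each
// entry in `abi` names the type signature of the matching entry in `data`,
// exactly as the `test!` macro in the tests below exercises:
//
//     let json = json!({ "abi": ["u32", "bool"], "data": [10, true] });
//     let encoded = encode_inputdata(&json.to_string())?; // {"data": "..."}
//     assert_eq!(decode_inputdata(&encoded.to_string())?, json);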
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub(crate) struct DecodedInputData {
abi: Vec<TySig>,
data: Vec<Json>,
}
impl DecodedInputData {
pub fn new(json: &str) -> Result<Self, JsonError> {
let json = &mut parse_json(json)?;
let abi = get_field::<Vec<TySig>>(json, "abi")?;
let data = get_field::<Vec<Json>>(json, "data")?;
if abi.len() != data.len() {
Err(JsonError::InvalidField {
path: "data".to_string(),
})
} else {
Ok(Self { abi, data })
}
}
/// Like `Self::zip`, but in borrowed form.
fn zip_ref(&self) -> impl Iterator<Item = (&TySig, &Json)> {
self.abi.iter().zip(self.data.iter())
}
fn zip(self) -> impl Iterator<Item = (TySig, Json)> {
self.abi.into_iter().zip(self.data.into_iter())
}
fn cap(&self) -> Result<usize, JsonError> {
self.zip_ref()
.map(|(ty, raw)| ty.value_byte_size(&raw))
.sum()
}
pub fn encode(self) -> Result<Vec<u8>, JsonError> {
let cap = self.cap()?;
let mut buf = svm_sdk_std::Vec::with_capacity(cap);
self.zip()
.try_for_each(|(ty, raw)| encode_value(ty, raw).map(|value| value.encode(&mut buf)))?;
Ok(buf.as_slice().to_vec())
}
}
pub(crate) fn calldata_to_json(mut calldata: CallData) -> Json {
let mut abi = vec![];
let mut data = vec![];
while let Some(value) = calldata.next().into() {
abi.push(sdk_value_utils::ty_sig_of_sdk_value(&value));
data.push(sdk_value_utils::sdk_value_to_json(value));
}
json!({ "abi": abi, "data": data })
}
mod sdk_value_utils {
use svm_types::{Address, BytesPrimitive};
use super::*;
/// Given a [`svm_sdk_types::value::Value`], encodes its value as a
/// JSON value. This function, together with [`ty_sig_of_sdk_value`], can
/// give a **ful** overview over some values, with both its type signature
/// and its value.
pub fn sdk_value_to_json(value: SdkValue) -> Json {
match value {
SdkValue::Primitive(prim) => match prim {
Primitive::Bool(x) => json!(x),
Primitive::I8(x) => json!(x),
Primitive::U8(x) => json!(x),
Primitive::I16(x) => json!(x),
Primitive::U16(x) => json!(x),
Primitive::I32(x) => json!(x),
Primitive::U32(x) => json!(x),
Primitive::I64(x) => json!(x),
Primitive::U64(x) => json!(x),
Primitive::Amount(x) => json!(x.0),
Primitive::Address(x) => json!(AddressWrapper(Address::new(x.as_slice()))),
_ => unreachable!(),
},
SdkValue::Composite(Composite::Vec(values)) => Json::Array(
values
.into_iter()
.map(|sdk_value| sdk_value_to_json(sdk_value))
.collect(),
),
}
}
/// Given a [`svm_sdk_types::value::Value`], encodes its type signature as a
/// JSON value.
pub fn ty_sig_of_sdk_value(value: &SdkValue) -> Json {
match value {
SdkValue::Primitive(prim) => match prim {
Primitive::Bool(_) => "bool",
Primitive::I8(_) => "i8",
Primitive::U8(_) => "u8",
Primitive::I16(_) => "i16",
Primitive::U16(_) => "u16",
Primitive::I32(_) => "i32",
Primitive::U32(_) => "u32",
Primitive::I64(_) => "i64",
Primitive::U64(_) => "u64",
Primitive::Amount(_) => "amount",
Primitive::Address(_) => "address",
_ => unreachable!(),
}
.into(),
SdkValue::Composite(Composite::Vec(values)) => {
if values.is_empty() {
Json::Null
} else {
let ty = &values.last().unwrap();
Json::Array(vec![ty_sig_of_sdk_value(ty)])
}
}
}
}
pub fn sdk_value_from_json(json: Json, ty_sig: TySigPrim) -> Option<SdkValue> {
fn json_as_numeric<N>(json: Json) -> Option<SdkValue>
where
N: TryFrom<i64> + Into<SdkValue>,
{
json.as_i64()
.and_then(|n| N::try_from(n).ok())
.map(Into::into)
}
match ty_sig {
TySigPrim::Bool => json.as_bool().map(Into::into),
TySigPrim::Amount => json
.as_u64()
.map(|val| SdkValue::Primitive(Primitive::Amount(Amount(val)))),
TySigPrim::Address => serde_json::from_value::<AddressWrapper>(json)
.ok()
.map(|addr| {
let addr = svm_sdk_types::Address::from(*addr.0.as_ref());
SdkValue::Primitive(Primitive::Address(addr))
}),
TySigPrim::I8 => json_as_numeric::<i8>(json),
TySigPrim::U8 => json_as_numeric::<u8>(json),
TySigPrim::I16 => json_as_numeric::<i16>(json),
TySigPrim::U16 => json_as_numeric::<u16>(json),
TySigPrim::I32 => json_as_numeric::<i32>(json),
TySigPrim::U32 => json_as_numeric::<u32>(json),
TySigPrim::I64 => json_as_numeric::<i64>(json),
// [`u64`] is the only JSON integer type which doesn't fit into `i64`.
TySigPrim::U64 => json.as_u64().map(Into::into),
}
}
}
// See <https://serde.rs/enum-representations.html>.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(untagged)]
enum TySig {
Prim(TySigPrim),
Array(Vec<TySig>),
}
impl TySig {
fn value_byte_size(&self, value: &Json) -> Result<usize, JsonError> {
let byte_size = match self {
TySig::Array(types) => {
assert_eq!(types.len(), 1);
let ty = &types[0];
// we initialize `byte_size` for the `length` marker.
let mut byte_size = 1;
let elems = value.as_array().ok_or(JsonError::InvalidField {
path: "calldata".to_string(),
})?;
for elem in elems {
byte_size += ty.value_byte_size(elem)?;
}
byte_size
}
TySig::Prim(prim) => match prim {
TySigPrim::Bool => bool::max_byte_size(),
TySigPrim::I8 => i8::max_byte_size(),
TySigPrim::U8 => u8::max_byte_size(),
TySigPrim::I16 => i16::max_byte_size(),
TySigPrim::U16 => u16::max_byte_size(),
TySigPrim::I32 => i32::max_byte_size(),
TySigPrim::U32 => u32::max_byte_size(),
TySigPrim::I64 => i64::max_byte_size(),
TySigPrim::U64 => u64::max_byte_size(),
TySigPrim::Amount => Amount::max_byte_size(),
TySigPrim::Address => Address::max_byte_size(),
},
};
Ok(byte_size)
}
}
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum TySigPrim {
Bool,
I8,
U8,
I16,
U16,
I32,
U32,
I64,
U64,
Amount,
Address,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum TyPrimSdkValue {
Bool(bool),
I8(i8),
U8(u8),
I16(i16),
U16(u16),
I32(i32),
U32(u32),
I64(i64),
U64(u64),
Amount(u64),
Address(AddressWrapper),
}
fn encode_value(ty: TySig, value: Json) -> Result<SdkValue, JsonError> {
match ty {
TySig::Array(types) => encode_array(&types, value),
TySig::Prim(prim) => {
sdk_value_utils::sdk_value_from_json(value, prim).ok_or(JsonError::InvalidField {
path: "calldata".to_string(),
})
}
}
}
fn encode_array(types: &[TySig], mut value: Json) -> Result<SdkValue, JsonError> {
assert_eq!(types.len(), 1);
let ty = &types[0];
let mut value = value.take();
let elems = value.as_array_mut().ok_or(JsonError::InvalidField {
path: "calldata".to_string(),
})?;
let mut vec = svm_sdk_std::Vec::with_capacity(10);
for elem in elems.iter_mut() {
let elem = encode_value(ty.clone(), elem.take())?;
vec.push(elem);
}
let c = Composite::Vec(vec);
Ok(SdkValue::Composite(c))
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
struct CalldataEncoded {
calldata: HexBlob<Vec<u8>>,
}
#[cfg(test)]
mod tests {
use super::*;
macro_rules! test {
($abi:expr, $data:expr) => {{
let json = json!({"abi": $abi, "data": $data });
let encoded = encode_inputdata(&json.to_string()).unwrap();
let decoded = decode_inputdata(&encoded.to_string()).unwrap();
assert_eq!(decoded, json);
}}
}
#[test]
fn encode_calldata_bool() {
test!(["bool", "bool"], [true, false]);
}
#[test]
fn encode_calldata_i8_u8() {
test!(["i8", "u8"], [std::i8::MIN as isize, std::u8::MAX as isize]);
}
#[test]
fn encode_calldata_i16_u16() {
test!(
["i16", "u16"],
[std::i16::MIN as isize, std::u16::MAX as isize]
);
}
#[test]
fn encode_calldata_i32_u32() {
test!(
["i32", "u32"],
[std::i32::MIN as isize, std::u32::MAX as isize]
);
}
#[test]
fn encode_calldata_i64_u64() {
test!(["i64"], [std::i64::MIN as isize]);
test!(["u64"], [std::u64::MAX as usize]);
}
#[test]
fn encode_calldata_amount() {
test!(["amount", "amount"], [10 as u64, 20 as u64]);
}
#[test]
fn encode_calldata_address() {
let addr = "1020304050607080900010203040506070809000";
test!(["address"], [addr]);
}
#[test]
fn encode_calldata_array() {
test!([["u32"]], [[10, 20, 30]]);
test!([["i8"]], [[-10, 0, 30]]);
test!([["u32"], ["i8"]], [[10, 20, 30], [-10, 0, 20]]);
}
}
| 29.839793 | 98 | 0.52797 |
1128daf9c90ff2f8b8eea755d1bce3bb9574aa4f 156 // cargo-deps: reqwest="0.9"
extern crate reqwest;
fn main() {
println!("{}", reqwest::get("https://www.rust-lang.org/en-US/").unwrap().text().unwrap());
}
| 19.5 | 94 | 0.615385 |
1c1b338236c71fa9e816ad7713737b94c003abef 90 use std::fs::File;
use std::io;

/// Synchronize `target` with `source` by copying the file's bytes (one-way).
pub fn sync_files(source: &str, target: &str) -> io::Result<()> {
    io::copy(&mut File::open(source)?, &mut File::create(target)?)?;
    Ok(())
} | 11.25 | 37 | 0.522222 |
e8bdacfcd0d54c9efd6f0200d40aae891cfaba17 | 57,299 | use crate::internal_prelude::*;
use core::{
cmp::Ordering::{self, Equal, Greater, Less},
ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign},
time::Duration as StdDuration,
};
use core::convert::{TryFrom, TryInto};
/// A span of time with nanosecond precision.
///
/// Each `Duration` is composed of a whole number of seconds and a fractional
/// part represented in nanoseconds.
///
/// `Duration` implements many traits, including [`Add`], [`Sub`], [`Mul`], and
/// [`Div`], among others.
///
/// This implementation allows for negative durations, unlike
/// [`core::time::Duration`].
#[cfg_attr(serde, derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
serde,
serde(from = "crate::serde::Duration", into = "crate::serde::Duration")
)]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
pub struct Duration {
/// Number of whole seconds.
pub(crate) seconds: i64,
/// Number of nanoseconds within the second. The sign always matches the
/// `seconds` field.
pub(crate) nanoseconds: i32, // always -10^9 < nanoseconds < 10^9
}
/// The number of seconds in one minute.
const SECONDS_PER_MINUTE: i64 = 60;
/// The number of seconds in one hour.
const SECONDS_PER_HOUR: i64 = 60 * SECONDS_PER_MINUTE;
/// The number of seconds in one day.
const SECONDS_PER_DAY: i64 = 24 * SECONDS_PER_HOUR;
/// The number of seconds in one week.
const SECONDS_PER_WEEK: i64 = 7 * SECONDS_PER_DAY;
impl Duration {
/// Equivalent to `0.seconds()`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::zero(), 0.seconds());
/// ```
#[inline(always)]
pub const fn zero() -> Self {
Self::seconds(0)
}
/// Equivalent to `1.nanoseconds()`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::nanosecond(), 1.nanoseconds());
/// ```
#[inline(always)]
pub const fn nanosecond() -> Self {
Self::nanoseconds(1)
}
/// Equivalent to `1.microseconds()`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::microsecond(), 1.microseconds());
/// ```
#[inline(always)]
pub const fn microsecond() -> Self {
Self::microseconds(1)
}
/// Equivalent to `1.milliseconds()`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::millisecond(), 1.milliseconds());
/// ```
#[inline(always)]
pub const fn millisecond() -> Self {
Self::milliseconds(1)
}
/// Equivalent to `1.seconds()`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::second(), 1.seconds());
/// ```
#[inline(always)]
pub const fn second() -> Self {
Self::seconds(1)
}
/// Equivalent to `1.minutes()`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::minute(), 1.minutes());
/// ```
#[inline(always)]
pub const fn minute() -> Self {
Self::minutes(1)
}
/// Equivalent to `1.hours()`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::hour(), 1.hours());
/// ```
#[inline(always)]
pub const fn hour() -> Self {
Self::hours(1)
}
/// Equivalent to `1.days()`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::day(), 1.days());
/// ```
#[inline(always)]
pub const fn day() -> Self {
Self::days(1)
}
/// Equivalent to `1.weeks()`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::week(), 1.weeks());
/// ```
#[inline(always)]
pub const fn week() -> Self {
Self::weeks(1)
}
/// The maximum possible duration. Adding any positive duration to this will
/// cause an overflow.
///
/// The value returned by this method may change at any time.
#[inline(always)]
pub const fn max_value() -> Self {
Self::new(i64::max_value(), 999_999_999)
}
/// The minimum possible duration. Adding any negative duration to this will
/// cause an overflow.
///
/// The value returned by this method may change at any time.
#[inline(always)]
pub const fn min_value() -> Self {
Self::new(i64::min_value(), -999_999_999)
}
/// Check if a duration is exactly zero.
///
/// ```rust
/// # use time::prelude::*;
/// assert!(0.seconds().is_zero());
/// assert!(!1.nanoseconds().is_zero());
/// ```
#[inline(always)]
pub const fn is_zero(self) -> bool {
(self.seconds == 0) & (self.nanoseconds == 0)
}
/// Check if a duration is negative.
///
/// ```rust
/// # use time::prelude::*;
/// assert!((-1).seconds().is_negative());
/// assert!(!0.seconds().is_negative());
/// assert!(!1.seconds().is_negative());
/// ```
#[inline(always)]
pub const fn is_negative(self) -> bool {
(self.seconds < 0) | (self.nanoseconds < 0)
}
/// Check if a duration is positive.
///
/// ```rust
/// # use time::{prelude::*};
/// assert!(1.seconds().is_positive());
/// assert!(!0.seconds().is_positive());
/// assert!(!(-1).seconds().is_positive());
/// ```
#[inline(always)]
pub const fn is_positive(self) -> bool {
(self.seconds > 0) | (self.nanoseconds > 0)
}
/// Get the sign of the duration.
///
/// ```rust
/// # use time::{Sign, prelude::*};
/// assert_eq!(1.seconds().sign(), Sign::Positive);
/// assert_eq!((-1).seconds().sign(), Sign::Negative);
/// assert_eq!(0.seconds().sign(), Sign::Zero);
/// ```
#[deprecated(
since = "0.2.7",
note = "To obtain the sign of a `Duration`, you should use the `is_positive`, \
`is_negative`, and `is_zero` methods."
)]
#[allow(deprecated)]
#[inline(always)]
pub fn sign(self) -> crate::Sign {
use crate::Sign::*;
if self.nanoseconds > 0 {
Positive
} else if self.nanoseconds < 0 {
Negative
} else if self.seconds > 0 {
Positive
} else if self.seconds < 0 {
Negative
} else {
Zero
}
}
/// Get the absolute value of the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.seconds().abs(), 1.seconds());
/// assert_eq!(0.seconds().abs(), 0.seconds());
/// assert_eq!((-1).seconds().abs(), 1.seconds());
/// ```
///
/// This function is `const fn` when using rustc >= 1.39.0.
#[inline(always)]
#[cfg(const_num_abs)]
pub const fn abs(self) -> Self {
Self {
seconds: self.seconds.abs(),
nanoseconds: self.nanoseconds.abs(),
}
}
/// Get the absolute value of the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.seconds().abs(), 1.seconds());
/// assert_eq!(0.seconds().abs(), 0.seconds());
/// assert_eq!((-1).seconds().abs(), 1.seconds());
/// ```
///
/// This function is `const fn` when using rustc >= 1.39.0.
#[inline(always)]
#[cfg(not(const_num_abs))]
pub fn abs(self) -> Self {
Self {
seconds: self.seconds.abs(),
nanoseconds: self.nanoseconds.abs(),
}
}
    /// Convert the existing `Duration` to a `std::time::Duration`, discarding its sign.
// This doesn't actually require the standard library, but is currently only
// used when it's enabled.
#[inline(always)]
#[cfg(std)]
pub(crate) fn abs_std(self) -> StdDuration {
StdDuration::new(self.seconds.abs() as u64, self.nanoseconds.abs() as u32)
}
/// Create a new `Duration` with the provided seconds and nanoseconds. If
/// nanoseconds is at least 10<sup>9</sup>, it will wrap to the number of
/// seconds.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::new(1, 0), 1.seconds());
/// assert_eq!(Duration::new(-1, 0), (-1).seconds());
/// assert_eq!(Duration::new(1, 2_000_000_000), 3.seconds());
/// ```
#[inline(always)]
pub const fn new(seconds: i64, nanoseconds: i32) -> Self {
Self {
seconds: seconds + nanoseconds as i64 / 1_000_000_000,
nanoseconds: nanoseconds % 1_000_000_000,
}
}
/// Create a new `Duration` with the given number of weeks. Equivalent to
/// `Duration::seconds(weeks * 604_800)`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::weeks(1), 604_800.seconds());
/// ```
#[inline(always)]
pub const fn weeks(weeks: i64) -> Self {
Self::seconds(weeks * SECONDS_PER_WEEK)
}
/// Get the number of whole weeks in the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.weeks().whole_weeks(), 1);
/// assert_eq!((-1).weeks().whole_weeks(), -1);
/// assert_eq!(6.days().whole_weeks(), 0);
/// assert_eq!((-6).days().whole_weeks(), 0);
/// ```
#[inline(always)]
pub const fn whole_weeks(self) -> i64 {
self.whole_seconds() / SECONDS_PER_WEEK
}
/// Create a new `Duration` with the given number of days. Equivalent to
/// `Duration::seconds(days * 86_400)`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::days(1), 86_400.seconds());
/// ```
#[inline(always)]
pub const fn days(days: i64) -> Self {
Self::seconds(days * SECONDS_PER_DAY)
}
/// Get the number of whole days in the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.days().whole_days(), 1);
/// assert_eq!((-1).days().whole_days(), -1);
/// assert_eq!(23.hours().whole_days(), 0);
/// assert_eq!((-23).hours().whole_days(), 0);
/// ```
#[inline(always)]
pub const fn whole_days(self) -> i64 {
self.whole_seconds() / SECONDS_PER_DAY
}
/// Create a new `Duration` with the given number of hours. Equivalent to
/// `Duration::seconds(hours * 3_600)`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::hours(1), 3_600.seconds());
/// ```
#[inline(always)]
pub const fn hours(hours: i64) -> Self {
Self::seconds(hours * SECONDS_PER_HOUR)
}
/// Get the number of whole hours in the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.hours().whole_hours(), 1);
/// assert_eq!((-1).hours().whole_hours(), -1);
/// assert_eq!(59.minutes().whole_hours(), 0);
/// assert_eq!((-59).minutes().whole_hours(), 0);
/// ```
#[inline(always)]
pub const fn whole_hours(self) -> i64 {
self.whole_seconds() / SECONDS_PER_HOUR
}
/// Create a new `Duration` with the given number of minutes. Equivalent to
/// `Duration::seconds(minutes * 60)`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::minutes(1), 60.seconds());
/// ```
#[inline(always)]
pub const fn minutes(minutes: i64) -> Self {
Self::seconds(minutes * SECONDS_PER_MINUTE)
}
/// Get the number of whole minutes in the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.minutes().whole_minutes(), 1);
/// assert_eq!((-1).minutes().whole_minutes(), -1);
/// assert_eq!(59.seconds().whole_minutes(), 0);
/// assert_eq!((-59).seconds().whole_minutes(), 0);
/// ```
#[inline(always)]
pub const fn whole_minutes(self) -> i64 {
self.whole_seconds() / SECONDS_PER_MINUTE
}
/// Create a new `Duration` with the given number of seconds.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::seconds(1), 1_000.milliseconds());
/// ```
#[inline(always)]
pub const fn seconds(seconds: i64) -> Self {
Self {
seconds,
nanoseconds: 0,
}
}
/// Get the number of whole seconds in the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.seconds().whole_seconds(), 1);
/// assert_eq!((-1).seconds().whole_seconds(), -1);
/// assert_eq!(1.minutes().whole_seconds(), 60);
/// assert_eq!((-1).minutes().whole_seconds(), -60);
/// ```
#[inline(always)]
pub const fn whole_seconds(self) -> i64 {
self.seconds
}
/// Creates a new `Duration` from the specified number of seconds
/// represented as `f64`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::seconds_f64(0.5), 0.5.seconds());
/// assert_eq!(Duration::seconds_f64(-0.5), -0.5.seconds());
/// ```
#[inline(always)]
pub fn seconds_f64(seconds: f64) -> Self {
Self {
seconds: seconds as i64,
nanoseconds: ((seconds % 1.) * 1_000_000_000.) as i32,
}
}
/// Get the number of fractional seconds in the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.5.seconds().as_seconds_f64(), 1.5);
/// assert_eq!((-1.5).seconds().as_seconds_f64(), -1.5);
/// ```
#[inline(always)]
pub fn as_seconds_f64(self) -> f64 {
self.seconds as f64 + self.nanoseconds as f64 / 1_000_000_000.
}
/// Creates a new `Duration` from the specified number of seconds
/// represented as `f32`.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::seconds_f32(0.5), 0.5.seconds());
/// assert_eq!(Duration::seconds_f32(-0.5), (-0.5).seconds());
/// ```
#[inline(always)]
pub fn seconds_f32(seconds: f32) -> Self {
Self {
seconds: seconds as i64,
nanoseconds: ((seconds % 1.) * 1_000_000_000.) as i32,
}
}
/// Get the number of fractional seconds in the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.5.seconds().as_seconds_f32(), 1.5);
/// assert_eq!((-1.5).seconds().as_seconds_f32(), -1.5);
/// ```
#[inline(always)]
pub fn as_seconds_f32(self) -> f32 {
self.seconds as f32 + self.nanoseconds as f32 / 1_000_000_000.
}
/// Create a new `Duration` with the given number of milliseconds.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::milliseconds(1), 1_000.microseconds());
/// assert_eq!(Duration::milliseconds(-1), (-1_000).microseconds());
/// ```
#[inline(always)]
pub const fn milliseconds(milliseconds: i64) -> Self {
Self {
seconds: milliseconds / 1_000,
nanoseconds: ((milliseconds % 1_000) * 1_000_000) as i32,
}
}
/// Get the number of whole milliseconds in the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.seconds().whole_milliseconds(), 1_000);
/// assert_eq!((-1).seconds().whole_milliseconds(), -1_000);
/// assert_eq!(1.milliseconds().whole_milliseconds(), 1);
/// assert_eq!((-1).milliseconds().whole_milliseconds(), -1);
/// ```
#[inline(always)]
pub const fn whole_milliseconds(self) -> i128 {
self.seconds as i128 * 1_000 + self.nanoseconds as i128 / 1_000_000
}
/// Get the number of milliseconds past the number of whole seconds.
///
/// Always in the range `-1_000..1_000`.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.4.seconds().subsec_milliseconds(), 400);
/// assert_eq!((-1.4).seconds().subsec_milliseconds(), -400);
/// ```
    // The cast cannot truncate, as the value is guaranteed to be less than 1_000.
#[inline(always)]
pub const fn subsec_milliseconds(self) -> i16 {
(self.nanoseconds / 1_000_000) as i16
}
/// Create a new `Duration` with the given number of microseconds.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::microseconds(1), 1_000.nanoseconds());
/// assert_eq!(Duration::microseconds(-1), (-1_000).nanoseconds());
/// ```
#[inline(always)]
pub const fn microseconds(microseconds: i64) -> Self {
Self {
seconds: microseconds / 1_000_000,
nanoseconds: ((microseconds % 1_000_000) * 1_000) as i32,
}
}
/// Get the number of whole microseconds in the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.milliseconds().whole_microseconds(), 1_000);
/// assert_eq!((-1).milliseconds().whole_microseconds(), -1_000);
/// assert_eq!(1.microseconds().whole_microseconds(), 1);
/// assert_eq!((-1).microseconds().whole_microseconds(), -1);
/// ```
#[inline(always)]
pub const fn whole_microseconds(self) -> i128 {
self.seconds as i128 * 1_000_000 + self.nanoseconds as i128 / 1_000
}
/// Get the number of microseconds past the number of whole seconds.
///
/// Always in the range `-1_000_000..1_000_000`.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.0004.seconds().subsec_microseconds(), 400);
/// assert_eq!((-1.0004).seconds().subsec_microseconds(), -400);
/// ```
#[inline(always)]
pub const fn subsec_microseconds(self) -> i32 {
self.nanoseconds / 1_000
}
/// Create a new `Duration` with the given number of nanoseconds.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(Duration::nanoseconds(1), 1.microseconds() / 1_000);
/// assert_eq!(Duration::nanoseconds(-1), (-1).microseconds() / 1_000);
/// ```
#[inline(always)]
pub const fn nanoseconds(nanoseconds: i64) -> Self {
Self {
seconds: nanoseconds / 1_000_000_000,
nanoseconds: (nanoseconds % 1_000_000_000) as i32,
}
}
/// Create a new `Duration` with the given number of nanoseconds.
///
/// As the input range cannot be fully mapped to the output, this should
/// only be used where it's known to result in a valid value.
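    ///
    /// For example, an input of `1_500_000_000` yields one whole second plus
    /// `500_000_000` nanoseconds.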
#[inline(always)]
pub(crate) const fn nanoseconds_i128(nanoseconds: i128) -> Self {
Self {
seconds: (nanoseconds / 1_000_000_000) as i64,
nanoseconds: (nanoseconds % 1_000_000_000) as i32,
}
}
/// Get the number of nanoseconds in the duration.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.microseconds().whole_nanoseconds(), 1_000);
/// assert_eq!((-1).microseconds().whole_nanoseconds(), -1_000);
/// assert_eq!(1.nanoseconds().whole_nanoseconds(), 1);
/// assert_eq!((-1).nanoseconds().whole_nanoseconds(), -1);
/// ```
#[inline(always)]
pub const fn whole_nanoseconds(self) -> i128 {
self.seconds as i128 * 1_000_000_000 + self.nanoseconds as i128
}
/// Get the number of nanoseconds past the number of whole seconds.
///
/// The returned value will always be in the range
/// `-1_000_000_000..1_000_000_000`.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(1.000_000_400.seconds().subsec_nanoseconds(), 400);
/// assert_eq!((-1.000_000_400).seconds().subsec_nanoseconds(), -400);
/// ```
#[inline(always)]
pub const fn subsec_nanoseconds(self) -> i32 {
self.nanoseconds
}
/// Computes `self + rhs`, returning `None` if an overflow occurred.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(5.seconds().checked_add(5.seconds()), Some(10.seconds()));
/// assert_eq!(Duration::max_value().checked_add(1.nanoseconds()), None);
/// assert_eq!((-5).seconds().checked_add(5.seconds()), Some(0.seconds()));
/// ```
#[inline]
pub fn checked_add(self, rhs: Self) -> Option<Self> {
let mut seconds = self.seconds.checked_add(rhs.seconds)?;
let mut nanoseconds = self.nanoseconds + rhs.nanoseconds;
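        // Carry between the two fields so that their signs match and
        // `nanoseconds` stays strictly within ±1_000_000_000.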
if nanoseconds >= 1_000_000_000 || seconds < 0 && nanoseconds > 0 {
nanoseconds -= 1_000_000_000;
seconds = seconds.checked_add(1)?;
} else if nanoseconds <= -1_000_000_000 || seconds > 0 && nanoseconds < 0 {
nanoseconds += 1_000_000_000;
seconds = seconds.checked_sub(1)?;
}
// Ensure that the signs match _unless_ one of them is zero.
debug_assert_ne!(seconds.signum() * nanoseconds.signum() as i64, -1);
debug_assert!((-999_999_999..1_000_000_000).contains(&nanoseconds));
Some(Self {
seconds,
nanoseconds,
})
}
/// Computes `self - rhs`, returning `None` if an overflow occurred.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(5.seconds().checked_sub(5.seconds()), Some(Duration::zero()));
/// assert_eq!(Duration::min_value().checked_sub(1.nanoseconds()), None);
/// assert_eq!(5.seconds().checked_sub(10.seconds()), Some((-5).seconds()));
/// ```
#[inline(always)]
pub fn checked_sub(self, rhs: Self) -> Option<Self> {
self.checked_add(-rhs)
}
/// Computes `self * rhs`, returning `None` if an overflow occurred.
///
/// ```rust
/// # use time::{Duration, prelude::*};
/// assert_eq!(5.seconds().checked_mul(2), Some(10.seconds()));
/// assert_eq!(5.seconds().checked_mul(-2), Some((-10).seconds()));
/// assert_eq!(5.seconds().checked_mul(0), Some(0.seconds()));
/// assert_eq!(Duration::max_value().checked_mul(2), None);
/// assert_eq!(Duration::min_value().checked_mul(2), None);
/// ```
#[inline(always)]
pub fn checked_mul(self, rhs: i32) -> Option<Self> {
// Multiply nanoseconds as i64, because it cannot overflow that way.
let total_nanos = self.nanoseconds as i64 * rhs as i64;
let extra_secs = total_nanos / 1_000_000_000;
let nanoseconds = (total_nanos % 1_000_000_000) as i32;
let seconds = self
.seconds
.checked_mul(rhs as i64)?
.checked_add(extra_secs)?;
Some(Self {
seconds,
nanoseconds,
})
}
/// Computes `self / rhs`, returning `None` if `rhs == 0`.
///
/// ```rust
/// # use time::prelude::*;
/// assert_eq!(10.seconds().checked_div(2), Some(5.seconds()));
/// assert_eq!(10.seconds().checked_div(-2), Some((-5).seconds()));
    /// assert_eq!(1.seconds().checked_div(0), None);
    /// ```
#[inline(always)]
pub fn checked_div(self, rhs: i32) -> Option<Self> {
if rhs == 0 {
return None;
}
let seconds = self.seconds / (rhs as i64);
let carry = self.seconds - seconds * (rhs as i64);
let extra_nanos = carry * 1_000_000_000 / (rhs as i64);
let nanoseconds = self.nanoseconds / rhs + (extra_nanos as i32);
Some(Self {
seconds,
nanoseconds,
})
}
/// Runs a closure, returning the duration of time it took to run. The
/// return value of the closure is provided in the second part of the tuple.
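    ///
    /// ```rust
    /// # use time::Duration;
    /// let (duration, value) = Duration::time_fn(|| 4 + 4);
    /// assert_eq!(value, 8);
    /// assert!(duration >= Duration::zero());
    /// ```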
#[inline(always)]
#[cfg(std)]
#[cfg_attr(docs, doc(cfg(feature = "std")))]
pub fn time_fn<T>(f: impl FnOnce() -> T) -> (Self, T) {
let start = Instant::now();
let return_value = f();
let end = Instant::now();
(end - start, return_value)
}
}
/// Functions that have been renamed or had signatures changed since v0.1. As
/// such, they are deprecated.
#[cfg(v01_deprecated_api)]
#[cfg_attr(tarpaulin, skip)]
#[allow(clippy::missing_docs_in_private_items, clippy::missing_const_for_fn)]
impl Duration {
#[inline(always)]
#[deprecated(since = "0.2.0", note = "Use the `whole_weeks` function")]
pub fn num_weeks(&self) -> i64 {
self.whole_weeks()
}
#[inline(always)]
#[deprecated(since = "0.2.0", note = "Use the `whole_days` function")]
pub fn num_days(&self) -> i64 {
self.whole_days()
}
#[inline(always)]
#[deprecated(since = "0.2.0", note = "Use the `whole_hours` function")]
pub fn num_hours(&self) -> i64 {
self.whole_hours()
}
#[inline(always)]
#[deprecated(since = "0.2.0", note = "Use the `whole_minutes` function")]
pub fn num_minutes(&self) -> i64 {
self.whole_minutes()
}
#[allow(clippy::missing_const_for_fn)]
#[inline(always)]
#[deprecated(since = "0.2.0", note = "Use the `whole_seconds` function")]
pub fn num_seconds(&self) -> i64 {
self.whole_seconds()
}
/// [`Duration::whole_milliseconds`] returns an `i128`, rather than
/// panicking on overflow. To avoid panicking, this method currently limits
/// the value to the range `i64::min_value()..=i64::max_value()`.
#[inline]
#[deprecated(
since = "0.2.0",
note = "Use the `whole_milliseconds` function. The value is clamped between \
`i64::min_value()` and `i64::max_value()`."
)]
pub fn num_milliseconds(&self) -> i64 {
let millis = self.whole_milliseconds();
if millis > i64::max_value() as i128 {
return i64::max_value();
}
if millis < i64::min_value() as i128 {
return i64::min_value();
}
millis as i64
}
/// [`Duration::whole_microseconds`] returns an `i128` rather than returning
/// `None` on `i64` overflow.
#[inline(always)]
#[deprecated(since = "0.2.0", note = "Use the `whole_microseconds` function")]
pub fn num_microseconds(&self) -> Option<i64> {
let micros = self.whole_microseconds();
if micros.abs() > i64::max_value() as i128 {
None
} else {
Some(micros as i64)
}
}
/// [`Duration::whole_nanoseconds`] returns an `i128` rather than returning
/// `None` on `i64` overflow.
#[inline(always)]
#[deprecated(since = "0.2.0", note = "Use the `whole_nanoseconds` function")]
pub fn num_nanoseconds(&self) -> Option<i64> {
let nanos = self.whole_nanoseconds();
if nanos.abs() > i64::max_value() as i128 {
None
} else {
Some(nanos as i64)
}
}
#[inline(always)]
#[cfg(std)]
#[deprecated(since = "0.2.0", note = "Use the `time_fn` function")]
pub fn span<F: FnOnce()>(f: F) -> Self {
Self::time_fn(f).0
}
#[inline(always)]
#[allow(deprecated)]
#[deprecated(
since = "0.2.0",
note = "Use `Duration::try_from(value)` or `value.try_into()`"
)]
pub fn from_std(std: StdDuration) -> Result<Self, ConversionRangeError> {
std.try_into()
}
#[inline(always)]
#[allow(deprecated)]
#[deprecated(
since = "0.2.0",
note = "Use `std::time::Duration::try_from(value)` or `value.try_into()`"
)]
pub fn to_std(&self) -> Result<StdDuration, ConversionRangeError> {
(*self).try_into()
}
}
impl TryFrom<StdDuration> for Duration {
type Error = ConversionRangeError;
#[inline(always)]
fn try_from(original: StdDuration) -> Result<Self, ConversionRangeError> {
Ok(Self::new(
original
.as_secs()
.try_into()
.map_err(|_| ConversionRangeError::new())?,
original
.subsec_nanos()
.try_into()
.map_err(|_| ConversionRangeError::new())?,
))
}
}
impl TryFrom<Duration> for StdDuration {
type Error = ConversionRangeError;
#[inline(always)]
fn try_from(duration: Duration) -> Result<Self, ConversionRangeError> {
Ok(Self::new(
duration
.seconds
.try_into()
.map_err(|_| ConversionRangeError::new())?,
duration
.nanoseconds
.try_into()
.map_err(|_| ConversionRangeError::new())?,
))
}
}
impl Add for Duration {
type Output = Self;
#[inline]
fn add(self, rhs: Self) -> Self::Output {
self.checked_add(rhs)
.expect("overflow when adding durations")
}
}
impl Add<StdDuration> for Duration {
type Output = Self;
#[inline(always)]
fn add(self, std_duration: StdDuration) -> Self::Output {
self + Self::try_from(std_duration)
.expect("overflow converting `std::time::Duration` to `time::Duration`")
}
}
impl Add<Duration> for StdDuration {
type Output = Duration;
#[inline(always)]
fn add(self, rhs: Duration) -> Self::Output {
rhs + self
}
}
impl AddAssign for Duration {
#[inline(always)]
fn add_assign(&mut self, rhs: Self) {
*self = *self + rhs;
}
}
impl AddAssign<StdDuration> for Duration {
#[inline(always)]
fn add_assign(&mut self, rhs: StdDuration) {
*self = *self + rhs;
}
}
impl Neg for Duration {
type Output = Self;
#[inline(always)]
fn neg(self) -> Self::Output {
-1 * self
}
}
impl Sub for Duration {
type Output = Self;
#[inline]
fn sub(self, rhs: Self) -> Self::Output {
self.checked_sub(rhs)
.expect("overflow when subtracting durations")
}
}
impl Sub<StdDuration> for Duration {
type Output = Self;
#[inline(always)]
fn sub(self, rhs: StdDuration) -> Self::Output {
self - Self::try_from(rhs)
.expect("overflow converting `std::time::Duration` to `time::Duration`")
}
}
impl Sub<Duration> for StdDuration {
type Output = Duration;
#[inline(always)]
fn sub(self, rhs: Duration) -> Self::Output {
Duration::try_from(self)
.expect("overflow converting `std::time::Duration` to `time::Duration`")
- rhs
}
}
impl SubAssign for Duration {
#[inline(always)]
fn sub_assign(&mut self, rhs: Self) {
*self = *self - rhs;
}
}
impl SubAssign<StdDuration> for Duration {
#[inline(always)]
fn sub_assign(&mut self, rhs: StdDuration) {
*self = *self - rhs;
}
}
impl SubAssign<Duration> for StdDuration {
#[inline(always)]
fn sub_assign(&mut self, rhs: Duration) {
*self = (*self - rhs).try_into().expect(
"Cannot represent a resulting duration in std. Try `let x = x - rhs;`, which will \
change the type.",
);
}
}
macro_rules! duration_mul_div_int {
($($type:ty),+) => {
$(
impl Mul<$type> for Duration {
type Output = Self;
#[inline(always)]
fn mul(self, rhs: $type) -> Self::Output {
Self::nanoseconds_i128(
self.whole_nanoseconds()
.checked_mul(rhs as i128)
.expect("overflow when multiplying duration")
)
}
}
impl MulAssign<$type> for Duration {
#[inline(always)]
fn mul_assign(&mut self, rhs: $type) {
*self = *self * rhs;
}
}
impl Mul<Duration> for $type {
type Output = Duration;
#[inline(always)]
fn mul(self, rhs: Duration) -> Self::Output {
rhs * self
}
}
impl Div<$type> for Duration {
type Output = Self;
#[inline(always)]
fn div(self, rhs: $type) -> Self::Output {
Self::nanoseconds_i128(self.whole_nanoseconds() / rhs as i128)
}
}
impl DivAssign<$type> for Duration {
#[inline(always)]
fn div_assign(&mut self, rhs: $type) {
*self = *self / rhs;
}
}
)+
};
}
duration_mul_div_int![i8, i16, i32, u8, u16, u32];
impl Mul<f32> for Duration {
type Output = Self;
#[inline(always)]
fn mul(self, rhs: f32) -> Self::Output {
Self::seconds_f32(self.as_seconds_f32() * rhs)
}
}
impl MulAssign<f32> for Duration {
#[inline(always)]
fn mul_assign(&mut self, rhs: f32) {
*self = *self * rhs;
}
}
impl Mul<Duration> for f32 {
type Output = Duration;
#[inline(always)]
fn mul(self, rhs: Duration) -> Self::Output {
rhs * self
}
}
impl Mul<f64> for Duration {
type Output = Self;
#[inline(always)]
fn mul(self, rhs: f64) -> Self::Output {
Self::seconds_f64(self.as_seconds_f64() * rhs)
}
}
impl MulAssign<f64> for Duration {
#[inline(always)]
fn mul_assign(&mut self, rhs: f64) {
*self = *self * rhs;
}
}
impl Mul<Duration> for f64 {
type Output = Duration;
#[inline(always)]
fn mul(self, rhs: Duration) -> Self::Output {
rhs * self
}
}
impl Div<f32> for Duration {
type Output = Self;
#[inline(always)]
fn div(self, rhs: f32) -> Self::Output {
Self::seconds_f32(self.as_seconds_f32() / rhs)
}
}
impl DivAssign<f32> for Duration {
#[inline(always)]
fn div_assign(&mut self, rhs: f32) {
*self = *self / rhs;
}
}
impl Div<f64> for Duration {
type Output = Self;
#[inline(always)]
fn div(self, rhs: f64) -> Self::Output {
Self::seconds_f64(self.as_seconds_f64() / rhs)
}
}
impl DivAssign<f64> for Duration {
#[inline(always)]
fn div_assign(&mut self, rhs: f64) {
*self = *self / rhs;
}
}
impl Div<Duration> for Duration {
type Output = f64;
#[inline(always)]
fn div(self, rhs: Self) -> Self::Output {
self.as_seconds_f64() / rhs.as_seconds_f64()
}
}
impl Div<StdDuration> for Duration {
type Output = f64;
#[inline(always)]
fn div(self, rhs: StdDuration) -> Self::Output {
self.as_seconds_f64() / rhs.as_secs_f64()
}
}
impl Div<Duration> for StdDuration {
type Output = f64;
#[inline(always)]
fn div(self, rhs: Duration) -> Self::Output {
self.as_secs_f64() / rhs.as_seconds_f64()
}
}
impl PartialEq<StdDuration> for Duration {
#[inline(always)]
fn eq(&self, rhs: &StdDuration) -> bool {
Ok(*self) == Self::try_from(*rhs)
}
}
impl PartialEq<Duration> for StdDuration {
#[inline(always)]
fn eq(&self, rhs: &Duration) -> bool {
rhs == self
}
}
impl PartialOrd for Duration {
#[inline(always)]
fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
Some(self.cmp(rhs))
}
}
impl PartialOrd<StdDuration> for Duration {
#[inline(always)]
fn partial_cmp(&self, rhs: &StdDuration) -> Option<Ordering> {
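        // A std duration whose whole seconds exceed `i64::max_value()` cannot
        // be represented as a `Duration`, so `self` is necessarily smaller.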
if rhs.as_secs() > i64::max_value() as u64 {
            return Some(Less);
}
match self.seconds.partial_cmp(&(rhs.as_secs() as i64)) {
Some(Less) => Some(Less),
Some(Equal) => self.nanoseconds.partial_cmp(&(rhs.subsec_nanos() as i32)),
Some(Greater) => Some(Greater),
None => None,
}
}
}
impl PartialOrd<Duration> for StdDuration {
#[inline(always)]
fn partial_cmp(&self, rhs: &Duration) -> Option<Ordering> {
match rhs.partial_cmp(self) {
Some(Less) => Some(Greater),
Some(Equal) => Some(Equal),
Some(Greater) => Some(Less),
None => None,
}
}
}
impl Ord for Duration {
#[inline]
fn cmp(&self, rhs: &Self) -> Ordering {
match self.seconds.cmp(&rhs.seconds) {
Less => Less,
Equal => self.nanoseconds.cmp(&rhs.nanoseconds),
Greater => Greater,
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn unit_values() {
assert_eq!(Duration::zero(), 0.seconds());
assert_eq!(Duration::nanosecond(), 1.nanoseconds());
assert_eq!(Duration::microsecond(), 1.microseconds());
assert_eq!(Duration::millisecond(), 1.milliseconds());
assert_eq!(Duration::second(), 1.seconds());
assert_eq!(Duration::minute(), 60.seconds());
assert_eq!(Duration::hour(), 3_600.seconds());
assert_eq!(Duration::day(), 86_400.seconds());
assert_eq!(Duration::week(), 604_800.seconds());
}
#[test]
fn is_zero() {
assert!(!(-1).nanoseconds().is_zero());
assert!(0.seconds().is_zero());
assert!(!1.nanoseconds().is_zero());
}
#[test]
fn is_negative() {
assert!((-1).seconds().is_negative());
assert!(!0.seconds().is_negative());
assert!(!1.seconds().is_negative());
}
#[test]
fn is_positive() {
assert!(!(-1).seconds().is_positive());
assert!(!0.seconds().is_positive());
assert!(1.seconds().is_positive());
}
#[allow(deprecated)]
#[test]
fn sign() {
use crate::Sign::*;
assert_eq!(1.seconds().sign(), Positive);
assert_eq!((-1).seconds().sign(), Negative);
assert_eq!(0.seconds().sign(), Zero);
}
#[test]
fn abs() {
assert_eq!(1.seconds().abs(), 1.seconds());
assert_eq!(0.seconds().abs(), 0.seconds());
assert_eq!((-1).seconds().abs(), 1.seconds());
}
#[test]
fn new() {
assert_eq!(Duration::new(1, 0), 1.seconds());
assert_eq!(Duration::new(-1, 0), (-1).seconds());
assert_eq!(Duration::new(1, 2_000_000_000), 3.seconds());
assert!(Duration::new(0, 0).is_zero());
assert!(Duration::new(0, 1_000_000_000).is_positive());
assert!(Duration::new(-1, 1_000_000_000).is_zero());
assert!(Duration::new(-2, 1_000_000_000).is_negative());
}
#[test]
fn weeks() {
assert_eq!(Duration::weeks(1), 604_800.seconds());
assert_eq!(Duration::weeks(2), (2 * 604_800).seconds());
assert_eq!(Duration::weeks(-1), (-604_800).seconds());
assert_eq!(Duration::weeks(-2), (2 * -604_800).seconds());
}
#[test]
fn whole_weeks() {
assert_eq!(Duration::weeks(1).whole_weeks(), 1);
assert_eq!(Duration::weeks(-1).whole_weeks(), -1);
assert_eq!(Duration::days(6).whole_weeks(), 0);
assert_eq!(Duration::days(-6).whole_weeks(), 0);
}
#[test]
fn days() {
assert_eq!(Duration::days(1), 86_400.seconds());
assert_eq!(Duration::days(2), (2 * 86_400).seconds());
assert_eq!(Duration::days(-1), (-86_400).seconds());
assert_eq!(Duration::days(-2), (2 * -86_400).seconds());
}
#[test]
fn whole_days() {
assert_eq!(Duration::days(1).whole_days(), 1);
assert_eq!(Duration::days(-1).whole_days(), -1);
assert_eq!(Duration::hours(23).whole_days(), 0);
assert_eq!(Duration::hours(-23).whole_days(), 0);
}
#[test]
fn hours() {
assert_eq!(Duration::hours(1), 3_600.seconds());
assert_eq!(Duration::hours(2), (2 * 3_600).seconds());
assert_eq!(Duration::hours(-1), (-3_600).seconds());
assert_eq!(Duration::hours(-2), (2 * -3_600).seconds());
}
#[test]
fn whole_hours() {
assert_eq!(Duration::hours(1).whole_hours(), 1);
assert_eq!(Duration::hours(-1).whole_hours(), -1);
assert_eq!(Duration::minutes(59).whole_hours(), 0);
assert_eq!(Duration::minutes(-59).whole_hours(), 0);
}
#[test]
fn minutes() {
assert_eq!(Duration::minutes(1), 60.seconds());
assert_eq!(Duration::minutes(2), (2 * 60).seconds());
assert_eq!(Duration::minutes(-1), (-60).seconds());
assert_eq!(Duration::minutes(-2), (2 * -60).seconds());
}
#[test]
fn whole_minutes() {
assert_eq!(1.minutes().whole_minutes(), 1);
assert_eq!((-1).minutes().whole_minutes(), -1);
assert_eq!(59.seconds().whole_minutes(), 0);
assert_eq!((-59).seconds().whole_minutes(), 0);
}
#[test]
fn seconds() {
assert_eq!(Duration::seconds(1), 1_000.milliseconds());
assert_eq!(Duration::seconds(2), (2 * 1_000).milliseconds());
assert_eq!(Duration::seconds(-1), (-1_000).milliseconds());
assert_eq!(Duration::seconds(-2), (2 * -1_000).milliseconds());
}
#[test]
fn whole_seconds() {
assert_eq!(1.seconds().whole_seconds(), 1);
assert_eq!((-1).seconds().whole_seconds(), -1);
assert_eq!(1.minutes().whole_seconds(), 60);
assert_eq!((-1).minutes().whole_seconds(), -60);
}
#[test]
fn seconds_f64() {
assert_eq!(Duration::seconds_f64(0.5), 0.5.seconds());
assert_eq!(Duration::seconds_f64(-0.5), (-0.5).seconds());
}
#[test]
#[allow(clippy::float_cmp)]
fn as_seconds_f64() {
assert_eq!(1.seconds().as_seconds_f64(), 1.0);
assert_eq!((-1).seconds().as_seconds_f64(), -1.0);
assert_eq!(1.minutes().as_seconds_f64(), 60.0);
assert_eq!((-1).minutes().as_seconds_f64(), -60.0);
assert_eq!(1.5.seconds().as_seconds_f64(), 1.5);
assert_eq!((-1.5).seconds().as_seconds_f64(), -1.5);
}
#[test]
fn seconds_f32() {
assert_eq!(Duration::seconds_f32(0.5), 0.5.seconds());
assert_eq!(Duration::seconds_f32(-0.5), (-0.5).seconds());
}
#[test]
#[allow(clippy::float_cmp)]
fn as_seconds_f32() {
assert_eq!(1.seconds().as_seconds_f32(), 1.0);
assert_eq!((-1).seconds().as_seconds_f32(), -1.0);
assert_eq!(1.minutes().as_seconds_f32(), 60.0);
assert_eq!((-1).minutes().as_seconds_f32(), -60.0);
assert_eq!(1.5.seconds().as_seconds_f32(), 1.5);
assert_eq!((-1.5).seconds().as_seconds_f32(), -1.5);
}
#[test]
fn milliseconds() {
assert_eq!(Duration::milliseconds(1), 1_000.microseconds());
assert_eq!(Duration::milliseconds(-1), (-1000).microseconds());
}
#[test]
fn whole_milliseconds() {
assert_eq!(1.seconds().whole_milliseconds(), 1_000);
assert_eq!((-1).seconds().whole_milliseconds(), -1_000);
assert_eq!(1.milliseconds().whole_milliseconds(), 1);
assert_eq!((-1).milliseconds().whole_milliseconds(), -1);
}
#[test]
fn subsec_milliseconds() {
assert_eq!(1.4.seconds().subsec_milliseconds(), 400);
assert_eq!((-1.4).seconds().subsec_milliseconds(), -400);
}
#[test]
fn microseconds() {
assert_eq!(Duration::microseconds(1), 1_000.nanoseconds());
assert_eq!(Duration::microseconds(-1), (-1_000).nanoseconds());
}
#[test]
fn whole_microseconds() {
assert_eq!(1.milliseconds().whole_microseconds(), 1_000);
assert_eq!((-1).milliseconds().whole_microseconds(), -1_000);
assert_eq!(1.microseconds().whole_microseconds(), 1);
assert_eq!((-1).microseconds().whole_microseconds(), -1);
}
#[test]
fn subsec_microseconds() {
assert_eq!(1.0004.seconds().subsec_microseconds(), 400);
assert_eq!((-1.0004).seconds().subsec_microseconds(), -400);
}
#[test]
fn nanoseconds() {
assert_eq!(Duration::nanoseconds(1), 1.microseconds() / 1_000);
assert_eq!(Duration::nanoseconds(-1), (-1).microseconds() / 1_000);
}
#[test]
fn whole_nanoseconds() {
assert_eq!(1.microseconds().whole_nanoseconds(), 1_000);
assert_eq!((-1).microseconds().whole_nanoseconds(), -1_000);
assert_eq!(1.nanoseconds().whole_nanoseconds(), 1);
assert_eq!((-1).nanoseconds().whole_nanoseconds(), -1);
}
#[test]
fn subsec_nanoseconds() {
assert_eq!(1.000_000_4.seconds().subsec_nanoseconds(), 400);
assert_eq!((-1.000_000_4).seconds().subsec_nanoseconds(), -400);
}
#[test]
#[allow(deprecated)]
fn checked_add() {
assert_eq!(5.seconds().checked_add(5.seconds()), Some(10.seconds()));
assert_eq!(Duration::max_value().checked_add(1.nanoseconds()), None);
assert_eq!((-5).seconds().checked_add(5.seconds()), Some(0.seconds()));
}
#[test]
#[allow(deprecated)]
fn checked_sub() {
assert_eq!(5.seconds().checked_sub(5.seconds()), Some(0.seconds()));
assert_eq!(Duration::min_value().checked_sub(1.nanoseconds()), None);
assert_eq!(5.seconds().checked_sub(10.seconds()), Some((-5).seconds()));
}
#[test]
#[allow(deprecated)]
fn checked_mul() {
assert_eq!(5.seconds().checked_mul(2), Some(10.seconds()));
assert_eq!(5.seconds().checked_mul(-2), Some((-10).seconds()));
assert_eq!(5.seconds().checked_mul(0), Some(Duration::zero()));
assert_eq!(Duration::max_value().checked_mul(2), None);
assert_eq!(Duration::min_value().checked_mul(2), None);
}
#[test]
fn checked_div() {
assert_eq!(10.seconds().checked_div(2), Some(5.seconds()));
assert_eq!(10.seconds().checked_div(-2), Some((-5).seconds()));
assert_eq!(1.seconds().checked_div(0), None);
}
#[test]
#[cfg(std)]
fn time_fn() {
let (time, value) = Duration::time_fn(|| {
std::thread::sleep(100.std_milliseconds());
0
});
assert!(time >= 100.milliseconds());
assert_eq!(value, 0);
}
#[test]
fn try_from_std_duration() {
assert_eq!(Duration::try_from(0.std_seconds()), Ok(0.seconds()));
assert_eq!(Duration::try_from(1.std_seconds()), Ok(1.seconds()));
}
#[test]
fn try_to_std_duration() {
assert_eq!(StdDuration::try_from(0.seconds()), Ok(0.std_seconds()));
assert_eq!(StdDuration::try_from(1.seconds()), Ok(1.std_seconds()));
assert!(StdDuration::try_from((-1).seconds()).is_err());
}
#[test]
fn add() {
assert_eq!(1.seconds() + 1.seconds(), 2.seconds());
assert_eq!(500.milliseconds() + 500.milliseconds(), 1.seconds());
assert_eq!(1.seconds() + (-1).seconds(), 0.seconds());
}
#[test]
fn add_std() {
assert_eq!(1.seconds() + 1.std_seconds(), 2.seconds());
assert_eq!(500.milliseconds() + 500.std_milliseconds(), 1.seconds());
assert_eq!((-1).seconds() + 1.std_seconds(), 0.seconds());
}
#[test]
fn std_add() {
assert_eq!(1.std_seconds() + 1.seconds(), 2.seconds());
assert_eq!(500.std_milliseconds() + 500.milliseconds(), 1.seconds());
assert_eq!(1.std_seconds() + (-1).seconds(), 0.seconds());
}
#[test]
fn add_assign() {
let mut duration = 1.seconds();
duration += 1.seconds();
assert_eq!(duration, 2.seconds());
let mut duration = 500.milliseconds();
duration += 500.milliseconds();
assert_eq!(duration, 1.seconds());
let mut duration = 1.seconds();
duration += (-1).seconds();
assert_eq!(duration, 0.seconds());
}
#[test]
fn add_assign_std() {
let mut duration = 1.seconds();
duration += 1.std_seconds();
assert_eq!(duration, 2.seconds());
let mut duration = 500.milliseconds();
duration += 500.std_milliseconds();
assert_eq!(duration, 1.seconds());
let mut duration = (-1).seconds();
duration += 1.std_seconds();
assert_eq!(duration, 0.seconds());
}
#[test]
fn neg() {
assert_eq!(-(1.seconds()), (-1).seconds());
assert_eq!(-(-1).seconds(), 1.seconds());
assert_eq!(-(0.seconds()), 0.seconds());
}
#[test]
fn sub() {
assert_eq!(1.seconds() - 1.seconds(), 0.seconds());
assert_eq!(1_500.milliseconds() - 500.milliseconds(), 1.seconds());
assert_eq!(1.seconds() - (-1).seconds(), 2.seconds());
}
#[test]
fn sub_std() {
assert_eq!(1.seconds() - 1.std_seconds(), 0.seconds());
assert_eq!(1_500.milliseconds() - 500.std_milliseconds(), 1.seconds());
assert_eq!((-1).seconds() - 1.std_seconds(), (-2).seconds());
}
#[test]
fn std_sub() {
assert_eq!(1.std_seconds() - 1.seconds(), 0.seconds());
assert_eq!(1_500.std_milliseconds() - 500.milliseconds(), 1.seconds());
assert_eq!(1.std_seconds() - (-1).seconds(), 2.seconds());
}
#[test]
fn sub_assign() {
let mut duration = 1.seconds();
duration -= 1.seconds();
assert_eq!(duration, 0.seconds());
let mut duration = 1_500.milliseconds();
duration -= 500.milliseconds();
assert_eq!(duration, 1.seconds());
let mut duration = 1.seconds();
duration -= (-1).seconds();
assert_eq!(duration, 2.seconds());
}
#[test]
fn sub_assign_std() {
let mut duration = 1.seconds();
duration -= 1.std_seconds();
assert_eq!(duration, 0.seconds());
let mut duration = 1_500.milliseconds();
duration -= 500.std_milliseconds();
assert_eq!(duration, 1.seconds());
let mut duration = (-1).seconds();
duration -= 1.std_seconds();
assert_eq!(duration, (-2).seconds());
}
#[test]
fn std_sub_assign() {
let mut duration = 1.std_seconds();
duration -= 1.seconds();
assert_eq!(duration, 0.seconds());
let mut duration = 1_500.std_milliseconds();
duration -= 500.milliseconds();
assert_eq!(duration, 1.seconds());
#[cfg(std)]
{
let mut duration = 1.std_seconds();
assert_panics!(duration -= 2.seconds());
}
}
#[test]
fn mul_int() {
assert_eq!(1.seconds() * 2, 2.seconds());
assert_eq!(1.seconds() * -2, (-2).seconds());
}
#[test]
fn mul_int_assign() {
let mut duration = 1.seconds();
duration *= 2;
assert_eq!(duration, 2.seconds());
let mut duration = 1.seconds();
duration *= -2;
assert_eq!(duration, (-2).seconds());
}
#[test]
fn int_mul() {
assert_eq!(2 * 1.seconds(), 2.seconds());
assert_eq!(-2 * 1.seconds(), (-2).seconds());
}
#[test]
fn div_int() {
assert_eq!(1.seconds() / 2, 500.milliseconds());
assert_eq!(1.seconds() / -2, (-500).milliseconds());
}
#[test]
fn div_int_assign() {
let mut duration = 1.seconds();
duration /= 2;
assert_eq!(duration, 500.milliseconds());
let mut duration = 1.seconds();
duration /= -2;
assert_eq!(duration, (-500).milliseconds());
}
#[test]
fn mul_float() {
assert_eq!(1.seconds() * 1.5_f32, 1_500.milliseconds());
assert_eq!(1.seconds() * 2.5_f32, 2_500.milliseconds());
assert_eq!(1.seconds() * -1.5_f32, (-1_500).milliseconds());
assert_eq!(1.seconds() * 0_f32, 0.seconds());
assert_eq!(1.seconds() * 1.5_f64, 1_500.milliseconds());
assert_eq!(1.seconds() * 2.5_f64, 2_500.milliseconds());
assert_eq!(1.seconds() * -1.5_f64, (-1_500).milliseconds());
assert_eq!(1.seconds() * 0_f64, 0.seconds());
}
#[test]
fn float_mul() {
assert_eq!(1.5_f32 * 1.seconds(), 1_500.milliseconds());
assert_eq!(2.5_f32 * 1.seconds(), 2_500.milliseconds());
assert_eq!(-1.5_f32 * 1.seconds(), (-1_500).milliseconds());
assert_eq!(0_f32 * 1.seconds(), 0.seconds());
assert_eq!(1.5_f64 * 1.seconds(), 1_500.milliseconds());
assert_eq!(2.5_f64 * 1.seconds(), 2_500.milliseconds());
assert_eq!(-1.5_f64 * 1.seconds(), (-1_500).milliseconds());
assert_eq!(0_f64 * 1.seconds(), 0.seconds());
}
#[test]
fn mul_float_assign() {
let mut duration = 1.seconds();
duration *= 1.5_f32;
assert_eq!(duration, 1_500.milliseconds());
let mut duration = 1.seconds();
duration *= 2.5_f32;
assert_eq!(duration, 2_500.milliseconds());
let mut duration = 1.seconds();
duration *= -1.5_f32;
assert_eq!(duration, (-1_500).milliseconds());
let mut duration = 1.seconds();
duration *= 0_f32;
assert_eq!(duration, 0.seconds());
let mut duration = 1.seconds();
duration *= 1.5_f64;
assert_eq!(duration, 1_500.milliseconds());
let mut duration = 1.seconds();
duration *= 2.5_f64;
assert_eq!(duration, 2_500.milliseconds());
let mut duration = 1.seconds();
duration *= -1.5_f64;
assert_eq!(duration, (-1_500).milliseconds());
let mut duration = 1.seconds();
duration *= 0_f64;
assert_eq!(duration, 0.seconds());
}
#[test]
fn div_float() {
assert_eq!(1.seconds() / 1_f32, 1.seconds());
assert_eq!(1.seconds() / 2_f32, 500.milliseconds());
assert_eq!(1.seconds() / -1_f32, (-1).seconds());
assert_eq!(1.seconds() / 1_f64, 1.seconds());
assert_eq!(1.seconds() / 2_f64, 500.milliseconds());
assert_eq!(1.seconds() / -1_f64, (-1).seconds());
}
#[test]
fn div_float_assign() {
let mut duration = 1.seconds();
duration /= 1_f32;
assert_eq!(duration, 1.seconds());
let mut duration = 1.seconds();
duration /= 2_f32;
assert_eq!(duration, 500.milliseconds());
let mut duration = 1.seconds();
duration /= -1_f32;
assert_eq!(duration, (-1).seconds());
let mut duration = 1.seconds();
duration /= 1_f64;
assert_eq!(duration, 1.seconds());
let mut duration = 1.seconds();
duration /= 2_f64;
assert_eq!(duration, 500.milliseconds());
let mut duration = 1.seconds();
duration /= -1_f64;
assert_eq!(duration, (-1).seconds());
}
#[test]
fn partial_eq() {
assert_eq!(1.seconds(), 1.seconds());
assert_eq!(0.seconds(), 0.seconds());
assert_eq!((-1).seconds(), (-1).seconds());
assert_ne!(1.minutes(), (-1).minutes());
assert_ne!(40.seconds(), 1.minutes());
}
#[test]
fn partial_eq_std() {
assert_eq!(1.seconds(), 1.std_seconds());
assert_eq!(0.seconds(), 0.std_seconds());
assert_ne!((-1).seconds(), 1.std_seconds());
assert_ne!((-1).minutes(), 1.std_minutes());
assert_ne!(40.seconds(), 1.std_minutes());
}
#[test]
fn std_partial_eq() {
assert_eq!(1.std_seconds(), 1.seconds());
assert_eq!(0.std_seconds(), 0.seconds());
assert_ne!(1.std_seconds(), (-1).seconds());
assert_ne!(1.std_minutes(), (-1).minutes());
assert_ne!(40.std_seconds(), 1.minutes());
}
#[test]
fn partial_ord() {
assert_eq!(0.seconds().partial_cmp(&0.seconds()), Some(Equal));
assert_eq!(1.seconds().partial_cmp(&0.seconds()), Some(Greater));
assert_eq!(1.seconds().partial_cmp(&(-1).seconds()), Some(Greater));
assert_eq!((-1).seconds().partial_cmp(&1.seconds()), Some(Less));
assert_eq!(0.seconds().partial_cmp(&(-1).seconds()), Some(Greater));
assert_eq!(0.seconds().partial_cmp(&1.seconds()), Some(Less));
assert_eq!((-1).seconds().partial_cmp(&0.seconds()), Some(Less));
assert_eq!(1.minutes().partial_cmp(&1.seconds()), Some(Greater));
assert_eq!((-1).minutes().partial_cmp(&(-1).seconds()), Some(Less));
}
#[test]
fn partial_ord_std() {
assert_eq!(0.seconds().partial_cmp(&0.std_seconds()), Some(Equal));
assert_eq!(1.seconds().partial_cmp(&0.std_seconds()), Some(Greater));
assert_eq!((-1).seconds().partial_cmp(&1.std_seconds()), Some(Less));
assert_eq!(0.seconds().partial_cmp(&1.std_seconds()), Some(Less));
assert_eq!((-1).seconds().partial_cmp(&0.std_seconds()), Some(Less));
assert_eq!(1.minutes().partial_cmp(&1.std_seconds()), Some(Greater));
}
#[test]
fn std_partial_ord() {
assert_eq!(0.std_seconds().partial_cmp(&0.seconds()), Some(Equal));
assert_eq!(1.std_seconds().partial_cmp(&0.seconds()), Some(Greater));
assert_eq!(1.std_seconds().partial_cmp(&(-1).seconds()), Some(Greater));
assert_eq!(0.std_seconds().partial_cmp(&(-1).seconds()), Some(Greater));
assert_eq!(0.std_seconds().partial_cmp(&1.seconds()), Some(Less));
assert_eq!(1.std_minutes().partial_cmp(&1.seconds()), Some(Greater));
}
#[test]
fn ord() {
assert_eq!(0.seconds(), 0.seconds());
assert!(1.seconds() > 0.seconds());
assert!(1.seconds() > (-1).seconds());
assert!((-1).seconds() < 1.seconds());
assert!(0.seconds() > (-1).seconds());
assert!(0.seconds() < 1.seconds());
assert!((-1).seconds() < 0.seconds());
assert!(1.minutes() > 1.seconds());
assert!((-1).minutes() < (-1).seconds());
}
#[test]
fn arithmetic_regression() {
let added = 1.6.seconds() + 1.6.seconds();
assert_eq!(added.whole_seconds(), 3);
assert_eq!(added.subsec_milliseconds(), 200);
let subtracted = 1.6.seconds() - (-1.6).seconds();
assert_eq!(subtracted.whole_seconds(), 3);
assert_eq!(subtracted.subsec_milliseconds(), 200);
}
}
| 30.723324 | 95 | 0.555559 |
d770796d8f9a352b69736195b8d2f67bee62194d | 531 | use actix_web::{post, HttpResponse, web, Error};
use crate::models::{publish_request::PublishRequest, publish_response::PublishResponse};
#[post("/publish")]
pub async fn publish(req: web::Json<PublishRequest>) -> Result<HttpResponse, Error> {
    let res = translate_to_cache(req)?;
    Ok(HttpResponse::Ok().json(res.0)) // <- send response
}
fn translate_to_cache(_req: web::Json<PublishRequest>) -> Result<web::Json<PublishResponse>, Error> {
    // add logic
    // add response prep
    todo!()
} | 33.1875 | 118 | 0.702448 |
38ca31bbbcb5985eb2869624ee2e58ad794a8f0e | 65 | #[macro_use]
extern crate log;
pub mod provider;
pub mod runner;
| 13 | 17 | 0.753846 |
c14e43a4964729ee769cb97a27f5715e255bd82c | 13,029 | //! Core traits for rule definitions and rule context.
//! As well as an internal prelude to make imports for rules easier.
#![allow(unused_variables, unused_imports)]
use crate::autofix::Fixer;
use crate::Diagnostic;
use dyn_clone::DynClone;
use rslint_errors::Severity;
use rslint_parser::{SyntaxNode, SyntaxNodeExt, SyntaxToken};
use rslint_text_edit::apply_indels;
use serde::{Deserialize, Serialize};
use std::borrow::Borrow;
use std::fmt::Debug;
use std::marker::{Send, Sync};
use std::ops::{Deref, DerefMut, Drop};
use std::rc::Rc;
use std::sync::Arc;
/// The main type of rule run by the runner. The rule takes individual
/// nodes inside of a Concrete Syntax Tree and checks them.
/// It may also take individual syntax tokens.
/// Rules must all be [`Send`] + [`Sync`], because rules are run in parallel.
///
/// # Rule Level Configuration
/// Rules do not know about the lint level they were configured for, the runner
/// runs the rules, then maps any error/warning diagnostics to their appropriate severity.
/// This saves on boilerplate code for getting the appropriate diagnostic builder type and config.
///
/// # Guidelines
/// This is a list of guidelines and tips you should generally follow when implementing a rule:
/// - Do not use text-based equality; it is inaccurate. Instead use [`lexical_eq`](SyntaxNodeExt::lexical_eq).
/// - Avoid using `text_range` on nodes; it is inaccurate because it may include whitespace. Instead use [`trimmed_range`](SyntaxNodeExt::trimmed_range).
/// - Avoid using `text` on nodes for the same reason; use [`trimmed_text`](SyntaxNodeExt::trimmed_text) instead.
/// - If you can offer better diagnostics and more context around a rule error, __always__ do it! It is a central goal
/// of the project to offer very helpful diagnostics.
/// - Do not be afraid to clone syntax nodes, AST nodes, and syntax tokens. They are all backed by an [`Rc`](std::rc::Rc) around node data,
/// therefore they can be cheaply cloned (but if you can, have your functions take a reference, since `Rc` cloning is not zero cost).
/// - Do not try to rely on the results of other rules; it is impossible because rules are run at the same time.
/// - Do not rely on the file data of different files. There is a separate rule type for this.
/// - Do not unwrap pieces of an AST node (sometimes it is ok because they are guaranteed to be there), since that will cause panics
/// with error recovery.
/// - Do not use node or string coloring outside of diagnostic notes; it messes with termcolor and ends up looking horrible.
#[typetag::serde]
pub trait CstRule: Rule {
/// Check an individual node in the syntax tree.
/// You can use the `match_ast` macro to make matching a node to an ast node easier.
/// The reason this uses nodes and not a visitor is because nodes are more flexible,
/// converting them to an AST node has zero cost and you can easily traverse surrounding nodes.
/// Defaults to doing nothing.
///
/// The return type is `Option<()>` to allow usage of `?` on the properties of AST nodes which are all optional.
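    ///
    /// A minimal sketch (the AST cast helper shown is an assumption, not a
    /// guaranteed API of this crate):
    ///
    /// ```ignore
    /// fn check_node(&self, node: &SyntaxNode, ctx: &mut RuleCtx) -> Option<()> {
    ///     let expr = node.try_to::<ast::CondExpr>()?; // hypothetical cast helper
    ///     let err = ctx.err("rule-name", "a description of the issue");
    ///     ctx.add_err(err);
    ///     Some(())
    /// }
    /// ```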
#[inline]
fn check_node(&self, node: &SyntaxNode, ctx: &mut RuleCtx) -> Option<()> {
None
}
/// Check an individual token in the syntax tree.
/// Defaults to doing nothing.
#[inline]
fn check_token(&self, token: &SyntaxToken, ctx: &mut RuleCtx) -> Option<()> {
None
}
/// Check the root of the tree one time.
/// This method is guaranteed to only be called once.
/// The root's kind will be either `SCRIPT` or `MODULE`.
/// Defaults to doing nothing.
#[inline]
fn check_root(&self, root: &SyntaxNode, ctx: &mut RuleCtx) -> Option<()> {
None
}
}
/// A generic trait which describes things common to a rule regardless on what they run on.
///
/// Each rule should have a `new` function for easy instantiation. We do not require this, however,
/// so that more complex rules can instantiate themselves in a different way.
/// Rules must nonetheless be easy to instantiate because of rule groups.
pub trait Rule: Debug + DynClone + Send + Sync {
/// A unique, kebab-case name for the rule.
fn name(&self) -> &'static str;
/// The name of the group this rule belongs to.
fn group(&self) -> &'static str;
/// Optional docs for the rule, an empty string by default
fn docs(&self) -> &'static str {
""
}
}
dyn_clone::clone_trait_object!(Rule);
dyn_clone::clone_trait_object!(CstRule);
/// The level configured for a rule.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum RuleLevel {
Warning,
Error,
}
/// Context given to a rule when running it.
// This is passed by reference and not by Arc, which is very important:
// Arcs are expensive to copy, and for 50 rules running on 50 files we would
// end up with a total of 2500 copies, which is non-ideal at best.
#[derive(Debug, Clone)]
pub struct RuleCtx {
/// The file id of the file being linted.
pub file_id: usize,
/// Whether the linter is run with the `--verbose` option.
/// Which dictates whether the linter should include more (potentially spammy) context in diagnostics.
pub verbose: bool,
/// An empty vector of diagnostics which the rule adds to.
pub diagnostics: Vec<Diagnostic>,
pub fixer: Option<Fixer>,
pub src: Arc<String>,
}
impl RuleCtx {
/// Make a new diagnostic builder.
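    ///
    /// For example (the rule code and message are illustrative):
    ///
    /// ```ignore
    /// let err = ctx.err("no-empty", "empty block statements are not allowed");
    /// ctx.add_err(err);
    /// ```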
pub fn err(&mut self, code: impl Into<String>, message: impl Into<String>) -> Diagnostic {
Diagnostic::error(self.file_id, code.into(), message.into())
}
pub fn add_err(&mut self, diagnostic: Diagnostic) {
self.diagnostics.push(diagnostic)
}
/// Make a new fixer for this context and return a mutable reference to it
pub fn fix(&mut self) -> &mut Fixer {
let fixer = Fixer::new(self.src.clone());
self.fixer = Some(fixer);
self.fixer.as_mut().unwrap()
}
}
/// The result of running a single rule on a syntax tree.
#[derive(Debug, Clone)]
pub struct RuleResult {
pub diagnostics: Vec<Diagnostic>,
pub fixer: Option<Fixer>,
}
impl RuleResult {
/// Make a new rule result with diagnostics and an optional fixer.
pub fn new(diagnostics: Vec<Diagnostic>, fixer: impl Into<Option<Fixer>>) -> Self {
Self {
diagnostics,
fixer: fixer.into(),
}
}
/// Get the result of running this rule.
pub fn outcome(&self) -> Outcome {
Outcome::from(&self.diagnostics)
}
    /// Merge two results. This joins the diagnostics of `self` and `other`, and
    /// takes `self`'s fixer if available, falling back to `other`'s fixer.
pub fn merge(self, other: RuleResult) -> RuleResult {
RuleResult {
diagnostics: [self.diagnostics, other.diagnostics].concat(),
fixer: self.fixer.or(other.fixer),
}
}
/// Attempt to fix the issue if the rule can be autofixed.
pub fn fix(&self) -> Option<String> {
self.fixer.as_ref().map(|x| x.apply())
}
}
/// The overall result of running a single rule or linting a file.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Outcome {
/// Running the rule resulted in one or more errors.
/// The rule result may have also included warnings or notes.
Failure,
/// Running the rule resulted in one or more warnings.
/// May also include notes.
Warning,
/// Running the rule resulted in no errors or warnings.
/// May include note diagnostics (which are very rare).
Success,
}
impl<T> From<T> for Outcome
where
T: IntoIterator,
T::Item: Borrow<Diagnostic>,
{
fn from(diagnostics: T) -> Self {
let mut outcome = Outcome::Success;
for diagnostic in diagnostics {
match diagnostic.borrow().severity {
Severity::Error => outcome = Outcome::Failure,
Severity::Warning if outcome != Outcome::Failure => outcome = Outcome::Warning,
_ => {}
}
}
outcome
}
}
impl Outcome {
pub fn merge(outcomes: impl IntoIterator<Item = impl Borrow<Outcome>>) -> Outcome {
let mut overall = Outcome::Success;
for outcome in outcomes {
match outcome.borrow() {
Outcome::Failure => overall = Outcome::Failure,
Outcome::Warning if overall != Outcome::Failure => overall = Outcome::Warning,
_ => {}
}
}
overall
}
}
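// Example (illustrative): folding diagnostics and outcomes.
// A single warning-severity diagnostic yields `Outcome::Warning` via `From`,
// and `Outcome::merge([Outcome::Success, Outcome::Warning])` yields
// `Outcome::Warning`, since only a `Failure` can outrank it.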
#[macro_export]
#[doc(hidden)]
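// A tt-muncher that peels leading attributes off the wrapped item, splitting
// them into doc comments (`#[doc = ...]`) and everything else, then re-emits
// them, docs first (concatenated through `indoc`), ahead of the callback
// macro invocation.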
macro_rules! __pre_parse_docs_from_meta {
(
@$cb:tt
@[docs $($docs:tt)*]
@$other:tt
#[doc = $doc:expr]
$($rest:tt)*
) => (
$crate::__pre_parse_docs_from_meta! {
@$cb
@[docs $($docs)* $doc]
@$other
$($rest)*
}
);
(
@$cb:tt
@$docs:tt
@[others $($others:tt)*]
#[$other:meta]
$($rest:tt)*
) => (
$crate::__pre_parse_docs_from_meta! {
@$cb
@$docs
@[others $($others)* $other]
$($rest)*
}
);
(
@[cb $($cb:tt)*]
@[docs $($docs:tt)*]
@[others $($others:tt)*]
$($rest:tt)*
) => (
$($cb)* ! {
#[doc = concat!($(indoc::indoc!($docs), "\n"),*)]
$(
#[$others]
)*
$($rest)*
}
);
(
$(:: $(@ $colon:tt)?)? $($cb:ident)::+ ! {
$($input:tt)*
}
) => (
$crate::__pre_parse_docs_from_meta! {
@[cb $(:: $($colon)?)? $($cb)::+]
@[docs ]
@[others ]
$($input)*
}
);
}
#[macro_export]
#[doc(hidden)]
macro_rules! __declare_lint_inner {
(
#[doc = $doc:expr]
$(#[$outer:meta])*
// The rule struct name
$name:ident,
$group:ident,
// A unique kebab-case name for the rule
$code:expr
$(,
// Any fields for the rule
$(
$(#[$inner:meta])*
$visibility:vis $key:ident : $val:ty
),* $(,)?
)?
) => {
use $crate::Rule;
use serde::{Deserialize, Serialize};
$(#[$outer])*
#[doc = $doc]
#[serde(rename_all = "camelCase")]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct $name {
$(
$(
$(#[$inner])*
pub $key: $val
),
*)?
}
impl $name {
pub fn new() -> Self {
Self::default()
}
}
impl Rule for $name {
fn name(&self) -> &'static str {
$code
}
fn group(&self) -> &'static str {
stringify!($group)
}
fn docs(&self) -> &'static str {
$doc
}
}
};
}
/// A macro to easily generate rule boilerplate code.
///
/// ```ignore
/// declare_lint! {
/// /// A description of the rule here
/// /// This will be used as the doc for the rule struct
/// RuleName,
/// // The name of the group this rule belongs to.
/// groupname,
/// // Make sure this is kebab-case and unique.
/// "rule-name",
/// /// A description of the attribute here, used for config docs.
/// pub config_attr: u8,
/// pub another_attr: String
/// }
/// ```
///
/// # Rule name and docs
///
/// The macro's first argument is an identifier for the rule structure.
/// This should always be a PascalCase name. You will have to either derive Default for the struct
/// or implement it manually.
///
/// The macro also accepts any doc comments for the rule name. These comments
/// are then used by an xtask script to generate markdown files for user facing docs.
/// Each rule doc should include an `Incorrect Code Examples` header. It may also optionally
/// include a `Correct Code Examples` header. Do not include a `Config` header; it is autogenerated
/// from config field docs.
///
/// # Config
///
/// After the rule code, the macro accepts fields for the struct. Any field which is
/// public will be used for config; you can however opt out of this with `#[serde(skip)]`.
/// Every public (config) field should have a doc comment. The doc comments will be used for
/// user-facing documentation, so try to keep them non-technical and not Rust-specific.
/// **All config fields will be renamed to camelCase.**
///
///
/// This will generate a rule struct named `RuleName`,
/// using the optional attributes defined above as the rule's config.
/// You must make sure each config field implements `Deserialize`.
#[macro_export]
macro_rules! declare_lint {
($($input:tt)*) => {
$crate::__pre_parse_docs_from_meta! {
$crate::__declare_lint_inner! { $($input)* }
}
};
}
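// For illustration, a hypothetical invocation:
//
// declare_lint! {
//     /// Disallow `debugger` statements.
//     NoDebugger,
//     errors,
//     "no-debugger"
// }
//
// expands (roughly) to `pub struct NoDebugger {}` plus an `impl Rule` whose
// `name()` returns "no-debugger", `group()` returns "errors", and `docs()`
// returns the doc comment text.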
| 33.753886 | 153 | 0.604267 |
4bb57431beed90664c7e44e7bcbd4b83175120b0 | 34,048 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_affected_accounts_for_organization_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeAffectedAccountsForOrganizationOutput,
crate::error::DescribeAffectedAccountsForOrganizationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeAffectedAccountsForOrganizationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(
crate::error::DescribeAffectedAccountsForOrganizationError::unhandled(generic),
)
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InvalidPaginationToken" => crate::error::DescribeAffectedAccountsForOrganizationError { meta: generic, kind: crate::error::DescribeAffectedAccountsForOrganizationErrorKind::InvalidPaginationToken({
#[allow(unused_mut)]let mut tmp =
{
#[allow(unused_mut)]let mut output = crate::error::invalid_pagination_token::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_pagination_tokenjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeAffectedAccountsForOrganizationError::unhandled)?;
output.build()
}
;
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
})},
_ => crate::error::DescribeAffectedAccountsForOrganizationError::generic(generic)
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_affected_accounts_for_organization_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeAffectedAccountsForOrganizationOutput,
crate::error::DescribeAffectedAccountsForOrganizationError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::describe_affected_accounts_for_organization_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_affected_accounts_for_organization(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAffectedAccountsForOrganizationError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_affected_entities_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeAffectedEntitiesOutput,
crate::error::DescribeAffectedEntitiesError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeAffectedEntitiesError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DescribeAffectedEntitiesError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InvalidPaginationToken" => crate::error::DescribeAffectedEntitiesError {
meta: generic,
kind: crate::error::DescribeAffectedEntitiesErrorKind::InvalidPaginationToken({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_pagination_token::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_pagination_tokenjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAffectedEntitiesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnsupportedLocale" => crate::error::DescribeAffectedEntitiesError {
meta: generic,
kind: crate::error::DescribeAffectedEntitiesErrorKind::UnsupportedLocale({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unsupported_locale::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unsupported_localejson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAffectedEntitiesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeAffectedEntitiesError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_affected_entities_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeAffectedEntitiesOutput,
crate::error::DescribeAffectedEntitiesError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_affected_entities_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_affected_entities(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAffectedEntitiesError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_affected_entities_for_organization_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeAffectedEntitiesForOrganizationOutput,
crate::error::DescribeAffectedEntitiesForOrganizationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeAffectedEntitiesForOrganizationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(
crate::error::DescribeAffectedEntitiesForOrganizationError::unhandled(generic),
)
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InvalidPaginationToken" => crate::error::DescribeAffectedEntitiesForOrganizationError { meta: generic, kind: crate::error::DescribeAffectedEntitiesForOrganizationErrorKind::InvalidPaginationToken({
#[allow(unused_mut)]let mut tmp =
{
#[allow(unused_mut)]let mut output = crate::error::invalid_pagination_token::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_pagination_tokenjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeAffectedEntitiesForOrganizationError::unhandled)?;
output.build()
}
;
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
})},
"UnsupportedLocale" => crate::error::DescribeAffectedEntitiesForOrganizationError { meta: generic, kind: crate::error::DescribeAffectedEntitiesForOrganizationErrorKind::UnsupportedLocale({
#[allow(unused_mut)]let mut tmp =
{
#[allow(unused_mut)]let mut output = crate::error::unsupported_locale::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unsupported_localejson_err(response.body().as_ref(), output).map_err(crate::error::DescribeAffectedEntitiesForOrganizationError::unhandled)?;
output.build()
}
;
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
})},
_ => crate::error::DescribeAffectedEntitiesForOrganizationError::generic(generic)
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_affected_entities_for_organization_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeAffectedEntitiesForOrganizationOutput,
crate::error::DescribeAffectedEntitiesForOrganizationError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::describe_affected_entities_for_organization_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_affected_entities_for_organization(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAffectedEntitiesForOrganizationError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_entity_aggregates_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEntityAggregatesOutput,
crate::error::DescribeEntityAggregatesError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeEntityAggregatesError::unhandled)?;
Err(crate::error::DescribeEntityAggregatesError::generic(
generic,
))
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_entity_aggregates_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEntityAggregatesOutput,
crate::error::DescribeEntityAggregatesError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_entity_aggregates_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_entity_aggregates(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEntityAggregatesError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_event_aggregates_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEventAggregatesOutput,
crate::error::DescribeEventAggregatesError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeEventAggregatesError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DescribeEventAggregatesError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InvalidPaginationToken" => crate::error::DescribeEventAggregatesError {
meta: generic,
kind: crate::error::DescribeEventAggregatesErrorKind::InvalidPaginationToken({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_pagination_token::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_pagination_tokenjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventAggregatesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeEventAggregatesError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_event_aggregates_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEventAggregatesOutput,
crate::error::DescribeEventAggregatesError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_event_aggregates_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_event_aggregates(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventAggregatesError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_event_details_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEventDetailsOutput,
crate::error::DescribeEventDetailsError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeEventDetailsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeEventDetailsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"UnsupportedLocale" => crate::error::DescribeEventDetailsError {
meta: generic,
kind: crate::error::DescribeEventDetailsErrorKind::UnsupportedLocale({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unsupported_locale::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unsupported_localejson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventDetailsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeEventDetailsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_event_details_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEventDetailsOutput,
crate::error::DescribeEventDetailsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_event_details_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_event_details(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventDetailsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_event_details_for_organization_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEventDetailsForOrganizationOutput,
crate::error::DescribeEventDetailsForOrganizationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeEventDetailsForOrganizationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DescribeEventDetailsForOrganizationError::unhandled(generic))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"UnsupportedLocale" => crate::error::DescribeEventDetailsForOrganizationError {
meta: generic,
kind: crate::error::DescribeEventDetailsForOrganizationErrorKind::UnsupportedLocale({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unsupported_locale::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unsupported_localejson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventDetailsForOrganizationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeEventDetailsForOrganizationError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_event_details_for_organization_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEventDetailsForOrganizationOutput,
crate::error::DescribeEventDetailsForOrganizationError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::describe_event_details_for_organization_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_event_details_for_organization(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventDetailsForOrganizationError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_events_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeEventsOutput, crate::error::DescribeEventsError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeEventsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeEventsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InvalidPaginationToken" => crate::error::DescribeEventsError {
meta: generic,
kind: crate::error::DescribeEventsErrorKind::InvalidPaginationToken({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_pagination_token::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_pagination_tokenjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnsupportedLocale" => crate::error::DescribeEventsError {
meta: generic,
kind: crate::error::DescribeEventsErrorKind::UnsupportedLocale({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unsupported_locale::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unsupported_localejson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeEventsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_events_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeEventsOutput, crate::error::DescribeEventsError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_events_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_describe_events(response.body().as_ref(), output)
.map_err(crate::error::DescribeEventsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_events_for_organization_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEventsForOrganizationOutput,
crate::error::DescribeEventsForOrganizationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeEventsForOrganizationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DescribeEventsForOrganizationError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InvalidPaginationToken" => crate::error::DescribeEventsForOrganizationError {
meta: generic,
kind: crate::error::DescribeEventsForOrganizationErrorKind::InvalidPaginationToken({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_pagination_token::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_pagination_tokenjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventsForOrganizationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnsupportedLocale" => crate::error::DescribeEventsForOrganizationError {
meta: generic,
kind: crate::error::DescribeEventsForOrganizationErrorKind::UnsupportedLocale({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unsupported_locale::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unsupported_localejson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventsForOrganizationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeEventsForOrganizationError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_events_for_organization_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEventsForOrganizationOutput,
crate::error::DescribeEventsForOrganizationError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_events_for_organization_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_events_for_organization(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventsForOrganizationError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_event_types_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEventTypesOutput,
crate::error::DescribeEventTypesError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeEventTypesError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeEventTypesError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InvalidPaginationToken" => crate::error::DescribeEventTypesError {
meta: generic,
kind: crate::error::DescribeEventTypesErrorKind::InvalidPaginationToken({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_pagination_token::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_pagination_tokenjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventTypesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnsupportedLocale" => crate::error::DescribeEventTypesError {
meta: generic,
kind: crate::error::DescribeEventTypesErrorKind::UnsupportedLocale({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unsupported_locale::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unsupported_localejson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventTypesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeEventTypesError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_event_types_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeEventTypesOutput,
crate::error::DescribeEventTypesError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_event_types_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_event_types(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeEventTypesError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_health_service_status_for_organization_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeHealthServiceStatusForOrganizationOutput,
crate::error::DescribeHealthServiceStatusForOrganizationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeHealthServiceStatusForOrganizationError::unhandled)?;
Err(crate::error::DescribeHealthServiceStatusForOrganizationError::generic(generic))
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_health_service_status_for_organization_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeHealthServiceStatusForOrganizationOutput,
crate::error::DescribeHealthServiceStatusForOrganizationError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::describe_health_service_status_for_organization_output::Builder::default(
);
let _ = response;
output =
crate::json_deser::deser_operation_describe_health_service_status_for_organization(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeHealthServiceStatusForOrganizationError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disable_health_service_access_for_organization_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisableHealthServiceAccessForOrganizationOutput,
crate::error::DisableHealthServiceAccessForOrganizationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DisableHealthServiceAccessForOrganizationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(
crate::error::DisableHealthServiceAccessForOrganizationError::unhandled(generic),
)
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"ConcurrentModificationException" => crate::error::DisableHealthServiceAccessForOrganizationError { meta: generic, kind: crate::error::DisableHealthServiceAccessForOrganizationErrorKind::ConcurrentModificationException({
#[allow(unused_mut)]let mut tmp =
{
#[allow(unused_mut)]let mut output = crate::error::concurrent_modification_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_concurrent_modification_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisableHealthServiceAccessForOrganizationError::unhandled)?;
output.build()
}
;
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
})},
_ => crate::error::DisableHealthServiceAccessForOrganizationError::generic(generic)
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disable_health_service_access_for_organization_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisableHealthServiceAccessForOrganizationOutput,
crate::error::DisableHealthServiceAccessForOrganizationError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::disable_health_service_access_for_organization_output::Builder::default(
);
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_enable_health_service_access_for_organization_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::EnableHealthServiceAccessForOrganizationOutput,
crate::error::EnableHealthServiceAccessForOrganizationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::EnableHealthServiceAccessForOrganizationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(
crate::error::EnableHealthServiceAccessForOrganizationError::unhandled(generic),
)
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"ConcurrentModificationException" => crate::error::EnableHealthServiceAccessForOrganizationError { meta: generic, kind: crate::error::EnableHealthServiceAccessForOrganizationErrorKind::ConcurrentModificationException({
#[allow(unused_mut)]let mut tmp =
{
#[allow(unused_mut)]let mut output = crate::error::concurrent_modification_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_concurrent_modification_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::EnableHealthServiceAccessForOrganizationError::unhandled)?;
output.build()
}
;
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
})},
_ => crate::error::EnableHealthServiceAccessForOrganizationError::generic(generic)
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_enable_health_service_access_for_organization_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::EnableHealthServiceAccessForOrganizationOutput,
crate::error::EnableHealthServiceAccessForOrganizationError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::enable_health_service_access_for_organization_output::Builder::default();
let _ = response;
output.build()
})
}
| 41.07117 | 228 | 0.604294 |
91056c9f8c158db97ec53c96b749ea9cdd7aea8f | 3,730 | use std::time::Duration;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use xt::{Format, Handle};
criterion_main!(small, large);
criterion_group! {
name = small;
config = Criterion::default();
targets = small_json,
small_yaml,
small_toml,
small_msgpack,
}
criterion_group! {
name = large;
config = Criterion::default().measurement_time(Duration::from_secs(30));
targets = large_json,
large_yaml,
large_toml,
large_msgpack,
}
macro_rules! xt_benchmark {
(
name = $name:ident;
sources = $($source:ident),+;
loader = $loader:path;
translation = $from:path => $to:path;
$(group_config { $($setting_name:ident = $setting_value:expr;)* })?
) => {
fn $name(c: &mut Criterion) {
let mut group = c.benchmark_group(stringify!($name));
let input = $loader($from);
$($(group.$setting_name($setting_value);)*)?
$(
group.bench_function(stringify!($source), |b| {
b.iter(|| {
xt::translate(
xt_benchmark!(@input_handle $source &*input),
black_box(Some($from)),
black_box($to),
std::io::sink(),
)
})
});
)+
group.finish();
}
};
(@input_handle buffer $input:expr) => { Handle::from_slice($input) };
(@input_handle reader $input:expr) => { Handle::from_reader($input) };
}
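// Illustration: the `small_json` invocation below expands to a benchmark group
// with one `buffer` case (input handed to `xt::translate` as an in-memory
// slice) and one `reader` case (input handed over as an `io::Read`), both
// translating JSON to MessagePack into `io::sink()`.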
xt_benchmark! {
name = small_json;
sources = buffer, reader;
loader = load_small_data;
translation = Format::Json => Format::Msgpack;
}
xt_benchmark! {
name = small_yaml;
sources = buffer;
loader = load_small_data;
translation = Format::Yaml => Format::Json;
}
xt_benchmark! {
name = small_toml;
sources = buffer;
loader = load_small_data;
translation = Format::Toml => Format::Json;
}
xt_benchmark! {
name = small_msgpack;
sources = buffer, reader;
loader = load_small_data;
translation = Format::Msgpack => Format::Json;
}
xt_benchmark! {
name = large_json;
sources = buffer, reader;
loader = load_large_data;
translation = Format::Json => Format::Msgpack;
}
xt_benchmark! {
name = large_yaml;
sources = buffer;
loader = load_large_data;
translation = Format::Yaml => Format::Json;
group_config {
sample_size = 50;
}
}
xt_benchmark! {
name = large_toml;
sources = buffer;
loader = load_large_data;
translation = Format::Toml => Format::Json;
group_config {
measurement_time = Duration::from_secs(60);
sample_size = 20;
}
}
xt_benchmark! {
name = large_msgpack;
sources = buffer, reader;
loader = load_large_data;
translation = Format::Msgpack => Format::Json;
}
fn load_small_data(format: Format) -> Vec<u8> {
// The K8s data expands to just a few hundred bytes regardless of format.
load_test_data(include_bytes!("k8s-job.msgpack.zst"), format, 512)
}
fn load_large_data(format: Format) -> Vec<u8> {
// The GitHub data expands to somewhere between 23 - 30 MB depending on the
// output format. 32 MiB is a nice, round number that should be big enough.
load_test_data(
include_bytes!("github-events.msgpack.zst"),
format,
32 * 1024 * 1024,
)
}
fn load_test_data(input: &[u8], format: Format, capacity: usize) -> Vec<u8> {
let mut output = Vec::with_capacity(capacity);
xt::translate(
Handle::from_reader(zstd::Decoder::new(input).expect("failed to create zstd decoder")),
Some(Format::Msgpack),
format,
&mut output,
)
.expect("failed to translate test data");
output
}
| 24.539474 | 91 | 0.608847 |
913dbec65f10f1193c75dc819c3ce693319cd51a | 564 | //! Substrate Node Template CLI library.
#![warn(missing_docs)]
#![warn(unused_extern_crates)]
mod chain_spec;
mod service;
mod cli;
pub use substrate_cli::{VersionInfo, IntoExit, error};
fn run() -> cli::error::Result<()> {
let version = VersionInfo {
name: "Substrate Node",
commit: env!("VERGEN_SHA_SHORT"),
version: env!("CARGO_PKG_VERSION"),
executable_name: "akashi",
author: "SoraSuegami",
description: "akashi",
support_url: "support.anonymous.an",
};
cli::run(::std::env::args(), cli::Exit, version)
}
error_chain::quick_main!(run);
| 21.692308 | 54 | 0.691489 |
2815f43ab6a2777f6e2361628258a037286e8cda | 5,939 | use crate::Error;
use ark_std::rand::{CryptoRng, Rng, SeedableRng};
use ark_std::{
cfg_chunks,
fmt::{Debug, Formatter, Result as FmtResult},
marker::PhantomData,
vec,
vec::Vec,
};
#[cfg(feature = "parallel")]
use rayon::prelude::*;
use super::pedersen;
use crate::variable_length_crh::VariableLengthCRH;
use ark_ec::{
twisted_edwards_extended::GroupProjective as TEProjective, ModelParameters, ProjectiveCurve,
TEModelParameters,
};
use ark_ff::{Field, PrimeField, UniformRand};
pub mod constraints;
type ConstraintF<P> = <<P as ModelParameters>::BaseField as Field>::BasePrimeField;
pub const WINDOW_SIZE: usize = 64;
pub const CHUNK_SIZE: usize = 3;
pub struct VariableLengthBoweHopwoodParameters<P: TEModelParameters> {
pub seed: Vec<u8>,
#[doc(hidden)]
pub _params: PhantomData<P>,
}
impl<P: TEModelParameters> Clone for VariableLengthBoweHopwoodParameters<P> {
fn clone(&self) -> Self {
Self {
seed: self.seed.clone(),
_params: PhantomData,
}
}
}
impl<P: TEModelParameters> Default for VariableLengthBoweHopwoodParameters<P> {
fn default() -> Self {
Self {
seed: vec![0u8; 32],
_params: PhantomData,
}
}
}
impl<P: TEModelParameters> VariableLengthBoweHopwoodParameters<P> {
pub fn get_generators<RO: Rng + CryptoRng + SeedableRng>(
&self,
pos: usize,
) -> Vec<Vec<TEProjective<P>>> {
let mut seed = RO::Seed::default();
let seed_as_mut = seed.as_mut();
seed_as_mut[..self.seed.len()].clone_from_slice(&self.seed[..]);
let mut rng = RO::from_seed(seed);
let num_windows = (pos + WINDOW_SIZE - 1) / WINDOW_SIZE;
let mut generators = Vec::new();
for _ in 0..num_windows {
let mut generators_for_segment = Vec::new();
let mut base = TEProjective::rand(&mut rng);
for _ in 0..WINDOW_SIZE {
generators_for_segment.push(base);
for _ in 0..4 {
base.double_in_place();
}
}
generators.push(generators_for_segment);
}
generators
}
}
pub struct VariableLengthBoweHopwoodCompressedCRH<
RO: Rng + CryptoRng + SeedableRng,
P: TEModelParameters,
> where
P::BaseField: PrimeField<BasePrimeField = P::BaseField>,
{
_rand: PhantomData<RO>,
_group: PhantomData<P>,
}
impl<RO: Rng + CryptoRng + SeedableRng, P: TEModelParameters> VariableLengthCRH<ConstraintF<P>>
for VariableLengthBoweHopwoodCompressedCRH<RO, P>
where
P::BaseField: PrimeField<BasePrimeField = P::BaseField>,
{
type Output = ConstraintF<P>;
type Parameters = VariableLengthBoweHopwoodParameters<P>;
fn setup<R: Rng>(rng: &mut R) -> Result<Self::Parameters, Error> {
let mut seed = RO::Seed::default();
let seed_as_mut = seed.as_mut();
rng.fill_bytes(seed_as_mut);
Ok(Self::Parameters {
seed: seed_as_mut.to_vec(),
_params: PhantomData,
})
}
fn evaluate(parameters: &Self::Parameters, input: &[u8]) -> Result<Self::Output, Error> {
        let input = pedersen::bytes_to_bits(input);
        // Each byte became eight bits; reserve room for the bits plus any
        // chunk padding added below.
        let mut padded_input = Vec::with_capacity(input.len() + CHUNK_SIZE);
        padded_input.extend_from_slice(&input);
if input.len() % CHUNK_SIZE != 0 {
let current_length = input.len();
padded_input.extend_from_slice(&vec![false; CHUNK_SIZE - current_length % CHUNK_SIZE]);
}
assert_eq!(padded_input.len() % CHUNK_SIZE, 0);
assert_eq!(CHUNK_SIZE, 3);
// Compute sum of h_i^{sum of
// (1-2*c_{i,j,2})*(1+c_{i,j,0}+2*c_{i,j,1})*2^{4*(j-1)} for all j in segment}
// for all i. Described in section 5.4.1.7 in the Zcash protocol
// specification.
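        //
        // Concretely, for one 3-bit chunk (b0, b1, b2) and its window generator g
        // (pre-doubled by a factor of 2^{4*(j-1)} in `get_generators`), the fold
        // below adds (1 + b0 + 2*b1) * g, negated when b2 is set; e.g. the chunk
        // (1, 0, 1) contributes -2*g.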
let generator = parameters.get_generators::<RO>(padded_input.len() / CHUNK_SIZE);
let result = cfg_chunks!(padded_input, WINDOW_SIZE * CHUNK_SIZE)
.zip(generator)
.map(|(segment_bits, segment_generators)| {
cfg_chunks!(segment_bits, CHUNK_SIZE)
.zip(segment_generators)
.map(|(chunk_bits, generator)| {
let mut encoded = generator;
if chunk_bits[0] {
encoded += generator;
}
if chunk_bits[1] {
encoded = encoded + &generator + &generator;
}
if chunk_bits[2] {
encoded = -encoded;
}
encoded
})
.sum::<TEProjective<P>>()
})
.sum::<TEProjective<P>>();
Ok(result.into_affine().x)
}
fn convert_output_to_field_elements(
output: Self::Output,
) -> Result<Vec<ConstraintF<P>>, Error> {
Ok(vec![output])
}
}
impl<P: TEModelParameters> Debug for VariableLengthBoweHopwoodParameters<P>
where
P::BaseField: PrimeField<BasePrimeField = P::BaseField>,
{
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
writeln!(f, "Bowe-Hopwood-Pedersen Hash Parameters {{")?;
writeln!(f, "\t Generator {:?}", self.seed)?;
writeln!(f, "}}")
}
}
#[cfg(test)]
mod test {
use crate::variable_length_crh::bowe_hopwood::VariableLengthBoweHopwoodCompressedCRH;
use crate::variable_length_crh::VariableLengthCRH;
use ark_ed_on_bls12_381::EdwardsParameters;
use ark_std::test_rng;
use rand_chacha::ChaChaRng;
type H = VariableLengthBoweHopwoodCompressedCRH<ChaChaRng, EdwardsParameters>;
#[test]
fn test_simple_bh() {
let rng = &mut test_rng();
let params = H::setup(rng).unwrap();
let _ = H::evaluate(¶ms, &[1, 2, 3]).unwrap();
}
}
| 31.42328 | 99 | 0.589662 |
29d1f045c9a09318ebad6239bf154384fd80f7e1 | 74 | /// An account is a user of the service.
pub mod database;
pub mod model;
| 18.5 | 40 | 0.716216 |
23645ab189aec1c06de223a6a6cad88f1eeceaac | 1,191 | #![crate_name = "lightning"]
//! Rust-Lightning, not Rusty's Lightning!
//!
//! A full-featured but also flexible lightning implementation, in library form. This allows the
//! user (you) to decide how they wish to use it instead of being a fully self-contained daemon.
//! This means there is no built-in threading/execution environment and it's up to the user to
//! figure out how best to make networking happen/timers fire/things get written to disk/keys get
//! generated/etc. This makes it a good candidate for tight integration into an existing wallet
//! instead of having a rather-separate lightning appendage to a wallet.
#![cfg_attr(not(feature = "fuzztarget"), deny(missing_docs))]
#![forbid(unsafe_code)]
// In general, Rust is absolutely horrid at supporting users doing things like,
// for example, compiling Rust code for real environments. Disable useless lints
// that don't do anything but annoy us and can't actually ever be resolved.
#![allow(bare_trait_objects)]
#![allow(ellipsis_inclusive_range_patterns)]
extern crate bitcoin;
#[cfg(test)] extern crate rand;
#[cfg(test)] extern crate hex;
#[macro_use]
pub mod util;
pub mod chain;
pub mod ln;
pub mod routing;
| 38.419355 | 97 | 0.756507 |
f8564ee3223b512958142b2b4ad758a5c822139f | 252 | [
// https://tools.ietf.org/html/rfc3394#section-4.1
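    // Wrapping the 16-byte plaintext under the 16-byte KEK yields a 24-byte
    // ciphertext: n + 1 semiblocks of 8 bytes each.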
TestVector {
encryption_key: "000102030405060708090A0B0C0D0E0F",
plaintext: "00112233445566778899AABBCCDDEEFF",
ciphertext: "1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5",
},
]
| 28 | 67 | 0.761905 |
fe06b2390bb6fbdcb51bcdfff007b7418e73c7b4 | 6,646 | mod thing;
use thing::*;
use crate::{controller::ControllerConfig, data::DittoDeviceStatus, ditto::Client as DittoClient};
use async_trait::async_trait;
use drogue_client::{
core::v1::Conditions,
meta::v1::{CommonMetadataExt, CommonMetadataMut},
openid::{AccessTokenProvider, OpenIdTokenProvider, TokenProvider},
registry, Translator,
};
use drogue_cloud_operator_common::controller::{
base::{
ConditionExt, ControllerOperation, ProcessOutcome, ReadyState, StatusSection,
CONDITION_RECONCILED,
},
reconciler::{
operation::HasFinalizer,
progress::{Progressor, ResourceAccessor, RunConstructor},
ByDevice, ReconcileError, ReconcileProcessor, ReconcileState, Reconciler,
},
};
use std::ops::Deref;
const FINALIZER: &str = "ditto";
pub struct DeviceController<TP>
where
TP: TokenProvider,
{
config: ControllerConfig,
registry: registry::v1::Client<TP>,
ditto: DittoClient,
devops_provider: Option<AccessTokenProvider>,
admin_provider: OpenIdTokenProvider,
}
impl<TP> DeviceController<TP>
where
TP: TokenProvider,
{
pub async fn new(
mut config: ControllerConfig,
registry: registry::v1::Client<TP>,
client: reqwest::Client,
) -> Result<Self, anyhow::Error> {
let ditto = config.ditto_devops.clone();
config.kafka = config.kafka.translate();
let devops_provider = ditto
.username
.zip(ditto.password)
.map(|(user, token)| AccessTokenProvider { user, token });
let admin_provider = config
.ditto_admin
.clone()
.discover_from(client.clone())
.await?;
Ok(Self {
config,
registry,
ditto: DittoClient::new(client, ditto.url),
devops_provider,
admin_provider,
})
}
}
#[async_trait]
impl<TP>
ControllerOperation<
(String, String),
(registry::v1::Application, registry::v1::Device),
registry::v1::Device,
> for DeviceController<TP>
where
TP: TokenProvider,
{
async fn process_resource(
&self,
input: (registry::v1::Application, registry::v1::Device),
) -> Result<ProcessOutcome<registry::v1::Device>, ReconcileError> {
ReconcileProcessor(DeviceReconciler {
config: &self.config,
registry: &self.registry,
ditto: &self.ditto,
devops_provider: &self.devops_provider,
admin_provider: &self.admin_provider,
})
.reconcile(input)
.await
}
async fn recover(
&self,
message: &str,
(app, mut device): (registry::v1::Application, registry::v1::Device),
) -> Result<registry::v1::Device, ()> {
let mut conditions = device
.section::<DittoDeviceStatus>()
.and_then(|s| s.ok().map(|s| s.conditions))
.unwrap_or_default();
conditions.update(CONDITION_RECONCILED, ReadyState::Failed(message.into()));
device
.finish_ready::<DittoDeviceStatus>(conditions, app.metadata.generation)
.map_err(|_| ())?;
Ok(device)
}
}
impl<TP> Deref for DeviceController<TP>
where
TP: TokenProvider,
{
type Target = registry::v1::Client<TP>;
fn deref(&self) -> &Self::Target {
&self.registry
}
}
pub struct ConstructContext {
pub app: registry::v1::Application,
pub device: registry::v1::Device,
}
pub struct DeconstructContext {
pub app: registry::v1::Application,
pub device: registry::v1::Device,
pub status: Option<DittoDeviceStatus>,
}
pub struct DeviceReconciler<'a, TP>
where
TP: TokenProvider,
{
pub config: &'a ControllerConfig,
pub registry: &'a registry::v1::Client<TP>,
pub ditto: &'a DittoClient,
pub devops_provider: &'a Option<AccessTokenProvider>,
pub admin_provider: &'a OpenIdTokenProvider,
}
#[async_trait]
impl<'a, TP> Reconciler for DeviceReconciler<'a, TP>
where
TP: TokenProvider,
{
type Input = (registry::v1::Application, registry::v1::Device);
type Output = registry::v1::Device;
type Construct = ConstructContext;
type Deconstruct = DeconstructContext;
async fn eval_state(
&self,
(app, device): Self::Input,
) -> Result<ReconcileState<Self::Output, Self::Construct, Self::Deconstruct>, ReconcileError>
{
Self::eval_by_finalizer(
device.metadata.has_label_flag("ditto"),
ByDevice(app, device),
FINALIZER,
|ByDevice(app, device)| ConstructContext { app, device },
|ByDevice(app, device)| {
let status = device.section::<DittoDeviceStatus>().and_then(|s| s.ok());
DeconstructContext {
app,
device,
status,
}
},
|ByDevice(_, device)| device,
)
}
async fn construct(
&self,
ctx: Self::Construct,
) -> Result<ProcessOutcome<Self::Output>, ReconcileError> {
Progressor::<Self::Construct>::new(vec![
Box::new(HasFinalizer(FINALIZER)),
Box::new(CreateThing {
config: self.config,
ditto: self.ditto,
provider: self.admin_provider,
}),
])
.run_with::<DittoDeviceStatus>(ctx)
.await
}
async fn deconstruct(
&self,
mut ctx: Self::Deconstruct,
) -> Result<ProcessOutcome<Self::Output>, ReconcileError> {
DeleteThing {
config: self.config,
ditto: self.ditto,
provider: self.admin_provider,
}
.run(&ctx)
.await?;
// cleanup
ctx.device.clear_section::<DittoDeviceStatus>();
ctx.device
.update_section(|c: Conditions| c.clear_ready(DittoDeviceStatus::ready_name()))?;
// remove finalizer
ctx.device.metadata.remove_finalizer(FINALIZER);
// done
Ok(ProcessOutcome::Complete(ctx.device))
}
}
impl ResourceAccessor for ConstructContext {
type Resource = registry::v1::Device;
fn resource(&self) -> &Self::Resource {
&self.device
}
fn resource_mut(&mut self) -> &mut Self::Resource {
&mut self.device
}
fn into(self) -> Self::Resource {
self.device
}
fn conditions(&self) -> Conditions {
self.device
.section::<DittoDeviceStatus>()
.and_then(|s| s.ok())
.unwrap_or_default()
.conditions
}
}
| 26.690763 | 97 | 0.593139 |
1ef572faeac0cc53a273ac682e588ccc13c22669 | 3,724 | //! Tests auto-converted from "sass-spec/spec/core_functions/color/adjust_color/error/mixed_formats.hrx"
#[test]
#[ignore] // missing error
fn blue_and_lightness() {
assert_eq!(
crate::rsass(
"a {b: adjust-color(red, $blue: 1, $lightness: 1%)}\
\n"
)
.unwrap_err(),
"Error: RGB parameters may not be passed along with HSL parameters.\
\n ,\
\n1 | a {b: adjust-color(red, $blue: 1, $lightness: 1%)}\
\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\
\n \'\
\n input.scss 1:7 root stylesheet",
);
}
#[test]
#[ignore] // missing error
fn green_and_saturation() {
assert_eq!(
crate::rsass(
"a {b: adjust-color(red, $green: 1, $saturation: 1%)}\
\n"
)
.unwrap_err(),
"Error: RGB parameters may not be passed along with HSL parameters.\
\n ,\
\n1 | a {b: adjust-color(red, $green: 1, $saturation: 1%)}\
\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\
\n \'\
\n input.scss 1:7 root stylesheet",
);
}
#[test]
#[ignore] // missing error
fn green_and_whiteness() {
assert_eq!(
crate::rsass(
"a {b: adjust-color(red, $green: 1, $whiteness: 1%)}\
\n"
)
.unwrap_err(),
"Error: RGB parameters may not be passed along with HWB parameters.\
\n ,\
\n1 | a {b: adjust-color(red, $green: 1, $whiteness: 1%)}\
\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\
\n \'\
\n input.scss 1:7 root stylesheet",
);
}
#[test]
#[ignore] // missing error
fn lightness_and_whiteness() {
assert_eq!(
crate::rsass(
"a {b: adjust-color(red, $lightness: 1%, $whiteness: 1%)}\
\n"
)
.unwrap_err(),
"Error: HSL parameters may not be passed along with HWB parameters.\
\n ,\
\n1 | a {b: adjust-color(red, $lightness: 1%, $whiteness: 1%)}\
\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\
\n \'\
\n input.scss 1:7 root stylesheet",
);
}
#[test]
#[ignore] // missing error
fn red_and_blackness() {
assert_eq!(
crate::rsass(
"a {b: adjust-color(red, $red: 1, $blackness: 1%)}\
\n"
)
.unwrap_err(),
"Error: RGB parameters may not be passed along with HWB parameters.\
\n ,\
\n1 | a {b: adjust-color(red, $red: 1, $blackness: 1%)}\
\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\
\n \'\
\n input.scss 1:7 root stylesheet",
);
}
#[test]
#[ignore] // missing error
fn red_and_hue() {
assert_eq!(
crate::rsass(
"a {b: adjust-color(red, $red: 1, $hue: 1)}\
\n"
)
.unwrap_err(),
"Error: RGB parameters may not be passed along with HSL parameters.\
\n ,\
\n1 | a {b: adjust-color(red, $red: 1, $hue: 1)}\
\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\
\n \'\
\n input.scss 1:7 root stylesheet",
);
}
#[test]
#[ignore] // missing error
fn saturation_and_blackness() {
assert_eq!(
crate::rsass(
"a {b: adjust-color(red, $saturation: 1%, $blackness: 1%)}\
\n"
)
.unwrap_err(),
"Error: HSL parameters may not be passed along with HWB parameters.\
\n ,\
\n1 | a {b: adjust-color(red, $saturation: 1%, $blackness: 1%)}\
\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\
\n \'\
\n input.scss 1:7 root stylesheet",
);
}
| 30.52459 | 104 | 0.439581 |
ff05468d3d8491c3b5dc5bf10155d4499facc462 | 7,541 | //! Code to construct paths to a directory for non-anonymous downloads
use super::TorPath;
use crate::{DirInfo, Error, Result};
use tor_error::bad_api_usage;
use tor_guardmgr::{GuardMgr, GuardMonitor, GuardUsable};
use tor_netdir::{Relay, WeightRole};
use tor_rtcompat::Runtime;
use rand::Rng;
/// A PathBuilder that can connect to a directory.
#[non_exhaustive]
pub struct DirPathBuilder {}
impl Default for DirPathBuilder {
fn default() -> Self {
Self::new()
}
}
impl DirPathBuilder {
/// Create a new DirPathBuilder.
pub fn new() -> Self {
DirPathBuilder {}
}
/// Try to create and return a path corresponding to the requirements of
/// this builder.
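    ///
    /// On success, the returned [`GuardMonitor`] and [`GuardUsable`] handles
    /// (present only when a guard manager was consulted) let the caller report
    /// whether the guard worked and learn whether the guard may be used.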
pub fn pick_path<'a, R: Rng, RT: Runtime>(
&self,
rng: &mut R,
netdir: DirInfo<'a>,
guards: Option<&GuardMgr<RT>>,
) -> Result<(TorPath<'a>, Option<GuardMonitor>, Option<GuardUsable>)> {
match (netdir, guards) {
(dirinfo, Some(guardmgr)) => {
// We use a guardmgr whenever we have one, regardless of whether
// there's a netdir.
//
// That way, we prefer our guards (if they're up) before we default to the fallback directories.
let netdir = match dirinfo {
DirInfo::Directory(netdir) => {
guardmgr.update_network(netdir); // possibly unnecessary.
Some(netdir)
}
_ => None,
};
let guard_usage = tor_guardmgr::GuardUsageBuilder::default()
.kind(tor_guardmgr::GuardUsageKind::OneHopDirectory)
.build()
.expect("Unable to build directory guard usage");
let (guard, mon, usable) = guardmgr.select_guard(guard_usage, netdir)?;
return Ok((TorPath::new_one_hop_owned(&guard), Some(mon), Some(usable)));
}
// In the following cases, we don't have a guardmgr, so we'll use the provided information if we can.
(DirInfo::Fallbacks(f), None) => {
let relay = f.choose(rng)?;
return Ok((TorPath::new_fallback_one_hop(relay), None, None));
}
(DirInfo::Directory(netdir), None) => {
let relay = netdir.pick_relay(rng, WeightRole::BeginDir, Relay::is_dir_cache);
if let Some(r) = relay {
return Ok((TorPath::new_one_hop(r), None, None));
}
}
(DirInfo::Nothing, None) => {
return Err(bad_api_usage!(
"Tried to build a one hop path with no directory, fallbacks, or guard manager"
)
.into());
}
}
Err(Error::NoPath(
"No relays found for use as directory cache".into(),
))
}
}
#[cfg(test)]
mod test {
#![allow(clippy::unwrap_used)]
#![allow(clippy::clone_on_copy)]
use super::*;
use crate::path::assert_same_path_when_owned;
use crate::test::OptDummyGuardMgr;
use std::collections::HashSet;
use tor_guardmgr::fallback::{FallbackDir, FallbackList};
use tor_linkspec::ChanTarget;
use tor_netdir::testnet;
#[test]
fn dirpath_relay() {
let netdir = testnet::construct_netdir()
.unwrap()
.unwrap_if_sufficient()
.unwrap();
let mut rng = rand::thread_rng();
let dirinfo = (&netdir).into();
let guards: OptDummyGuardMgr<'_> = None;
for _ in 0..1000 {
let p = DirPathBuilder::default().pick_path(&mut rng, dirinfo, guards);
let (p, _, _) = p.unwrap();
assert!(p.exit_relay().is_none());
assert_eq!(p.len(), 1);
assert_same_path_when_owned(&p);
if let crate::path::TorPathInner::OneHop(r) = p.inner {
assert!(r.is_dir_cache());
} else {
panic!("Generated the wrong kind of path.");
}
}
}
#[test]
fn dirpath_fallback() {
let fb_owned = vec![
{
let mut bld = FallbackDir::builder();
bld.rsa_identity([0x01; 20].into())
.ed_identity([0x01; 32].into())
.orports()
.push("127.0.0.1:9000".parse().unwrap());
bld.build().unwrap()
},
{
let mut bld = FallbackDir::builder();
bld.rsa_identity([0x03; 20].into())
.ed_identity([0x03; 32].into())
.orports()
.push("127.0.0.1:9003".parse().unwrap());
bld.build().unwrap()
},
];
let fb: FallbackList = fb_owned.clone().into();
let dirinfo = (&fb).into();
let mut rng = rand::thread_rng();
let guards: OptDummyGuardMgr<'_> = None;
for _ in 0..10 {
let p = DirPathBuilder::default().pick_path(&mut rng, dirinfo, guards);
let (p, _, _) = p.unwrap();
assert!(p.exit_relay().is_none());
assert_eq!(p.len(), 1);
assert_same_path_when_owned(&p);
if let crate::path::TorPathInner::FallbackOneHop(f) = p.inner {
assert!(f == &fb_owned[0] || f == &fb_owned[1]);
} else {
panic!("Generated the wrong kind of path.");
}
}
}
#[test]
fn dirpath_no_fallbacks() {
let fb = FallbackList::from([]);
let dirinfo = DirInfo::Fallbacks(&fb);
let mut rng = rand::thread_rng();
let guards: OptDummyGuardMgr<'_> = None;
let err = DirPathBuilder::default().pick_path(&mut rng, dirinfo, guards);
dbg!(err.as_ref().err());
assert!(matches!(
err,
Err(Error::Guard(
tor_guardmgr::PickGuardError::AllFallbacksDown { .. }
))
));
}
#[test]
fn dirpath_with_guards() {
tor_rtcompat::test_with_all_runtimes!(|rt| async move {
let netdir = testnet::construct_netdir()
.unwrap()
.unwrap_if_sufficient()
.unwrap();
let mut rng = rand::thread_rng();
let dirinfo = (&netdir).into();
let statemgr = tor_persist::TestingStateMgr::new();
let guards = tor_guardmgr::GuardMgr::new(rt.clone(), statemgr, [].into()).unwrap();
guards.update_network(&netdir);
let mut distinct_guards = HashSet::new();
// This is a nice easy case, since we tested the harder cases
// in guard-spec. We'll just have every path succeed.
for _ in 0..40 {
let (path, mon, usable) = DirPathBuilder::new()
.pick_path(&mut rng, dirinfo, Some(&guards))
.unwrap();
if let crate::path::TorPathInner::OwnedOneHop(relay) = path.inner {
distinct_guards.insert(relay.ed_identity().clone());
mon.unwrap().succeeded();
assert!(usable.unwrap().await.unwrap());
} else {
panic!("Generated the wrong kind of path.");
}
}
assert_eq!(
distinct_guards.len(),
netdir.params().guard_dir_use_parallelism.get() as usize
);
});
}
}
| 35.909524 | 113 | 0.514255 |
9b1ea3b4c4cf503d4b3d691eb2a6a4773aee09e8 | 48,390 | //! Finds crate binaries and loads their metadata
//!
//! Might I be the first to welcome you to a world of platform differences,
//! version requirements, dependency graphs, conflicting desires, and fun! This
//! is the major guts (along with metadata::creader) of the compiler for loading
//! crates and resolving dependencies. Let's take a tour!
//!
//! # The problem
//!
//! Each invocation of the compiler is immediately concerned with one primary
//! problem, to connect a set of crates to resolved crates on the filesystem.
//! Concretely speaking, the compiler follows roughly these steps to get here:
//!
//! 1. Discover a set of `extern crate` statements.
//! 2. Transform these directives into crate names. If the directive does not
//! have an explicit name, then the identifier is the name.
//! 3. For each of these crate names, find a corresponding crate on the
//! filesystem.
//!
//! Sounds easy, right? Let's walk into some of the nuances.
//!
//! ## Transitive Dependencies
//!
//! Let's say we've got three crates: A, B, and C. A depends on B, and B depends
//! on C. When we're compiling A, we primarily need to find and locate B, but we
//! also end up needing to find and locate C as well.
//!
//! The reason for this is that any of B's types could be composed of C's types,
//! any function in B could return a type from C, etc. To be able to guarantee
//! that we can always type-check/translate any function, we have to have
//! complete knowledge of the whole ecosystem, not just our immediate
//! dependencies.
//!
//! So now as part of the "find a corresponding crate on the filesystem" step
//! above, this involves also finding all crates for *all upstream
//! dependencies*. This includes all dependencies transitively.
//!
//! ## Rlibs and Dylibs
//!
//! The compiler has two forms of intermediate dependencies. These are dubbed
//! rlibs and dylibs for the static and dynamic variants, respectively. An rlib
//! is a rustc-defined file format (currently just an ar archive) while a dylib
//! is a platform-defined dynamic library. Each library has a metadata somewhere
//! inside of it.
//!
//! A third kind of dependency is an rmeta file. These are metadata files and do
//! not contain any code, etc. To a first approximation, these are treated in the
//! same way as rlibs. Where there is both an rlib and an rmeta file, the rlib
//! gets priority (even if the rmeta file is newer). An rmeta file is only
//! useful for checking a downstream crate, attempting to link one will cause an
//! error.
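//!
//! As a concrete (platform-dependent, so treat it as a sketch) picture, a
//! crate `foo` on a typical Linux target could show up on disk as any of:
//!
//! ```text
//! libfoo.rlib     rustc-defined archive (code + metadata)
//! libfoo.rmeta    metadata only, never linkable
//! libfoo.so       platform dynamic library
//! ```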
//!
//! When translating a crate name to a crate on the filesystem, we all of a
//! sudden need to take into account both rlibs and dylibs! Linkage later on may
//! use either one of these files, as each has their pros/cons. The job of crate
//! loading is to discover what's possible by finding all candidates.
//!
//! Most parts of this loading system keep the dylib/rlib as just separate
//! variables.
//!
//! ## Where to look?
//!
//! We can't exactly scan your whole hard drive when looking for dependencies,
//! so we need a fixed set of places to look. Currently the compiler will implicitly add the
//! target lib search path ($prefix/lib/rustlib/$target/lib) to any compilation,
//! and otherwise all -L flags are added to the search paths.
//!
//! ## What criterion to select on?
//!
//! This is a pretty tricky area of loading crates. Given a file, how do we know
//! whether it's the right crate? Currently, the rules look along these lines:
//!
//! 1. Does the filename match an rlib/dylib pattern? That is to say, does the
//! filename have the right prefix/suffix?
//! 2. Does the filename have the right prefix for the crate name being queried?
//! This is filtering for files like `libfoo*.rlib` and such. If the crate
//! we're looking for was originally compiled with -C extra-filename, the
//! extra filename will be included in this prefix to reduce reading
//! metadata from crates that would otherwise share our prefix.
//! 3. Is the file an actual rust library? This is done by loading the metadata
//! from the library and making sure it's actually there.
//! 4. Does the name in the metadata agree with the name of the library?
//! 5. Does the target in the metadata agree with the current target?
//! 6. Does the SVH match? (more on this later)
//!
//! If the file answers `yes` to all these questions, then the file is
//! considered as being a *candidate* for being accepted. It is illegal to have
//! more than two candidates as the compiler has no method by which to resolve
//! this conflict. Additionally, rlib/dylib candidates are considered
//! separately.
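//!
//! As a hedged illustration of steps 1-2 (the hash suffix here is invented),
//! a search for crate `foo` compiled with `-C extra-filename=-abc123` only
//! opens files whose names match patterns like:
//!
//! ```text
//! libfoo-abc123*.rlib
//! libfoo-abc123*.rmeta
//! libfoo-abc123*.so      (dylib prefix/suffix vary by platform)
//! ```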
//!
//! After all this has happened, we have one or two files as candidates. These
//! represent the rlib/dylib file found for a library, and they're returned as
//! being found.
//!
//! ### What about versions?
//!
//! A lot of effort has been put forth to remove versioning from the compiler.
//! There have been forays in the past to have versioning baked in, but it was
//! largely always deemed insufficient to the point that it was recognized that
//! it's probably something the compiler shouldn't do anyway due to its
//! complicated nature and the state of the half-baked solutions.
//!
//! With a departure from versioning, the primary criterion for loading crates
//! is just the name of a crate. If we stopped here, it would imply that you
//! could never link two crates of the same name from different sources
//! together, which is clearly a bad state to be in.
//!
//! To resolve this problem, we come to the next section!
//!
//! # Expert Mode
//!
//! A number of flags have been added to the compiler to solve the "version
//! problem" in the previous section, as well as generally enabling more
//! powerful usage of the crate loading system of the compiler. The goal of
//! these flags and options is to enable third-party tools to drive the
//! compiler with prior knowledge about how the world should look.
//!
//! ## The `--extern` flag
//!
//! The compiler accepts a flag of this form a number of times:
//!
//! ```text
//! --extern crate-name=path/to/the/crate.rlib
//! ```
//!
//! This flag is basically the following letter to the compiler:
//!
//! > Dear rustc,
//! >
//! > When you are attempting to load the immediate dependency `crate-name`, I
//! > would like you to assume that the library is located at
//! > `path/to/the/crate.rlib`, and look nowhere else. Also, please do not
//! > assume that the path I specified has the name `crate-name`.
//!
//! This flag basically overrides most matching logic except for validating that
//! the file is indeed a rust library. The same `crate-name` can be specified
//! twice to specify the rlib/dylib pair.
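//!
//! For example (paths purely illustrative), the rlib/dylib pair for one crate:
//!
//! ```text
//! --extern foo=path/to/libfoo.rlib --extern foo=path/to/libfoo.so
//! ```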
//!
//! ## Enabling "multiple versions"
//!
//! This basically boils down to the ability to specify arbitrary packages to
//! the compiler. For example, if crate A wanted to use Bv1 and Bv2, then it
//! would look something like:
//!
//! ```compile_fail,E0463
//! extern crate b1;
//! extern crate b2;
//!
//! fn main() {}
//! ```
//!
//! and the compiler would be invoked as:
//!
//! ```text
//! rustc a.rs --extern b1=path/to/libb1.rlib --extern b2=path/to/libb2.rlib
//! ```
//!
//! In this scenario there are two crates named `b` and the compiler must be
//! manually driven to be informed where each crate is.
//!
//! ## Frobbing symbols
//!
//! One of the immediate problems with linking the same library together twice
//! in the same program is dealing with duplicate symbols. The primary way to
//! deal with this in rustc is to add hashes to the end of each symbol.
//!
//! In order to force hashes to change between versions of a library, if
//! desired, the compiler exposes an option `-C metadata=foo`, which is used to
//! initially seed each symbol hash. The string `foo` is prepended to each
//! string-to-hash to ensure that symbols change over time.
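//!
//! A sketch of how a build tool might drive this (flag values invented):
//!
//! ```text
//! rustc b.rs --crate-name b --crate-type rlib -C metadata=b-v1
//! rustc b.rs --crate-name b --crate-type rlib -C metadata=b-v2
//! ```
//!
//! The two artifacts then carry distinct symbol hashes and can be linked into
//! one program without clashing.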
//!
//! ## Loading transitive dependencies
//!
//! Dealing with same-named-but-distinct crates is not just a local problem, but
//! one that also needs to be dealt with for transitive dependencies. Note that
//! in the letter above `--extern` flags only apply to the *local* set of
//! dependencies, not the upstream transitive dependencies. Consider this
//! dependency graph:
//!
//! ```text
//! A.1 A.2
//! | |
//! | |
//! B C
//! \ /
//! \ /
//! D
//! ```
//!
//! In this scenario, when we compile `D`, we need to be able to distinctly
//! resolve `A.1` and `A.2`, but an `--extern` flag cannot apply to these
//! transitive dependencies.
//!
//! Note that the key idea here is that `B` and `C` are both *already compiled*.
//! That is, they have already resolved their dependencies. Due to unrelated
//! technical reasons, when a library is compiled, it is only compatible with
//! the *exact same* version of the upstream libraries it was compiled against.
//! We use the "Strict Version Hash" to identify the exact copy of an upstream
//! library.
//!
//! With this knowledge, we know that `B` and `C` will depend on `A` with
//! different SVH values, so we crawl the normal `-L` paths looking for
//! `liba*.rlib` and filter based on the contained SVH.
//!
//! In the end, this ends up not needing `--extern` to specify upstream
//! transitive dependencies.
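//!
//! To make that concrete (filenames and hashes invented), the `-L` crawl for
//! the graph above might turn up:
//!
//! ```text
//! liba-1f2e3d.rlib    metadata inside carries the SVH that B was built against
//! liba-9a8b7c.rlib    metadata inside carries the SVH that C was built against
//! ```
//!
//! and each edge is resolved by opening the candidates and comparing the SVH
//! stored in their metadata.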
//!
//! # Wrapping up
//!
//! That's the general overview of loading crates in the compiler, but it's by
//! no means all of the necessary details. Take a look at the rest of
//! metadata::locator or metadata::creader for all the juicy details!
use crate::creader::Library;
use crate::rmeta::{rustc_version, MetadataBlob, METADATA_HEADER};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::owning_ref::OwningRef;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::MetadataRef;
use rustc_errors::struct_span_err;
use rustc_session::config::{self, CrateType};
use rustc_session::cstore::{CrateSource, MetadataLoader};
use rustc_session::filesearch::{FileDoesntMatch, FileMatches, FileSearch};
use rustc_session::search_paths::PathKind;
use rustc_session::utils::CanonicalizedPath;
use rustc_session::Session;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::Span;
use rustc_target::spec::{Target, TargetTriple};
use snap::read::FrameDecoder;
use std::io::{Read, Result as IoResult, Write};
use std::path::{Path, PathBuf};
use std::{cmp, fmt, fs};
use tracing::{debug, info, warn};
#[derive(Clone)]
crate struct CrateLocator<'a> {
// Immutable per-session configuration.
only_needs_metadata: bool,
sysroot: &'a Path,
metadata_loader: &'a dyn MetadataLoader,
// Immutable per-search configuration.
crate_name: Symbol,
exact_paths: Vec<CanonicalizedPath>,
pub hash: Option<Svh>,
extra_filename: Option<&'a str>,
pub target: &'a Target,
pub triple: TargetTriple,
pub filesearch: FileSearch<'a>,
pub is_proc_macro: bool,
// Mutable in-progress state or output.
crate_rejections: CrateRejections,
}
#[derive(Clone)]
crate struct CratePaths {
name: Symbol,
source: CrateSource,
}
impl CratePaths {
crate fn new(name: Symbol, source: CrateSource) -> CratePaths {
CratePaths { name, source }
}
}
#[derive(Copy, Clone, PartialEq)]
crate enum CrateFlavor {
Rlib,
Rmeta,
Dylib,
}
impl fmt::Display for CrateFlavor {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match *self {
CrateFlavor::Rlib => "rlib",
CrateFlavor::Rmeta => "rmeta",
CrateFlavor::Dylib => "dylib",
})
}
}
impl<'a> CrateLocator<'a> {
crate fn new(
sess: &'a Session,
metadata_loader: &'a dyn MetadataLoader,
crate_name: Symbol,
hash: Option<Svh>,
extra_filename: Option<&'a str>,
is_host: bool,
path_kind: PathKind,
) -> CrateLocator<'a> {
        // The `.all()` is used because `--crate-type=rlib --crate-type=rlib` is
        // legal and produces duplicate entries inside `crate_types()`.
let is_rlib = sess.crate_types().iter().all(|c| *c == CrateType::Rlib);
let needs_object_code = sess.opts.output_types.should_codegen();
// If we're producing an rlib, then we don't need object code.
// Or, if we're not producing object code, then we don't need it either
// (e.g., if we're a cdylib but emitting just metadata).
let only_needs_metadata = is_rlib || !needs_object_code;
CrateLocator {
only_needs_metadata,
sysroot: &sess.sysroot,
metadata_loader,
crate_name,
exact_paths: if hash.is_none() {
sess.opts
.externs
.get(&crate_name.as_str())
.into_iter()
.filter_map(|entry| entry.files())
.flatten()
.cloned()
.collect()
} else {
// SVH being specified means this is a transitive dependency,
// so `--extern` options do not apply.
Vec::new()
},
hash,
extra_filename,
target: if is_host { &sess.host } else { &sess.target },
triple: if is_host {
TargetTriple::from_triple(config::host_triple())
} else {
sess.opts.target_triple.clone()
},
filesearch: if is_host {
sess.host_filesearch(path_kind)
} else {
sess.target_filesearch(path_kind)
},
is_proc_macro: false,
crate_rejections: CrateRejections::default(),
}
}
crate fn reset(&mut self) {
self.crate_rejections.via_hash.clear();
self.crate_rejections.via_triple.clear();
self.crate_rejections.via_kind.clear();
self.crate_rejections.via_version.clear();
self.crate_rejections.via_filename.clear();
}
crate fn maybe_load_library_crate(&mut self) -> Result<Option<Library>, CrateError> {
if !self.exact_paths.is_empty() {
return self.find_commandline_library();
}
let mut seen_paths = FxHashSet::default();
if let Some(extra_filename) = self.extra_filename {
if let library @ Some(_) = self.find_library_crate(extra_filename, &mut seen_paths)? {
return Ok(library);
}
}
self.find_library_crate("", &mut seen_paths)
}
fn find_library_crate(
&mut self,
extra_prefix: &str,
seen_paths: &mut FxHashSet<PathBuf>,
) -> Result<Option<Library>, CrateError> {
// want: crate_name.dir_part() + prefix + crate_name.file_part + "-"
let dylib_prefix = format!("{}{}{}", self.target.dll_prefix, self.crate_name, extra_prefix);
let rlib_prefix = format!("lib{}{}", self.crate_name, extra_prefix);
let staticlib_prefix =
format!("{}{}{}", self.target.staticlib_prefix, self.crate_name, extra_prefix);
let mut candidates: FxHashMap<_, (FxHashMap<_, _>, FxHashMap<_, _>, FxHashMap<_, _>)> =
Default::default();
let mut staticlibs = vec![];
// First, find all possible candidate rlibs and dylibs purely based on
// the name of the files themselves. We're trying to match against an
        // exact crate name and possibly an exact hash.
//
// During this step, we can filter all found libraries based on the
// name and id found in the crate id (we ignore the path portion for
// filename matching), as well as the exact hash (if specified). If we
// end up having many candidates, we must look at the metadata to
// perform exact matches against hashes/crate ids. Note that opening up
// the metadata is where we do an exact match against the full contents
// of the crate id (path/name/id).
//
// The goal of this step is to look at as little metadata as possible.
self.filesearch.search(|spf, kind| {
let file = match &spf.file_name_str {
None => return FileDoesntMatch,
Some(file) => file,
};
let (hash, found_kind) = if file.starts_with(&rlib_prefix) && file.ends_with(".rlib") {
(&file[(rlib_prefix.len())..(file.len() - ".rlib".len())], CrateFlavor::Rlib)
} else if file.starts_with(&rlib_prefix) && file.ends_with(".rmeta") {
(&file[(rlib_prefix.len())..(file.len() - ".rmeta".len())], CrateFlavor::Rmeta)
} else if file.starts_with(&dylib_prefix) && file.ends_with(&self.target.dll_suffix) {
(
&file[(dylib_prefix.len())..(file.len() - self.target.dll_suffix.len())],
CrateFlavor::Dylib,
)
} else {
if file.starts_with(&staticlib_prefix)
&& file.ends_with(&self.target.staticlib_suffix)
{
staticlibs
.push(CrateMismatch { path: spf.path.clone(), got: "static".to_string() });
}
return FileDoesntMatch;
};
info!("lib candidate: {}", spf.path.display());
let (rlibs, rmetas, dylibs) = candidates.entry(hash.to_string()).or_default();
let path = fs::canonicalize(&spf.path).unwrap_or_else(|_| spf.path.clone());
if seen_paths.contains(&path) {
return FileDoesntMatch;
};
seen_paths.insert(path.clone());
match found_kind {
CrateFlavor::Rlib => rlibs.insert(path, kind),
CrateFlavor::Rmeta => rmetas.insert(path, kind),
CrateFlavor::Dylib => dylibs.insert(path, kind),
};
FileMatches
});
self.crate_rejections.via_kind.extend(staticlibs);
// We have now collected all known libraries into a set of candidates
        // keyed off the filename hash listed. For each filename, we also have a
        // list of rlibs/dylibs that apply. Here, we map each of these lists
        // (per hash) to a Library candidate for returning.
//
// A Library candidate is created if the metadata for the set of
// libraries corresponds to the crate id and hash criteria that this
// search is being performed for.
let mut libraries = FxHashMap::default();
for (_hash, (rlibs, rmetas, dylibs)) in candidates {
if let Some((svh, lib)) = self.extract_lib(rlibs, rmetas, dylibs)? {
libraries.insert(svh, lib);
}
}
// Having now translated all relevant found hashes into libraries, see
// what we've got and figure out if we found multiple candidates for
// libraries or not.
match libraries.len() {
0 => Ok(None),
1 => Ok(Some(libraries.into_iter().next().unwrap().1)),
_ => Err(CrateError::MultipleMatchingCrates(self.crate_name, libraries)),
}
}
fn extract_lib(
&mut self,
rlibs: FxHashMap<PathBuf, PathKind>,
rmetas: FxHashMap<PathBuf, PathKind>,
dylibs: FxHashMap<PathBuf, PathKind>,
) -> Result<Option<(Svh, Library)>, CrateError> {
let mut slot = None;
// Order here matters, rmeta should come first. See comment in
// `extract_one` below.
let source = CrateSource {
rmeta: self.extract_one(rmetas, CrateFlavor::Rmeta, &mut slot)?,
rlib: self.extract_one(rlibs, CrateFlavor::Rlib, &mut slot)?,
dylib: self.extract_one(dylibs, CrateFlavor::Dylib, &mut slot)?,
};
Ok(slot.map(|(svh, metadata)| (svh, Library { source, metadata })))
}
fn needs_crate_flavor(&self, flavor: CrateFlavor) -> bool {
if flavor == CrateFlavor::Dylib && self.is_proc_macro {
return true;
}
if self.only_needs_metadata {
flavor == CrateFlavor::Rmeta
} else {
// we need all flavors (perhaps not true, but what we do for now)
true
}
}
// Attempts to extract *one* library from the set `m`. If the set has no
// elements, `None` is returned. If the set has more than one element, then
    // an error is emitted, with notes describing the set of libraries.
//
// With only one library in the set, this function will extract it, and then
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
// be read, it is assumed that the file isn't a valid rust library (no
// errors are emitted).
fn extract_one(
&mut self,
m: FxHashMap<PathBuf, PathKind>,
flavor: CrateFlavor,
slot: &mut Option<(Svh, MetadataBlob)>,
) -> Result<Option<(PathBuf, PathKind)>, CrateError> {
// If we are producing an rlib, and we've already loaded metadata, then
// we should not attempt to discover further crate sources (unless we're
// locating a proc macro; exact logic is in needs_crate_flavor). This means
// that under -Zbinary-dep-depinfo we will not emit a dependency edge on
// the *unused* rlib, and by returning `None` here immediately we
// guarantee that we do indeed not use it.
//
// See also #68149 which provides more detail on why emitting the
// dependency on the rlib is a bad thing.
//
// We currently do not verify that these other sources are even in sync,
// and this is arguably a bug (see #10786), but because reading metadata
// is quite slow (especially from dylibs) we currently do not read it
// from the other crate sources.
if slot.is_some() {
if m.is_empty() || !self.needs_crate_flavor(flavor) {
return Ok(None);
} else if m.len() == 1 {
return Ok(Some(m.into_iter().next().unwrap()));
}
}
let mut ret: Option<(PathBuf, PathKind)> = None;
let mut err_data: Option<Vec<PathBuf>> = None;
for (lib, kind) in m {
info!("{} reading metadata from: {}", flavor, lib.display());
let (hash, metadata) =
match get_metadata_section(self.target, flavor, &lib, self.metadata_loader) {
Ok(blob) => {
if let Some(h) = self.crate_matches(&blob, &lib) {
(h, blob)
} else {
info!("metadata mismatch");
continue;
}
}
Err(err) => {
warn!("no metadata found: {}", err);
continue;
}
};
// If we see multiple hashes, emit an error about duplicate candidates.
if slot.as_ref().map_or(false, |s| s.0 != hash) {
if let Some(candidates) = err_data {
return Err(CrateError::MultipleCandidates(
self.crate_name,
flavor,
candidates,
));
}
err_data = Some(vec![ret.as_ref().unwrap().0.clone()]);
*slot = None;
}
if let Some(candidates) = &mut err_data {
candidates.push(lib);
continue;
}
// Ok so at this point we've determined that `(lib, kind)` above is
// a candidate crate to load, and that `slot` is either none (this
            // is the first crate of its kind) or, if `Some`, the previous path has
// the exact same hash (e.g., it's the exact same crate).
//
// In principle these two candidate crates are exactly the same so
// we can choose either of them to link. As a stupidly gross hack,
            // however, we favor the crate in the sysroot.
//
// You can find more info in rust-lang/rust#39518 and various linked
// issues, but the general gist is that during testing libstd the
            // compiler has two candidates to choose from: one in the sysroot
// and one in the deps folder. These two crates are the exact same
// crate but if the compiler chooses the one in the deps folder
// it'll cause spurious errors on Windows.
//
// As a result, we favor the sysroot crate here. Note that the
// candidates are all canonicalized, so we canonicalize the sysroot
// as well.
if let Some((prev, _)) = &ret {
let sysroot = self.sysroot;
let sysroot = sysroot.canonicalize().unwrap_or_else(|_| sysroot.to_path_buf());
if prev.starts_with(&sysroot) {
continue;
}
}
*slot = Some((hash, metadata));
ret = Some((lib, kind));
}
if let Some(candidates) = err_data {
Err(CrateError::MultipleCandidates(self.crate_name, flavor, candidates))
} else {
Ok(ret)
}
}
fn crate_matches(&mut self, metadata: &MetadataBlob, libpath: &Path) -> Option<Svh> {
let rustc_version = rustc_version();
let found_version = metadata.get_rustc_version();
if found_version != rustc_version {
info!("Rejecting via version: expected {} got {}", rustc_version, found_version);
self.crate_rejections
.via_version
.push(CrateMismatch { path: libpath.to_path_buf(), got: found_version });
return None;
}
let root = metadata.get_root();
if root.is_proc_macro_crate() != self.is_proc_macro {
info!(
"Rejecting via proc macro: expected {} got {}",
self.is_proc_macro,
root.is_proc_macro_crate(),
);
return None;
}
if self.exact_paths.is_empty() && self.crate_name != root.name() {
info!("Rejecting via crate name");
return None;
}
if root.triple() != &self.triple {
info!("Rejecting via crate triple: expected {} got {}", self.triple, root.triple());
self.crate_rejections.via_triple.push(CrateMismatch {
path: libpath.to_path_buf(),
got: root.triple().to_string(),
});
return None;
}
let hash = root.hash();
if let Some(expected_hash) = self.hash {
if hash != expected_hash {
info!("Rejecting via hash: expected {} got {}", expected_hash, hash);
self.crate_rejections
.via_hash
.push(CrateMismatch { path: libpath.to_path_buf(), got: hash.to_string() });
return None;
}
}
Some(hash)
}
fn find_commandline_library(&mut self) -> Result<Option<Library>, CrateError> {
// First, filter out all libraries that look suspicious. We only accept
// files which actually exist that have the correct naming scheme for
// rlibs/dylibs.
let mut rlibs = FxHashMap::default();
let mut rmetas = FxHashMap::default();
let mut dylibs = FxHashMap::default();
for loc in &self.exact_paths {
if !loc.canonicalized().exists() {
return Err(CrateError::ExternLocationNotExist(
self.crate_name,
loc.original().clone(),
));
}
let file = match loc.original().file_name().and_then(|s| s.to_str()) {
Some(file) => file,
None => {
return Err(CrateError::ExternLocationNotFile(
self.crate_name,
loc.original().clone(),
));
}
};
if file.starts_with("lib") && (file.ends_with(".rlib") || file.ends_with(".rmeta"))
|| file.starts_with(&self.target.dll_prefix)
&& file.ends_with(&self.target.dll_suffix)
{
// Make sure there's at most one rlib and at most one dylib.
// Note to take care and match against the non-canonicalized name:
// some systems save build artifacts into content-addressed stores
// that do not preserve extensions, and then link to them using
// e.g. symbolic links. If we canonicalize too early, we resolve
// the symlink, the file type is lost and we might treat rlibs and
// rmetas as dylibs.
let loc_canon = loc.canonicalized().clone();
let loc = loc.original();
if loc.file_name().unwrap().to_str().unwrap().ends_with(".rlib") {
rlibs.insert(loc_canon, PathKind::ExternFlag);
} else if loc.file_name().unwrap().to_str().unwrap().ends_with(".rmeta") {
rmetas.insert(loc_canon, PathKind::ExternFlag);
} else {
dylibs.insert(loc_canon, PathKind::ExternFlag);
}
} else {
self.crate_rejections
.via_filename
.push(CrateMismatch { path: loc.original().clone(), got: String::new() });
}
}
// Extract the dylib/rlib/rmeta triple.
Ok(self.extract_lib(rlibs, rmetas, dylibs)?.map(|(_, lib)| lib))
}
crate fn into_error(self, root: Option<CratePaths>) -> CrateError {
CrateError::LocatorCombined(CombinedLocatorError {
crate_name: self.crate_name,
root,
triple: self.triple,
dll_prefix: self.target.dll_prefix.clone(),
dll_suffix: self.target.dll_suffix.clone(),
crate_rejections: self.crate_rejections,
})
}
}
fn get_metadata_section(
target: &Target,
flavor: CrateFlavor,
filename: &Path,
loader: &dyn MetadataLoader,
) -> Result<MetadataBlob, String> {
if !filename.exists() {
return Err(format!("no such file: '{}'", filename.display()));
}
let raw_bytes: MetadataRef = match flavor {
CrateFlavor::Rlib => loader.get_rlib_metadata(target, filename)?,
CrateFlavor::Dylib => {
let buf = loader.get_dylib_metadata(target, filename)?;
// The header is uncompressed
let header_len = METADATA_HEADER.len();
debug!("checking {} bytes of metadata-version stamp", header_len);
let header = &buf[..cmp::min(header_len, buf.len())];
if header != METADATA_HEADER {
return Err(format!(
"incompatible metadata version found: '{}'",
filename.display()
));
}
// Header is okay -> inflate the actual metadata
let compressed_bytes = &buf[header_len..];
debug!("inflating {} bytes of compressed metadata", compressed_bytes.len());
// Assume the decompressed data will be at least the size of the compressed data, so we
// don't have to grow the buffer as much.
let mut inflated = Vec::with_capacity(compressed_bytes.len());
match FrameDecoder::new(compressed_bytes).read_to_end(&mut inflated) {
Ok(_) => rustc_erase_owner!(OwningRef::new(inflated).map_owner_box()),
Err(_) => {
return Err(format!("failed to decompress metadata: {}", filename.display()));
}
}
}
CrateFlavor::Rmeta => {
// mmap the file, because only a small fraction of it is read.
let file = std::fs::File::open(filename)
.map_err(|_| format!("failed to open rmeta metadata: '{}'", filename.display()))?;
let mmap = unsafe { Mmap::map(file) };
let mmap = mmap
.map_err(|_| format!("failed to mmap rmeta metadata: '{}'", filename.display()))?;
rustc_erase_owner!(OwningRef::new(mmap).map_owner_box())
}
};
let blob = MetadataBlob::new(raw_bytes);
if blob.is_compatible() {
Ok(blob)
} else {
Err(format!("incompatible metadata version found: '{}'", filename.display()))
}
}
/// Look for a plugin registrar. Returns its library path.
pub fn find_plugin_registrar(
sess: &Session,
metadata_loader: &dyn MetadataLoader,
span: Span,
name: Symbol,
) -> PathBuf {
match find_plugin_registrar_impl(sess, metadata_loader, name) {
Ok(res) => res,
// `core` is always available if we got as far as loading plugins.
Err(err) => err.report(sess, span, false),
}
}
fn find_plugin_registrar_impl<'a>(
sess: &'a Session,
metadata_loader: &dyn MetadataLoader,
name: Symbol,
) -> Result<PathBuf, CrateError> {
info!("find plugin registrar `{}`", name);
let mut locator = CrateLocator::new(
sess,
metadata_loader,
name,
None, // hash
None, // extra_filename
true, // is_host
PathKind::Crate,
);
match locator.maybe_load_library_crate()? {
Some(library) => match library.source.dylib {
Some(dylib) => Ok(dylib.0),
None => Err(CrateError::NonDylibPlugin(name)),
},
None => Err(locator.into_error(None)),
}
}
/// A diagnostic function for dumping crate metadata to an output stream.
pub fn list_file_metadata(
target: &Target,
path: &Path,
metadata_loader: &dyn MetadataLoader,
out: &mut dyn Write,
) -> IoResult<()> {
let filename = path.file_name().unwrap().to_str().unwrap();
let flavor = if filename.ends_with(".rlib") {
CrateFlavor::Rlib
} else if filename.ends_with(".rmeta") {
CrateFlavor::Rmeta
} else {
CrateFlavor::Dylib
};
match get_metadata_section(target, flavor, path, metadata_loader) {
Ok(metadata) => metadata.list_crate_metadata(out),
        Err(msg) => writeln!(out, "{}", msg),
}
}
// ------------------------------------------ Error reporting -------------------------------------
#[derive(Clone)]
struct CrateMismatch {
path: PathBuf,
got: String,
}
#[derive(Clone, Default)]
struct CrateRejections {
via_hash: Vec<CrateMismatch>,
via_triple: Vec<CrateMismatch>,
via_kind: Vec<CrateMismatch>,
via_version: Vec<CrateMismatch>,
via_filename: Vec<CrateMismatch>,
}
/// Candidate rejection reasons collected during crate search.
/// If no candidate is accepted, then these reasons are presented to the user,
/// otherwise they are ignored.
crate struct CombinedLocatorError {
crate_name: Symbol,
root: Option<CratePaths>,
triple: TargetTriple,
dll_prefix: String,
dll_suffix: String,
crate_rejections: CrateRejections,
}
crate enum CrateError {
NonAsciiName(Symbol),
ExternLocationNotExist(Symbol, PathBuf),
ExternLocationNotFile(Symbol, PathBuf),
MultipleCandidates(Symbol, CrateFlavor, Vec<PathBuf>),
MultipleMatchingCrates(Symbol, FxHashMap<Svh, Library>),
SymbolConflictsCurrent(Symbol),
SymbolConflictsOthers(Symbol),
StableCrateIdCollision(Symbol, Symbol),
DlOpen(String),
DlSym(String),
LocatorCombined(CombinedLocatorError),
NonDylibPlugin(Symbol),
}
impl CrateError {
crate fn report(self, sess: &Session, span: Span, missing_core: bool) -> ! {
let mut err = match self {
CrateError::NonAsciiName(crate_name) => sess.struct_span_err(
span,
&format!("cannot load a crate with a non-ascii name `{}`", crate_name),
),
CrateError::ExternLocationNotExist(crate_name, loc) => sess.struct_span_err(
span,
&format!("extern location for {} does not exist: {}", crate_name, loc.display()),
),
CrateError::ExternLocationNotFile(crate_name, loc) => sess.struct_span_err(
span,
&format!("extern location for {} is not a file: {}", crate_name, loc.display()),
),
CrateError::MultipleCandidates(crate_name, flavor, candidates) => {
let mut err = struct_span_err!(
sess,
span,
E0465,
"multiple {} candidates for `{}` found",
flavor,
crate_name,
);
for (i, candidate) in candidates.iter().enumerate() {
err.span_note(span, &format!("candidate #{}: {}", i + 1, candidate.display()));
}
err
}
CrateError::MultipleMatchingCrates(crate_name, libraries) => {
let mut err = struct_span_err!(
sess,
span,
E0464,
"multiple matching crates for `{}`",
crate_name
);
let candidates = libraries
.iter()
.filter_map(|(_, lib)| {
let crate_name = &lib.metadata.get_root().name().as_str();
match (&lib.source.dylib, &lib.source.rlib) {
(Some((pd, _)), Some((pr, _))) => Some(format!(
"\ncrate `{}`: {}\n{:>padding$}",
crate_name,
pd.display(),
pr.display(),
padding = 8 + crate_name.len()
)),
(Some((p, _)), None) | (None, Some((p, _))) => {
Some(format!("\ncrate `{}`: {}", crate_name, p.display()))
}
(None, None) => None,
}
})
.collect::<String>();
err.note(&format!("candidates:{}", candidates));
err
}
CrateError::SymbolConflictsCurrent(root_name) => struct_span_err!(
sess,
span,
E0519,
"the current crate is indistinguishable from one of its dependencies: it has the \
same crate-name `{}` and was compiled with the same `-C metadata` arguments. \
This will result in symbol conflicts between the two.",
root_name,
),
CrateError::SymbolConflictsOthers(root_name) => struct_span_err!(
sess,
span,
E0523,
"found two different crates with name `{}` that are not distinguished by differing \
`-C metadata`. This will result in symbol conflicts between the two.",
root_name,
),
CrateError::StableCrateIdCollision(crate_name0, crate_name1) => {
let msg = format!(
"found crates (`{}` and `{}`) with colliding StableCrateId values.",
crate_name0, crate_name1
);
sess.struct_span_err(span, &msg)
}
CrateError::DlOpen(s) | CrateError::DlSym(s) => sess.struct_span_err(span, &s),
CrateError::LocatorCombined(locator) => {
let crate_name = locator.crate_name;
let add = match &locator.root {
None => String::new(),
Some(r) => format!(" which `{}` depends on", r.name),
};
let mut msg = "the following crate versions were found:".to_string();
let mut err = if !locator.crate_rejections.via_hash.is_empty() {
let mut err = struct_span_err!(
sess,
span,
E0460,
"found possibly newer version of crate `{}`{}",
crate_name,
add,
);
err.note("perhaps that crate needs to be recompiled?");
let mismatches = locator.crate_rejections.via_hash.iter();
for CrateMismatch { path, .. } in mismatches {
msg.push_str(&format!("\ncrate `{}`: {}", crate_name, path.display()));
}
if let Some(r) = locator.root {
for path in r.source.paths() {
msg.push_str(&format!("\ncrate `{}`: {}", r.name, path.display()));
}
}
err.note(&msg);
err
} else if !locator.crate_rejections.via_triple.is_empty() {
let mut err = struct_span_err!(
sess,
span,
E0461,
"couldn't find crate `{}` with expected target triple {}{}",
crate_name,
locator.triple,
add,
);
let mismatches = locator.crate_rejections.via_triple.iter();
for CrateMismatch { path, got } in mismatches {
msg.push_str(&format!(
"\ncrate `{}`, target triple {}: {}",
crate_name,
got,
path.display(),
));
}
err.note(&msg);
err
} else if !locator.crate_rejections.via_kind.is_empty() {
let mut err = struct_span_err!(
sess,
span,
E0462,
"found staticlib `{}` instead of rlib or dylib{}",
crate_name,
add,
);
err.help("please recompile that crate using --crate-type lib");
let mismatches = locator.crate_rejections.via_kind.iter();
for CrateMismatch { path, .. } in mismatches {
msg.push_str(&format!("\ncrate `{}`: {}", crate_name, path.display()));
}
err.note(&msg);
err
} else if !locator.crate_rejections.via_version.is_empty() {
let mut err = struct_span_err!(
sess,
span,
E0514,
"found crate `{}` compiled by an incompatible version of rustc{}",
crate_name,
add,
);
err.help(&format!(
"please recompile that crate using this compiler ({}) \
(consider running `cargo clean` first)",
rustc_version(),
));
let mismatches = locator.crate_rejections.via_version.iter();
for CrateMismatch { path, got } in mismatches {
msg.push_str(&format!(
"\ncrate `{}` compiled by {}: {}",
crate_name,
got,
path.display(),
));
}
err.note(&msg);
err
} else {
let mut err = struct_span_err!(
sess,
span,
E0463,
"can't find crate for `{}`{}",
crate_name,
add,
);
if (crate_name == sym::std || crate_name == sym::core)
&& locator.triple != TargetTriple::from_triple(config::host_triple())
{
if missing_core {
err.note(&format!(
"the `{}` target may not be installed",
locator.triple
));
} else {
err.note(&format!(
"the `{}` target may not support the standard library",
locator.triple
));
}
// NOTE: this suggests using rustup, even though the user may not have it installed.
// That's because they could choose to install it; or this may give them a hint which
// target they need to install from their distro.
if missing_core {
err.help(&format!(
"consider downloading the target with `rustup target add {}`",
locator.triple
));
}
// Suggest using #![no_std]. #[no_core] is unstable and not really supported anyway.
// NOTE: this is a dummy span if `extern crate std` was injected by the compiler.
// If it's not a dummy, that means someone added `extern crate std` explicitly and `#![no_std]` won't help.
if !missing_core && span.is_dummy() {
let current_crate =
sess.opts.crate_name.as_deref().unwrap_or("<unknown>");
err.note(&format!(
"`std` is required by `{}` because it does not declare `#![no_std]`",
current_crate
));
}
if sess.is_nightly_build() {
err.help("consider building the standard library from source with `cargo build -Zbuild-std`");
}
} else if crate_name
== Symbol::intern(&sess.opts.debugging_opts.profiler_runtime)
{
err.note(&"the compiler may have been built without the profiler runtime");
} else if crate_name.as_str().starts_with("rustc_") {
err.help(
"maybe you need to install the missing components with: \
`rustup component add rust-src rustc-dev llvm-tools-preview`",
);
}
err.span_label(span, "can't find crate");
err
};
if !locator.crate_rejections.via_filename.is_empty() {
let mismatches = locator.crate_rejections.via_filename.iter();
for CrateMismatch { path, .. } in mismatches {
err.note(&format!(
"extern location for {} is of an unknown type: {}",
crate_name,
path.display(),
))
.help(&format!(
"file name should be lib*.rlib or {}*.{}",
locator.dll_prefix, locator.dll_suffix
));
}
}
err
}
CrateError::NonDylibPlugin(crate_name) => struct_span_err!(
sess,
span,
E0457,
"plugin `{}` only found in rlib format, but must be available in dylib format",
crate_name,
),
};
err.emit();
sess.abort_if_errors();
unreachable!();
}
}
| 42.410167 | 131 | 0.553399 |
d7d00e39c1c438d47c5e405a6a97ebc66b1d3411 | 3,055 | use std::collections::HashMap;
use near_sdk::{
// log,
require,
env, ext_contract, near_bindgen, assert_one_yocto,
Gas, Balance, AccountId, BorshStorageKey, PanicOnDefault,
Promise, PromiseResult, promise_result_as_success,
collections::{Vector, LookupMap, UnorderedMap, UnorderedSet},
borsh::{self, BorshDeserialize, BorshSerialize},
serde::{Serialize, Deserialize},
serde_json::from_str,
json_types::{U128},
};
use crate::offer::*;
use crate::nft_traits::*;
use crate::internal::*;
use crate::self_callbacks::*;
pub use crate::events::*;
mod owner;
mod events;
mod storage;
mod views;
mod enumeration;
mod nft_traits;
mod internal;
mod nft_callbacks;
mod offer;
mod self_callbacks;
/// TODO verify gas amounts for cases like auto_transfer (unlikely to be able to use 115 Tgas, so what's the max royalties we can handle???)
pub const GAS_FOR_ROYALTIES: Gas = Gas(115_000_000_000_000);
pub const GAS_FOR_NFT_TRANSFER: Gas = Gas(15_000_000_000_000);
/// TODO where is this used and how can we measure and optimize?
pub const CALLBACK_GAS: Gas = Gas(30_000_000_000_000);
pub const DELIMETER: char = '|';
pub const NO_DEPOSIT: Balance = 0;
// Creating custom types to use within the contract. This makes things more readable.
pub type TokenId = String;
// Defines the payout type we'll be parsing from the NFT contract as part of the royalty standard.
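// Illustrative shape of the JSON this deserializes from (account id and
// amount are invented): {"payout":{"alice.near":"1000000000000000000000000"}},
// i.e. a map from account id to a stringified U128 yoctoNEAR amount.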
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct Payout {
pub payout: HashMap<AccountId, U128>,
}
#[derive(BorshSerialize, BorshStorageKey)]
enum StorageKey {
OfferById,
OfferByMakerId,
OfferByMakerIdInner { maker_id: AccountId },
OfferByTakerId,
OfferByTakerIdInner { taker_id: AccountId },
OfferByContractTokenId,
OfferStorageByOwnerId,
}
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize, PanicOnDefault)]
pub struct Contract {
owner_id: AccountId,
outbid_timeout: u64,
offer_storage_amount: Balance,
min_bid_amount: Balance,
market_balance: Balance,
market_royalty: u32,
offer_id: u64,
offer_by_id: UnorderedMap<u64, Offer>,
offers_by_maker_id: LookupMap<AccountId, UnorderedSet<u64>>,
offers_by_taker_id: LookupMap<AccountId, UnorderedSet<u64>>,
offer_by_contract_token_id: LookupMap<String, u64>,
offer_storage_by_owner_id: LookupMap<AccountId, u64>,
}
#[near_bindgen]
impl Contract {
#[init]
pub fn new(owner_id: AccountId, market_royalty: u32) -> Self {
Self {
owner_id,
outbid_timeout: 86_400_000_000_000, // 24hr
offer_storage_amount: 20_000_000_000_000_000_000_000, // 2kb 0.02 N
min_bid_amount: 99_999_999_999_999_999_999_999, // bids > 0.1 N
market_balance: 0,
market_royalty,
offer_id: 0,
offer_by_id: UnorderedMap::new(StorageKey::OfferById),
offers_by_maker_id: LookupMap::new(StorageKey::OfferByMakerId),
offers_by_taker_id: LookupMap::new(StorageKey::OfferByTakerId),
offer_by_contract_token_id: LookupMap::new(StorageKey::OfferByContractTokenId),
offer_storage_by_owner_id: LookupMap::new(StorageKey::OfferStorageByOwnerId),
}
}
}
| 30.55 | 140 | 0.761375 |
726dd771130111569de18b877c57f15676a817c9 | 2,913 | // Copyright Materialize, Inc. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Provides convenience functions for working with upstream Postgres sources from the `sql` package.
use anyhow::anyhow;
use sql_parser::ast::display::{AstDisplay, AstFormatter};
use sql_parser::impl_display;
use tokio_postgres::types::Type as PgType;
use tokio_postgres::NoTls;
/// The schema of a single column
pub struct PgColumn {
name: String,
scalar_type: PgType,
nullable: bool,
}
impl AstDisplay for PgColumn {
fn fmt(&self, f: &mut AstFormatter) {
f.write_str(&self.name);
f.write_str(" ");
f.write_str(&self.scalar_type);
f.write_str(" ");
if self.nullable {
f.write_str("NULL");
} else {
f.write_str("NOT NULL");
}
}
}
impl_display!(PgColumn);
/// Fetches column information from an upstream Postgres source, given
/// a connection string, a namespace, and a target table.
///
/// # Errors
///
/// - Invalid connection string, user information, or user permissions.
/// - Upstream table does not exist or contains invalid values.
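///
/// # Example
///
/// A minimal usage sketch (connection string and names are placeholders):
///
/// ```text
/// let columns = fetch_columns("host=localhost user=postgres", "public", "t").await?;
/// for c in &columns {
///     println!("{}", c);
/// }
/// ```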
pub async fn fetch_columns(
conn: &str,
namespace: &str,
table: &str,
) -> Result<Vec<PgColumn>, anyhow::Error> {
let (client, connection) = tokio_postgres::connect(&conn, NoTls).await?;
tokio::spawn(connection);
let rel_id: u32 = client
.query(
"SELECT c.oid
FROM pg_catalog.pg_class c
INNER JOIN pg_catalog.pg_namespace n
ON (c.relnamespace = n.oid)
WHERE n.nspname = $1
AND c.relname = $2;",
&[&namespace, &table],
)
.await?
.get(0)
.ok_or_else(|| anyhow!("table not found in the upstream catalog"))?
.get(0);
Ok(client
.query(
"SELECT a.attname, a.atttypid, a.attnotnull
FROM pg_catalog.pg_attribute a
WHERE a.attnum > 0::pg_catalog.int2
AND NOT a.attisdropped
AND a.attrelid = $1
ORDER BY a.attnum",
&[&rel_id],
)
.await?
.into_iter()
.map(|row| {
let name: String = row.get(0);
let oid = row.get(1);
let scalar_type =
PgType::from_oid(oid).ok_or_else(|| anyhow!("unknown type OID: {}", oid))?;
let nullable = !row.get::<_, bool>(2);
Ok(PgColumn {
name,
scalar_type,
nullable,
})
})
.collect::<Result<Vec<_>, anyhow::Error>>()?)
}
| 30.030928 | 101 | 0.569516 |
716a8644ff79d1669ecbfe9a6963cfe60f0b9942 | 7,589 | // Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Integration tests for the `Ping` network behaviour.
use libp2p_core::{
Multiaddr,
PeerId,
identity,
muxing::StreamMuxerBox,
transport::{Transport, boxed::Boxed},
upgrade
};
use libp2p_noise as noise;
use libp2p_ping::*;
use libp2p_swarm::{Swarm, SwarmEvent};
use libp2p_tcp::TcpConfig;
use futures::{prelude::*, channel::mpsc};
use quickcheck::*;
use std::{io, num::NonZeroU8, time::Duration};
#[test]
fn ping_pong() {
fn prop(count: NonZeroU8) {
let cfg = PingConfig::new()
.with_keep_alive(true)
.with_interval(Duration::from_millis(10));
let (peer1_id, trans) = mk_transport();
let mut swarm1 = Swarm::new(trans, Ping::new(cfg.clone()), peer1_id.clone());
let (peer2_id, trans) = mk_transport();
let mut swarm2 = Swarm::new(trans, Ping::new(cfg), peer2_id.clone());
let (mut tx, mut rx) = mpsc::channel::<Multiaddr>(1);
let pid1 = peer1_id.clone();
let addr = "/ip4/127.0.0.1/tcp/0".parse().unwrap();
Swarm::listen_on(&mut swarm1, addr).unwrap();
let mut count1 = count.get();
let mut count2 = count.get();
let peer1 = async move {
while let Some(_) = swarm1.next().now_or_never() {}
for l in Swarm::listeners(&swarm1) {
tx.send(l.clone()).await.unwrap();
}
loop {
match swarm1.next().await {
PingEvent { peer, result: Ok(PingSuccess::Ping { rtt }) } => {
count1 -= 1;
if count1 == 0 {
return (pid1.clone(), peer, rtt)
}
},
PingEvent { result: Err(e), .. } => panic!("Ping failure: {:?}", e),
_ => {}
}
}
};
let pid2 = peer2_id.clone();
let peer2 = async move {
Swarm::dial_addr(&mut swarm2, rx.next().await.unwrap()).unwrap();
loop {
match swarm2.next().await {
PingEvent { peer, result: Ok(PingSuccess::Ping { rtt }) } => {
count2 -= 1;
if count2 == 0 {
return (pid2.clone(), peer, rtt)
}
},
PingEvent { result: Err(e), .. } => panic!("Ping failure: {:?}", e),
_ => {}
}
}
};
let result = future::select(Box::pin(peer1), Box::pin(peer2));
let ((p1, p2, rtt), _) = async_std::task::block_on(result).factor_first();
assert!(p1 == peer1_id && p2 == peer2_id || p1 == peer2_id && p2 == peer1_id);
assert!(rtt < Duration::from_millis(50));
}
QuickCheck::new().tests(3).quickcheck(prop as fn(_))
}
/// Tests that the connection is closed upon a configurable
/// number of consecutive ping failures.
#[test]
fn max_failures() {
fn prop(max_failures: NonZeroU8) {
let cfg = PingConfig::new()
.with_keep_alive(true)
.with_interval(Duration::from_millis(10))
.with_timeout(Duration::from_millis(0))
.with_max_failures(max_failures.into());
let (peer1_id, trans) = mk_transport();
let mut swarm1 = Swarm::new(trans, Ping::new(cfg.clone()), peer1_id.clone());
let (peer2_id, trans) = mk_transport();
let mut swarm2 = Swarm::new(trans, Ping::new(cfg), peer2_id.clone());
let (mut tx, mut rx) = mpsc::channel::<Multiaddr>(1);
let addr = "/ip4/127.0.0.1/tcp/0".parse().unwrap();
Swarm::listen_on(&mut swarm1, addr).unwrap();
let peer1 = async move {
while let Some(_) = swarm1.next().now_or_never() {}
for l in Swarm::listeners(&swarm1) {
tx.send(l.clone()).await.unwrap();
}
let mut count1: u8 = 0;
loop {
match swarm1.next_event().await {
SwarmEvent::Behaviour(PingEvent {
result: Ok(PingSuccess::Ping { .. }), ..
}) => {
count1 = 0; // there may be an occasional success
}
SwarmEvent::Behaviour(PingEvent {
result: Err(_), ..
}) => {
count1 += 1;
}
SwarmEvent::ConnectionClosed { .. } => {
return count1
}
_ => {}
}
}
};
let peer2 = async move {
Swarm::dial_addr(&mut swarm2, rx.next().await.unwrap()).unwrap();
let mut count2: u8 = 0;
loop {
match swarm2.next_event().await {
SwarmEvent::Behaviour(PingEvent {
result: Ok(PingSuccess::Ping { .. }), ..
}) => {
count2 = 0; // there may be an occasional success
}
SwarmEvent::Behaviour(PingEvent {
result: Err(_), ..
}) => {
count2 += 1;
}
SwarmEvent::ConnectionClosed { .. } => {
return count2
}
_ => {}
}
}
};
let future = future::join(peer1, peer2);
let (count1, count2) = async_std::task::block_on(future);
assert_eq!(u8::max(count1, count2), max_failures.get() - 1);
}
QuickCheck::new().tests(3).quickcheck(prop as fn(_))
}
fn mk_transport() -> (
PeerId,
Boxed<
(PeerId, StreamMuxerBox),
io::Error
>
) {
let id_keys = identity::Keypair::generate_ed25519();
let peer_id = id_keys.public().into_peer_id();
let noise_keys = noise::Keypair::<noise::X25519Spec>::new().into_authentic(&id_keys).unwrap();
let transport = TcpConfig::new()
.nodelay(true)
.upgrade(upgrade::Version::V1)
.authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated())
.multiplex(libp2p_yamux::Config::default())
.map(|(peer, muxer), _| (peer, StreamMuxerBox::new(muxer)))
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
.boxed();
(peer_id, transport)
}
| 35.297674 | 98 | 0.521149 |
7166c136369edcc758aa524a47b817aff4fb726b | 23,037 | use crate::vm::{FileId, VM};
use dora_parser::lexer::position::Position;
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum SemError {
Unimplemented,
UnknownClass(String),
UnknownType(String),
UnknownIdentifier(String),
UnknownStruct(String),
UnknownFunction(String),
UnknownField(String, String),
UnknownMethod(String, String, Vec<String>),
UnknownEnumValue(String),
MultipleCandidatesForMethod(String, String, Vec<String>),
UnknownMethodForTypeParam(String, String, Vec<String>),
MultipleCandidatesForTypeParam(String, String, Vec<String>),
MultipleCandidatesForStaticMethodWithTypeParam,
UnknownStaticMethodWithTypeParam,
UnknownStaticMethod(String, String, Vec<String>),
UnknownCtor(String, Vec<String>),
MethodExists(String, String, Position),
IncompatibleWithNil(String),
IdentifierExists(String),
ShadowFunction(String),
ShadowParam(String),
ShadowClass(String),
ShadowStruct(String),
ShadowTrait(String),
ShadowField(String),
ShadowGlobal(String),
ShadowConst(String),
ShadowEnum(String),
ShadowEnumValue(String),
InvalidLhsAssignment,
NoEnumValue,
VarNeedsTypeInfo(String),
ParamTypesIncompatible(String, Vec<String>, Vec<String>),
WhileCondType(String),
IfCondType(String),
ReturnType(String, String),
LvalueExpected,
AssignType(String, String, String),
AssignField(String, String, String, String),
UnOpType(String, String),
BinOpType(String, String, String),
ConstValueExpected,
OutsideLoop,
NoReturnValue,
MainNotFound,
WrongMainDefinition,
ThisUnavailable,
SelfTypeUnavailable,
SuperUnavailable,
SuperNeedsMethodCall,
ReferenceTypeExpected(String),
ThrowNil,
CatchOrFinallyExpected,
LetMissingInitialization,
LetReassigned,
FctReassigned,
ClassReassigned,
TypeParamReassigned,
FctUsedAsIdentifier,
ClsUsedAsIdentifier,
TypeParamUsedAsIdentifier,
EnumUsedAsIdentifier,
TypeParamUsedAsCallee,
UnderivableType(String),
CycleInHierarchy,
SuperfluousOverride(String),
SuperfluousOpen(String),
MissingOverride(String),
ThrowsDifference(String),
MethodNotOverridable(String),
TypesIncompatible(String, String),
ReturnTypeMismatch(String, String),
OverrideMismatch,
UnresolvedInternal,
UnclosedComment,
UnknownChar(char),
UnclosedChar,
UnclosedString,
NumberOverflow(String),
ExpectedClass(String),
ExpectedFactor(String),
ExpectedToken(String, String),
ExpectedTopLevelElement(String),
ExpectedTrait(String),
ExpectedType(String),
ExpectedIdentifier(String),
ExpectedStringable(String),
ExpectedSomeIdentifier,
MisplacedElse,
IoError,
ExpectedClassElement(String),
MisplacedAnnotation(String),
RedundantAnnotation(String),
UnknownAnnotation(String),
InvalidEscapeSequence(char),
MissingFctBody,
FctCallExpected,
ThisOrSuperExpected(String),
NoSuperDelegationWithPrimaryCtor(String),
NoSuperClass(String),
RecursiveStructure,
TraitMethodWithBody,
TryNeedsCall,
TryCallNonThrowing,
ThrowingCallWithoutTry,
TypeParamsExpected,
TypeParamNameNotUnique(String),
StaticMethodNotInTrait(String, String, Vec<String>),
MethodNotInTrait(String, String, Vec<String>),
StaticMethodMissingFromTrait(String, String, Vec<String>),
MethodMissingFromTrait(String, String, Vec<String>),
WrongNumberTypeParams(usize, usize),
ClassExpected(String),
ClassExpectedAsTypeParam,
AssignmentToConst,
BoundExpected,
NoTypeParamsExpected,
MultipleClassBounds,
DuplicateTraitBound,
ClassBoundNotSatisfied(String, String),
TraitBoundNotSatisfied(String, String),
AbstractMethodNotInAbstractClass,
AbstractMethodWithImplementation,
NewAbstractClass,
MissingAbstractOverride(String, String),
ModifierNotAllowedForStaticMethod(String),
GlobalInitializerNotSupported,
MakeIteratorReturnType(String),
UnknownStructField(String, String),
StructFieldNotInitialized(String, String),
InvalidLeftSideOfSeparator,
InvalidUseOfTypeParams,
NameOfStaticMethodExpected,
IfBranchTypesIncompatible(String, String),
NameExpected,
IndexExpected,
IllegalTupleIndex(u64, String),
}
impl SemError {
pub fn message(&self) -> String {
match *self {
SemError::Unimplemented => format!("feature not implemented yet."),
SemError::UnknownClass(ref name) => format!("class `{}` does not exist.", name),
SemError::UnknownType(ref name) => format!("type `{}` does not exist.", name),
SemError::UnknownIdentifier(ref name) => format!("unknown identifier `{}`.", name),
SemError::UnknownStruct(ref name) => format!("unknown struct `{}`.", name),
SemError::UnknownFunction(ref name) => format!("unknown function `{}`", name),
SemError::UnknownMethod(ref cls, ref name, ref args) => {
let args = args.join(", ");
format!(
"no method with definition `{}({})` in class `{}`.",
name, args, cls
)
}
SemError::UnknownEnumValue(ref name) => {
format!("no value with name `{}` in enumeration.", name)
}
SemError::MultipleCandidatesForMethod(ref cls, ref name, ref args) => {
let args = args.join(", ");
format!(
"multiple candidates for definition `{}({})` in class `{}`.",
name, args, cls
)
}
SemError::UnknownMethodForTypeParam(ref tp, ref name, ref args) => {
let args = args.join(", ");
format!(
"no method with definition `{}({})` found for type param `{}`.",
name, args, tp
)
}
SemError::MultipleCandidatesForTypeParam(ref tp, ref name, ref args) => {
let args = args.join(", ");
format!(
"multiple candidates with definition `{}({})` found for type param `{}`.",
name, args, tp
)
}
SemError::MultipleCandidatesForStaticMethodWithTypeParam => {
"multiple candidates for static method call found.".into()
}
SemError::UnknownStaticMethodWithTypeParam => {
"no static method with this name found for type param.".into()
}
SemError::UnknownStaticMethod(ref cls, ref name, ref args) => {
let args = args.join(", ");
format!("no static method `{}::{}({})`.", cls, name, args)
}
SemError::UnknownCtor(ref name, ref args) => {
let args = args.join(", ");
format!("no ctor with definition `{}({})`.", name, args)
}
SemError::MethodExists(ref cls, ref name, pos) => format!(
"method with name `{}` already exists in class `{}` at line {}.",
name, cls, pos
),
SemError::IncompatibleWithNil(ref ty) => {
format!("cannot assign `nil` to type `{}`.", ty)
}
SemError::UnknownField(ref field, ref ty) => {
format!("unknown field `{}` for type `{}`", field, ty)
}
SemError::IdentifierExists(ref name) => {
format!("can not redefine identifier `{}`.", name)
}
SemError::ShadowFunction(ref name) => format!("can not shadow function `{}`.", name),
SemError::ShadowParam(ref name) => format!("can not shadow param `{}`.", name),
SemError::ShadowClass(ref name) => format!("can not shadow class `{}`.", name),
SemError::ShadowStruct(ref name) => format!("can not shadow struct `{}`.", name),
SemError::ShadowTrait(ref name) => format!("can not shadow trait `{}`.", name),
SemError::ShadowField(ref name) => {
format!("field with name `{}` already exists.", name)
}
SemError::ShadowGlobal(ref name) => {
format!("can not shadow global variable `{}`.", name)
}
SemError::ShadowConst(ref name) => format!("can not shadow const `{}`.", name),
SemError::ShadowEnum(ref name) => format!("can not shadow enum `{}`.", name),
SemError::ShadowEnumValue(ref name) => format!("can not shadow enum value `{}`.", name),
SemError::NoEnumValue => "enum needs at least one value.".into(),
SemError::VarNeedsTypeInfo(ref name) => format!(
"variable `{}` needs either type declaration or expression.",
name
),
SemError::ParamTypesIncompatible(ref name, ref def, ref expr) => {
let def = def.join(", ");
let expr = expr.join(", ");
format!(
"function `{}({})` cannot be called as `{}({})`",
name, def, name, expr
)
}
SemError::WhileCondType(ref ty) => {
format!("`while` expects condition of type `bool` but got `{}`.", ty)
}
SemError::IfCondType(ref ty) => {
format!("`if` expects condition of type `bool` but got `{}`.", ty)
}
SemError::ReturnType(ref def, ref expr) => format!(
"`return` expects value of type `{}` but got `{}`.",
def, expr
),
SemError::LvalueExpected => format!("lvalue expected for assignment"),
SemError::AssignType(ref name, ref def, ref expr) => format!(
"cannot assign `{}` to variable `{}` of type `{}`.",
expr, name, def
),
SemError::AssignField(ref name, ref cls, ref def, ref expr) => format!(
"cannot assign `{}` to field `{}`.`{}` of type `{}`.",
expr, cls, name, def
),
SemError::UnOpType(ref op, ref expr) => format!(
"unary operator `{}` can not handle value of type `{} {}`.",
op, op, expr
),
SemError::BinOpType(ref op, ref lhs, ref rhs) => format!(
"binary operator `{}` can not handle expression of type `{} {} {}`",
op, lhs, op, rhs
),
SemError::ConstValueExpected => "constant value expected".into(),
SemError::OutsideLoop => "statement only allowed inside loops".into(),
SemError::NoReturnValue => "function does not return a value in all code paths".into(),
SemError::MainNotFound => "no `main` function found in the program".into(),
SemError::WrongMainDefinition => "`main` function has wrong definition".into(),
SemError::ThisUnavailable => "`self` can only be used in methods not functions".into(),
SemError::SelfTypeUnavailable => "`Self` can only be used in traits.".into(),
SemError::SuperUnavailable => {
"`super` only available in methods of classes with parent class".into()
}
SemError::SuperNeedsMethodCall => "`super` only allowed in method calls".into(),
SemError::ReferenceTypeExpected(ref name) => {
format!("`{}` is not a reference type.", name)
}
SemError::ThrowNil => "throwing `nil` is not allowed.".into(),
SemError::CatchOrFinallyExpected => "`try` without `catch` or `finally`.".into(),
SemError::LetMissingInitialization => "`let` binding is missing initialization.".into(),
SemError::LetReassigned => "`let` binding cannot be reassigned.".into(),
SemError::FctReassigned => "function cannot be reassigned.".into(),
SemError::ClassReassigned => "class cannot be reassigned.".into(),
SemError::TypeParamReassigned => "type param cannot be reassigned.".into(),
SemError::FctUsedAsIdentifier => "function cannot be used as identifier.".into(),
SemError::ClsUsedAsIdentifier => "class cannot be used as identifier.".into(),
SemError::TypeParamUsedAsIdentifier => {
"type param cannot be used as identifier.".into()
}
SemError::EnumUsedAsIdentifier => "enum cannot be used as identifier.".into(),
SemError::InvalidLhsAssignment => "invalid left-hand-side of assignment.".into(),
SemError::TypeParamUsedAsCallee => "type param cannot be used as callee.".into(),
SemError::UnderivableType(ref name) => {
format!("type `{}` cannot be used as super class.", name)
}
SemError::CycleInHierarchy => "cycle in type hierarchy detected.".into(),
            SemError::SuperfluousOverride(ref name) => format!(
                "method `{}` uses modifier `override` without overriding a function.",
                name
            ),
            SemError::MissingOverride(ref name) => {
                format!("method `{}` is missing modifier `override`.", name)
            }
            SemError::SuperfluousOpen(ref name) => format!(
                "method `{}` uses modifier `open` but class allows no subclasses.",
                name
            ),
            SemError::ThrowsDifference(ref name) => format!(
                "use of `throws` in method `{}` needs to match super class.",
                name
            ),
SemError::MethodNotOverridable(ref name) => {
format!("method `{}` in super class not overridable.", name)
}
SemError::TypesIncompatible(ref na, ref nb) => {
format!("types `{}` and `{}` incompatible.", na, nb)
}
SemError::ReturnTypeMismatch(ref fct, ref sup) => {
format!("return types `{}` and `{}` do not match.", fct, sup)
}
            SemError::OverrideMismatch => "definition does not match overridden function.".into(),
SemError::UnresolvedInternal => "unresolved internal.".into(),
            SemError::MisplacedElse => "misplaced else.".into(),
SemError::ExpectedToken(ref exp, ref got) => {
format!("expected {} but got {}.", exp, got)
}
SemError::NumberOverflow(ref ty) => format!("number does not fit into type {}.", ty),
SemError::ExpectedClass(ref cls) => format!("expected class name but got {}.", cls),
SemError::ExpectedFactor(ref got) => format!("factor expected but got {}.", got),
SemError::ExpectedTrait(ref trt) => format!("expected trait name but got {}.", trt),
SemError::ExpectedType(ref got) => format!("type expected but got {}.", got),
SemError::ExpectedIdentifier(ref tok) => {
format!("identifier expected but got {}.", tok)
}
SemError::ExpectedSomeIdentifier => "identifier expected".into(),
SemError::ExpectedTopLevelElement(ref token) => {
format!("expected function or class but got {}.", token)
}
SemError::ExpectedClassElement(ref token) => {
format!("field or method expected but got {}.", token)
}
SemError::ExpectedStringable(ref ty) => {
format!("type {} does not implement Stringable.", ty)
}
SemError::MisplacedAnnotation(ref modifier) => {
format!("misplaced annotation `{}`.", modifier)
}
SemError::RedundantAnnotation(ref token) => format!("redundant annotation {}.", token),
SemError::UnknownAnnotation(ref token) => format!("unknown annotation {}.", token),
SemError::UnknownChar(ch) => {
format!("unknown character {} (codepoint {}).", ch, ch as usize)
}
SemError::UnclosedComment => "unclosed comment.".into(),
SemError::InvalidEscapeSequence(ch) => format!("unknown escape sequence `\\{}`.", ch),
SemError::UnclosedString => "unclosed string.".into(),
SemError::UnclosedChar => "unclosed char.".into(),
SemError::IoError => "error reading from file.".into(),
SemError::MissingFctBody => "missing function body.".into(),
            SemError::FctCallExpected => "function call expected.".into(),
SemError::ThisOrSuperExpected(ref val) => {
format!("`self` or `super` expected but got {}.", val)
}
SemError::NoSuperDelegationWithPrimaryCtor(ref name) => format!(
"no `super` delegation allowed for ctor in class {}, because class has \
primary ctor.",
name
),
SemError::NoSuperClass(ref name) => {
format!("class `{}` does not have super class.", name)
}
SemError::RecursiveStructure => "recursive structure is not allowed.".into(),
            SemError::TraitMethodWithBody => {
                "trait method is not allowed to have a definition.".into()
            }
SemError::TryNeedsCall => "`try` expects function or method call.".into(),
SemError::TryCallNonThrowing => {
"given function or method call for `try` does not throw.".into()
}
            SemError::ThrowingCallWithoutTry => {
                "a function or method call that can throw needs `try`.".into()
            }
SemError::TypeParamsExpected => "type params expected.".into(),
SemError::TypeParamNameNotUnique(ref name) => {
format!("type param `{}` name already used.", name)
}
SemError::StaticMethodNotInTrait(ref trait_name, ref mtd_name, ref args) => {
let args = args.join(", ");
format!(
"trait `{}` does not define static method `{}({})`.",
trait_name, mtd_name, args
)
}
SemError::MethodNotInTrait(ref trait_name, ref mtd_name, ref args) => {
let args = args.join(", ");
format!(
"trait `{}` does not define method `{}({})`.",
trait_name, mtd_name, args
)
}
SemError::StaticMethodMissingFromTrait(ref trait_name, ref mtd_name, ref args) => {
let args = args.join(", ");
format!(
"trait `{}` defines static method `{}({})` but is missing in `impl`.",
trait_name, mtd_name, args
)
}
SemError::MethodMissingFromTrait(ref trait_name, ref mtd_name, ref args) => {
let args = args.join(", ");
format!(
"trait `{}` defines method `{}({})` but is missing in `impl`.",
trait_name, mtd_name, args
)
}
SemError::WrongNumberTypeParams(exp, actual) => {
format!("expected {} type parameters but got {}.", exp, actual)
}
SemError::ClassExpected(ref name) => format!("`{}` is not a class.", name),
SemError::ClassExpectedAsTypeParam => "class as type parameter expected.".into(),
SemError::AssignmentToConst => "cannot assign to const variable.".into(),
SemError::BoundExpected => "class or trait bound expected".into(),
SemError::NoTypeParamsExpected => "no type params allowed".into(),
SemError::MultipleClassBounds => "multiple class bounds not allowed".into(),
SemError::DuplicateTraitBound => "duplicate trait bound".into(),
SemError::ClassBoundNotSatisfied(ref name, ref xclass) => {
format!("type `{}` not a subclass of `{}`.", name, xclass)
}
SemError::TraitBoundNotSatisfied(ref name, ref xtrait) => {
format!("type `{}` does not implement trait `{}`.", name, xtrait)
}
SemError::AbstractMethodWithImplementation => {
"abstract methods cannot be implemented.".into()
}
SemError::AbstractMethodNotInAbstractClass => {
"abstract methods only allowed in abstract classes.".into()
}
SemError::NewAbstractClass => "cannot create object of abstract class.".into(),
            SemError::MissingAbstractOverride(ref cls, ref name) => format!(
                "missing override of abstract method `{}` in class `{}`.",
                name, cls
            ),
SemError::ModifierNotAllowedForStaticMethod(ref modifier) => {
format!("modifier `{}` not allowed for static method.", modifier)
}
            SemError::GlobalInitializerNotSupported => {
                "global variables do not support initial assignment for now.".into()
            }
SemError::MakeIteratorReturnType(ref ty) => format!(
"makeIterator() returns `{}` which does not implement Iterator.",
ty
),
SemError::UnknownStructField(ref struc, ref field) => {
format!("struct `{}` does not have field named `{}`.", struc, field)
}
SemError::StructFieldNotInitialized(ref struc, ref field) => {
format!("field `{}` in struct `{}` not initialized.", field, struc)
}
SemError::InvalidLeftSideOfSeparator => {
"left hand side of separator is not a class.".into()
}
SemError::InvalidUseOfTypeParams => {
"type params need to be used on class or function.".into()
}
SemError::NameOfStaticMethodExpected => "name of static method expected.".into(),
SemError::IfBranchTypesIncompatible(ref then_block, ref else_block) => format!(
"if-branches have incompatible types `{}` and `{}`.",
then_block, else_block
),
SemError::NameExpected => "name expected for dot-operator.".into(),
SemError::IndexExpected => "index expected as right-hand-side for tuple.".into(),
            SemError::IllegalTupleIndex(idx, ref ty) => {
                format!("illegal index `{}` for type `{}`.", idx, ty)
            }
}
}
}
#[derive(Clone, Debug)]
pub struct SemErrorAndPos {
pub file: FileId,
pub pos: Position,
pub msg: SemError,
}
impl SemErrorAndPos {
pub fn new(file: FileId, pos: Position, msg: SemError) -> SemErrorAndPos {
SemErrorAndPos { file, pos, msg }
}
pub fn message(&self, vm: &VM) -> String {
let file = vm.file(self.file);
format!(
"error in {} at {}: {}",
file.name,
self.pos,
self.msg.message()
)
}
}
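// A minimal usage sketch (illustrative only; `file_id`, `pos` and `vm` are
// assumed to exist and are not defined in this module):
//
//     let err = SemErrorAndPos::new(file_id, pos, SemError::MisplacedElse);
//     eprintln!("{}", err.message(&vm));
//
// which would print something like `error in main.dora at 3:5: misplaced else.`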
| 45.982036 | 100 | 0.567956 |
18b9c42f9ba963cd0b2dabb1eb70e848440a5b37 | 4,165 | #![cfg(test)]
use enr::Enr;
use eth2_libp2p::Multiaddr;
use eth2_libp2p::NetworkConfig;
use eth2_libp2p::Service as LibP2PService;
use slog::{debug, error, o, Drain};
use std::time::Duration;
pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger {
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).build().fuse();
if enabled {
slog::Logger::root(drain.filter_level(level).fuse(), o!())
} else {
slog::Logger::root(drain.filter(|_| false).fuse(), o!())
}
}
pub fn build_config(
port: u16,
mut boot_nodes: Vec<Enr>,
secret_key: Option<String>,
) -> NetworkConfig {
let mut config = NetworkConfig::default();
config.libp2p_port = port; // tcp port
config.discovery_port = port; // udp port
config.boot_nodes.append(&mut boot_nodes);
config.secret_key_hex = secret_key;
config.network_dir.push(port.to_string());
// Reduce gossipsub heartbeat parameters
config.gs_config.heartbeat_initial_delay = Duration::from_millis(500);
config.gs_config.heartbeat_interval = Duration::from_millis(500);
config
}
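// Usage sketch (hypothetical values): one node on port 9000, no boot nodes,
// and a randomly generated secret key:
//
//     let config = build_config(9000, vec![], None);
//     assert_eq!(config.libp2p_port, 9000);
//     assert_eq!(config.discovery_port, 9000);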
pub fn build_libp2p_instance(
port: u16,
boot_nodes: Vec<Enr>,
secret_key: Option<String>,
log: slog::Logger,
) -> LibP2PService {
let config = build_config(port, boot_nodes, secret_key);
// launch libp2p service
LibP2PService::new(&config, log.clone()).unwrap().1
}
#[allow(dead_code)]
pub fn get_enr(node: &LibP2PService) -> Enr {
node.swarm.discovery().local_enr().clone()
}
// Returns `n` libp2p peers in fully connected topology.
#[allow(dead_code)]
pub fn build_full_mesh(log: slog::Logger, n: usize, start_port: Option<u16>) -> Vec<LibP2PService> {
let base_port = start_port.unwrap_or(9000);
let mut nodes: Vec<LibP2PService> = (base_port..base_port + n as u16)
.map(|p| build_libp2p_instance(p, vec![], None, log.clone()))
.collect();
let multiaddrs: Vec<Multiaddr> = nodes
.iter()
.map(|x| get_enr(&x).multiaddr()[1].clone())
.collect();
for (i, node) in nodes.iter_mut().enumerate().take(n) {
for (j, multiaddr) in multiaddrs.iter().enumerate().skip(i) {
if i != j {
match libp2p::Swarm::dial_addr(&mut node.swarm, multiaddr.clone()) {
Ok(()) => debug!(log, "Connected"),
Err(_) => error!(log, "Failed to connect"),
};
}
}
}
nodes
}
// Constructs a pair of nodes with separate loggers. The sender dials the receiver.
// This returns a (sender, receiver) pair.
#[allow(dead_code)]
pub fn build_node_pair(log: &slog::Logger, start_port: u16) -> (LibP2PService, LibP2PService) {
let sender_log = log.new(o!("who" => "sender"));
let receiver_log = log.new(o!("who" => "receiver"));
let mut sender = build_libp2p_instance(start_port, vec![], None, sender_log);
let receiver = build_libp2p_instance(start_port + 1, vec![], None, receiver_log);
let receiver_multiaddr = receiver.swarm.discovery().local_enr().clone().multiaddr()[1].clone();
match libp2p::Swarm::dial_addr(&mut sender.swarm, receiver_multiaddr) {
Ok(()) => debug!(log, "Sender dialed receiver"),
Err(_) => error!(log, "Dialing failed"),
};
(sender, receiver)
}
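// Typical use in a test (sketch; the runtime setup is an assumption, not shown
// in this file):
//
//     let log = build_log(slog::Level::Debug, false);
//     let (mut sender, mut receiver) = build_node_pair(&log, 9500);
//     // poll both swarms on a runtime and assert on the emitted events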
// Returns `n` peers in a linear topology
#[allow(dead_code)]
pub fn build_linear(log: slog::Logger, n: usize, start_port: Option<u16>) -> Vec<LibP2PService> {
let base_port = start_port.unwrap_or(9000);
let mut nodes: Vec<LibP2PService> = (base_port..base_port + n as u16)
.map(|p| build_libp2p_instance(p, vec![], None, log.clone()))
.collect();
let multiaddrs: Vec<Multiaddr> = nodes
.iter()
.map(|x| get_enr(&x).multiaddr()[1].clone())
.collect();
for i in 0..n - 1 {
match libp2p::Swarm::dial_addr(&mut nodes[i].swarm, multiaddrs[i + 1].clone()) {
Ok(()) => debug!(log, "Connected"),
Err(_) => error!(log, "Failed to connect"),
};
}
nodes
}
| 35.905172 | 100 | 0.630732 |
64ebc773c97a19dabe28ac996f74601598418f59 | 3,293 | use std::collections::HashMap;
use std::io::{BufRead, BufReader};
use std::net::Ipv4Addr;
use std::str::FromStr;
use std::time::Duration;
use std::time::Instant;
use tokio::net::UdpSocket;
use hyper::{body::HttpBody as _, Body, Client, Request, Uri};
use spectral::prelude::*;
use testcontainers::core::Port;
use testcontainers::*;
use trust_dns_client::client::{Client as DnsClient, ClientHandle, AsyncClient};
use trust_dns_client::op::DnsResponse;
use trust_dns_client::rr::{DNSClass, Name, RData, Record, RecordType};
use trust_dns_client::udp::UdpClientStream;
#[derive(Default)]
struct TestServer;
// TODO replace with GenericImage
impl Image for TestServer {
type Args = Vec<String>;
type EnvVars = HashMap<String, String>;
type Volumes = HashMap<String, String>;
type EntryPoint = std::convert::Infallible;
fn descriptor(&self) -> String {
String::from("dyndns-debian:latest")
}
fn wait_until_ready<D: Docker>(&self, container: &Container<D, Self>) {
container
.logs()
.stderr
.wait_for_message("DNS server now listening")
.unwrap();
}
fn args(&self) -> <Self as Image>::Args {
vec![]
}
fn volumes(&self) -> Self::Volumes {
let mut volumes = HashMap::new();
let project_root = env!("CARGO_MANIFEST_DIR");
volumes.insert(format!("{}/config", project_root).to_string(), "/etc/dyndns/config".to_string());
volumes
}
fn env_vars(&self) -> Self::EnvVars {
let mut env: HashMap<String, String> = HashMap::new();
env.insert("RUN_MODE".into(), "test".into());
env
}
fn ports(&self) -> Option<Vec<Port>> {
let mut ports: Vec<Port> = Vec::new();
ports.push(Port {
local: 8080,
internal: 80,
});
ports.push((5353, 53).into());
Some(ports)
}
fn with_args(self, _arguments: <Self as Image>::Args) -> Self {
self
}
}
//#[test]
#[tokio::test]
async fn should_get_the_correct_ipv4() {
let _ = pretty_env_logger::try_init();
let docker = clients::Cli::default();
let container = docker.run(TestServer);
let client = Client::new();
let req = Request::builder()
.method("PUT")
.uri("http://localhost:8080/hostname")
.header("Authorization", "super_secure")
.body(Body::from(
"{\"hostname\":\"test.dyn.example.com\", \"ip\":\"12.13.14.15\"}",
))
.expect("request builder");
let res = client.request(req).await.expect("http request");
println!("status: {}", res.status());
let address = "127.0.0.1:5353".parse().unwrap();
let stream = UdpClientStream::<UdpSocket>::new(address);
//let (mut client, bg) = runtime.block_on(client).expect("connection failed");
let (mut client, bg) = AsyncClient::connect(stream).await.expect("connection failed");
let name = Name::from_str("test.dyn.example.com.").unwrap();
let response = client.query(name, DNSClass::IN, RecordType::A).await.unwrap();
let answers: &[Record] = response.answers();
if let &RData::A(ref ip) = answers[0].rdata() {
assert_eq!(*ip, Ipv4Addr::new(12, 13, 14, 15))
    } else {
        panic!("unexpected result")
    }
}
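// Manual equivalent of the assertion above (sketch): after the PUT request,
//
//     dig @127.0.0.1 -p 5353 test.dyn.example.com A
//
// should answer with 12.13.14.15.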
| 30.775701 | 105 | 0.609778 |
ddc14e68675a98cd3af319e08e56a05a3700bdb2 | 603 | pub trait MyIterator {
}
pub struct MyStruct<T>(T);
macro_rules! array_impls {
($($N:expr)+) => {
$(
impl<'a, T> MyIterator for &'a MyStruct<[T; $N]> {
}
)+
}
}
// @has issue_53812/trait.MyIterator.html '//*[@id="implementors-list"]/h3[1]' 'MyStruct<[T; 0]>'
// @has - '//*[@id="implementors-list"]/h3[2]' 'MyStruct<[T; 1]>'
// @has - '//*[@id="implementors-list"]/h3[3]' 'MyStruct<[T; 2]>'
// @has - '//*[@id="implementors-list"]/h3[4]' 'MyStruct<[T; 3]>'
// @has - '//*[@id="implementors-list"]/h3[5]' 'MyStruct<[T; 10]>'
array_impls! { 10 3 2 1 0 }
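// For reference, each element of `array_impls! { ... }` expands to an impl
// like the following (sketch for `$N = 0`):
//
//     impl<'a, T> MyIterator for &'a MyStruct<[T; 0]> {}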
| 28.714286 | 97 | 0.517413 |
ccd0b15fb493133c3c6b982b0b015103b79ade1e | 4,134 | //! This program shows how to render a triangle and change its position and color on the fly by
//! updating “shader uniforms”. Those are values stored on the GPU that remain constant for the
//! whole duration of a draw call (you typically change it between each draw call to customize each
//! draw).
//!
//! This example demonstrate how to add time to your shader to start building moving and animated
//! effects.
//!
//! Press the <up action>, <down action>, <left action> and <right action> to move the triangle on
//! the screen.
//!
//! <https://docs.rs/luminance>
use crate::{
shared::{Semantics, Vertex, VertexColor, VertexPosition},
Example, InputAction, LoopFeedback, PlatformServices,
};
use luminance::UniformInterface;
use luminance_front::{
context::GraphicsContext,
framebuffer::Framebuffer,
pipeline::PipelineState,
render_state::RenderState,
shader::{types::Vec2, Program, Uniform},
tess::{Mode, Tess},
texture::Dim2,
Backend,
};
const VS: &'static str = include_str!("displacement-vs.glsl");
const FS: &'static str = include_str!("displacement-fs.glsl");
// Only one triangle this time.
const TRI_VERTICES: [Vertex; 3] = [
Vertex {
pos: VertexPosition::new([0.5, -0.5]),
rgb: VertexColor::new([1., 0., 0.]),
},
Vertex {
pos: VertexPosition::new([0.0, 0.5]),
rgb: VertexColor::new([0., 1., 0.]),
},
Vertex {
pos: VertexPosition::new([-0.5, -0.5]),
rgb: VertexColor::new([0., 0., 1.]),
},
];
// Create a uniform interface. This is a type that will be used to customize the shader. In our
// case, we just want to pass the time and the position of the triangle.
//
// This macro only supports structs for now; you cannot use enums as uniform interfaces.
#[derive(Debug, UniformInterface)]
struct ShaderInterface {
#[uniform(name = "t")]
time: Uniform<f32>,
triangle_pos: Uniform<Vec2<f32>>,
}
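// With this interface, the shading gate hands back a `uni: &ShaderInterface`
// whose uniforms are set per draw call, e.g. `iface.set(&uni.time, t)`; see
// `render_frame` below.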
pub struct LocalExample {
program: Program<Semantics, (), ShaderInterface>,
triangle: Tess<Vertex>,
triangle_pos: Vec2<f32>,
}
impl Example for LocalExample {
fn bootstrap(
_platform: &mut impl PlatformServices,
context: &mut impl GraphicsContext<Backend = Backend>,
) -> Self {
let program = context
.new_shader_program()
.from_strings(VS, None, None, FS)
.expect("program creation")
.ignore_warnings();
let triangle = context
.new_tess()
.set_vertices(&TRI_VERTICES[..])
.set_mode(Mode::Triangle)
.build()
.unwrap();
let triangle_pos = Vec2::new(0., 0.);
Self {
program,
triangle,
triangle_pos,
}
}
fn render_frame(
mut self,
t: f32,
back_buffer: Framebuffer<Dim2, (), ()>,
actions: impl Iterator<Item = InputAction>,
context: &mut impl GraphicsContext<Backend = Backend>,
) -> LoopFeedback<Self> {
for action in actions {
match action {
InputAction::Quit => return LoopFeedback::Exit,
InputAction::Left => self.triangle_pos[0] -= 0.1,
InputAction::Right => self.triangle_pos[0] += 0.1,
InputAction::Forward => self.triangle_pos[1] += 0.1,
InputAction::Backward => self.triangle_pos[1] -= 0.1,
_ => (),
}
}
let program = &mut self.program;
let triangle = &self.triangle;
let triangle_pos = self.triangle_pos;
let render = context
.new_pipeline_gate()
.pipeline(
&back_buffer,
&PipelineState::default(),
|_, mut shd_gate| {
          // notice the iface free variable, whose type is &ShaderInterface
shd_gate.shade(program, |mut iface, uni, mut rdr_gate| {
// update the time and triangle position on the GPU shader program
iface.set(&uni.time, t);
iface.set(&uni.triangle_pos, triangle_pos);
rdr_gate.render(&RenderState::default(), |mut tess_gate| {
              // render the triangle
tess_gate.render(triangle)
})
})
},
)
.assume();
if render.is_ok() {
LoopFeedback::Continue(self)
} else {
LoopFeedback::Exit
}
}
}
| 29.319149 | 99 | 0.634253 |
acabb90da7b766858192e2aac6f53987c10f99d0 | 25,087 | use super::{Emitter, Result};
use crate::{list::ListFormat, text_writer::WriteJs};
use swc_common::Spanned;
use swc_ecma_ast::*;
use swc_ecma_codegen_macros::emitter;
impl<'a> Emitter<'a> {
#[emitter]
fn emit_pat_or_ts_param_prop(&mut self, n: &ParamOrTsParamProp) -> Result {
match *n {
ParamOrTsParamProp::Param(ref n) => emit!(n),
ParamOrTsParamProp::TsParamProp(ref n) => emit!(n),
}
}
#[emitter]
fn emit_ts_array_type(&mut self, n: &TsArrayType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.elem_type);
punct!("[");
punct!("]");
}
#[emitter]
fn emit_ts_as_expr(&mut self, n: &TsAsExpr) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.expr);
space!();
keyword!("as");
space!();
emit!(n.type_ann);
}
#[emitter]
fn emit_ts_call_signature_decl(&mut self, n: &TsCallSignatureDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.type_params);
punct!("(");
self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
punct!(")");
if let Some(type_ann) = &n.type_ann {
space!();
punct!("=>");
space!();
emit!(type_ann);
}
}
#[emitter]
fn emit_ts_cond_type(&mut self, n: &TsConditionalType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.check_type);
space!();
keyword!("extends");
space!();
emit!(n.extends_type);
space!();
punct!("?");
space!();
emit!(n.true_type);
space!();
punct!(":");
space!();
emit!(n.false_type);
}
#[emitter]
fn emit_ts_constructor_signature_decl(&mut self, n: &TsConstructSignatureDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_constructor_signature_decl")
}
#[emitter]
fn emit_ts_constructor_type(&mut self, n: &TsConstructorType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
keyword!("new");
space!();
if let Some(type_params) = &n.type_params {
emit!(type_params);
}
punct!("(");
self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
punct!(")");
formatting_space!();
punct!("=>");
formatting_space!();
emit!(n.type_ann)
}
#[emitter]
fn emit_ts_entity_name(&mut self, n: &TsEntityName) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n {
            TsEntityName::TsQualifiedName(n) => emit!(n),
TsEntityName::Ident(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_enum_decl(&mut self, n: &TsEnumDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.declare {
keyword!("declare");
space!();
}
if n.is_const {
keyword!("const");
space!();
}
keyword!("enum");
space!();
emit!(n.id);
formatting_space!();
punct!("{");
self.emit_list(n.span, Some(&n.members), ListFormat::EnumMembers)?;
punct!("}");
}
#[emitter]
fn emit_ts_enum_member(&mut self, n: &TsEnumMember) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.id);
if let Some(init) = &n.init {
formatting_space!();
punct!("=");
formatting_space!();
emit!(init);
}
}
#[emitter]
fn emit_ts_enum_member_id(&mut self, n: &TsEnumMemberId) -> Result {
match n {
TsEnumMemberId::Ident(n) => emit!(n),
TsEnumMemberId::Str(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_export_assignment(&mut self, n: &TsExportAssignment) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_export_assignment")
}
#[emitter]
fn emit_ts_expr_with_type_args(&mut self, n: &TsExprWithTypeArgs) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.expr);
emit!(n.type_args);
}
#[emitter]
fn emit_ts_external_module_ref(&mut self, n: &TsExternalModuleRef) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_external_module_ref")
}
#[emitter]
fn emit_ts_fn_or_constructor_type(&mut self, n: &TsFnOrConstructorType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n {
TsFnOrConstructorType::TsFnType(n) => emit!(n),
TsFnOrConstructorType::TsConstructorType(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_fn_param(&mut self, n: &TsFnParam) -> Result {
match n {
TsFnParam::Ident(n) => emit!(n),
TsFnParam::Array(n) => emit!(n),
TsFnParam::Rest(n) => emit!(n),
TsFnParam::Object(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_fn_type(&mut self, n: &TsFnType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.type_params);
punct!("(");
self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
punct!(")");
formatting_space!();
punct!("=>");
formatting_space!();
emit!(n.type_ann);
}
#[emitter]
fn emit_ts_import_equals_decl(&mut self, n: &TsImportEqualsDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.is_export {
keyword!("export");
space!();
}
keyword!("import");
formatting_space!();
punct!("=");
formatting_space!();
emit!(n.module_ref);
}
#[emitter]
fn emit_ts_index_signature(&mut self, n: &TsIndexSignature) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("[");
self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
punct!("]");
punct!(":");
formatting_space!();
emit!(n.type_ann);
semi!();
}
#[emitter]
fn emit_ts_index_accessed_type(&mut self, n: &TsIndexedAccessType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.obj_type);
punct!("[");
emit!(n.index_type);
punct!("]");
}
#[emitter]
fn emit_ts_infer_type(&mut self, n: &TsInferType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
keyword!("infer");
space!();
emit!(n.type_param);
}
#[emitter]
fn emit_ts_interface_body(&mut self, n: &TsInterfaceBody) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("{");
self.emit_list(n.span, Some(&n.body), ListFormat::InterfaceMembers)?;
punct!("}");
}
#[emitter]
fn emit_ts_interface_decl(&mut self, n: &TsInterfaceDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.declare {
keyword!("declare");
space!();
}
keyword!("interface");
space!();
emit!(n.id);
if !n.extends.is_empty() {
space!();
keyword!("extends");
space!();
self.emit_list(n.span, Some(&n.extends), ListFormat::HeritageClauseTypes)?;
}
formatting_space!();
emit!(n.body);
}
#[emitter]
fn emit_ts_intersection_type(&mut self, n: &TsIntersectionType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
self.emit_list(
n.span,
Some(&n.types),
ListFormat::IntersectionTypeConstituents,
)?;
}
#[emitter]
fn emit_ts_keyword_type(&mut self, n: &TsKeywordType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n.kind {
TsKeywordTypeKind::TsAnyKeyword => keyword!(n.span, "any"),
TsKeywordTypeKind::TsUnknownKeyword => keyword!(n.span, "unknown"),
TsKeywordTypeKind::TsNumberKeyword => keyword!(n.span, "number"),
TsKeywordTypeKind::TsObjectKeyword => keyword!(n.span, "object"),
TsKeywordTypeKind::TsBooleanKeyword => keyword!(n.span, "boolean"),
TsKeywordTypeKind::TsBigIntKeyword => keyword!(n.span, "bigint"),
TsKeywordTypeKind::TsStringKeyword => keyword!(n.span, "string"),
TsKeywordTypeKind::TsSymbolKeyword => keyword!(n.span, "symbol"),
TsKeywordTypeKind::TsVoidKeyword => keyword!(n.span, "void"),
TsKeywordTypeKind::TsUndefinedKeyword => keyword!(n.span, "undefined"),
TsKeywordTypeKind::TsNullKeyword => keyword!(n.span, "null"),
TsKeywordTypeKind::TsNeverKeyword => keyword!(n.span, "never"),
}
}
#[emitter]
fn emit_ts_lit(&mut self, n: &TsLit) -> Result {
match n {
TsLit::Number(n) => emit!(n),
TsLit::Str(n) => emit!(n),
TsLit::Bool(n) => emit!(n),
TsLit::Tpl(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_lit_type(&mut self, n: &TsLitType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.lit);
}
#[emitter]
fn emit_ts_mapped_type(&mut self, n: &TsMappedType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("{");
self.wr.write_line()?;
self.wr.increase_indent()?;
match n.readonly {
None => {}
Some(tpm) => match tpm {
TruePlusMinus::True => {
keyword!("readonly");
space!();
}
TruePlusMinus::Plus => {
punct!("+");
keyword!("readonly");
space!();
}
TruePlusMinus::Minus => {
punct!("-");
keyword!("readonly");
space!();
}
},
}
punct!("[");
emit!(n.type_param.name);
        if let Some(constraint) = &n.type_param.constraint {
            space!();
            keyword!("in");
            space!();
            emit!(constraint);
        }
        if let Some(default) = &n.type_param.default {
            formatting_space!();
            punct!("=");
            formatting_space!();
            emit!(default);
        }
punct!("]");
match n.optional {
None => {}
Some(tpm) => match tpm {
TruePlusMinus::True => {
punct!("?");
}
TruePlusMinus::Plus => {
punct!("+");
punct!("/");
}
TruePlusMinus::Minus => {
punct!("-");
punct!("?");
}
},
}
punct!(":");
space!();
emit!(n.type_ann);
semi!();
self.wr.write_line()?;
self.wr.decrease_indent()?;
punct!("}");
}
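    // For reference, a mapped type such as `{ [K in keyof T]?: T[K] }` should
    // round-trip through this emitter (sketch; exact whitespace depends on the
    // formatting macros).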
#[emitter]
fn emit_ts_method_signature(&mut self, n: &TsMethodSignature) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
        if n.readonly {
            keyword!("readonly");
            space!();
        }
if n.computed {
punct!("[");
emit!(n.key);
punct!("]");
} else {
emit!(n.key)
}
if n.optional {
punct!("?");
}
if let Some(type_params) = &n.type_params {
emit!(type_params);
}
punct!("(");
self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
punct!(")");
if let Some(ref type_ann) = n.type_ann {
punct!(":");
formatting_space!();
emit!(type_ann);
}
}
#[emitter]
fn emit_ts_module_block(&mut self, n: &TsModuleBlock) -> Result {
self.emit_list(n.span, Some(&n.body), ListFormat::SourceFileStatements)?;
self.emit_leading_comments_of_pos(n.span().lo())?;
}
#[emitter]
fn emit_ts_module_decl(&mut self, n: &TsModuleDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.declare {
keyword!("declare");
space!();
}
keyword!("module");
space!();
emit!(n.id);
formatting_space!();
if let Some(body) = &n.body {
emit!(body);
}
}
#[emitter]
fn emit_ts_module_name(&mut self, n: &TsModuleName) -> Result {
match n {
TsModuleName::Ident(n) => emit!(n),
TsModuleName::Str(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_module_ref(&mut self, n: &TsModuleRef) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_module_ref")
}
#[emitter]
fn emit_ts_ns_body(&mut self, n: &TsNamespaceBody) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("{");
self.wr.increase_indent()?;
match n {
TsNamespaceBody::TsModuleBlock(n) => emit!(n),
TsNamespaceBody::TsNamespaceDecl(n) => emit!(n),
}
self.wr.decrease_indent()?;
punct!("}");
}
#[emitter]
fn emit_ts_ns_decl(&mut self, n: &TsNamespaceDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_ns_decl")
}
#[emitter]
fn emit_ts_ns_export_decl(&mut self, n: &TsNamespaceExportDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_ns_export_decl")
}
#[emitter]
fn emit_ts_non_null_expr(&mut self, n: &TsNonNullExpr) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_non_null_expr")
}
#[emitter]
fn emit_ts_optional_type(&mut self, n: &TsOptionalType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.type_ann);
punct!("?");
}
#[emitter]
fn emit_ts_param_prop(&mut self, n: &TsParamProp) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
self.emit_accesibility(n.accessibility)?;
if n.readonly {
keyword!("readonly");
space!();
}
emit!(n.param);
}
#[emitter]
fn emit_ts_param_prop_param(&mut self, n: &TsParamPropParam) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n {
TsParamPropParam::Ident(n) => emit!(n),
TsParamPropParam::Assign(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_paren_type(&mut self, n: &TsParenthesizedType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("(");
emit!(n.type_ann);
punct!(")");
}
#[emitter]
fn emit_ts_property_signature(&mut self, n: &TsPropertySignature) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.readonly {
keyword!("readonly");
space!();
}
if n.computed {
punct!("[");
emit!(n.key);
punct!("]");
} else {
emit!(n.key);
}
if n.optional {
punct!("?");
}
emit!(n.type_params);
// punct!("(");
// self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
// punct!(")");
if let Some(type_ann) = &n.type_ann {
punct!(":");
formatting_space!();
emit!(type_ann);
}
if let Some(init) = &n.init {
formatting_space!();
punct!("=");
formatting_space!();
emit!(init);
}
}
#[emitter]
fn emit_ts_qualified_name(&mut self, n: &TsQualifiedName) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.left);
punct!(".");
emit!(n.right);
}
#[emitter]
fn emit_ts_rest_type(&mut self, n: &TsRestType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("...");
emit!(n.type_ann);
}
#[emitter]
fn emit_ts_signature_decl(&mut self, n: &TsSignatureDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_signature_decl")
}
#[emitter]
fn emit_ts_this_type(&mut self, n: &TsThisType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
keyword!(n.span, "this");
}
#[emitter]
fn emit_ts_this_type_or_ident(&mut self, n: &TsThisTypeOrIdent) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n {
TsThisTypeOrIdent::TsThisType(n) => emit!(n),
TsThisTypeOrIdent::Ident(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_tuple_type(&mut self, n: &TsTupleType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("[");
self.emit_list(n.span, Some(&n.elem_types), ListFormat::TupleTypeElements)?;
punct!("]");
}
#[emitter]
fn emit_ts_tuple_element(&mut self, n: &TsTupleElement) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if let Some(label) = &n.label {
emit!(label);
punct!(":");
formatting_space!();
}
emit!(n.ty)
}
#[emitter]
fn emit_ts_type(&mut self, n: &TsType) -> Result {
match n {
TsType::TsKeywordType(n) => emit!(n),
TsType::TsThisType(n) => emit!(n),
TsType::TsFnOrConstructorType(n) => emit!(n),
TsType::TsTypeRef(n) => emit!(n),
TsType::TsTypeQuery(n) => emit!(n),
TsType::TsTypeLit(n) => emit!(n),
TsType::TsArrayType(n) => emit!(n),
TsType::TsTupleType(n) => emit!(n),
TsType::TsOptionalType(n) => emit!(n),
TsType::TsRestType(n) => emit!(n),
TsType::TsUnionOrIntersectionType(n) => emit!(n),
TsType::TsConditionalType(n) => emit!(n),
TsType::TsInferType(n) => emit!(n),
TsType::TsParenthesizedType(n) => emit!(n),
TsType::TsTypeOperator(n) => emit!(n),
TsType::TsIndexedAccessType(n) => emit!(n),
TsType::TsMappedType(n) => emit!(n),
TsType::TsLitType(n) => emit!(n),
TsType::TsTypePredicate(n) => emit!(n),
TsType::TsImportType(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_import_type(&mut self, n: &TsImportType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
keyword!("import");
punct!("(");
emit!(n.arg);
punct!(")");
if let Some(n) = &n.qualifier {
punct!(".");
emit!(n);
}
        if let Some(type_args) = &n.type_args {
            emit!(type_args);
        }
}
#[emitter]
fn emit_ts_type_alias_decl(&mut self, n: &TsTypeAliasDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.declare {
keyword!("declare");
space!();
}
keyword!("type");
space!();
emit!(n.id);
if let Some(type_params) = &n.type_params {
emit!(type_params);
}
formatting_space!();
punct!("=");
formatting_space!();
emit!(n.type_ann);
semi!();
}
#[emitter]
fn emit_ts_type_ann(&mut self, n: &TsTypeAnn) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.type_ann)
}
#[emitter]
fn emit_ts_type_assertion(&mut self, n: &TsTypeAssertion) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_type_assertion")
}
#[emitter]
fn emit_ts_const_assertion(&mut self, n: &TsConstAssertion) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.expr);
space!();
keyword!("as");
space!();
keyword!("const");
}
#[emitter]
fn emit_ts_type_cast_expr(&mut self, n: &TsTypeCastExpr) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_type_cast_expr")
}
#[emitter]
fn emit_ts_type_element(&mut self, n: &TsTypeElement) -> Result {
match n {
TsTypeElement::TsCallSignatureDecl(n) => emit!(n),
TsTypeElement::TsConstructSignatureDecl(n) => emit!(n),
TsTypeElement::TsPropertySignature(n) => emit!(n),
TsTypeElement::TsMethodSignature(n) => emit!(n),
TsTypeElement::TsIndexSignature(n) => emit!(n),
}
semi!();
}
#[emitter]
fn emit_ts_type_lit(&mut self, n: &TsTypeLit) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("{");
self.emit_list(
n.span,
Some(&n.members),
ListFormat::MultiLineTypeLiteralMembers,
)?;
punct!("}");
}
#[emitter]
fn emit_ts_type_operator(&mut self, n: &TsTypeOperator) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n.op {
TsTypeOperatorOp::KeyOf => keyword!("keyof"),
TsTypeOperatorOp::Unique => keyword!("unique"),
TsTypeOperatorOp::ReadOnly => keyword!("readonly"),
}
space!();
emit!(n.type_ann);
}
#[emitter]
fn emit_ts_type_param(&mut self, n: &TsTypeParam) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.name);
        if let Some(constraint) = &n.constraint {
            space!();
            keyword!("extends");
            space!();
            emit!(constraint);
        }
if let Some(default) = &n.default {
formatting_space!();
punct!("=");
formatting_space!();
emit!(default);
}
}
#[emitter]
fn emit_ts_type_param_decl(&mut self, n: &TsTypeParamDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("<");
self.emit_list(n.span, Some(&n.params), ListFormat::TypeParameters)?;
punct!(">");
}
#[emitter]
fn emit_ts_type_param_instantiation(&mut self, n: &TsTypeParamInstantiation) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("<");
self.emit_list(n.span, Some(&n.params), ListFormat::TypeParameters)?;
punct!(">");
}
#[emitter]
fn emit_ts_type_predicate(&mut self, n: &TsTypePredicate) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.asserts {
keyword!("asserts");
space!();
}
emit!(n.param_name);
if let Some(type_ann) = &n.type_ann {
space!();
keyword!("is");
space!();
emit!(type_ann);
}
}
#[emitter]
fn emit_ts_type_query(&mut self, n: &TsTypeQuery) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
keyword!("typeof");
space!();
emit!(n.expr_name);
}
#[emitter]
fn emit_ts_type_query_expr(&mut self, n: &TsTypeQueryExpr) -> Result {
match n {
TsTypeQueryExpr::TsEntityName(n) => emit!(n),
TsTypeQueryExpr::Import(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_type_ref(&mut self, n: &TsTypeRef) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.type_name);
if let Some(n) = &n.type_params {
punct!("<");
self.emit_list(n.span, Some(&n.params), ListFormat::TypeArguments)?;
punct!(">");
}
}
#[emitter]
fn emit_ts_union_or_intersection_type(&mut self, n: &TsUnionOrIntersectionType) -> Result {
match n {
TsUnionOrIntersectionType::TsUnionType(n) => emit!(n),
TsUnionOrIntersectionType::TsIntersectionType(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_union_type(&mut self, n: &TsUnionType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
self.emit_list(n.span, Some(&n.types), ListFormat::UnionTypeConstituents)?;
}
}
| 26.351891 | 95 | 0.521226 |
21123e7faf293dcb19bf566c4c30d8aaf317496f | 1,562 |
pub struct IconAssignmentLate {
props: crate::Props,
}
impl yew::Component for IconAssignmentLate {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M19 3h-4.18C14.4 1.84 13.3 1 12 1c-1.3 0-2.4.84-2.82 2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-6 15h-2v-2h2v2zm0-4h-2V8h2v6zm-1-9c-.55 0-1-.45-1-1s.45-1 1-1 1 .45 1 1-.45 1-1 1z"/></svg>
</svg>
}
}
}
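// Usage sketch inside a parent component's `html!` (prop names follow
// `crate::Props` and are assumptions here):
//
//     html! { <IconAssignmentLate size=32 color="#444" /> }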
| 33.956522 | 359 | 0.574264 |
39d66bf07131bf4470e3658d59cfc5808cb5e429 | 13,757 | /*
*
* * This file is part of OpenTSDB.
* * Copyright (C) 2021 Yahoo.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
use std::{collections::HashMap, rc::Rc};
use crate::query::query::{Query, QueryType};
use crate::segment::myst_segment::MystSegment;
use crate::segment::persistence::Builder;
use crate::query::cache::Cache;
use crate::query::filter::FilterType;
use crate::query::query_filter::QueryFilter;
use crate::segment::segment_reader::SegmentReader;
use crate::query::query_runner::QueryRunner;
use bloomfilter::Bloom;
use croaring::Bitmap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn write_and_get_segment_readers() -> Vec<SegmentReader<File>> {
let data_path = String::from("./data/");
let mut segment_readers = Vec::new();
let epoch = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
let mut segment = MystSegment::new(1, epoch, 200);
let shard_id = segment.shard_id;
let created = segment.epoch;
let cache = Arc::new(Cache::new());
write_data(&mut segment, &epoch, 0, 100);
close_segment(segment);
let file_path = MystSegment::get_segment_filename(&shard_id, &created, data_path);
let reader = File::open(file_path.clone()).unwrap();
let segment_reader = SegmentReader::new(shard_id, created, reader, cache, file_path).unwrap();
segment_readers.push(segment_reader);
segment_readers
}
pub fn write_data(segment: &mut MystSegment, epoch: &u64, metrics_start: u32, metrics_end: u32) {
let mut tags = HashMap::new();
tags.insert(Rc::new(String::from("foo")), Rc::new(String::from("bar")));
tags.insert(Rc::new(String::from("do")), Rc::new(String::from("re")));
tags.insert(Rc::new(String::from("hi")), Rc::new(String::from("hello")));
// let mut event = YmsEvent::default();
// let mut datum = Datum::default();
for i in metrics_start..metrics_end {
let mut metric = String::from("metric");
metric.push_str(&i.to_string());
//datum.metric.insert(metric, hash);
segment.add_timeseries(Rc::new(metric), tags.clone(), i as u64, *epoch);
}
}
pub fn close_segment(mut segment: MystSegment) {
println!("Closing");
let data_path = String::from("./data/");
let shard_id = segment.shard_id;
let epoch = segment.epoch;
let mut writer = File::create(MystSegment::get_segment_filename(
&shard_id,
&epoch,
data_path.clone(),
))
.unwrap();
segment.build(&mut writer, &mut 0).unwrap();
let mut lock_file_name = MystSegment::get_path_prefix(&shard_id, &epoch, data_path).clone();
lock_file_name.push_str("/.lock");
File::create(lock_file_name).unwrap();
println!("done Closing");
}
pub fn write_data_large_segment() -> Vec<SegmentReader<BufReader<File>>> {
let data_path = String::from("./data/");
let mut segment_readers = Vec::new();
let mut epoch = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
let mut segment = MystSegment::new(1, epoch, 200);
let shard_id = segment.shard_id;
let created = segment.epoch;
let cache = Arc::new(Cache::new());
write_data(&mut segment, &epoch, 0, 1);
epoch = epoch + 7 * 60 * 60;
write_data(&mut segment, &epoch, 2, 3);
epoch = epoch + 7 * 60 * 60;
write_data(&mut segment, &epoch, 4, 5);
close_segment(segment);
let file_path = MystSegment::get_segment_filename(&shard_id, &created, data_path);
let reader = BufReader::new(File::open(file_path.clone()).unwrap());
let segment_reader = SegmentReader::new(shard_id, created, reader, cache, file_path).unwrap();
segment_readers.push(segment_reader);
segment_readers
}
pub fn build_regex_tag_value_filter() -> QueryFilter {
// let _metric_filter = QueryFilter::Metric {
// metric: String::from("metric0"),
// _type: FilterType::Literal,
// };
let tag_filter = QueryFilter::TagValue {
key: String::from("foo"),
filter: String::from("b.*"),
_type: FilterType::Regex,
};
let filters = vec![tag_filter];
let query_filter = QueryFilter::Chain {
filters,
op: String::from("AND"),
};
query_filter
}
pub fn build_literal_tag_value_filter() -> QueryFilter {
let _metric_filter = QueryFilter::Metric {
metric: String::from("metric0"),
_type: FilterType::Literal,
};
let tag_filter = QueryFilter::TagValue {
key: String::from("foo"),
filter: String::from("bar"),
_type: FilterType::Literal,
};
let filters = vec![tag_filter];
let query_filter = QueryFilter::Chain {
filters,
op: String::from("AND"),
};
query_filter
}
pub fn build_not_tag_value_filter() -> QueryFilter {
let metric_filter = QueryFilter::Metric {
metric: String::from("metric0"),
_type: FilterType::Literal,
};
let tag_filter = QueryFilter::TagValue {
key: String::from("foo"),
filter: String::from("bar"),
_type: FilterType::Literal,
};
let not_filter = QueryFilter::NOT {
filter: Box::from(tag_filter),
};
let filters = vec![metric_filter, not_filter];
let query_filter = QueryFilter::Chain {
filters,
op: String::from("AND"),
};
query_filter
}
pub fn build_explicit_tag_filter() -> QueryFilter {
let metric_filter = QueryFilter::Metric {
metric: String::from("metric0"),
_type: FilterType::Literal,
};
let tag_filter_1 = QueryFilter::TagValue {
key: String::from("foo"),
filter: String::from("bar"),
_type: FilterType::Literal,
};
let tag_filter_2 = QueryFilter::TagValue {
key: String::from("do"),
filter: String::from("re"),
_type: FilterType::Literal,
};
let tag_filter_3 = QueryFilter::TagValue {
key: String::from("hi"),
filter: String::from("hello"),
_type: FilterType::Literal,
};
let filters = vec![metric_filter, tag_filter_1, tag_filter_2, tag_filter_3];
let query_filter = QueryFilter::Chain {
filters,
op: String::from("AND"),
};
let explicit_filter = QueryFilter::ExplicitTags {
filter: Box::new(query_filter),
count: 3,
};
explicit_filter
}
pub fn build_timeseries_query(filter: QueryFilter) -> Query {
let query = Query {
from: 0,
to: 0,
start: 0,
end: 0,
query_type: QueryType::TIMESERIES,
limit: 0,
group: vec![String::from("foo"), String::from("do"), String::from("hi")],
filter,
};
query
}
#[test]
pub fn test_query() {
// let data = r#"{"from":0,"to":1,"start":1617815836,"end":1617837436,"order":"ASCENDING","type":"TIMESERIES","group":["foo", "do"],"namespace":"ssp","query":{"filters":[{"filter":"prod","type":"TagValueLiteralOr","tagKey":"corp:Environment"}],"op":"AND","type":"Chain"}}"#;
let data = r#"{"from":0,"to":1,"start":1619475054,"end":1619496654,"order":"ASCENDING","type":"TIMESERIES","group":[],"namespace":"ssp","query":{"filters":[{"metric":"med.req.ad.Requests","type":"MetricLiteral"}],"op":"AND","type":"Chain"}}"#;
let q = Query::from_json(data);
println!("{:?}", q);
//info!("{:?}", q);
}
#[test]
pub fn search_timeseries() {
let segment_readers = write_and_get_segment_readers();
let filter = build_regex_tag_value_filter();
let query = build_timeseries_query(filter);
let mut config = crate::utils::config::Config::default();
config.docstore_block_size = 200;
let mut query_runner = QueryRunner::new(segment_readers, &query, &config);
    let curr_time = SystemTime::now();
let thread_pool = rayon::ThreadPoolBuilder::new()
.num_threads(num_cpus::get())
.build()
.unwrap();
let mut ts = crate::myst_grpc::TimeseriesResponse {
grouped_timeseries: Vec::new(),
dict: None,
streams: 0,
};
query_runner
.search_timeseries(&thread_pool, &mut ts)
.unwrap();
println!(
"Time to get all metrics {:?}",
SystemTime::now().duration_since(curr_time).unwrap()
);
println!("{:?}", ts);
assert_eq!(ts.grouped_timeseries.len(), 1);
}
#[test]
pub fn search_timeseries_large_segment() {
let segment_readers = write_data_large_segment();
let filter = build_regex_tag_value_filter();
let mut query = build_timeseries_query(filter);
query.start = segment_readers.get(0).unwrap().created;
query.end = query.start + 11 * 60 * 60;
let mut config = crate::utils::config::Config::default();
config.docstore_block_size = 200;
let mut query_runner = QueryRunner::new(segment_readers, &query, &config);
    let curr_time = SystemTime::now();
let thread_pool = rayon::ThreadPoolBuilder::new()
.num_threads(num_cpus::get())
.build()
.unwrap();
let mut ts = crate::myst_grpc::TimeseriesResponse {
grouped_timeseries: Vec::new(),
dict: None,
streams: 0,
};
query_runner
.search_timeseries(&thread_pool, &mut ts)
.unwrap();
println!(
"Time to get all metrics {:?}",
SystemTime::now().duration_since(curr_time).unwrap()
);
println!("{:?}", ts);
assert_eq!(ts.grouped_timeseries.len(), 1);
}
#[test]
pub fn search_timeseries_with_not_filter() {
let segment_readers = write_and_get_segment_readers();
let filter = build_not_tag_value_filter();
let query = build_timeseries_query(filter);
let config = crate::utils::config::Config::default();
let mut query_runner = QueryRunner::new(segment_readers, &query, &config);
    let curr_time = SystemTime::now();
let thread_pool = rayon::ThreadPoolBuilder::new()
.num_threads(1)
.build()
.unwrap();
let mut ts = crate::myst_grpc::TimeseriesResponse {
grouped_timeseries: Vec::new(),
dict: None,
streams: 0,
};
query_runner
.search_timeseries(&thread_pool, &mut ts)
.unwrap();
println!(
"Time to get all metrics {:?}",
SystemTime::now().duration_since(curr_time).unwrap()
);
assert_eq!(ts.grouped_timeseries.len(), 0);
}
#[test]
pub fn search_timeseries_with_explicit_filter() {
let segment_readers = write_and_get_segment_readers();
let filter = build_explicit_tag_filter();
let query = build_timeseries_query(filter);
let config = crate::utils::config::Config::default();
println!("{:?}", query);
let mut query_runner = QueryRunner::new(segment_readers, &query, &config);
    let curr_time = SystemTime::now();
let thread_pool = rayon::ThreadPoolBuilder::new()
.num_threads(1)
.build()
.unwrap();
let mut ts = crate::myst_grpc::TimeseriesResponse {
grouped_timeseries: Vec::new(),
dict: None,
streams: 0,
};
query_runner
.search_timeseries(&thread_pool, &mut ts)
.unwrap();
println!(
"Time to get all metrics {:?}",
SystemTime::now().duration_since(curr_time).unwrap()
);
println!("{:?}", ts);
assert_eq!(ts.grouped_timeseries.len(), 1);
}
#[test]
pub fn test_groupby_ordering() {
let segment_readers = write_and_get_segment_readers();
let filter = build_literal_tag_value_filter();
let mut query = build_timeseries_query(filter);
query.group = vec![String::from("foo"), String::from("do")];
let config = crate::utils::config::Config::default();
let mut query_runner = QueryRunner::new(segment_readers, &query, &config);
    let curr_time = SystemTime::now();
let thread_pool = rayon::ThreadPoolBuilder::new()
.num_threads(1)
.build()
.unwrap();
let mut ts = crate::myst_grpc::TimeseriesResponse {
grouped_timeseries: Vec::new(),
dict: None,
streams: 0,
};
query_runner
.search_timeseries(&thread_pool, &mut ts)
.unwrap();
println!(
"Time to get all metrics {:?}",
SystemTime::now().duration_since(curr_time).unwrap()
);
let group = ts.grouped_timeseries;
assert_eq!((&group).len(), 1);
let groups = &group.get(0).unwrap().group;
assert_eq!(groups.len(), 2);
let dict = ts.dict.unwrap();
assert_eq!("bar", dict.dict.get(groups.get(0).unwrap()).unwrap());
assert_eq!("re", dict.dict.get(groups.get(1).unwrap()).unwrap());
}
#[test]
pub fn test_bloom() {
    let expected_num_items = 100_000_000_000;
// out of 100 items that are not inserted, expect 1 to return true for contain
let false_positive_rate = 0.01;
    let bitmap_size = Bloom::<u32>::compute_bitmap_size(expected_num_items, false_positive_rate);
    println!("{}", bitmap_size);
let mut bitmap = Bitmap::create_with_capacity(expected_num_items as u32);
for i in 0..expected_num_items {
bitmap.add(i as u32);
}
println!("size of bitmap {:?}", bitmap.get_serialized_size_in_bytes());
bitmap.run_optimize();
println!("size of bitmap {:?}", bitmap.get_serialized_size_in_bytes());
}
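// Rough sizing intuition (assuming the standard bloom-filter formula
// m = -n * ln(p) / (ln 2)^2): at p = 0.01 this works out to about 9.6 bits
// per inserted item, e.g.
//
//     let bits_per_item = (0.01f64).ln().abs() / 2f64.ln().powi(2); // ~9.57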
| 33.553659 | 279 | 0.63335 |
791e89d5a6088fdc03c73f7faf855cfd45f1c63a | 15,199 | // use std::io::{stdout, Write};
use crossterm;
use super::*;
use super::keybindings::keys;
impl Lino {
pub(crate) fn initiate_input_event_loop(&mut self, syntect_config: &mut SyntectConfig) {
loop {
if self.rendering.is_rendering { continue; }
self.render(syntect_config);
// let previous_cursor = self.cursor.clone();
// `read()` blocks until an `Event` is available
let event = crossterm::event::read();
if event.is_err() {
self.panic_gracefully(&Error::err4());
}
match event.unwrap() {
crossterm::event::Event::Key(key_event) => {
self.handle_key_event(&key_event);
},
crossterm::event::Event::Mouse(_) => (),
crossterm::event::Event::Resize(_, _) => {
self.update_terminal_size();
},
}
if self.should_exit { break; }
}
}
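    // Key events below are normalized into binding strings such as "ctrl+s"
    // or "shift+tab" (built from the constants in `keybindings::keys`); the
    // editor presumably dispatches on that string after the match below.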
pub(crate) fn handle_key_event(&mut self, event: &crossterm::event::KeyEvent) {
        let mut key_binding = String::new();
self.highlighting.start_row = self.cursor.row;
match event.code {
crossterm::event::KeyCode::Char(c) => {
if event.modifiers == crossterm::event::KeyModifiers::SHIFT
|| event.modifiers == crossterm::event::KeyModifiers::NONE {
self.input_char_buf = Some(c);
key_binding = format!("{}", keys::CHAR_INPUT);
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'w' || c == 'W') {
key_binding = format!("{}+{}", keys::CTRL, 'w');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'q' || c == 'Q') {
key_binding = format!("{}+{}", keys::CTRL, 'q');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 's' || c == 'S') {
key_binding = format!("{}+{}", keys::CTRL, 's');
}
else if event.modifiers == crossterm::event::KeyModifiers::ALT
&& (c == 's' || c == 'S') {
key_binding = format!("{}+{}", keys::ALT, 's');
}
else if event.modifiers == crossterm::event::KeyModifiers::ALT
&& (c == 'g' || c == 'G') {
key_binding = format!("{}+{}", keys::ALT, 'g');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
| crossterm::event::KeyModifiers::SHIFT && (c == 's' || c == 'S') {
key_binding = format!("{}+{}+{}", keys::CTRL, keys::SHIFT, 's');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'a' || c == 'A') {
key_binding = format!("{}+{}", keys::CTRL, 'a');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'c' || c == 'C') {
key_binding = format!("{}+{}", keys::CTRL, 'c');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'd' || c == 'D') {
key_binding = format!("{}+{}", keys::CTRL, 'd');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'x' || c == 'X') {
key_binding = format!("{}+{}", keys::CTRL, 'x');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'v' || c == 'V') {
key_binding = format!("{}+{}", keys::CTRL, 'v');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'z' || c == 'Z') {
key_binding = format!("{}+{}", keys::CTRL, 'z');
}
else if event.modifiers == (crossterm::event::KeyModifiers::CONTROL
| crossterm::event::KeyModifiers::SHIFT) && (c == 'z' || c == 'Z') {
key_binding = format!("{}+{}", keys::CTRL, 'y');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'y' || c == 'Y') {
key_binding = format!("{}+{}", keys::CTRL, 'y');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'f' || c == 'F') {
key_binding = format!("{}+{}", keys::CTRL, 'f');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'r' || c == 'R') {
key_binding = format!("{}+{}", keys::CTRL, 'r');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'n' || c == 'N') {
key_binding = format!("{}+{}", keys::CTRL, 'n');
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL
&& (c == 'p' || c == 'P') {
key_binding = format!("{}+{}", keys::CTRL, 'p');
}
else if event.modifiers == crossterm::event::KeyModifiers::ALT
&& c == ']' {
key_binding = format!("{}+{}", keys::ALT, ']');
}
else if event.modifiers == crossterm::event::KeyModifiers::ALT
&& c == '[' {
key_binding = format!("{}+{}", keys::ALT, '[');
}
},
crossterm::event::KeyCode::Tab => {
key_binding = format!("{}", keys::TAB);
},
crossterm::event::KeyCode::BackTab => {
key_binding = format!("{}+{}", keys::SHIFT, keys::TAB);
},
crossterm::event::KeyCode::Enter => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::ENTER);
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL {
key_binding = format!("{}+{}", keys::CTRL, keys::ENTER);
}
else if event.modifiers == crossterm::event::KeyModifiers::SHIFT {
key_binding = format!("{}+{}", keys::SHIFT, keys::ENTER);
}
},
crossterm::event::KeyCode::Backspace => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::BACKSPACE);
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL {
key_binding = format!("{}+{}", keys::CTRL, keys::BACKSPACE);
}
else if event.modifiers == crossterm::event::KeyModifiers::ALT {
key_binding = format!("{}+{}", keys::ALT, keys::BACKSPACE);
}
},
crossterm::event::KeyCode::Delete => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::DELETE);
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL {
key_binding = format!("{}+{}", keys::CTRL, keys::DELETE);
}
else if event.modifiers == crossterm::event::KeyModifiers::ALT {
key_binding = format!("{}+{}", keys::ALT, keys::DELETE);
}
else if event.modifiers == crossterm::event::KeyModifiers::SHIFT {
key_binding = format!("{}+{}", keys::SHIFT, keys::DELETE);
}
},
crossterm::event::KeyCode::Home => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::HOME);
}
else if event.modifiers == crossterm::event::KeyModifiers::SHIFT {
key_binding = format!("{}+{}", keys::SHIFT, keys::HOME);
}
},
crossterm::event::KeyCode::End => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::END);
}
else if event.modifiers == crossterm::event::KeyModifiers::SHIFT {
key_binding = format!("{}+{}", keys::SHIFT, keys::END);
}
},
crossterm::event::KeyCode::PageUp => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::PAGE_UP);
}
else if event.modifiers == crossterm::event::KeyModifiers::SHIFT {
key_binding = format!("{}+{}", keys::SHIFT, keys::PAGE_UP);
}
},
crossterm::event::KeyCode::PageDown => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::PAGE_DOWN);
}
else if event.modifiers == crossterm::event::KeyModifiers::SHIFT {
key_binding = format!("{}+{}", keys::SHIFT, keys::PAGE_DOWN);
}
},
crossterm::event::KeyCode::Left => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::LEFT);
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL {
key_binding = format!("{}+{}", keys::CTRL, keys::LEFT);
}
else if event.modifiers == crossterm::event::KeyModifiers::SHIFT {
key_binding = format!("{}+{}", keys::SHIFT, keys::LEFT);
}
else if event.modifiers == crossterm::event::KeyModifiers::ALT {
key_binding = format!("{}+{}", keys::ALT, keys::LEFT);
}
else if event.modifiers == (crossterm::event::KeyModifiers::CONTROL
    | crossterm::event::KeyModifiers::SHIFT) {
    key_binding = format!("{}+{}+{}", keys::CTRL, keys::SHIFT, keys::LEFT);
}
},
crossterm::event::KeyCode::Right => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::RIGHT);
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL {
key_binding = format!("{}+{}", keys::CTRL, keys::RIGHT);
}
else if event.modifiers == crossterm::event::KeyModifiers::SHIFT {
key_binding = format!("{}+{}", keys::SHIFT, keys::RIGHT);
}
else if event.modifiers == crossterm::event::KeyModifiers::ALT {
key_binding = format!("{}+{}", keys::ALT, keys::RIGHT);
}
else if event.modifiers == (crossterm::event::KeyModifiers::CONTROL
    | crossterm::event::KeyModifiers::SHIFT) {
    key_binding = format!("{}+{}+{}", keys::CTRL, keys::SHIFT, keys::RIGHT);
}
},
crossterm::event::KeyCode::Up => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::UP);
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL {
key_binding = format!("{}+{}", keys::CTRL, keys::UP);
}
else if event.modifiers == crossterm::event::KeyModifiers::SHIFT {
key_binding = format!("{}+{}", keys::SHIFT, keys::UP);
}
else if event.modifiers == crossterm::event::KeyModifiers::ALT {
key_binding = format!("{}+{}", keys::ALT, keys::UP);
}
else if event.modifiers == (crossterm::event::KeyModifiers::CONTROL
    | crossterm::event::KeyModifiers::SHIFT) {
    key_binding = format!("{}+{}+{}", keys::CTRL, keys::SHIFT, keys::UP);
}
else if event.modifiers == (crossterm::event::KeyModifiers::ALT
    | crossterm::event::KeyModifiers::SHIFT) {
    key_binding = format!("{}+{}+{}", keys::ALT, keys::SHIFT, keys::UP);
}
},
crossterm::event::KeyCode::Down => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::DOWN);
}
else if event.modifiers == crossterm::event::KeyModifiers::CONTROL {
key_binding = format!("{}+{}", keys::CTRL, keys::DOWN);
}
else if event.modifiers == crossterm::event::KeyModifiers::SHIFT {
key_binding = format!("{}+{}", keys::SHIFT, keys::DOWN);
}
else if event.modifiers == crossterm::event::KeyModifiers::ALT {
key_binding = format!("{}+{}", keys::ALT, keys::DOWN);
}
else if event.modifiers == (crossterm::event::KeyModifiers::CONTROL
    | crossterm::event::KeyModifiers::SHIFT) {
    key_binding = format!("{}+{}+{}", keys::CTRL, keys::SHIFT, keys::DOWN);
}
else if event.modifiers == (crossterm::event::KeyModifiers::ALT
    | crossterm::event::KeyModifiers::SHIFT) {
    key_binding = format!("{}+{}+{}", keys::ALT, keys::SHIFT, keys::DOWN);
}
},
crossterm::event::KeyCode::Esc => {
if event.modifiers == crossterm::event::KeyModifiers::NONE {
key_binding = format!("{}", keys::ESC);
}
},
_ => ()
}
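// Dispatch: `keybindings` maps the binding string built above to the
// handler to run; the handler is called with `self`.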
if let Some(operation_to_perform) = self.keybindings.get(&key_binding) {
    operation_to_perform(self);
}
self.set_file_unsaved_if_applicable();
self.highlighting.end_row = self.cursor.row;
}
}
| 43.301994 | 92 | 0.440292 |
d666c80b899e99e13d86e223d020945f80f90823 | 87,311 | #![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(in_band_lifetimes)]
#![feature(nll)]
#![recursion_limit = "256"]
#[macro_use]
extern crate syntax;
use rustc::bug;
use rustc::hir::def::{DefKind, Res};
use rustc::hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc::hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc::hir::itemlikevisit::DeepVisitor;
use rustc::hir::{self, AssocItemKind, Node, PatKind};
use rustc::lint;
use rustc::middle::privacy::{AccessLevel, AccessLevels};
use rustc::ty::fold::TypeVisitor;
use rustc::ty::query::Providers;
use rustc::ty::subst::InternalSubsts;
use rustc::ty::{self, GenericParamDefKind, TraitRef, Ty, TyCtxt, TypeFoldable};
use rustc::util::nodemap::HirIdSet;
use rustc_data_structures::fx::FxHashSet;
use syntax::ast::Ident;
use syntax::attr;
use syntax::symbol::{kw, sym};
use syntax_pos::hygiene::Transparency;
use syntax_pos::Span;
use std::marker::PhantomData;
use std::{cmp, fmt, mem};
use rustc_error_codes::*;
////////////////////////////////////////////////////////////////////////////////
/// Generic infrastructure used to implement specific visitors below.
////////////////////////////////////////////////////////////////////////////////
/// Implemented to visit all `DefId`s in a type.
/// Visiting `DefId`s is useful because visibilities and reachabilities are attached to them.
/// The idea is to visit "all components of a type", as documented in
/// https://github.com/rust-lang/rfcs/blob/master/text/2145-type-privacy.md#how-to-determine-visibility-of-a-type.
/// The default type visitor (`TypeVisitor`) does most of the job, but it has some shortcomings.
/// First, it doesn't have an overridable `fn visit_trait_ref`, so we have to catch trait `DefId`s
/// manually. Second, it doesn't visit some type components like signatures of fn types, or traits
/// in `impl Trait`, see individual comments in `DefIdVisitorSkeleton::visit_ty`.
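///
/// A minimal sketch of an implementor (hypothetical, for illustration only;
/// no such visitor exists in this module). It records every `DefId` it sees:
///
/// ```ignore (illustrative)
/// struct CollectDefIds<'tcx> {
///     tcx: TyCtxt<'tcx>,
///     found: Vec<DefId>,
/// }
///
/// impl DefIdVisitor<'tcx> for CollectDefIds<'tcx> {
///     fn tcx(&self) -> TyCtxt<'tcx> {
///         self.tcx
///     }
///     fn visit_def_id(&mut self, def_id: DefId, _: &str, _: &dyn fmt::Display) -> bool {
///         self.found.push(def_id);
///         false // `false` keeps walking; `true` short-circuits the visit
///     }
/// }
///
/// // `collector.visit(ty)` then returns `false` and fills `collector.found`
/// // with the `DefId`s of the components of `ty`.
/// ```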
trait DefIdVisitor<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx>;
fn shallow(&self) -> bool {
false
}
fn skip_assoc_tys(&self) -> bool {
false
}
fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool;
/// Not overridden, but used to actually visit types and traits.
fn skeleton(&mut self) -> DefIdVisitorSkeleton<'_, 'tcx, Self> {
DefIdVisitorSkeleton {
def_id_visitor: self,
visited_opaque_tys: Default::default(),
dummy: Default::default(),
}
}
fn visit(&mut self, ty_fragment: impl TypeFoldable<'tcx>) -> bool {
ty_fragment.visit_with(&mut self.skeleton())
}
fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> bool {
self.skeleton().visit_trait(trait_ref)
}
fn visit_predicates(&mut self, predicates: ty::GenericPredicates<'tcx>) -> bool {
self.skeleton().visit_predicates(predicates)
}
}
struct DefIdVisitorSkeleton<'v, 'tcx, V>
where
V: DefIdVisitor<'tcx> + ?Sized,
{
def_id_visitor: &'v mut V,
visited_opaque_tys: FxHashSet<DefId>,
dummy: PhantomData<TyCtxt<'tcx>>,
}
impl<'tcx, V> DefIdVisitorSkeleton<'_, 'tcx, V>
where
V: DefIdVisitor<'tcx> + ?Sized,
{
fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> bool {
let TraitRef { def_id, substs } = trait_ref;
self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref.print_only_trait_path())
|| (!self.def_id_visitor.shallow() && substs.visit_with(self))
}
fn visit_predicates(&mut self, predicates: ty::GenericPredicates<'tcx>) -> bool {
let ty::GenericPredicates { parent: _, predicates } = predicates;
for (predicate, _span) in predicates {
match predicate {
ty::Predicate::Trait(poly_predicate) => {
let ty::TraitPredicate { trait_ref } = *poly_predicate.skip_binder();
if self.visit_trait(trait_ref) {
return true;
}
}
ty::Predicate::Projection(poly_predicate) => {
let ty::ProjectionPredicate { projection_ty, ty } =
*poly_predicate.skip_binder();
if ty.visit_with(self) {
return true;
}
if self.visit_trait(projection_ty.trait_ref(self.def_id_visitor.tcx())) {
return true;
}
}
ty::Predicate::TypeOutlives(poly_predicate) => {
let ty::OutlivesPredicate(ty, _region) = *poly_predicate.skip_binder();
if ty.visit_with(self) {
return true;
}
}
ty::Predicate::RegionOutlives(..) => {}
_ => bug!("unexpected predicate: {:?}", predicate),
}
}
false
}
}
impl<'tcx, V> TypeVisitor<'tcx> for DefIdVisitorSkeleton<'_, 'tcx, V>
where
V: DefIdVisitor<'tcx> + ?Sized,
{
fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
let tcx = self.def_id_visitor.tcx();
// InternalSubsts are not visited here because they are visited below in `super_visit_with`.
match ty.kind {
ty::Adt(&ty::AdtDef { did: def_id, .. }, ..)
| ty::Foreign(def_id)
| ty::FnDef(def_id, ..)
| ty::Closure(def_id, ..)
| ty::Generator(def_id, ..) => {
if self.def_id_visitor.visit_def_id(def_id, "type", &ty) {
return true;
}
if self.def_id_visitor.shallow() {
return false;
}
// Default type visitor doesn't visit signatures of fn types.
// Something like `fn() -> Priv {my_func}` is considered a private type even if
// `my_func` is public, so we need to visit signatures.
if let ty::FnDef(..) = ty.kind {
if tcx.fn_sig(def_id).visit_with(self) {
return true;
}
}
// Inherent static methods don't have self type in substs.
// Something like `fn() {my_method}` type of the method
// `impl Pub<Priv> { pub fn my_method() {} }` is considered a private type,
// so we need to visit the self type additionally.
if let Some(assoc_item) = tcx.opt_associated_item(def_id) {
if let ty::ImplContainer(impl_def_id) = assoc_item.container {
if tcx.type_of(impl_def_id).visit_with(self) {
return true;
}
}
}
}
ty::Projection(proj) | ty::UnnormalizedProjection(proj) => {
if self.def_id_visitor.skip_assoc_tys() {
// Visitors searching for minimal visibility/reachability want to
// conservatively approximate associated types like `<Type as Trait>::Alias`
// as visible/reachable even if both `Type` and `Trait` are private.
// Ideally, associated types should be substituted in the same way as
// free type aliases, but this isn't done yet.
return false;
}
// This will also visit substs if necessary, so we don't need to recurse.
return self.visit_trait(proj.trait_ref(tcx));
}
ty::Dynamic(predicates, ..) => {
// All traits in the list are considered the "primary" part of the type
// and are visited by shallow visitors.
for predicate in *predicates.skip_binder() {
let trait_ref = match *predicate {
ty::ExistentialPredicate::Trait(trait_ref) => trait_ref,
ty::ExistentialPredicate::Projection(proj) => proj.trait_ref(tcx),
ty::ExistentialPredicate::AutoTrait(def_id) => {
ty::ExistentialTraitRef { def_id, substs: InternalSubsts::empty() }
}
};
let ty::ExistentialTraitRef { def_id, substs: _ } = trait_ref;
if self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref) {
return true;
}
}
}
ty::Opaque(def_id, ..) => {
// Skip repeated `Opaque`s to avoid infinite recursion.
if self.visited_opaque_tys.insert(def_id) {
// The intent is to treat `impl Trait1 + Trait2` identically to
// `dyn Trait1 + Trait2`. Therefore we ignore def-id of the opaque type itself
// (it either has no visibility, or its visibility is insignificant, like
// visibilities of type aliases) and recurse into predicates instead to go
// through the trait list (default type visitor doesn't visit those traits).
// All traits in the list are considered the "primary" part of the type
// and are visited by shallow visitors.
if self.visit_predicates(tcx.predicates_of(def_id)) {
return true;
}
}
}
// These types don't have their own def-ids (but may have subcomponents
// with def-ids that should be visited recursively).
ty::Bool
| ty::Char
| ty::Int(..)
| ty::Uint(..)
| ty::Float(..)
| ty::Str
| ty::Never
| ty::Array(..)
| ty::Slice(..)
| ty::Tuple(..)
| ty::RawPtr(..)
| ty::Ref(..)
| ty::FnPtr(..)
| ty::Param(..)
| ty::Error
| ty::GeneratorWitness(..) => {}
ty::Bound(..) | ty::Placeholder(..) | ty::Infer(..) => {
bug!("unexpected type: {:?}", ty)
}
}
!self.def_id_visitor.shallow() && ty.super_visit_with(self)
}
}
fn def_id_visibility<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
) -> (ty::Visibility, Span, &'static str) {
match tcx.hir().as_local_hir_id(def_id) {
Some(hir_id) => {
let vis = match tcx.hir().get(hir_id) {
Node::Item(item) => &item.vis,
Node::ForeignItem(foreign_item) => &foreign_item.vis,
Node::MacroDef(macro_def) => {
if attr::contains_name(&macro_def.attrs, sym::macro_export) {
return (ty::Visibility::Public, macro_def.span, "public");
} else {
&macro_def.vis
}
}
Node::TraitItem(..) | Node::Variant(..) => {
return def_id_visibility(tcx, tcx.hir().get_parent_did(hir_id));
}
Node::ImplItem(impl_item) => {
match tcx.hir().get(tcx.hir().get_parent_item(hir_id)) {
Node::Item(item) => match &item.kind {
hir::ItemKind::Impl(.., None, _, _) => &impl_item.vis,
hir::ItemKind::Impl(.., Some(trait_ref), _, _) => {
return def_id_visibility(tcx, trait_ref.path.res.def_id());
}
kind => bug!("unexpected item kind: {:?}", kind),
},
node => bug!("unexpected node kind: {:?}", node),
}
}
Node::Ctor(vdata) => {
let parent_hir_id = tcx.hir().get_parent_node(hir_id);
match tcx.hir().get(parent_hir_id) {
Node::Variant(..) => {
let parent_did = tcx.hir().local_def_id(parent_hir_id);
let (mut ctor_vis, mut span, mut descr) =
def_id_visibility(tcx, parent_did);
let adt_def = tcx.adt_def(tcx.hir().get_parent_did(hir_id));
let ctor_did = tcx.hir().local_def_id(vdata.ctor_hir_id().unwrap());
let variant = adt_def.variant_with_ctor_id(ctor_did);
if variant.is_field_list_non_exhaustive()
&& ctor_vis == ty::Visibility::Public
{
ctor_vis =
ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX));
let attrs = tcx.get_attrs(variant.def_id);
span =
attr::find_by_name(&attrs, sym::non_exhaustive).unwrap().span;
descr = "crate-visible";
}
return (ctor_vis, span, descr);
}
Node::Item(..) => {
let item = match tcx.hir().get(parent_hir_id) {
Node::Item(item) => item,
node => bug!("unexpected node kind: {:?}", node),
};
let (mut ctor_vis, mut span, mut descr) = (
ty::Visibility::from_hir(&item.vis, parent_hir_id, tcx),
item.vis.span,
item.vis.node.descr(),
);
for field in vdata.fields() {
let field_vis = ty::Visibility::from_hir(&field.vis, hir_id, tcx);
if ctor_vis.is_at_least(field_vis, tcx) {
ctor_vis = field_vis;
span = field.vis.span;
descr = field.vis.node.descr();
}
}
// If the structure is marked as non_exhaustive then lower the
// visibility to within the crate.
if ctor_vis == ty::Visibility::Public {
let adt_def = tcx.adt_def(tcx.hir().get_parent_did(hir_id));
if adt_def.non_enum_variant().is_field_list_non_exhaustive() {
ctor_vis =
ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX));
span = attr::find_by_name(&item.attrs, sym::non_exhaustive)
.unwrap()
.span;
descr = "crate-visible";
}
}
return (ctor_vis, span, descr);
}
node => bug!("unexpected node kind: {:?}", node),
}
}
Node::Expr(expr) => {
return (
ty::Visibility::Restricted(tcx.hir().get_module_parent(expr.hir_id)),
expr.span,
"private",
);
}
node => bug!("unexpected node kind: {:?}", node),
};
(ty::Visibility::from_hir(vis, hir_id, tcx), vis.span, vis.node.descr())
}
None => {
let vis = tcx.visibility(def_id);
let descr = if vis == ty::Visibility::Public { "public" } else { "private" };
(vis, tcx.def_span(def_id), descr)
}
}
}
// Set the correct `TypeckTables` for the given `item_id` (or an empty table if
// there is no `TypeckTables` for the item).
fn item_tables<'a, 'tcx>(
tcx: TyCtxt<'tcx>,
hir_id: hir::HirId,
empty_tables: &'a ty::TypeckTables<'tcx>,
) -> &'a ty::TypeckTables<'tcx> {
let def_id = tcx.hir().local_def_id(hir_id);
if tcx.has_typeck_tables(def_id) { tcx.typeck_tables_of(def_id) } else { empty_tables }
}
fn min(vis1: ty::Visibility, vis2: ty::Visibility, tcx: TyCtxt<'_>) -> ty::Visibility {
if vis1.is_at_least(vis2, tcx) { vis2 } else { vis1 }
}
////////////////////////////////////////////////////////////////////////////////
/// Visitor used to determine if pub(restricted) is used anywhere in the crate.
///
/// This is done so that `private_in_public` warnings can be turned into hard errors
/// in crates that have been updated to use pub(restricted).
////////////////////////////////////////////////////////////////////////////////
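// For example (illustrative), visiting either of the following sets
// `has_pub_restricted`, since both visibilities are `pub(restricted)` forms:
//
//     pub(crate) fn f() {}
//     pub(in crate::some_mod) struct S;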
struct PubRestrictedVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
has_pub_restricted: bool,
}
impl Visitor<'tcx> for PubRestrictedVisitor<'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_vis(&mut self, vis: &'tcx hir::Visibility<'tcx>) {
self.has_pub_restricted = self.has_pub_restricted || vis.node.is_pub_restricted();
}
}
////////////////////////////////////////////////////////////////////////////////
/// Visitor used to determine impl visibility and reachability.
////////////////////////////////////////////////////////////////////////////////
struct FindMin<'a, 'tcx, VL: VisibilityLike> {
tcx: TyCtxt<'tcx>,
access_levels: &'a AccessLevels,
min: VL,
}
impl<'a, 'tcx, VL: VisibilityLike> DefIdVisitor<'tcx> for FindMin<'a, 'tcx, VL> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn shallow(&self) -> bool {
VL::SHALLOW
}
fn skip_assoc_tys(&self) -> bool {
true
}
fn visit_def_id(&mut self, def_id: DefId, _kind: &str, _descr: &dyn fmt::Display) -> bool {
self.min = VL::new_min(self, def_id);
false
}
}
trait VisibilityLike: Sized {
const MAX: Self;
const SHALLOW: bool = false;
fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self;
// Returns an over-approximation (`skip_assoc_tys` = true) of visibility due to
// associated types for which we can't determine visibility precisely.
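    // For example (illustrative): for `impl PubTrait for PrivTy { .. }` the
    // computed minimum is the visibility of `PrivTy`, because both the self
    // type and the trait reference below are visited.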
fn of_impl(hir_id: hir::HirId, tcx: TyCtxt<'_>, access_levels: &AccessLevels) -> Self {
let mut find = FindMin { tcx, access_levels, min: Self::MAX };
let def_id = tcx.hir().local_def_id(hir_id);
find.visit(tcx.type_of(def_id));
if let Some(trait_ref) = tcx.impl_trait_ref(def_id) {
find.visit_trait(trait_ref);
}
find.min
}
}
impl VisibilityLike for ty::Visibility {
const MAX: Self = ty::Visibility::Public;
fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self {
min(def_id_visibility(find.tcx, def_id).0, find.min, find.tcx)
}
}
impl VisibilityLike for Option<AccessLevel> {
const MAX: Self = Some(AccessLevel::Public);
// Type inference is very smart sometimes.
    // It can make an impl reachable even if some components of its type or trait are unreachable.
// E.g. methods of `impl ReachableTrait<UnreachableTy> for ReachableTy<UnreachableTy> { ... }`
// can be usable from other crates (#57264). So we skip substs when calculating reachability
// and consider an impl reachable if its "shallow" type and trait are reachable.
//
    // The assumption we make here is that type-inference won't let you use an impl without knowing
    // both the "shallow" version of its self type and the "shallow" version of its trait, if it
    // has one (and knowing those requires reaching the `DefId`s in them).
const SHALLOW: bool = true;
fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self {
cmp::min(
if let Some(hir_id) = find.tcx.hir().as_local_hir_id(def_id) {
find.access_levels.map.get(&hir_id).cloned()
} else {
Self::MAX
},
find.min,
)
}
}
////////////////////////////////////////////////////////////////////////////////
/// The embargo visitor, used to determine the exports of the AST.
////////////////////////////////////////////////////////////////////////////////
struct EmbargoVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
/// Accessibility levels for reachable nodes.
access_levels: AccessLevels,
/// A set of pairs corresponding to modules, where the first module is
/// reachable via a macro that's defined in the second module. This cannot
/// be represented as reachable because it can't handle the following case:
///
/// pub mod n { // Should be `Public`
/// pub(crate) mod p { // Should *not* be accessible
/// pub fn f() -> i32 { 12 } // Must be `Reachable`
/// }
/// }
/// pub macro m() {
/// n::p::f()
/// }
macro_reachable: FxHashSet<(hir::HirId, DefId)>,
/// Previous accessibility level; `None` means unreachable.
prev_level: Option<AccessLevel>,
/// Has something changed in the level map?
changed: bool,
}
struct ReachEverythingInTheInterfaceVisitor<'a, 'tcx> {
access_level: Option<AccessLevel>,
item_def_id: DefId,
ev: &'a mut EmbargoVisitor<'tcx>,
}
impl EmbargoVisitor<'tcx> {
fn get(&self, id: hir::HirId) -> Option<AccessLevel> {
self.access_levels.map.get(&id).cloned()
}
/// Updates node level and returns the updated level.
fn update(&mut self, id: hir::HirId, level: Option<AccessLevel>) -> Option<AccessLevel> {
let old_level = self.get(id);
// Accessibility levels can only grow.
if level > old_level {
self.access_levels.map.insert(id, level.unwrap());
self.changed = true;
level
} else {
old_level
}
}
fn reach(
&mut self,
item_id: hir::HirId,
access_level: Option<AccessLevel>,
) -> ReachEverythingInTheInterfaceVisitor<'_, 'tcx> {
ReachEverythingInTheInterfaceVisitor {
access_level: cmp::min(access_level, Some(AccessLevel::Reachable)),
item_def_id: self.tcx.hir().local_def_id(item_id),
ev: self,
}
}
/// Updates the item as being reachable through a macro defined in the given
/// module. Returns `true` if the level has changed.
fn update_macro_reachable(&mut self, reachable_mod: hir::HirId, defining_mod: DefId) -> bool {
if self.macro_reachable.insert((reachable_mod, defining_mod)) {
self.update_macro_reachable_mod(reachable_mod, defining_mod);
true
} else {
false
}
}
fn update_macro_reachable_mod(&mut self, reachable_mod: hir::HirId, defining_mod: DefId) {
let module_def_id = self.tcx.hir().local_def_id(reachable_mod);
let module = self.tcx.hir().get_module(module_def_id).0;
for item_id in module.item_ids {
let hir_id = item_id.id;
let item_def_id = self.tcx.hir().local_def_id(hir_id);
if let Some(def_kind) = self.tcx.def_kind(item_def_id) {
let item = self.tcx.hir().expect_item(hir_id);
let vis = ty::Visibility::from_hir(&item.vis, hir_id, self.tcx);
self.update_macro_reachable_def(hir_id, def_kind, vis, defining_mod);
}
}
if let Some(exports) = self.tcx.module_exports(module_def_id) {
for export in exports {
if export.vis.is_accessible_from(defining_mod, self.tcx) {
if let Res::Def(def_kind, def_id) = export.res {
let vis = def_id_visibility(self.tcx, def_id).0;
if let Some(hir_id) = self.tcx.hir().as_local_hir_id(def_id) {
self.update_macro_reachable_def(hir_id, def_kind, vis, defining_mod);
}
}
}
}
}
}
fn update_macro_reachable_def(
&mut self,
hir_id: hir::HirId,
def_kind: DefKind,
vis: ty::Visibility,
module: DefId,
) {
let level = Some(AccessLevel::Reachable);
if let ty::Visibility::Public = vis {
self.update(hir_id, level);
}
match def_kind {
// No type privacy, so can be directly marked as reachable.
DefKind::Const
| DefKind::Macro(_)
| DefKind::Static
| DefKind::TraitAlias
| DefKind::TyAlias => {
if vis.is_accessible_from(module, self.tcx) {
self.update(hir_id, level);
}
}
            // We can't use a module name as the final segment of a path, except
            // in use statements. Since re-export checking doesn't consider
            // hygiene, these don't need to be marked reachable. The contents of
            // the module, however, may be reachable.
DefKind::Mod => {
if vis.is_accessible_from(module, self.tcx) {
self.update_macro_reachable(hir_id, module);
}
}
DefKind::Struct | DefKind::Union => {
// While structs and unions have type privacy, their fields do
// not.
if let ty::Visibility::Public = vis {
let item = self.tcx.hir().expect_item(hir_id);
if let hir::ItemKind::Struct(ref struct_def, _)
| hir::ItemKind::Union(ref struct_def, _) = item.kind
{
for field in struct_def.fields() {
let field_vis =
ty::Visibility::from_hir(&field.vis, field.hir_id, self.tcx);
if field_vis.is_accessible_from(module, self.tcx) {
self.reach(field.hir_id, level).ty();
}
}
} else {
bug!("item {:?} with DefKind {:?}", item, def_kind);
}
}
}
// These have type privacy, so are not reachable unless they're
// public
DefKind::AssocConst
| DefKind::AssocTy
| DefKind::AssocOpaqueTy
| DefKind::ConstParam
| DefKind::Ctor(_, _)
| DefKind::Enum
| DefKind::ForeignTy
| DefKind::Fn
| DefKind::OpaqueTy
| DefKind::Method
| DefKind::Trait
| DefKind::TyParam
| DefKind::Variant => (),
}
}
    /// Given the path segments of an `ItemKind::Use`, we need
/// to update the visibility of the intermediate use so that it isn't linted
/// by `unreachable_pub`.
///
/// This isn't trivial as `path.res` has the `DefId` of the eventual target
/// of the use statement not of the next intermediate use statement.
///
/// To do this, consider the last two segments of the path to our intermediate
/// use statement. We expect the penultimate segment to be a module and the
/// last segment to be the name of the item we are exporting. We can then
/// look at the items contained in the module for the use statement with that
/// name and update that item's visibility.
///
/// FIXME: This solution won't work with glob imports and doesn't respect
/// namespaces. See <https://github.com/rust-lang/rust/pull/57922#discussion_r251234202>.
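    ///
    /// For example (illustrative), in a re-export chain like
    ///
    /// ```ignore (illustrative)
    /// mod outer {
    ///     pub use crate::inner::Item; // the intermediate use statement
    /// }
    /// pub use outer::Item; // `path.res` points at the final `Item`, not at
    ///                      // `outer`'s re-export
    /// ```
    ///
    /// the segments of `outer::Item` are inspected: `outer` is the penultimate
    /// (module) segment, and the `use` item named `Item` inside it is the one
    /// whose visibility gets updated.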
fn update_visibility_of_intermediate_use_statements(
&mut self,
segments: &[hir::PathSegment<'_>],
) {
if let Some([module, segment]) = segments.rchunks_exact(2).next() {
if let Some(item) = module
.res
.and_then(|res| res.mod_def_id())
.and_then(|def_id| self.tcx.hir().as_local_hir_id(def_id))
.map(|module_hir_id| self.tcx.hir().expect_item(module_hir_id))
{
if let hir::ItemKind::Mod(m) = &item.kind {
for item_id in m.item_ids.as_ref() {
let item = self.tcx.hir().expect_item(item_id.id);
let def_id = self.tcx.hir().local_def_id(item_id.id);
if !self.tcx.hygienic_eq(segment.ident, item.ident, def_id) {
continue;
}
if let hir::ItemKind::Use(..) = item.kind {
self.update(item.hir_id, Some(AccessLevel::Exported));
}
}
}
}
}
}
}
impl Visitor<'tcx> for EmbargoVisitor<'tcx> {
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let inherited_item_level = match item.kind {
hir::ItemKind::Impl(..) => {
Option::<AccessLevel>::of_impl(item.hir_id, self.tcx, &self.access_levels)
}
// Foreign modules inherit level from parents.
hir::ItemKind::ForeignMod(..) => self.prev_level,
// Other `pub` items inherit levels from parents.
hir::ItemKind::Const(..)
| hir::ItemKind::Enum(..)
| hir::ItemKind::ExternCrate(..)
| hir::ItemKind::GlobalAsm(..)
| hir::ItemKind::Fn(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Struct(..)
| hir::ItemKind::Trait(..)
| hir::ItemKind::TraitAlias(..)
| hir::ItemKind::OpaqueTy(..)
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Union(..)
| hir::ItemKind::Use(..) => {
if item.vis.node.is_pub() {
self.prev_level
} else {
None
}
}
};
// Update level of the item itself.
let item_level = self.update(item.hir_id, inherited_item_level);
// Update levels of nested things.
match item.kind {
hir::ItemKind::Enum(ref def, _) => {
for variant in def.variants {
let variant_level = self.update(variant.id, item_level);
if let Some(ctor_hir_id) = variant.data.ctor_hir_id() {
self.update(ctor_hir_id, item_level);
}
for field in variant.data.fields() {
self.update(field.hir_id, variant_level);
}
}
}
hir::ItemKind::Impl(.., ref trait_ref, _, impl_item_refs) => {
for impl_item_ref in impl_item_refs {
if trait_ref.is_some() || impl_item_ref.vis.node.is_pub() {
self.update(impl_item_ref.id.hir_id, item_level);
}
}
}
hir::ItemKind::Trait(.., trait_item_refs) => {
for trait_item_ref in trait_item_refs {
self.update(trait_item_ref.id.hir_id, item_level);
}
}
hir::ItemKind::Struct(ref def, _) | hir::ItemKind::Union(ref def, _) => {
if let Some(ctor_hir_id) = def.ctor_hir_id() {
self.update(ctor_hir_id, item_level);
}
for field in def.fields() {
if field.vis.node.is_pub() {
self.update(field.hir_id, item_level);
}
}
}
hir::ItemKind::ForeignMod(ref foreign_mod) => {
for foreign_item in foreign_mod.items {
if foreign_item.vis.node.is_pub() {
self.update(foreign_item.hir_id, item_level);
}
}
}
hir::ItemKind::OpaqueTy(..)
| hir::ItemKind::Use(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Const(..)
| hir::ItemKind::GlobalAsm(..)
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::TraitAlias(..)
| hir::ItemKind::Fn(..)
| hir::ItemKind::ExternCrate(..) => {}
}
// Mark all items in interfaces of reachable items as reachable.
match item.kind {
// The interface is empty.
hir::ItemKind::ExternCrate(..) => {}
// All nested items are checked by `visit_item`.
hir::ItemKind::Mod(..) => {}
// Re-exports are handled in `visit_mod`. However, in order to avoid looping over
// all of the items of a mod in `visit_mod` looking for use statements, we handle
// making sure that intermediate use statements have their visibilities updated here.
hir::ItemKind::Use(ref path, _) => {
if item_level.is_some() {
self.update_visibility_of_intermediate_use_statements(path.segments.as_ref());
}
}
// The interface is empty.
hir::ItemKind::GlobalAsm(..) => {}
hir::ItemKind::OpaqueTy(..) => {
// FIXME: This is some serious pessimization intended to workaround deficiencies
// in the reachability pass (`middle/reachable.rs`). Types are marked as link-time
// reachable if they are returned via `impl Trait`, even from private functions.
let exist_level = cmp::max(item_level, Some(AccessLevel::ReachableFromImplTrait));
self.reach(item.hir_id, exist_level).generics().predicates().ty();
}
// Visit everything.
hir::ItemKind::Const(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Fn(..)
| hir::ItemKind::TyAlias(..) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates().ty();
}
}
hir::ItemKind::Trait(.., trait_item_refs) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates();
for trait_item_ref in trait_item_refs {
let mut reach = self.reach(trait_item_ref.id.hir_id, item_level);
reach.generics().predicates();
if trait_item_ref.kind == AssocItemKind::Type
&& !trait_item_ref.defaultness.has_value()
{
// No type to visit.
} else {
reach.ty();
}
}
}
}
hir::ItemKind::TraitAlias(..) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates();
}
}
// Visit everything except for private impl items.
hir::ItemKind::Impl(.., impl_item_refs) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates().ty().trait_ref();
for impl_item_ref in impl_item_refs {
let impl_item_level = self.get(impl_item_ref.id.hir_id);
if impl_item_level.is_some() {
self.reach(impl_item_ref.id.hir_id, impl_item_level)
.generics()
.predicates()
.ty();
}
}
}
}
// Visit everything, but enum variants have their own levels.
hir::ItemKind::Enum(ref def, _) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates();
}
for variant in def.variants {
let variant_level = self.get(variant.id);
if variant_level.is_some() {
for field in variant.data.fields() {
self.reach(field.hir_id, variant_level).ty();
}
// Corner case: if the variant is reachable, but its
// enum is not, make the enum reachable as well.
self.update(item.hir_id, variant_level);
}
}
}
// Visit everything, but foreign items have their own levels.
hir::ItemKind::ForeignMod(ref foreign_mod) => {
for foreign_item in foreign_mod.items {
let foreign_item_level = self.get(foreign_item.hir_id);
if foreign_item_level.is_some() {
self.reach(foreign_item.hir_id, foreign_item_level)
.generics()
.predicates()
.ty();
}
}
}
// Visit everything except for private fields.
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates();
for field in struct_def.fields() {
let field_level = self.get(field.hir_id);
if field_level.is_some() {
self.reach(field.hir_id, field_level).ty();
}
}
}
}
}
let orig_level = mem::replace(&mut self.prev_level, item_level);
intravisit::walk_item(self, item);
self.prev_level = orig_level;
}
fn visit_block(&mut self, b: &'tcx hir::Block<'tcx>) {
// Blocks can have public items, for example impls, but they always
// start as completely private regardless of publicity of a function,
// constant, type, field, etc., in which this block resides.
let orig_level = mem::replace(&mut self.prev_level, None);
intravisit::walk_block(self, b);
self.prev_level = orig_level;
}
fn visit_mod(&mut self, m: &'tcx hir::Mod<'tcx>, _sp: Span, id: hir::HirId) {
// This code is here instead of in visit_item so that the
// crate module gets processed as well.
if self.prev_level.is_some() {
let def_id = self.tcx.hir().local_def_id(id);
if let Some(exports) = self.tcx.module_exports(def_id) {
for export in exports.iter() {
if export.vis == ty::Visibility::Public {
if let Some(def_id) = export.res.opt_def_id() {
if let Some(hir_id) = self.tcx.hir().as_local_hir_id(def_id) {
self.update(hir_id, Some(AccessLevel::Exported));
}
}
}
}
}
}
intravisit::walk_mod(self, m, id);
}
fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef<'tcx>) {
if attr::find_transparency(&md.attrs, md.legacy).0 != Transparency::Opaque {
self.update(md.hir_id, Some(AccessLevel::Public));
return;
}
let macro_module_def_id =
ty::DefIdTree::parent(self.tcx, self.tcx.hir().local_def_id(md.hir_id)).unwrap();
let mut module_id = match self.tcx.hir().as_local_hir_id(macro_module_def_id) {
Some(module_id) if self.tcx.hir().is_hir_id_module(module_id) => module_id,
// `module_id` doesn't correspond to a `mod`, return early (#63164, #65252).
_ => return,
};
let level = if md.vis.node.is_pub() { self.get(module_id) } else { None };
let new_level = self.update(md.hir_id, level);
if new_level.is_none() {
return;
}
loop {
let changed_reachability = self.update_macro_reachable(module_id, macro_module_def_id);
if changed_reachability || module_id == hir::CRATE_HIR_ID {
break;
}
module_id = self.tcx.hir().get_parent_node(module_id);
}
}
}
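// The helpers below are chainable; a typical call site in the embargo visitor
// reads `self.reach(item_id, item_level).generics().predicates().ty()`, which
// marks everything in the item's generics, predicates, and type as reachable.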
impl ReachEverythingInTheInterfaceVisitor<'_, 'tcx> {
fn generics(&mut self) -> &mut Self {
for param in &self.ev.tcx.generics_of(self.item_def_id).params {
match param.kind {
GenericParamDefKind::Lifetime => {}
GenericParamDefKind::Type { has_default, .. } => {
if has_default {
self.visit(self.ev.tcx.type_of(param.def_id));
}
}
GenericParamDefKind::Const => {
self.visit(self.ev.tcx.type_of(param.def_id));
}
}
}
self
}
fn predicates(&mut self) -> &mut Self {
self.visit_predicates(self.ev.tcx.predicates_of(self.item_def_id));
self
}
fn ty(&mut self) -> &mut Self {
self.visit(self.ev.tcx.type_of(self.item_def_id));
self
}
fn trait_ref(&mut self) -> &mut Self {
if let Some(trait_ref) = self.ev.tcx.impl_trait_ref(self.item_def_id) {
self.visit_trait(trait_ref);
}
self
}
}
impl DefIdVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'_, 'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.ev.tcx
}
fn visit_def_id(&mut self, def_id: DefId, _kind: &str, _descr: &dyn fmt::Display) -> bool {
if let Some(hir_id) = self.ev.tcx.hir().as_local_hir_id(def_id) {
if let ((ty::Visibility::Public, ..), _)
| (_, Some(AccessLevel::ReachableFromImplTrait)) =
(def_id_visibility(self.tcx(), def_id), self.access_level)
{
self.ev.update(hir_id, self.access_level);
}
}
false
}
}
//////////////////////////////////////////////////////////////////////////////////////
/// Name privacy visitor, checks privacy and reports violations.
/// Most name privacy checks are performed during the main resolution phase,
/// or later in type checking when field accesses and associated items are resolved.
/// This pass performs remaining checks for fields in struct expressions and patterns.
//////////////////////////////////////////////////////////////////////////////////////
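// For example (illustrative): given `mod m { pub struct S { x: i32 } }`, both
// the expression `m::S { x: 0 }` and the pattern `m::S { x }` used outside `m`
// are rejected here with E0451, because the field `x` is private.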
struct NamePrivacyVisitor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
tables: &'a ty::TypeckTables<'tcx>,
current_item: hir::HirId,
empty_tables: &'a ty::TypeckTables<'tcx>,
}
impl<'a, 'tcx> NamePrivacyVisitor<'a, 'tcx> {
// Checks that a field in a struct constructor (expression or pattern) is accessible.
fn check_field(
&mut self,
use_ctxt: Span, // syntax context of the field name at the use site
span: Span, // span of the field pattern, e.g., `x: 0`
def: &'tcx ty::AdtDef, // definition of the struct or enum
        field: &'tcx ty::FieldDef, // definition of the field
    ) {
let ident = Ident::new(kw::Invalid, use_ctxt);
let current_hir = self.current_item;
let def_id = self.tcx.adjust_ident_and_get_scope(ident, def.did, current_hir).1;
if !def.is_enum() && !field.vis.is_accessible_from(def_id, self.tcx) {
struct_span_err!(
self.tcx.sess,
span,
E0451,
"field `{}` of {} `{}` is private",
field.ident,
def.variant_descr(),
self.tcx.def_path_str(def.did)
)
.span_label(span, format!("field `{}` is private", field.ident))
.emit();
}
}
}
impl<'a, 'tcx> Visitor<'tcx> for NamePrivacyVisitor<'a, 'tcx> {
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_mod(&mut self, _m: &'tcx hir::Mod<'tcx>, _s: Span, _n: hir::HirId) {
// Don't visit nested modules, since we run a separate visitor walk
// for each module in `privacy_access_levels`
}
fn visit_nested_body(&mut self, body: hir::BodyId) {
let orig_tables = mem::replace(&mut self.tables, self.tcx.body_tables(body));
let body = self.tcx.hir().body(body);
self.visit_body(body);
self.tables = orig_tables;
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let orig_current_item = mem::replace(&mut self.current_item, item.hir_id);
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, item.hir_id, self.empty_tables));
intravisit::walk_item(self, item);
self.current_item = orig_current_item;
self.tables = orig_tables;
}
fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem<'tcx>) {
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, ti.hir_id, self.empty_tables));
intravisit::walk_trait_item(self, ti);
self.tables = orig_tables;
}
fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem<'tcx>) {
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, ii.hir_id, self.empty_tables));
intravisit::walk_impl_item(self, ii);
self.tables = orig_tables;
}
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
match expr.kind {
hir::ExprKind::Struct(ref qpath, fields, ref base) => {
let res = self.tables.qpath_res(qpath, expr.hir_id);
let adt = self.tables.expr_ty(expr).ty_adt_def().unwrap();
let variant = adt.variant_of_res(res);
if let Some(ref base) = *base {
// If the expression uses FRU we need to make sure all the unmentioned fields
// are checked for privacy (RFC 736). Rather than computing the set of
// unmentioned fields, just check them all.
for (vf_index, variant_field) in variant.fields.iter().enumerate() {
let field = fields
.iter()
.find(|f| self.tcx.field_index(f.hir_id, self.tables) == vf_index);
let (use_ctxt, span) = match field {
Some(field) => (field.ident.span, field.span),
None => (base.span, base.span),
};
self.check_field(use_ctxt, span, adt, variant_field);
}
} else {
for field in fields {
let use_ctxt = field.ident.span;
let index = self.tcx.field_index(field.hir_id, self.tables);
self.check_field(use_ctxt, field.span, adt, &variant.fields[index]);
}
}
}
_ => {}
}
intravisit::walk_expr(self, expr);
}
fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
match pat.kind {
PatKind::Struct(ref qpath, fields, _) => {
let res = self.tables.qpath_res(qpath, pat.hir_id);
let adt = self.tables.pat_ty(pat).ty_adt_def().unwrap();
let variant = adt.variant_of_res(res);
for field in fields {
let use_ctxt = field.ident.span;
let index = self.tcx.field_index(field.hir_id, self.tables);
self.check_field(use_ctxt, field.span, adt, &variant.fields[index]);
}
}
_ => {}
}
intravisit::walk_pat(self, pat);
}
}
////////////////////////////////////////////////////////////////////////////////////////////
/// Type privacy visitor, checks types for privacy and reports violations.
/// Both explicitly written types and inferred types of expressions and patterns are checked.
/// Checks are performed on "semantic" types regardless of names and their hygiene.
////////////////////////////////////////////////////////////////////////////////////////////
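// For example (illustrative): if `m::f` is a public function whose return type
// is a type private to `m`, the inferred type of the expression `m::f()` in
// another module is reported here as private, even though `f` itself is
// accessible by name.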
struct TypePrivacyVisitor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
tables: &'a ty::TypeckTables<'tcx>,
current_item: DefId,
in_body: bool,
span: Span,
empty_tables: &'a ty::TypeckTables<'tcx>,
}
impl<'a, 'tcx> TypePrivacyVisitor<'a, 'tcx> {
fn item_is_accessible(&self, did: DefId) -> bool {
def_id_visibility(self.tcx, did).0.is_accessible_from(self.current_item, self.tcx)
}
// Take node-id of an expression or pattern and check its type for privacy.
fn check_expr_pat_type(&mut self, id: hir::HirId, span: Span) -> bool {
self.span = span;
if self.visit(self.tables.node_type(id)) || self.visit(self.tables.node_substs(id)) {
return true;
}
if let Some(adjustments) = self.tables.adjustments().get(id) {
for adjustment in adjustments {
if self.visit(adjustment.target) {
return true;
}
}
}
false
}
fn check_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
let is_error = !self.item_is_accessible(def_id);
if is_error {
self.tcx.sess.span_err(self.span, &format!("{} `{}` is private", kind, descr));
}
is_error
}
}
impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> {
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_mod(&mut self, _m: &'tcx hir::Mod<'tcx>, _s: Span, _n: hir::HirId) {
// Don't visit nested modules, since we run a separate visitor walk
// for each module in `privacy_access_levels`
}
fn visit_nested_body(&mut self, body: hir::BodyId) {
let orig_tables = mem::replace(&mut self.tables, self.tcx.body_tables(body));
let orig_in_body = mem::replace(&mut self.in_body, true);
let body = self.tcx.hir().body(body);
self.visit_body(body);
self.tables = orig_tables;
self.in_body = orig_in_body;
}
fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty<'tcx>) {
self.span = hir_ty.span;
if self.in_body {
// Types in bodies.
if self.visit(self.tables.node_type(hir_ty.hir_id)) {
return;
}
} else {
// Types in signatures.
            // FIXME: This is very inefficient. Ideally each HIR type should be converted
// into a semantic type only once and the result should be cached somehow.
if self.visit(rustc_typeck::hir_ty_to_ty(self.tcx, hir_ty)) {
return;
}
}
intravisit::walk_ty(self, hir_ty);
}
fn visit_trait_ref(&mut self, trait_ref: &'tcx hir::TraitRef<'tcx>) {
self.span = trait_ref.path.span;
if !self.in_body {
// Avoid calling `hir_trait_to_predicates` in bodies, it will ICE.
// The traits' privacy in bodies is already checked as a part of trait object types.
let bounds = rustc_typeck::hir_trait_to_predicates(self.tcx, trait_ref);
for (trait_predicate, _) in bounds.trait_bounds {
if self.visit_trait(*trait_predicate.skip_binder()) {
return;
}
}
for (poly_predicate, _) in bounds.projection_bounds {
let tcx = self.tcx;
if self.visit(poly_predicate.skip_binder().ty)
|| self.visit_trait(poly_predicate.skip_binder().projection_ty.trait_ref(tcx))
{
return;
}
}
}
intravisit::walk_trait_ref(self, trait_ref);
}
// Check types of expressions
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
if self.check_expr_pat_type(expr.hir_id, expr.span) {
// Do not check nested expressions if the error already happened.
return;
}
match expr.kind {
hir::ExprKind::Assign(_, ref rhs, _) | hir::ExprKind::Match(ref rhs, ..) => {
// Do not report duplicate errors for `x = y` and `match x { ... }`.
if self.check_expr_pat_type(rhs.hir_id, rhs.span) {
return;
}
}
hir::ExprKind::MethodCall(_, span, _) => {
// Method calls have to be checked specially.
self.span = span;
if let Some(def_id) = self.tables.type_dependent_def_id(expr.hir_id) {
if self.visit(self.tcx.type_of(def_id)) {
return;
}
} else {
self.tcx
.sess
.delay_span_bug(expr.span, "no type-dependent def for method call");
}
}
_ => {}
}
intravisit::walk_expr(self, expr);
}
// Prohibit access to associated items with insufficient nominal visibility.
//
// Additionally, until better reachability analysis for macros 2.0 is available,
    // we prohibit access to private statics from other crates; this allows us to give
// more code internal visibility at link time. (Access to private functions
// is already prohibited by type privacy for function types.)
fn visit_qpath(&mut self, qpath: &'tcx hir::QPath<'tcx>, id: hir::HirId, span: Span) {
let def = match self.tables.qpath_res(qpath, id) {
Res::Def(kind, def_id) => Some((kind, def_id)),
_ => None,
};
let def = def.filter(|(kind, _)| match kind {
DefKind::Method
| DefKind::AssocConst
| DefKind::AssocTy
| DefKind::AssocOpaqueTy
| DefKind::Static => true,
_ => false,
});
if let Some((kind, def_id)) = def {
let is_local_static =
if let DefKind::Static = kind { def_id.is_local() } else { false };
if !self.item_is_accessible(def_id) && !is_local_static {
let name = match *qpath {
hir::QPath::Resolved(_, ref path) => path.to_string(),
hir::QPath::TypeRelative(_, ref segment) => segment.ident.to_string(),
};
let msg = format!("{} `{}` is private", kind.descr(def_id), name);
self.tcx.sess.span_err(span, &msg);
return;
}
}
intravisit::walk_qpath(self, qpath, id, span);
}
// Check types of patterns.
fn visit_pat(&mut self, pattern: &'tcx hir::Pat<'tcx>) {
if self.check_expr_pat_type(pattern.hir_id, pattern.span) {
// Do not check nested patterns if the error already happened.
return;
}
intravisit::walk_pat(self, pattern);
}
fn visit_local(&mut self, local: &'tcx hir::Local<'tcx>) {
if let Some(ref init) = local.init {
if self.check_expr_pat_type(init.hir_id, init.span) {
// Do not report duplicate errors for `let x = y`.
return;
}
}
intravisit::walk_local(self, local);
}
// Check types in item interfaces.
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let orig_current_item =
mem::replace(&mut self.current_item, self.tcx.hir().local_def_id(item.hir_id));
let orig_in_body = mem::replace(&mut self.in_body, false);
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, item.hir_id, self.empty_tables));
intravisit::walk_item(self, item);
self.tables = orig_tables;
self.in_body = orig_in_body;
self.current_item = orig_current_item;
}
fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem<'tcx>) {
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, ti.hir_id, self.empty_tables));
intravisit::walk_trait_item(self, ti);
self.tables = orig_tables;
}
fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem<'tcx>) {
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, ii.hir_id, self.empty_tables));
intravisit::walk_impl_item(self, ii);
self.tables = orig_tables;
}
}
impl DefIdVisitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
self.check_def_id(def_id, kind, descr)
}
}
///////////////////////////////////////////////////////////////////////////////
/// Obsolete visitors for checking for private items in public interfaces.
/// These visitors are supposed to be kept in frozen state and produce an
/// "old error node set". For backward compatibility the new visitor reports
/// warnings instead of hard errors when the erroneous node is not in this old set.
///////////////////////////////////////////////////////////////////////////////
struct ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
access_levels: &'a AccessLevels,
in_variant: bool,
// Set of errors produced by this obsolete visitor.
old_error_set: HirIdSet,
}
struct ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
inner: &'a ObsoleteVisiblePrivateTypesVisitor<'b, 'tcx>,
/// Whether the type refers to private types.
contains_private: bool,
    /// Whether we've recursed at all (i.e., if we're pointing at the
/// first type on which `visit_ty` was called).
at_outer_type: bool,
/// Whether that first type is a public path.
outer_type_is_public_path: bool,
}
impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
fn path_is_private_type(&self, path: &hir::Path<'_>) -> bool {
let did = match path.res {
Res::PrimTy(..) | Res::SelfTy(..) | Res::Err => return false,
res => res.def_id(),
};
// A path can only be private if:
// it's in this crate...
if let Some(hir_id) = self.tcx.hir().as_local_hir_id(did) {
// .. and it corresponds to a private type in the AST (this returns
// `None` for type parameters).
match self.tcx.hir().find(hir_id) {
Some(Node::Item(ref item)) => !item.vis.node.is_pub(),
Some(_) | None => false,
}
} else {
            false
}
}
fn trait_is_public(&self, trait_id: hir::HirId) -> bool {
// FIXME: this would preferably be using `exported_items`, but all
// traits are exported currently (see `EmbargoVisitor.exported_trait`).
self.access_levels.is_public(trait_id)
}
fn check_generic_bound(&mut self, bound: &hir::GenericBound<'_>) {
if let hir::GenericBound::Trait(ref trait_ref, _) = *bound {
if self.path_is_private_type(&trait_ref.trait_ref.path) {
self.old_error_set.insert(trait_ref.trait_ref.hir_ref_id);
}
}
}
fn item_is_public(&self, id: &hir::HirId, vis: &hir::Visibility<'_>) -> bool {
self.access_levels.is_reachable(*id) || vis.node.is_pub()
}
}
impl<'a, 'b, 'tcx, 'v> Visitor<'v> for ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &hir::Ty<'_>) {
if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = ty.kind {
if self.inner.path_is_private_type(path) {
self.contains_private = true;
// Found what we're looking for, so let's stop working.
return;
}
}
if let hir::TyKind::Path(_) = ty.kind {
if self.at_outer_type {
self.outer_type_is_public_path = true;
}
}
self.at_outer_type = false;
intravisit::walk_ty(self, ty)
}
    // Don't want to recurse into the length expression of an array type (`[T; expr]`).
fn visit_expr(&mut self, _: &hir::Expr<'_>) {}
}
impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
match item.kind {
// Contents of a private mod can be re-exported, so we need
// to check internals.
hir::ItemKind::Mod(_) => {}
// An `extern {}` doesn't introduce a new privacy
// namespace (the contents have their own privacies).
hir::ItemKind::ForeignMod(_) => {}
hir::ItemKind::Trait(.., ref bounds, _) => {
if !self.trait_is_public(item.hir_id) {
return;
}
for bound in bounds.iter() {
self.check_generic_bound(bound)
}
}
// Impls need some special handling to try to offer useful
// error messages without (too many) false positives
// (i.e., we could just return here to not check them at
// all, or some worse estimation of whether an impl is
// publicly visible).
hir::ItemKind::Impl(.., ref g, ref trait_ref, ref self_, impl_item_refs) => {
// `impl [... for] Private` is never visible.
let self_contains_private;
// `impl [... for] Public<...>`, but not `impl [... for]
// Vec<Public>` or `(Public,)`, etc.
let self_is_public_path;
// Check the properties of the `Self` type:
{
let mut visitor = ObsoleteCheckTypeForPrivatenessVisitor {
inner: self,
contains_private: false,
at_outer_type: true,
outer_type_is_public_path: false,
};
visitor.visit_ty(&self_);
self_contains_private = visitor.contains_private;
self_is_public_path = visitor.outer_type_is_public_path;
}
// Miscellaneous info about the impl:
            // `true` unless this is `impl Private for ...`, i.e. `false` iff
            // the implemented trait is private.
let not_private_trait = trait_ref.as_ref().map_or(
true, // no trait counts as public trait
|tr| {
let did = tr.path.res.def_id();
if let Some(hir_id) = self.tcx.hir().as_local_hir_id(did) {
self.trait_is_public(hir_id)
} else {
true // external traits must be public
}
},
);
// `true` iff this is a trait impl or at least one method is public.
//
// `impl Public { $( fn ...() {} )* }` is not visible.
//
// This is required over just using the methods' privacy
// directly because we might have `impl<T: Foo<Private>> ...`,
// and we shouldn't warn about the generics if all the methods
// are private (because `T` won't be visible externally).
let trait_or_some_public_method = trait_ref.is_some()
|| impl_item_refs.iter().any(|impl_item_ref| {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item.kind {
hir::ImplItemKind::Const(..) | hir::ImplItemKind::Method(..) => {
self.access_levels.is_reachable(impl_item_ref.id.hir_id)
}
hir::ImplItemKind::OpaqueTy(..) | hir::ImplItemKind::TyAlias(_) => {
false
}
}
});
if !self_contains_private && not_private_trait && trait_or_some_public_method {
intravisit::walk_generics(self, g);
match *trait_ref {
None => {
for impl_item_ref in impl_item_refs {
// This is where we choose whether to walk down
// further into the impl to check its items. We
// should only walk into public items so that we
// don't erroneously report errors for private
// types in private items.
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item.kind {
hir::ImplItemKind::Const(..)
| hir::ImplItemKind::Method(..)
if self
.item_is_public(&impl_item.hir_id, &impl_item.vis) =>
{
intravisit::walk_impl_item(self, impl_item)
}
hir::ImplItemKind::TyAlias(..) => {
intravisit::walk_impl_item(self, impl_item)
}
_ => {}
}
}
}
Some(ref tr) => {
// Any private types in a trait impl fall into three
// categories.
// 1. mentioned in the trait definition
// 2. mentioned in the type params/generics
// 3. mentioned in the associated types of the impl
//
// Those in 1. can only occur if the trait is in
// this crate and will've been warned about on the
// trait definition (there's no need to warn twice
// so we don't check the methods).
//
// Those in 2. are warned via walk_generics and this
// call here.
intravisit::walk_path(self, &tr.path);
// Those in 3. are warned with this call.
for impl_item_ref in impl_item_refs {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
if let hir::ImplItemKind::TyAlias(ref ty) = impl_item.kind {
self.visit_ty(ty);
}
}
}
}
} else if trait_ref.is_none() && self_is_public_path {
// `impl Public<Private> { ... }`. Any public static
// methods will be visible as `Public::foo`.
let mut found_pub_static = false;
for impl_item_ref in impl_item_refs {
if self.item_is_public(&impl_item_ref.id.hir_id, &impl_item_ref.vis) {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item_ref.kind {
AssocItemKind::Const => {
found_pub_static = true;
intravisit::walk_impl_item(self, impl_item);
}
AssocItemKind::Method { has_self: false } => {
found_pub_static = true;
intravisit::walk_impl_item(self, impl_item);
}
_ => {}
}
}
}
if found_pub_static {
intravisit::walk_generics(self, g)
}
}
return;
}
// `type ... = ...;` can contain private types, because
// we're introducing a new name.
hir::ItemKind::TyAlias(..) => return,
// Not at all public, so we don't care.
_ if !self.item_is_public(&item.hir_id, &item.vis) => {
return;
}
_ => {}
}
// We've carefully constructed it so that if we're here, then
// any `visit_ty`'s will be called on things that are in
// public signatures, i.e., things that we're interested in for
// this visitor.
intravisit::walk_item(self, item);
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
for param in generics.params {
for bound in param.bounds {
self.check_generic_bound(bound);
}
}
for predicate in generics.where_clause.predicates {
match predicate {
hir::WherePredicate::BoundPredicate(bound_pred) => {
for bound in bound_pred.bounds.iter() {
self.check_generic_bound(bound)
}
}
hir::WherePredicate::RegionPredicate(_) => {}
hir::WherePredicate::EqPredicate(eq_pred) => {
self.visit_ty(&eq_pred.rhs_ty);
}
}
}
}
fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
if self.access_levels.is_reachable(item.hir_id) {
intravisit::walk_foreign_item(self, item)
}
}
fn visit_ty(&mut self, t: &'tcx hir::Ty<'tcx>) {
if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = t.kind {
if self.path_is_private_type(path) {
self.old_error_set.insert(t.hir_id);
}
}
intravisit::walk_ty(self, t)
}
fn visit_variant(
&mut self,
v: &'tcx hir::Variant<'tcx>,
g: &'tcx hir::Generics<'tcx>,
item_id: hir::HirId,
) {
if self.access_levels.is_reachable(v.id) {
self.in_variant = true;
intravisit::walk_variant(self, v, g, item_id);
self.in_variant = false;
}
}
fn visit_struct_field(&mut self, s: &'tcx hir::StructField<'tcx>) {
if s.vis.node.is_pub() || self.in_variant {
intravisit::walk_struct_field(self, s);
}
}
// We don't need to introspect into these at all: an
// expression/block context can't possibly contain exported things.
// (Making them no-ops stops us from traversing the whole AST without
// having to be super careful about our `walk_...` calls above.)
fn visit_block(&mut self, _: &'tcx hir::Block<'tcx>) {}
fn visit_expr(&mut self, _: &'tcx hir::Expr<'tcx>) {}
}
///////////////////////////////////////////////////////////////////////////////
/// SearchInterfaceForPrivateItemsVisitor traverses an item's interface and
/// finds any private components in it.
/// PrivateItemsInPublicInterfacesVisitor ensures there are no private types
/// and traits in public interfaces.
///////////////////////////////////////////////////////////////////////////////
struct SearchInterfaceForPrivateItemsVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
item_id: hir::HirId,
item_def_id: DefId,
span: Span,
/// The visitor checks that each component type is at least this visible.
required_visibility: ty::Visibility,
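    /// Whether the crate uses `pub(restricted)` anywhere; if so,
    /// private-in-public violations are hard errors rather than lints.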
has_pub_restricted: bool,
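    /// Whether the older private-in-public pass already flagged this item
    /// (tracked via the `old_error_set`).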
has_old_errors: bool,
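    /// Whether we are currently checking an associated type, where
    /// violations are always reported as hard errors.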
in_assoc_ty: bool,
}
impl SearchInterfaceForPrivateItemsVisitor<'tcx> {
fn generics(&mut self) -> &mut Self {
for param in &self.tcx.generics_of(self.item_def_id).params {
match param.kind {
GenericParamDefKind::Lifetime => {}
GenericParamDefKind::Type { has_default, .. } => {
if has_default {
self.visit(self.tcx.type_of(param.def_id));
}
}
GenericParamDefKind::Const => {
self.visit(self.tcx.type_of(param.def_id));
}
}
}
self
}
fn predicates(&mut self) -> &mut Self {
// N.B., we use `explicit_predicates_of` and not `predicates_of`
// because we don't want to report privacy errors due to where
// clauses that the compiler inferred. We only want to
// consider the ones that the user wrote. This is important
// for the inferred outlives rules; see
// `src/test/ui/rfc-2093-infer-outlives/privacy.rs`.
self.visit_predicates(self.tcx.explicit_predicates_of(self.item_def_id));
self
}
fn ty(&mut self) -> &mut Self {
self.visit(self.tcx.type_of(self.item_def_id));
self
}
fn check_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
if self.leaks_private_dep(def_id) {
self.tcx.lint_hir(
lint::builtin::EXPORTED_PRIVATE_DEPENDENCIES,
self.item_id,
self.span,
&format!(
"{} `{}` from private dependency '{}' in public \
interface",
kind,
descr,
self.tcx.crate_name(def_id.krate)
),
);
}
let hir_id = match self.tcx.hir().as_local_hir_id(def_id) {
Some(hir_id) => hir_id,
None => return false,
};
let (vis, vis_span, vis_descr) = def_id_visibility(self.tcx, def_id);
if !vis.is_at_least(self.required_visibility, self.tcx) {
let msg = format!("{} {} `{}` in public interface", vis_descr, kind, descr);
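            // Emit a hard error if the crate uses `pub(restricted)`, if the
            // old rules already flagged this item, or if we're inside an
            // associated type; otherwise stay on the forward-compatibility
            // lint below.
            //
            // Illustrative triggers (not from this file):
            //     trait PrivTr {}
            //     pub trait PubTr: PrivTr {}     // E0445: private trait in public interface
            //     struct Priv;
            //     pub fn leak() -> Priv { Priv } // E0446: private type in public interface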
if self.has_pub_restricted || self.has_old_errors || self.in_assoc_ty {
let mut err = if kind == "trait" {
struct_span_err!(self.tcx.sess, self.span, E0445, "{}", msg)
} else {
struct_span_err!(self.tcx.sess, self.span, E0446, "{}", msg)
};
err.span_label(self.span, format!("can't leak {} {}", vis_descr, kind));
err.span_label(vis_span, format!("`{}` declared as {}", descr, vis_descr));
err.emit();
} else {
let err_code = if kind == "trait" { "E0445" } else { "E0446" };
self.tcx.lint_hir(
lint::builtin::PRIVATE_IN_PUBLIC,
hir_id,
self.span,
&format!("{} (error {})", msg, err_code),
);
}
}
false
}
/// An item is 'leaked' from a private dependency if all
/// of the following are true:
/// 1. It's contained within a public type
/// 2. It comes from a private crate
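    /// For example (hypothetical): with `priv_dep` marked as a private
    /// dependency, `pub fn f() -> priv_dep::Thing` would trip the
    /// `exported_private_dependencies` lint.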
fn leaks_private_dep(&self, item_id: DefId) -> bool {
let ret = self.required_visibility == ty::Visibility::Public
&& self.tcx.is_private_dep(item_id.krate);
log::debug!("leaks_private_dep(item_id={:?})={}", item_id, ret);
        ret
}
}
impl DefIdVisitor<'tcx> for SearchInterfaceForPrivateItemsVisitor<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
self.check_def_id(def_id, kind, descr)
}
}
struct PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
has_pub_restricted: bool,
old_error_set: &'a HirIdSet,
}
impl<'a, 'tcx> PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
fn check(
&self,
item_id: hir::HirId,
required_visibility: ty::Visibility,
) -> SearchInterfaceForPrivateItemsVisitor<'tcx> {
let mut has_old_errors = false;
        // Slow path, taken only if there are any errors in the crate.
for &id in self.old_error_set {
// Walk up the nodes until we find `item_id` (or we hit a root).
let mut id = id;
loop {
if id == item_id {
has_old_errors = true;
break;
}
let parent = self.tcx.hir().get_parent_node(id);
if parent == id {
break;
}
id = parent;
}
if has_old_errors {
break;
}
}
SearchInterfaceForPrivateItemsVisitor {
tcx: self.tcx,
item_id,
item_def_id: self.tcx.hir().local_def_id(item_id),
span: self.tcx.hir().span(item_id),
required_visibility,
has_pub_restricted: self.has_pub_restricted,
has_old_errors,
in_assoc_ty: false,
}
}
fn check_assoc_item(
&self,
hir_id: hir::HirId,
assoc_item_kind: AssocItemKind,
defaultness: hir::Defaultness,
vis: ty::Visibility,
) {
let mut check = self.check(hir_id, vis);
let (check_ty, is_assoc_ty) = match assoc_item_kind {
AssocItemKind::Const | AssocItemKind::Method { .. } => (true, false),
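            // An associated type contributes a concrete type to the
            // interface only when it has a value (a default in a trait,
            // or the assignment in an impl); otherwise only its bounds
            // and predicates matter.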
AssocItemKind::Type => (defaultness.has_value(), true),
            // `ty()` for opaque types is the underlying type,
            // which is not part of the interface, so we skip it.
AssocItemKind::OpaqueTy => (false, true),
};
check.in_assoc_ty = is_assoc_ty;
check.generics().predicates();
if check_ty {
check.ty();
}
}
}
impl<'a, 'tcx> Visitor<'tcx> for PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::OnlyBodies(&self.tcx.hir())
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let tcx = self.tcx;
let item_visibility = ty::Visibility::from_hir(&item.vis, item.hir_id, tcx);
match item.kind {
// Crates are always public.
hir::ItemKind::ExternCrate(..) => {}
// All nested items are checked by `visit_item`.
hir::ItemKind::Mod(..) => {}
// Checked in resolve.
hir::ItemKind::Use(..) => {}
// No subitems.
hir::ItemKind::GlobalAsm(..) => {}
// Subitems of these items have inherited publicity.
hir::ItemKind::Const(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Fn(..)
| hir::ItemKind::TyAlias(..) => {
self.check(item.hir_id, item_visibility).generics().predicates().ty();
}
hir::ItemKind::OpaqueTy(..) => {
                // `ty()` for opaque types is the underlying type,
                // which is not part of the interface, so we skip it.
self.check(item.hir_id, item_visibility).generics().predicates();
}
hir::ItemKind::Trait(.., trait_item_refs) => {
self.check(item.hir_id, item_visibility).generics().predicates();
for trait_item_ref in trait_item_refs {
self.check_assoc_item(
trait_item_ref.id.hir_id,
trait_item_ref.kind,
trait_item_ref.defaultness,
item_visibility,
);
}
}
hir::ItemKind::TraitAlias(..) => {
self.check(item.hir_id, item_visibility).generics().predicates();
}
hir::ItemKind::Enum(ref def, _) => {
self.check(item.hir_id, item_visibility).generics().predicates();
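                // Enum variants and their fields inherit the enum's own
                // visibility, so each field is checked against it.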
for variant in def.variants {
for field in variant.data.fields() {
self.check(field.hir_id, item_visibility).ty();
}
}
}
// Subitems of foreign modules have their own publicity.
hir::ItemKind::ForeignMod(ref foreign_mod) => {
for foreign_item in foreign_mod.items {
let vis = ty::Visibility::from_hir(&foreign_item.vis, item.hir_id, tcx);
self.check(foreign_item.hir_id, vis).generics().predicates().ty();
}
}
// Subitems of structs and unions have their own publicity.
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
self.check(item.hir_id, item_visibility).generics().predicates();
for field in struct_def.fields() {
let field_visibility = ty::Visibility::from_hir(&field.vis, item.hir_id, tcx);
self.check(field.hir_id, min(item_visibility, field_visibility, tcx)).ty();
}
}
            // An inherent impl is public when its type is public.
            // Subitems of inherent impls have their own publicity.
            // A trait impl is public when both its type and its trait are public.
            // Subitems of trait impls have inherited publicity.
hir::ItemKind::Impl(.., ref trait_ref, _, impl_item_refs) => {
let impl_vis = ty::Visibility::of_impl(item.hir_id, tcx, &Default::default());
self.check(item.hir_id, impl_vis).generics().predicates();
for impl_item_ref in impl_item_refs {
let impl_item = tcx.hir().impl_item(impl_item_ref.id);
let impl_item_vis = if trait_ref.is_none() {
min(
ty::Visibility::from_hir(&impl_item.vis, item.hir_id, tcx),
impl_vis,
tcx,
)
} else {
impl_vis
};
self.check_assoc_item(
impl_item_ref.id.hir_id,
impl_item_ref.kind,
impl_item_ref.defaultness,
impl_item_vis,
);
}
}
}
}
}
pub fn provide(providers: &mut Providers<'_>) {
*providers = Providers {
privacy_access_levels,
check_private_in_public,
check_mod_privacy,
..*providers
};
}
fn check_mod_privacy(tcx: TyCtxt<'_>, module_def_id: DefId) {
let empty_tables = ty::TypeckTables::empty(None);
// Check privacy of names not checked in previous compilation stages.
let mut visitor = NamePrivacyVisitor {
tcx,
tables: &empty_tables,
current_item: hir::DUMMY_HIR_ID,
empty_tables: &empty_tables,
};
let (module, span, hir_id) = tcx.hir().get_module(module_def_id);
intravisit::walk_mod(&mut visitor, module, hir_id);
// Check privacy of explicitly written types and traits as well as
// inferred types of expressions and patterns.
let mut visitor = TypePrivacyVisitor {
tcx,
tables: &empty_tables,
current_item: module_def_id,
in_body: false,
span,
empty_tables: &empty_tables,
};
intravisit::walk_mod(&mut visitor, module, hir_id);
}
fn privacy_access_levels(tcx: TyCtxt<'_>, krate: CrateNum) -> &AccessLevels {
assert_eq!(krate, LOCAL_CRATE);
// Build up a set of all exported items in the AST. This is a set of all
// items which are reachable from external crates based on visibility.
let mut visitor = EmbargoVisitor {
tcx,
access_levels: Default::default(),
macro_reachable: Default::default(),
prev_level: Some(AccessLevel::Public),
changed: false,
};
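    // Iterate to a fixed point: each pass over the crate may raise the
    // access level of items reached through ones promoted in an earlier
    // pass (e.g. through re-exports or macro-reachable items), so keep
    // walking until a full pass reports no changes.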
loop {
intravisit::walk_crate(&mut visitor, tcx.hir().krate());
if visitor.changed {
visitor.changed = false;
} else {
break;
}
}
visitor.update(hir::CRATE_HIR_ID, Some(AccessLevel::Public));
tcx.arena.alloc(visitor.access_levels)
}
fn check_private_in_public(tcx: TyCtxt<'_>, krate: CrateNum) {
assert_eq!(krate, LOCAL_CRATE);
let access_levels = tcx.privacy_access_levels(LOCAL_CRATE);
let krate = tcx.hir().krate();
let mut visitor = ObsoleteVisiblePrivateTypesVisitor {
tcx,
access_levels: &access_levels,
in_variant: false,
old_error_set: Default::default(),
};
intravisit::walk_crate(&mut visitor, krate);
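    // Determine whether the crate uses `pub(restricted)` anywhere; if it
    // does, private-in-public violations below become hard errors instead
    // of lints.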
let has_pub_restricted = {
let mut pub_restricted_visitor = PubRestrictedVisitor { tcx, has_pub_restricted: false };
intravisit::walk_crate(&mut pub_restricted_visitor, krate);
pub_restricted_visitor.has_pub_restricted
};
// Check for private types and traits in public interfaces.
let mut visitor = PrivateItemsInPublicInterfacesVisitor {
tcx,
has_pub_restricted,
old_error_set: &visitor.old_error_set,
};
krate.visit_all_item_likes(&mut DeepVisitor::new(&mut visitor));
}
| 41.556878 | 114 | 0.516957 |
e565ec4df2f5d5b85cfc788aa07dfed2e350a3a9 | 45 | #[macro_use]
mod write;
float_module!(f64);
| 9 | 19 | 0.711111 |