file_name | prefix | suffix | middle
---|---|---|---
batch_util_test.py | # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test parallel Cirq simulations."""
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from scipy import stats
import cirq
from tensorflow_quantum.core.ops import batch_util
from tensorflow_quantum.python import util
BATCH_SIZE = 12
N_QUBITS = 5
PAULI_LENGTH = 3
SYMBOLS = ['alpha', 'beta', 'gamma']
def _get_mixed_batch(qubits, symbols, size):
circuit1, resolver1 = util.random_circuit_resolver_batch(qubits, size // 2)
circuit2, resolver2 = util.random_symbol_circuit_resolver_batch(
qubits, symbols, size // 2)
return circuit1 + circuit2, resolver1 + resolver2
def _pad_state(sim, state, n):
if isinstance(sim, cirq.sim.sparse_simulator.Simulator):
state = state.final_state
if isinstance(sim, cirq.DensityMatrixSimulator):
state = state.final_density_matrix
return np.pad(state, (0, (1 << n) - state.shape[-1]),
'constant',
constant_values=-2)
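# Illustrative sketch (not part of the original test file) of the padding
# convention above: a state shorter than 2**n is right-padded with the
# sentinel value -2 so every result in a batch shares one shape, e.g.
#   np.pad(np.array([1.0, 0.0]), (0, (1 << 3) - 2), 'constant', constant_values=-2)
#   # -> array([ 1.,  0., -2., -2., -2., -2., -2., -2.])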
def _expectation_helper(sim, circuit, params, op):
if isinstance(sim, cirq.sim.sparse_simulator.Simulator):
state = sim.simulate(circuit, params).final_state.astype(np.complex128)
return [
op.expectation_from_wavefunction(
state,
dict(
zip(sorted(circuit.all_qubits()),
(j for j in range(len(circuit.all_qubits())))))).real
]
if isinstance(sim, cirq.DensityMatrixSimulator):
state = sim.simulate(circuit, params).final_density_matrix
return [
sum(
x._expectation_from_density_matrix_no_validation(
state,
dict(
zip(sorted(circuit.all_qubits()), (
j
for j in range(len(circuit.all_qubits()))))))
for x in op)
]
return NotImplemented
def _sample_helper(sim, state, n_qubits, n_samples):
if isinstance(sim, cirq.sim.sparse_simulator.Simulator):
return cirq.sample_state_vector(state.final_state,
list(range(n_qubits)),
repetitions=n_samples)
if isinstance(sim, cirq.DensityMatrixSimulator):
return cirq.sample_density_matrix(state.final_density_matrix,
list(range(n_qubits)),
repetitions=n_samples)
return NotImplemented
class BatchUtilTest(tf.test.TestCase, parameterized.TestCase):
"""Test cases for BatchUtils main functions."""
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_batch_simulate_state(self, sim):
"""Test variable sized wavefunction output."""
circuit_batch, resolver_batch = _get_mixed_batch(
cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)
results = batch_util.batch_calculate_state(circuit_batch,
resolver_batch, sim)
for circuit, resolver, result in zip(circuit_batch, resolver_batch,
results):
r = _pad_state(sim, sim.simulate(circuit, resolver), N_QUBITS)
self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)
self.assertDTypeEqual(results, np.complex64)
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_batch_expectation(self, sim):
"""Test expectation."""
qubits = cirq.GridQubit.rect(1, N_QUBITS)
circuit_batch, resolver_batch = _get_mixed_batch(
qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)
ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)
results = batch_util.batch_calculate_expectation(
circuit_batch, resolver_batch, [[x] for x in ops], sim)
for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,
results, ops):
r = _expectation_helper(sim, circuit, resolver, op)
self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)
self.assertDTypeEqual(results, np.float32)
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_batch_sampled_expectation(self, sim):
"""Test expectation."""
qubits = cirq.GridQubit.rect(1, N_QUBITS)
circuit_batch, resolver_batch = _get_mixed_batch(
qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)
ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)
n_samples = [[1000] for _ in range(len(ops))]
results = batch_util.batch_calculate_sampled_expectation(
circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)
for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,
results, ops):
r = _expectation_helper(sim, circuit, resolver, op)
self.assertAllClose(r, result, rtol=1.0, atol=1e-1)
self.assertDTypeEqual(results, np.float32)
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_batch_sample_basic(self, sim):
"""Test sampling."""
n_samples = 1
n_qubits = 8
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit = cirq.Circuit(*cirq.Z.on_each(*qubits[:n_qubits // 2]),
*cirq.X.on_each(*qubits[n_qubits // 2:]))
test_results = batch_util.batch_sample([circuit],
[cirq.ParamResolver({})],
n_samples, sim)
state = sim.simulate(circuit, cirq.ParamResolver({}))
expected_results = _sample_helper(sim, state, len(qubits), n_samples)
self.assertAllEqual(expected_results, test_results[0])
self.assertDTypeEqual(test_results, np.int32)
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_batch_sample(self, sim):
|
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_empty_circuits(self, sim):
"""Test functions with empty circuits."""
# Common preparation
resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]
circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
qubits = cirq.GridQubit.rect(1, N_QUBITS)
ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)
n_samples = [[1000] for _ in range(len(ops))]
# If there is no op on a qubit, the expectation answer is -2.0
true_expectation = (-2.0,)
# (1) Test expectation
results = batch_util.batch_calculate_expectation(
circuit_batch, resolver_batch, [[x] for x in ops], sim)
for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):
self.assertAllClose(true_expectation, result, rtol=1e-5, atol=1e-5)
self.assertDTypeEqual(results, np.float32)
# (2) Test sampled_expectation
results = batch_util.batch_calculate_sampled_expectation(
circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)
for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):
self.assertAllClose(true_expectation, result, rtol=1.0, atol=1e-1)
self.assertDTypeEqual(results, np.float32)
# (3) Test state
results = batch_util.batch_calculate_state(circuit_batch,
resolver_batch, sim)
for circuit, resolver, result in zip(circuit_batch, resolver_batch,
results):
r = _pad_state(sim, sim.simulate(circuit, resolver), 0)
self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)
self.assertDTypeEqual(results, np.complex64)
# (4) Test sampling
n_samples = 2000 * (2**N_QUBITS)
results = batch_util.batch_sample(circuit_batch, resolver_batch,
n_samples, sim)
for circuit, resolver, a in zip(circuit_batch, resolver_batch, results):
state = sim.simulate(circuit, resolver)
r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)
self.assertAllClose(r, a, atol=1e-5)
self.assertDTypeEqual(results, np.int32)
if __name__ == '__main__':
tf.test.main()
| """Test sampling."""
n_samples = 2000 * (2**N_QUBITS)
circuit_batch, resolver_batch = _get_mixed_batch(
cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)
results = batch_util.batch_sample(circuit_batch, resolver_batch,
n_samples, sim)
tfq_histograms = []
for r in results:
tfq_histograms.append(
np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),
range=(0, 2**N_QUBITS),
bins=2**N_QUBITS)[0])
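# A small worked example (illustration only, not from the original tests) of the
# bit-packing used above: each sampled bitstring is interpreted as a big-endian
# integer before histogramming, e.g.
#   np.array([[1, 0, 1]]).dot(1 << np.arange(2, -1, -1))  # -> array([5])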
cirq_histograms = []
for circuit, resolver in zip(circuit_batch, resolver_batch):
state = sim.simulate(circuit, resolver)
r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)
cirq_histograms.append(
np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),
range=(0, 2**N_QUBITS),
bins=2**N_QUBITS)[0])
for a, b in zip(tfq_histograms, cirq_histograms):
self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005)
self.assertDTypeEqual(results, np.int32) |
dist.rs | //! Implementation of the various distribution aspects of the compiler.
//!
//! This module is responsible for creating tarballs of the standard library,
//! compiler, and documentation. This ends up being what we distribute to
//! everyone as well.
//!
//! No tarball is actually created literally in this file, but rather we shell
//! out to `rust-installer` still. This may one day be replaced with bits and
//! pieces of `rustup.rs`!
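//! For orientation (the version and target triple below are hypothetical): each
//! `Step` in this module stages an "image" directory under `tmpdir()`, invokes
//! `rust-installer generate` on it, and produces an artifact such as
//! `<out>/dist/rust-docs-1.46.0-x86_64-unknown-linux-gnu.tar.gz`.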
use std::env;
use std::fs;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use build_helper::{output, t};
use crate::builder::{Builder, RunConfig, ShouldRun, Step};
use crate::cache::{Interned, INTERNER};
use crate::channel;
use crate::compile;
use crate::config::TargetSelection;
use crate::tool::{self, Tool};
use crate::util::{exe, is_dylib, timeit};
use crate::{Compiler, DependencyType, Mode, LLVM_TOOLS};
use time::{self, Timespec};
pub fn pkgname(builder: &Builder<'_>, component: &str) -> String {
if component == "cargo" {
format!("{}-{}", component, builder.cargo_package_vers())
} else if component == "rls" {
format!("{}-{}", component, builder.rls_package_vers())
} else if component == "rust-analyzer" {
format!("{}-{}", component, builder.rust_analyzer_package_vers())
} else if component == "clippy" {
format!("{}-{}", component, builder.clippy_package_vers())
} else if component == "miri" {
format!("{}-{}", component, builder.miri_package_vers())
} else if component == "rustfmt" {
format!("{}-{}", component, builder.rustfmt_package_vers())
} else if component == "llvm-tools" {
format!("{}-{}", component, builder.llvm_tools_package_vers())
} else {
assert!(component.starts_with("rust"));
format!("{}-{}", component, builder.rust_package_vers())
}
}
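// Usage sketch (hypothetical version numbers, for illustration only): with a
// cargo package version of "0.48.0" and a rust package version of "1.46.0",
//   pkgname(builder, "cargo")    == "cargo-0.48.0"
//   pkgname(builder, "rust-std") == "rust-std-1.46.0"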
fn distdir(builder: &Builder<'_>) -> PathBuf {
builder.out.join("dist")
}
pub fn tmpdir(builder: &Builder<'_>) -> PathBuf {
builder.out.join("tmp/dist")
}
fn rust_installer(builder: &Builder<'_>) -> Command {
builder.tool_cmd(Tool::RustInstaller)
}
fn missing_tool(tool_name: &str, skip: bool) {
if skip {
println!("Unable to build {}, skipping dist", tool_name)
} else {
panic!("Unable to build {}", tool_name)
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Docs {
pub host: TargetSelection,
}
impl Step for Docs {
type Output = PathBuf;
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src/doc")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Docs { host: run.target });
}
/// Builds the `rust-docs` installer component.
fn run(self, builder: &Builder<'_>) -> PathBuf {
let host = self.host;
let name = pkgname(builder, "rust-docs");
if !builder.config.docs {
return distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple));
}
builder.default_doc(None);
builder.info(&format!("Dist docs ({})", host));
let _time = timeit(builder);
let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
let _ = fs::remove_dir_all(&image);
let dst = image.join("share/doc/rust/html");
t!(fs::create_dir_all(&dst));
let src = builder.doc_out(host);
builder.cp_r(&src, &dst);
builder.install(&builder.src.join("src/doc/robots.txt"), &dst, 0o644);
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust-Documentation")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rust-documentation-is-installed.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", name, host.triple))
.arg("--component-name=rust-docs")
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--bulk-dirs=share/doc/rust/html");
builder.run(&mut cmd);
builder.remove_dir(&image);
distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple))
}
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustcDocs {
pub host: TargetSelection,
}
impl Step for RustcDocs {
type Output = PathBuf;
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src/librustc")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(RustcDocs { host: run.target });
}
/// Builds the `rustc-docs` installer component.
fn run(self, builder: &Builder<'_>) -> PathBuf {
let host = self.host;
let name = pkgname(builder, "rustc-docs");
if !builder.config.compiler_docs {
return distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple));
}
builder.default_doc(None);
let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
let _ = fs::remove_dir_all(&image);
let dst = image.join("share/doc/rust/html/rustc");
t!(fs::create_dir_all(&dst));
let src = builder.compiler_doc_out(host);
builder.cp_r(&src, &dst);
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rustc-Documentation")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rustc-documentation-is-installed.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", name, host.triple))
.arg("--component-name=rustc-docs")
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--bulk-dirs=share/doc/rust/html/rustc");
builder.info(&format!("Dist compiler docs ({})", host));
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple))
}
}
fn find_files(files: &[&str], path: &[PathBuf]) -> Vec<PathBuf> {
let mut found = Vec::with_capacity(files.len());
for file in files {
let file_path = path.iter().map(|dir| dir.join(file)).find(|p| p.exists());
if let Some(file_path) = file_path {
found.push(file_path);
} else {
panic!("Could not find '{}' in {:?}", file, path);
}
}
found
}
fn make_win_dist(
rust_root: &Path,
plat_root: &Path,
target: TargetSelection,
builder: &Builder<'_>,
) {
//Ask gcc where it keeps its stuff
let mut cmd = Command::new(builder.cc(target));
cmd.arg("-print-search-dirs");
let gcc_out = output(&mut cmd);
let mut bin_path: Vec<_> = env::split_paths(&env::var_os("PATH").unwrap_or_default()).collect();
let mut lib_path = Vec::new();
for line in gcc_out.lines() {
let idx = line.find(':').unwrap();
let key = &line[..idx];
let trim_chars: &[_] = &[' ', '='];
let value = env::split_paths(line[(idx + 1)..].trim_start_matches(trim_chars));
if key == "programs" {
bin_path.extend(value);
} else if key == "libraries" {
lib_path.extend(value);
}
}
let compiler = if target == "i686-pc-windows-gnu" {
"i686-w64-mingw32-gcc.exe"
} else if target == "x86_64-pc-windows-gnu" {
"x86_64-w64-mingw32-gcc.exe"
} else {
"gcc.exe"
};
let target_tools = [compiler, "ld.exe", "dlltool.exe", "libwinpthread-1.dll"];
let mut rustc_dlls = vec!["libwinpthread-1.dll"];
if target.starts_with("i686-") {
rustc_dlls.push("libgcc_s_dw2-1.dll");
} else {
rustc_dlls.push("libgcc_s_seh-1.dll");
}
let target_libs = [
//MinGW libs
"libgcc.a",
"libgcc_eh.a",
"libgcc_s.a",
"libm.a",
"libmingw32.a",
"libmingwex.a",
"libstdc++.a",
"libiconv.a",
"libmoldname.a",
"libpthread.a",
//Windows import libs
"libadvapi32.a",
"libbcrypt.a",
"libcomctl32.a",
"libcomdlg32.a",
"libcredui.a",
"libcrypt32.a",
"libdbghelp.a",
"libgdi32.a",
"libimagehlp.a",
"libiphlpapi.a",
"libkernel32.a",
"libmsimg32.a",
"libmsvcrt.a",
"libodbc32.a",
"libole32.a",
"liboleaut32.a",
"libopengl32.a",
"libpsapi.a",
"librpcrt4.a",
"libsecur32.a",
"libsetupapi.a",
"libshell32.a",
"libsynchronization.a",
"libuser32.a",
"libuserenv.a",
"libuuid.a",
"libwinhttp.a",
"libwinmm.a",
"libwinspool.a",
"libws2_32.a",
"libwsock32.a",
];
//Find mingw artifacts we want to bundle
let target_tools = find_files(&target_tools, &bin_path);
let rustc_dlls = find_files(&rustc_dlls, &bin_path);
let target_libs = find_files(&target_libs, &lib_path);
// Copy runtime dlls next to rustc.exe
let dist_bin_dir = rust_root.join("bin/");
fs::create_dir_all(&dist_bin_dir).expect("creating dist_bin_dir failed");
for src in rustc_dlls {
builder.copy_to_folder(&src, &dist_bin_dir);
}
//Copy platform tools to platform-specific bin directory
let target_bin_dir = plat_root
.join("lib")
.join("rustlib")
.join(target.triple)
.join("bin")
.join("self-contained");
fs::create_dir_all(&target_bin_dir).expect("creating target_bin_dir failed");
for src in target_tools {
builder.copy_to_folder(&src, &target_bin_dir);
}
// Warn windows-gnu users that the bundled GCC cannot compile C files
builder.create(
&target_bin_dir.join("GCC-WARNING.txt"),
"gcc.exe contained in this folder cannot be used for compiling C files - it is only \
used as a linker. In order to be able to compile projects containing C code use \
the GCC provided by MinGW or Cygwin.",
);
//Copy platform libs to platform-specific lib directory
let target_lib_dir = plat_root
.join("lib")
.join("rustlib")
.join(target.triple)
.join("lib")
.join("self-contained");
fs::create_dir_all(&target_lib_dir).expect("creating target_lib_dir failed");
for src in target_libs {
builder.copy_to_folder(&src, &target_lib_dir);
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Mingw {
pub host: TargetSelection,
}
impl Step for Mingw {
type Output = Option<PathBuf>;
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.never()
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Mingw { host: run.target });
}
/// Builds the `rust-mingw` installer component.
///
/// This contains all the bits and pieces to run the MinGW Windows targets
/// without any extra installed software (e.g., we bundle gcc, libraries, etc).
fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
let host = self.host;
if !host.contains("pc-windows-gnu") {
return None;
}
builder.info(&format!("Dist mingw ({})", host));
let _time = timeit(builder);
let name = pkgname(builder, "rust-mingw");
let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
let _ = fs::remove_dir_all(&image);
t!(fs::create_dir_all(&image));
// The first argument is a "temporary directory" which is just
// thrown away (this contains the runtime DLLs included in the rustc package
// above) and the second argument is where to place all the MinGW components
// (which is what we want).
make_win_dist(&tmpdir(builder), &image, host, &builder);
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust-MinGW")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rust-MinGW-is-installed.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", name, host.triple))
.arg("--component-name=rust-mingw")
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.run(&mut cmd);
t!(fs::remove_dir_all(&image));
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple)))
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Rustc {
pub compiler: Compiler,
}
impl Step for Rustc {
type Output = PathBuf;
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src/librustc")
}
fn make_run(run: RunConfig<'_>) |
/// Creates the `rustc` installer component.
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let host = self.compiler.host;
let name = pkgname(builder, "rustc");
let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
let _ = fs::remove_dir_all(&image);
let overlay = tmpdir(builder).join(format!("{}-{}-overlay", name, host.triple));
let _ = fs::remove_dir_all(&overlay);
// Prepare the rustc "image", what will actually end up getting installed
prepare_image(builder, compiler, &image);
// Prepare the overlay which is part of the tarball but won't actually be
// installed
let cp = |file: &str| {
builder.install(&builder.src.join(file), &overlay, 0o644);
};
cp("COPYRIGHT");
cp("LICENSE-APACHE");
cp("LICENSE-MIT");
cp("README.md");
// tiny morsel of metadata is used by rust-packaging
let version = builder.rust_version();
builder.create(&overlay.join("version"), &version);
if let Some(sha) = builder.rust_sha() {
builder.create(&overlay.join("git-commit-hash"), &sha);
}
// On MinGW we've got a few runtime DLL dependencies that we need to
// include. The first argument to this script is where to put these DLLs
// (the image we're creating), and the second argument is a junk directory
// to ignore all other MinGW stuff the script creates.
//
// On 32-bit MinGW we're always including a DLL which needs some extra
// licenses to distribute. On 64-bit MinGW we don't actually distribute
// anything requiring us to distribute a license, but it's likely the
// install will *also* include the rust-mingw package, which also needs
// licenses, so to be safe we just include it here in all MinGW packages.
if host.contains("pc-windows-gnu") {
make_win_dist(&image, &tmpdir(builder), host, builder);
let dst = image.join("share/doc");
t!(fs::create_dir_all(&dst));
builder.cp_r(&builder.src.join("src/etc/third-party"), &dst);
}
// Finally, wrap everything up in a nice tarball!
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rust-is-ready-to-roll.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, host.triple))
.arg("--component-name=rustc")
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.info(&format!("Dist rustc stage{} ({})", compiler.stage, host.triple));
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
builder.remove_dir(&overlay);
return distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple));
fn prepare_image(builder: &Builder<'_>, compiler: Compiler, image: &Path) {
let host = compiler.host;
let src = builder.sysroot(compiler);
// Copy rustc/rustdoc binaries
t!(fs::create_dir_all(image.join("bin")));
builder.cp_r(&src.join("bin"), &image.join("bin"));
builder.install(&builder.rustdoc(compiler), &image.join("bin"), 0o755);
let libdir_relative = builder.libdir_relative(compiler);
// Copy runtime DLLs needed by the compiler
if libdir_relative.to_str() != Some("bin") {
let libdir = builder.rustc_libdir(compiler);
for entry in builder.read_dir(&libdir) {
let name = entry.file_name();
if let Some(s) = name.to_str() {
if is_dylib(s) {
// Don't use custom libdir here because ^lib/ will be resolved again
// with installer
builder.install(&entry.path(), &image.join("lib"), 0o644);
}
}
}
}
// Copy libLLVM.so to the lib dir as well, if needed. While not
// technically needed by rustc itself it's needed by lots of other
// components like the llvm tools and LLD. LLD is included below and
// tools/LLDB come later, so let's just throw it in the rustc
// component for now.
maybe_install_llvm_runtime(builder, host, image);
// Copy over lld if it's there
if builder.config.lld_enabled {
let exe = exe("rust-lld", compiler.host);
let src =
builder.sysroot_libdir(compiler, host).parent().unwrap().join("bin").join(&exe);
// for the rationale about this rename check `compile::copy_lld_to_sysroot`
let dst = image.join("lib/rustlib").join(&*host.triple).join("bin").join(&exe);
t!(fs::create_dir_all(&dst.parent().unwrap()));
builder.copy(&src, &dst);
}
// Man pages
t!(fs::create_dir_all(image.join("share/man/man1")));
let man_src = builder.src.join("src/doc/man");
let man_dst = image.join("share/man/man1");
// Reproducible builds: If SOURCE_DATE_EPOCH is set, use that as the time.
let time = env::var("SOURCE_DATE_EPOCH")
.map(|timestamp| {
let epoch = timestamp
.parse()
.map_err(|err| format!("could not parse SOURCE_DATE_EPOCH: {}", err))
.unwrap();
time::at(Timespec::new(epoch, 0))
})
.unwrap_or_else(|_| time::now());
let month_year = t!(time::strftime("%B %Y", &time));
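// Worked example (hypothetical value): SOURCE_DATE_EPOCH=1595000000 parses to
// 17 July 2020, so `month_year` becomes "July 2020" and every man page in a
// given build embeds the same date.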
// don't use our `bootstrap::util::{copy, cp_r}`, because those try
// to hardlink, and we don't want to edit the source templates
for file_entry in builder.read_dir(&man_src) {
let page_src = file_entry.path();
let page_dst = man_dst.join(file_entry.file_name());
t!(fs::copy(&page_src, &page_dst));
// template in month/year and version number
builder.replace_in_file(
&page_dst,
&[
("<INSERT DATE HERE>", &month_year),
("<INSERT VERSION HERE>", channel::CFG_RELEASE_NUM),
],
);
}
// Debugger scripts
builder
.ensure(DebuggerScripts { sysroot: INTERNER.intern_path(image.to_owned()), host });
// Misc license info
let cp = |file: &str| {
builder.install(&builder.src.join(file), &image.join("share/doc/rust"), 0o644);
};
cp("COPYRIGHT");
cp("LICENSE-APACHE");
cp("LICENSE-MIT");
cp("README.md");
}
}
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct DebuggerScripts {
pub sysroot: Interned<PathBuf>,
pub host: TargetSelection,
}
impl Step for DebuggerScripts {
type Output = ();
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src/lldb_batchmode.py")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(DebuggerScripts {
sysroot: run.builder.sysroot(run.builder.compiler(run.builder.top_stage, run.host)),
host: run.target,
});
}
/// Copies debugger scripts for `target` into the `sysroot` specified.
fn run(self, builder: &Builder<'_>) {
let host = self.host;
let sysroot = self.sysroot;
let dst = sysroot.join("lib/rustlib/etc");
t!(fs::create_dir_all(&dst));
let cp_debugger_script = |file: &str| {
builder.install(&builder.src.join("src/etc/").join(file), &dst, 0o644);
};
if host.contains("windows-msvc") {
// windbg debugger scripts
builder.install(
&builder.src.join("src/etc/rust-windbg.cmd"),
&sysroot.join("bin"),
0o755,
);
cp_debugger_script("natvis/intrinsic.natvis");
cp_debugger_script("natvis/liballoc.natvis");
cp_debugger_script("natvis/libcore.natvis");
cp_debugger_script("natvis/libstd.natvis");
} else {
cp_debugger_script("rust_types.py");
// gdb debugger scripts
builder.install(&builder.src.join("src/etc/rust-gdb"), &sysroot.join("bin"), 0o755);
builder.install(&builder.src.join("src/etc/rust-gdbgui"), &sysroot.join("bin"), 0o755);
cp_debugger_script("gdb_load_rust_pretty_printers.py");
cp_debugger_script("gdb_lookup.py");
cp_debugger_script("gdb_providers.py");
// lldb debugger scripts
builder.install(&builder.src.join("src/etc/rust-lldb"), &sysroot.join("bin"), 0o755);
cp_debugger_script("lldb_lookup.py");
cp_debugger_script("lldb_providers.py");
cp_debugger_script("lldb_commands")
}
}
}
fn skip_host_target_lib(builder: &Builder<'_>, compiler: Compiler) -> bool {
// The only true set of target libraries came from the build triple, so
// let's reduce redundant work by only producing archives from that host.
if compiler.host != builder.config.build {
builder.info("\tskipping, not a build host");
true
} else {
false
}
}
/// Copy stamped files into an image's `target/lib` directory.
fn copy_target_libs(builder: &Builder<'_>, target: TargetSelection, image: &Path, stamp: &Path) {
let dst = image.join("lib/rustlib").join(target.triple).join("lib");
let self_contained_dst = dst.join("self-contained");
t!(fs::create_dir_all(&dst));
t!(fs::create_dir_all(&self_contained_dst));
for (path, dependency_type) in builder.read_stamp_file(stamp) {
if dependency_type == DependencyType::TargetSelfContained {
builder.copy(&path, &self_contained_dst.join(path.file_name().unwrap()));
} else if dependency_type == DependencyType::Target || builder.config.build == target {
builder.copy(&path, &dst.join(path.file_name().unwrap()));
}
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Std {
pub compiler: Compiler,
pub target: TargetSelection,
}
impl Step for Std {
type Output = PathBuf;
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("library/std")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Std {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let target = self.target;
let name = pkgname(builder, "rust-std");
let archive = distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple));
if skip_host_target_lib(builder, compiler) {
return archive;
}
builder.ensure(compile::Std { compiler, target });
let image = tmpdir(builder).join(format!("{}-{}-image", name, target.triple));
let _ = fs::remove_dir_all(&image);
let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target);
let stamp = compile::libstd_stamp(builder, compiler_to_use, target);
copy_target_libs(builder, target, &image, &stamp);
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=std-is-standing-at-the-ready.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", name, target.triple))
.arg(format!("--component-name=rust-std-{}", target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder
.info(&format!("Dist std stage{} ({} -> {})", compiler.stage, &compiler.host, target));
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
archive
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustcDev {
pub compiler: Compiler,
pub target: TargetSelection,
}
impl Step for RustcDev {
type Output = PathBuf;
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("rustc-dev")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(RustcDev {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let target = self.target;
let name = pkgname(builder, "rustc-dev");
let archive = distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple));
if skip_host_target_lib(builder, compiler) {
return archive;
}
builder.ensure(compile::Rustc { compiler, target });
let image = tmpdir(builder).join(format!("{}-{}-image", name, target.triple));
let _ = fs::remove_dir_all(&image);
let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target);
let stamp = compile::librustc_stamp(builder, compiler_to_use, target);
copy_target_libs(builder, target, &image, &stamp);
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rust-is-ready-to-develop.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", name, target.triple))
.arg(format!("--component-name=rustc-dev-{}", target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.info(&format!(
"Dist rustc-dev stage{} ({} -> {})",
compiler.stage, &compiler.host, target
));
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
archive
}
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Analysis {
pub compiler: Compiler,
pub target: TargetSelection,
}
impl Step for Analysis {
type Output = PathBuf;
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
let builder = run.builder;
run.path("analysis").default_condition(builder.config.extended)
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Analysis {
// Find the actual compiler (handling the full bootstrap option) which
// produced the save-analysis data because that data isn't copied
// through the sysroot uplifting.
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
/// Creates a tarball of save-analysis metadata, if available.
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let target = self.target;
assert!(builder.config.extended);
let name = pkgname(builder, "rust-analysis");
if compiler.host != builder.config.build {
return distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple));
}
builder.ensure(compile::Std { compiler, target });
let image = tmpdir(builder).join(format!("{}-{}-image", name, target.triple));
let src = builder
.stage_out(compiler, Mode::Std)
.join(target.triple)
.join(builder.cargo_dir())
.join("deps");
let image_src = src.join("save-analysis");
let dst = image.join("lib/rustlib").join(target.triple).join("analysis");
t!(fs::create_dir_all(&dst));
builder.info(&format!("image_src: {:?}, dst: {:?}", image_src, dst));
builder.cp_r(&image_src, &dst);
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=save-analysis-saved.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", name, target.triple))
.arg(format!("--component-name=rust-analysis-{}", target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.info("Dist analysis");
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
}
}
/// Use the `builder` to make a filtered copy of `base`/X for X in (`src_dirs` - `exclude_dirs`) to
/// `dst_dir`.
fn copy_src_dirs(
builder: &Builder<'_>,
base: &Path,
src_dirs: &[&str],
exclude_dirs: &[&str],
dst_dir: &Path,
) {
fn filter_fn(exclude_dirs: &[&str], dir: &str, path: &Path) -> bool {
let spath = match path.to_str() {
Some(path) => path,
None => return false,
};
if spath.ends_with('~') || spath.ends_with(".pyc") {
return false;
}
const LLVM_PROJECTS: &[&str] = &[
"llvm-project/clang",
"llvm-project\\clang",
"llvm-project/libunwind",
"llvm-project\\libunwind",
"llvm-project/lld",
"llvm-project\\lld",
"llvm-project/lldb",
"llvm-project\\lldb",
"llvm-project/llvm",
"llvm-project\\llvm",
"llvm-project/compiler-rt",
"llvm-project\\compiler-rt",
];
if spath.contains("llvm-project")
&& !spath.ends_with("llvm-project")
&& !LLVM_PROJECTS.iter().any(|path| spath.contains(path))
{
return false;
}
const LLVM_TEST: &[&str] = &["llvm-project/llvm/test", "llvm-project\\llvm\\test"];
if LLVM_TEST.iter().any(|path| spath.contains(path))
&& (spath.ends_with(".ll") || spath.ends_with(".td") || spath.ends_with(".s"))
{
return false;
}
let full_path = Path::new(dir).join(path);
if exclude_dirs.iter().any(|excl| full_path == Path::new(excl)) {
return false;
}
let excludes = [
"CVS",
"RCS",
"SCCS",
".git",
".gitignore",
".gitmodules",
".gitattributes",
".cvsignore",
".svn",
".arch-ids",
"{arch}",
"=RELEASE-ID",
"=meta-update",
"=update",
".bzr",
".bzrignore",
".bzrtags",
".hg",
".hgignore",
".hgrags",
"_darcs",
];
!path.iter().map(|s| s.to_str().unwrap()).any(|s| excludes.contains(&s))
}
// Copy the directories using our filter
for item in src_dirs {
let dst = &dst_dir.join(item);
t!(fs::create_dir_all(dst));
builder.cp_filtered(&base.join(item), dst, &|path| filter_fn(exclude_dirs, item, path));
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Src;
impl Step for Src {
/// The output path of the src installer tarball
type Output = PathBuf;
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Src);
}
/// Creates the `rust-src` installer component
fn run(self, builder: &Builder<'_>) -> PathBuf {
let name = pkgname(builder, "rust-src");
let image = tmpdir(builder).join(format!("{}-image", name));
let _ = fs::remove_dir_all(&image);
// A lot of tools expect the rust-src component to be entirely in this directory, so if you
// change that (e.g. by adding another directory `lib/rustlib/src/foo` or
// `lib/rustlib/src/rust/foo`), you will need to go around hunting for implicit assumptions
// and fix them...
//
// NOTE: if you update the paths here, you also should update the "virtual" path
// translation code in `imported_source_files` in `src/librustc_metadata/rmeta/decoder.rs`
let dst_src = image.join("lib/rustlib/src/rust");
t!(fs::create_dir_all(&dst_src));
let src_files = ["Cargo.lock"];
// This is the reduced set of paths which will become the rust-src component
// (essentially libstd and all of its path dependencies).
copy_src_dirs(
builder,
&builder.src,
&["library"],
&[
// not needed and contains symlinks which rustup currently
// chokes on when unpacking.
"library/backtrace/crates",
],
&dst_src,
);
for file in src_files.iter() {
builder.copy(&builder.src.join(file), &dst_src.join(file));
}
// Create source tarball in rust-installer format
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Awesome-Source.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}", name))
.arg("--component-name=rust-src")
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.info("Dist src");
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
distdir(builder).join(&format!("{}.tar.gz", name))
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct PlainSourceTarball;
impl Step for PlainSourceTarball {
/// Produces the location of the tarball generated
type Output = PathBuf;
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
let builder = run.builder;
run.path("src").default_condition(builder.config.rust_dist_src)
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(PlainSourceTarball);
}
/// Creates the plain source tarball
fn run(self, builder: &Builder<'_>) -> PathBuf {
// Make sure that the root folder of the tarball has the correct name
let plain_name = format!("{}-src", pkgname(builder, "rustc"));
let plain_dst_src = tmpdir(builder).join(&plain_name);
let _ = fs::remove_dir_all(&plain_dst_src);
t!(fs::create_dir_all(&plain_dst_src));
// This is the set of root paths which will become part of the source package
let src_files = [
"COPYRIGHT",
"LICENSE-APACHE",
"LICENSE-MIT",
"CONTRIBUTING.md",
"README.md",
"RELEASES.md",
"configure",
"x.py",
"config.toml.example",
"Cargo.toml",
"Cargo.lock",
];
let src_dirs = ["src", "compiler", "library"];
copy_src_dirs(builder, &builder.src, &src_dirs, &[], &plain_dst_src);
// Copy the files normally
for item in &src_files {
builder.copy(&builder.src.join(item), &plain_dst_src.join(item));
}
// Create the version file
builder.create(&plain_dst_src.join("version"), &builder.rust_version());
if let Some(sha) = builder.rust_sha() {
builder.create(&plain_dst_src.join("git-commit-hash"), &sha);
}
// If we're building from git sources, we need to vendor a complete distribution.
if builder.rust_info.is_git() {
// Vendor all Cargo dependencies
let mut cmd = Command::new(&builder.initial_cargo);
cmd.arg("vendor")
.arg("--sync")
.arg(builder.src.join("./src/tools/rust-analyzer/Cargo.toml"))
.current_dir(&plain_dst_src);
builder.run(&mut cmd);
}
// Create plain source tarball
let plain_name = format!("rustc-{}-src", builder.rust_package_vers());
let mut tarball = distdir(builder).join(&format!("{}.tar.gz", plain_name));
tarball.set_extension(""); // strip .gz
tarball.set_extension(""); // strip .tar
if let Some(dir) = tarball.parent() {
builder.create_dir(&dir);
}
builder.info("running installer");
let mut cmd = rust_installer(builder);
cmd.arg("tarball")
.arg("--input")
.arg(&plain_name)
.arg("--output")
.arg(&tarball)
.arg("--work-dir=.")
.current_dir(tmpdir(builder));
builder.info("Create plain source tarball");
let _time = timeit(builder);
builder.run(&mut cmd);
distdir(builder).join(&format!("{}.tar.gz", plain_name))
}
}
// We have to run a few shell scripts, which choke quite a bit on both `\`
// characters and on `C:\` paths, so normalize both of them away.
pub fn sanitize_sh(path: &Path) -> String {
let path = path.to_str().unwrap().replace("\\", "/");
return change_drive(&path).unwrap_or(path);
fn change_drive(s: &str) -> Option<String> {
let mut ch = s.chars();
let drive = ch.next().unwrap_or('C');
if ch.next() != Some(':') {
return None;
}
if ch.next() != Some('/') {
return None;
}
Some(format!("/{}/{}", drive, &s[drive.len_utf8() + 2..]))
}
}
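// Example (illustrative only): sanitize_sh(Path::new(r"C:\rust\dist")) first
// becomes "C:/rust/dist" and then "/C/rust/dist", which MSYS-style shells
// accept; non-drive paths are returned with only the backslashes normalized.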
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Cargo {
pub compiler: Compiler,
pub target: TargetSelection,
}
impl Step for Cargo {
type Output = PathBuf;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("cargo")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Cargo {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let target = self.target;
let src = builder.src.join("src/tools/cargo");
let etc = src.join("src/etc");
let release_num = builder.release_num("cargo");
let name = pkgname(builder, "cargo");
let version = builder.cargo_info.version(builder, &release_num);
let tmp = tmpdir(builder);
let image = tmp.join("cargo-image");
drop(fs::remove_dir_all(&image));
builder.create_dir(&image);
// Prepare the image directory
builder.create_dir(&image.join("share/zsh/site-functions"));
builder.create_dir(&image.join("etc/bash_completion.d"));
let cargo = builder.ensure(tool::Cargo { compiler, target });
builder.install(&cargo, &image.join("bin"), 0o755);
for man in t!(etc.join("man").read_dir()) {
let man = t!(man);
builder.install(&man.path(), &image.join("share/man/man1"), 0o644);
}
builder.install(&etc.join("_cargo"), &image.join("share/zsh/site-functions"), 0o644);
builder.copy(&etc.join("cargo.bashcomp.sh"), &image.join("etc/bash_completion.d/cargo"));
let doc = image.join("share/doc/cargo");
builder.install(&src.join("README.md"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
builder.install(&src.join("LICENSE-THIRD-PARTY"), &doc, 0o644);
// Prepare the overlay
let overlay = tmp.join("cargo-overlay");
drop(fs::remove_dir_all(&overlay));
builder.create_dir(&overlay);
builder.install(&src.join("README.md"), &overlay, 0o644);
builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
builder.install(&src.join("LICENSE-THIRD-PARTY"), &overlay, 0o644);
builder.create(&overlay.join("version"), &version);
// Generate the installer tarball
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rust-is-ready-to-roll.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, target.triple))
.arg("--component-name=cargo")
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.info(&format!("Dist cargo stage{} ({})", compiler.stage, target));
let _time = timeit(builder);
builder.run(&mut cmd);
distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Rls {
pub compiler: Compiler,
pub target: TargetSelection,
}
impl Step for Rls {
type Output = Option<PathBuf>;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("rls")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Rls {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
let compiler = self.compiler;
let target = self.target;
assert!(builder.config.extended);
let src = builder.src.join("src/tools/rls");
let release_num = builder.release_num("rls");
let name = pkgname(builder, "rls");
let version = builder.rls_info.version(builder, &release_num);
let tmp = tmpdir(builder);
let image = tmp.join("rls-image");
drop(fs::remove_dir_all(&image));
t!(fs::create_dir_all(&image));
// Prepare the image directory
// We expect RLS to build, because we've exited this step above if tool
// state for RLS isn't testing.
let rls = builder
.ensure(tool::Rls { compiler, target, extra_features: Vec::new() })
.or_else(|| {
missing_tool("RLS", builder.build.config.missing_tools);
None
})?;
builder.install(&rls, &image.join("bin"), 0o755);
let doc = image.join("share/doc/rls");
builder.install(&src.join("README.md"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
// Prepare the overlay
let overlay = tmp.join("rls-overlay");
drop(fs::remove_dir_all(&overlay));
t!(fs::create_dir_all(&overlay));
builder.install(&src.join("README.md"), &overlay, 0o644);
builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
builder.create(&overlay.join("version"), &version);
// Generate the installer tarball
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=RLS-ready-to-serve.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--component-name=rls-preview");
builder.info(&format!("Dist RLS stage{} ({})", compiler.stage, target.triple));
let _time = timeit(builder);
builder.run(&mut cmd);
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustAnalyzer {
pub compiler: Compiler,
pub target: TargetSelection,
}
impl Step for RustAnalyzer {
type Output = Option<PathBuf>;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("rust-analyzer")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(RustAnalyzer {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
let compiler = self.compiler;
let target = self.target;
assert!(builder.config.extended);
if target.contains("riscv64") {
// riscv64 currently has an LLVM bug that makes rust-analyzer unable
// to build. See #74813 for details.
return None;
}
let src = builder.src.join("src/tools/rust-analyzer");
let release_num = builder.release_num("rust-analyzer/crates/rust-analyzer");
let name = pkgname(builder, "rust-analyzer");
let version = builder.rust_analyzer_info.version(builder, &release_num);
let tmp = tmpdir(builder);
let image = tmp.join("rust-analyzer-image");
drop(fs::remove_dir_all(&image));
builder.create_dir(&image);
// Prepare the image directory
// We expect rust-analyzer to always build, as it doesn't depend on rustc internals
// and doesn't have associated toolstate.
let rust_analyzer = builder
.ensure(tool::RustAnalyzer { compiler, target, extra_features: Vec::new() })
.expect("rust-analyzer always builds");
builder.install(&rust_analyzer, &image.join("bin"), 0o755);
let doc = image.join("share/doc/rust-analyzer");
builder.install(&src.join("README.md"), &doc, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
// Prepare the overlay
let overlay = tmp.join("rust-analyzer-overlay");
drop(fs::remove_dir_all(&overlay));
t!(fs::create_dir_all(&overlay));
builder.install(&src.join("README.md"), &overlay, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
builder.create(&overlay.join("version"), &version);
// Generate the installer tarball
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=rust-analyzer-ready-to-serve.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--component-name=rust-analyzer-preview");
builder.info(&format!("Dist rust-analyzer stage{} ({})", compiler.stage, target));
let _time = timeit(builder);
builder.run(&mut cmd);
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Clippy {
pub compiler: Compiler,
pub target: TargetSelection,
}
impl Step for Clippy {
type Output = PathBuf;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("clippy")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Clippy {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let target = self.target;
assert!(builder.config.extended);
let src = builder.src.join("src/tools/clippy");
let release_num = builder.release_num("clippy");
let name = pkgname(builder, "clippy");
let version = builder.clippy_info.version(builder, &release_num);
let tmp = tmpdir(builder);
let image = tmp.join("clippy-image");
drop(fs::remove_dir_all(&image));
builder.create_dir(&image);
// Prepare the image directory
// We expect clippy to build, because we've exited this step above if tool
// state for clippy isn't testing.
let clippy = builder
.ensure(tool::Clippy { compiler, target, extra_features: Vec::new() })
.expect("clippy expected to build - essential tool");
let cargoclippy = builder
.ensure(tool::CargoClippy { compiler, target, extra_features: Vec::new() })
.expect("clippy expected to build - essential tool");
builder.install(&clippy, &image.join("bin"), 0o755);
builder.install(&cargoclippy, &image.join("bin"), 0o755);
let doc = image.join("share/doc/clippy");
builder.install(&src.join("README.md"), &doc, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
// Prepare the overlay
let overlay = tmp.join("clippy-overlay");
drop(fs::remove_dir_all(&overlay));
t!(fs::create_dir_all(&overlay));
builder.install(&src.join("README.md"), &overlay, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
builder.create(&overlay.join("version"), &version);
// Generate the installer tarball
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=clippy-ready-to-serve.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--component-name=clippy-preview");
builder.info(&format!("Dist clippy stage{} ({})", compiler.stage, target));
let _time = timeit(builder);
builder.run(&mut cmd);
distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Miri {
pub compiler: Compiler,
pub target: TargetSelection,
}
impl Step for Miri {
type Output = Option<PathBuf>;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("miri")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Miri {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
let compiler = self.compiler;
let target = self.target;
assert!(builder.config.extended);
let src = builder.src.join("src/tools/miri");
let release_num = builder.release_num("miri");
let name = pkgname(builder, "miri");
let version = builder.miri_info.version(builder, &release_num);
let tmp = tmpdir(builder);
let image = tmp.join("miri-image");
drop(fs::remove_dir_all(&image));
builder.create_dir(&image);
// Prepare the image directory
// We expect miri to build, because we've exited this step above if tool
// state for miri isn't testing.
let miri = builder
.ensure(tool::Miri { compiler, target, extra_features: Vec::new() })
.or_else(|| {
missing_tool("miri", builder.build.config.missing_tools);
None
})?;
let cargomiri = builder
.ensure(tool::CargoMiri { compiler, target, extra_features: Vec::new() })
.or_else(|| {
missing_tool("cargo miri", builder.build.config.missing_tools);
None
})?;
builder.install(&miri, &image.join("bin"), 0o755);
builder.install(&cargomiri, &image.join("bin"), 0o755);
let doc = image.join("share/doc/miri");
builder.install(&src.join("README.md"), &doc, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
// Prepare the overlay
let overlay = tmp.join("miri-overlay");
drop(fs::remove_dir_all(&overlay));
t!(fs::create_dir_all(&overlay));
builder.install(&src.join("README.md"), &overlay, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
builder.create(&overlay.join("version"), &version);
// Generate the installer tarball
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=miri-ready-to-serve.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--component-name=miri-preview");
builder.info(&format!("Dist miri stage{} ({})", compiler.stage, target));
let _time = timeit(builder);
builder.run(&mut cmd);
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Rustfmt {
pub compiler: Compiler,
pub target: TargetSelection,
}
impl Step for Rustfmt {
type Output = Option<PathBuf>;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("rustfmt")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Rustfmt {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
let compiler = self.compiler;
let target = self.target;
let src = builder.src.join("src/tools/rustfmt");
let release_num = builder.release_num("rustfmt");
let name = pkgname(builder, "rustfmt");
let version = builder.rustfmt_info.version(builder, &release_num);
let tmp = tmpdir(builder);
let image = tmp.join("rustfmt-image");
drop(fs::remove_dir_all(&image));
builder.create_dir(&image);
// Prepare the image directory
let rustfmt = builder
.ensure(tool::Rustfmt { compiler, target, extra_features: Vec::new() })
.or_else(|| {
missing_tool("Rustfmt", builder.build.config.missing_tools);
None
})?;
let cargofmt = builder
.ensure(tool::Cargofmt { compiler, target, extra_features: Vec::new() })
.or_else(|| {
missing_tool("Cargofmt", builder.build.config.missing_tools);
None
})?;
builder.install(&rustfmt, &image.join("bin"), 0o755);
builder.install(&cargofmt, &image.join("bin"), 0o755);
let doc = image.join("share/doc/rustfmt");
builder.install(&src.join("README.md"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
// Prepare the overlay
let overlay = tmp.join("rustfmt-overlay");
drop(fs::remove_dir_all(&overlay));
builder.create_dir(&overlay);
builder.install(&src.join("README.md"), &overlay, 0o644);
builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
builder.create(&overlay.join("version"), &version);
// Generate the installer tarball
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=rustfmt-ready-to-fmt.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--component-name=rustfmt-preview");
builder.info(&format!("Dist Rustfmt stage{} ({})", compiler.stage, target));
let _time = timeit(builder);
builder.run(&mut cmd);
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Extended {
stage: u32,
host: TargetSelection,
target: TargetSelection,
}
impl Step for Extended {
type Output = ();
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
let builder = run.builder;
run.path("extended").default_condition(builder.config.extended)
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Extended {
stage: run.builder.top_stage,
host: run.builder.config.build,
target: run.target,
});
}
/// Creates a combined installer for the specified target in the provided stage.
fn run(self, builder: &Builder<'_>) {
let target = self.target;
let stage = self.stage;
let compiler = builder.compiler_for(self.stage, self.host, self.target);
builder.info(&format!("Dist extended stage{} ({})", compiler.stage, target));
let rustc_installer = builder.ensure(Rustc { compiler: builder.compiler(stage, target) });
let cargo_installer = builder.ensure(Cargo { compiler, target });
let rustfmt_installer = builder.ensure(Rustfmt { compiler, target });
let rls_installer = builder.ensure(Rls { compiler, target });
let rust_analyzer_installer = builder.ensure(RustAnalyzer { compiler, target });
let llvm_tools_installer = builder.ensure(LlvmTools { target });
let clippy_installer = builder.ensure(Clippy { compiler, target });
let miri_installer = builder.ensure(Miri { compiler, target });
let mingw_installer = builder.ensure(Mingw { host: target });
let analysis_installer = builder.ensure(Analysis { compiler, target });
let docs_installer = builder.ensure(Docs { host: target });
let std_installer =
builder.ensure(Std { compiler: builder.compiler(stage, target), target });
let tmp = tmpdir(builder);
let overlay = tmp.join("extended-overlay");
let etc = builder.src.join("src/etc/installer");
let work = tmp.join("work");
let _ = fs::remove_dir_all(&overlay);
builder.install(&builder.src.join("COPYRIGHT"), &overlay, 0o644);
builder.install(&builder.src.join("LICENSE-APACHE"), &overlay, 0o644);
builder.install(&builder.src.join("LICENSE-MIT"), &overlay, 0o644);
let version = builder.rust_version();
builder.create(&overlay.join("version"), &version);
if let Some(sha) = builder.rust_sha() {
builder.create(&overlay.join("git-commit-hash"), &sha);
}
builder.install(&etc.join("README.md"), &overlay, 0o644);
// When the rust-std package was split from rustc, we needed to ensure that
// during upgrades rustc was upgraded before rust-std, to avoid rustc
// clobbering the std files during uninstall. To do this, ensure that rustc
// comes before rust-std in the list below.
let mut tarballs = Vec::new();
tarballs.push(rustc_installer);
tarballs.push(cargo_installer);
tarballs.extend(rls_installer.clone());
tarballs.extend(rust_analyzer_installer.clone());
tarballs.push(clippy_installer);
tarballs.extend(miri_installer.clone());
tarballs.extend(rustfmt_installer.clone());
tarballs.extend(llvm_tools_installer);
tarballs.push(analysis_installer);
tarballs.push(std_installer);
if builder.config.docs {
tarballs.push(docs_installer);
}
if target.contains("pc-windows-gnu") {
tarballs.push(mingw_installer.unwrap());
}
let mut input_tarballs = tarballs[0].as_os_str().to_owned();
for tarball in &tarballs[1..] {
input_tarballs.push(",");
input_tarballs.push(tarball);
}
builder.info("building combined installer");
let mut cmd = rust_installer(builder);
cmd.arg("combine")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rust-is-ready-to-roll.")
.arg("--work-dir")
.arg(&work)
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", pkgname(builder, "rust"), target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--input-tarballs")
.arg(input_tarballs)
.arg("--non-installed-overlay")
.arg(&overlay);
let time = timeit(&builder);
builder.run(&mut cmd);
drop(time);
let mut license = String::new();
license += &builder.read(&builder.src.join("COPYRIGHT"));
license += &builder.read(&builder.src.join("LICENSE-APACHE"));
license += &builder.read(&builder.src.join("LICENSE-MIT"));
license.push_str("\n");
license.push_str("\n");
let rtf = r"{\rtf1\ansi\deff0{\fonttbl{\f0\fnil\fcharset0 Arial;}}\nowwrap\fs18";
let mut rtf = rtf.to_string();
rtf.push_str("\n");
for line in license.lines() {
rtf.push_str(line);
rtf.push_str("\\line ");
}
rtf.push_str("}");
fn filter(contents: &str, marker: &str) -> String {
let start = format!("tool-{}-start", marker);
let end = format!("tool-{}-end", marker);
let mut lines = Vec::new();
let mut omitted = false;
for line in contents.lines() {
if line.contains(&start) {
omitted = true;
} else if line.contains(&end) {
omitted = false;
} else if !omitted {
lines.push(line);
}
}
lines.join("\n")
}
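// Rewrite an installer template into the temp dir, filtering out the
// sections for any optional tool (RLS, rust-analyzer, miri, rustfmt) that
// was not built for this dist.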
let xform = |p: &Path| {
let mut contents = t!(fs::read_to_string(p));
if rls_installer.is_none() {
contents = filter(&contents, "rls");
}
if rust_analyzer_installer.is_none() {
contents = filter(&contents, "rust-analyzer");
}
if miri_installer.is_none() {
contents = filter(&contents, "miri");
}
if rustfmt_installer.is_none() {
contents = filter(&contents, "rustfmt");
}
let ret = tmp.join(p.file_name().unwrap());
t!(fs::write(&ret, &contents));
ret
};
if target.contains("apple-darwin") {
builder.info("building pkg installer");
let pkg = tmp.join("pkg");
let _ = fs::remove_dir_all(&pkg);
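// Build a macOS component package with `pkgbuild`. The component directory
// (populated by `prepare` below) is passed as the scripts dir and
// `--nopayload` is used, so installation happens via the bundled
// postinstall script.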
let pkgbuild = |component: &str| {
let mut cmd = Command::new("pkgbuild");
cmd.arg("--identifier")
.arg(format!("org.rust-lang.{}", component))
.arg("--scripts")
.arg(pkg.join(component))
.arg("--nopayload")
.arg(pkg.join(component).with_extension("pkg"));
builder.run(&mut cmd);
};
let prepare = |name: &str| {
builder.create_dir(&pkg.join(name));
builder.cp_r(
&work.join(&format!("{}-{}", pkgname(builder, name), target.triple)),
&pkg.join(name),
);
builder.install(&etc.join("pkg/postinstall"), &pkg.join(name), 0o755);
pkgbuild(name);
};
prepare("rustc");
prepare("cargo");
prepare("rust-docs");
prepare("rust-std");
prepare("rust-analysis");
prepare("clippy");
if rls_installer.is_some() {
prepare("rls");
}
if rust_analyzer_installer.is_some() {
prepare("rust-analyzer");
}
if miri_installer.is_some() {
prepare("miri");
}
// create an 'uninstall' package
builder.install(&etc.join("pkg/postinstall"), &pkg.join("uninstall"), 0o755);
pkgbuild("uninstall");
builder.create_dir(&pkg.join("res"));
builder.create(&pkg.join("res/LICENSE.txt"), &license);
builder.install(&etc.join("gfx/rust-logo.png"), &pkg.join("res"), 0o644);
let mut cmd = Command::new("productbuild");
cmd.arg("--distribution")
.arg(xform(&etc.join("pkg/Distribution.xml")))
.arg("--resources")
.arg(pkg.join("res"))
.arg(distdir(builder).join(format!(
"{}-{}.pkg",
pkgname(builder, "rust"),
target.triple
)))
.arg("--package-path")
.arg(&pkg);
let _time = timeit(builder);
builder.run(&mut cmd);
}
if target.contains("windows") {
let exe = tmp.join("exe");
let _ = fs::remove_dir_all(&exe);
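// Stage each component's files under `exe/<name>` for the MSI build,
// mapping the on-disk package directory name (e.g. `rls-preview`) back to
// the short component name and dropping the rust-installer manifest.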
let prepare = |name: &str| {
builder.create_dir(&exe.join(name));
let dir = if name == "rust-std" || name == "rust-analysis" {
format!("{}-{}", name, target.triple)
} else if name == "rls" {
"rls-preview".to_string()
} else if name == "rust-analyzer" {
"rust-analyzer-preview".to_string()
} else if name == "clippy" {
"clippy-preview".to_string()
} else if name == "miri" {
"miri-preview".to_string()
} else {
name.to_string()
};
builder.cp_r(
&work.join(&format!("{}-{}", pkgname(builder, name), target.triple)).join(dir),
&exe.join(name),
);
builder.remove(&exe.join(name).join("manifest.in"));
};
prepare("rustc");
prepare("cargo");
prepare("rust-analysis");
prepare("rust-docs");
prepare("rust-std");
prepare("clippy");
if rls_installer.is_some() {
prepare("rls");
}
if rust_analyzer_installer.is_some() {
prepare("rust-analyzer");
}
if miri_installer.is_some() {
prepare("miri");
}
if target.contains("windows-gnu") {
prepare("rust-mingw");
}
builder.install(&etc.join("gfx/rust-logo.ico"), &exe, 0o644);
// Generate msi installer
let wix = PathBuf::from(env::var_os("WIX").unwrap());
let heat = wix.join("bin/heat.exe");
let candle = wix.join("bin/candle.exe");
let light = wix.join("bin/light.exe");
let heat_flags = ["-nologo", "-gg", "-sfrag", "-srd", "-sreg"];
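// Use WiX `heat.exe` to harvest each staged component directory into a
// .wxs fragment; the fragments are then compiled by `candle` and linked
// into the final MSI by `light` below.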
builder.run(
Command::new(&heat)
.current_dir(&exe)
.arg("dir")
.arg("rustc")
.args(&heat_flags)
.arg("-cg")
.arg("RustcGroup")
.arg("-dr")
.arg("Rustc")
.arg("-var")
.arg("var.RustcDir")
.arg("-out")
.arg(exe.join("RustcGroup.wxs")),
);
builder.run(
Command::new(&heat)
.current_dir(&exe)
.arg("dir")
.arg("rust-docs")
.args(&heat_flags)
.arg("-cg")
.arg("DocsGroup")
.arg("-dr")
.arg("Docs")
.arg("-var")
.arg("var.DocsDir")
.arg("-out")
.arg(exe.join("DocsGroup.wxs"))
.arg("-t")
.arg(etc.join("msi/squash-components.xsl")),
);
builder.run(
Command::new(&heat)
.current_dir(&exe)
.arg("dir")
.arg("cargo")
.args(&heat_flags)
.arg("-cg")
.arg("CargoGroup")
.arg("-dr")
.arg("Cargo")
.arg("-var")
.arg("var.CargoDir")
.arg("-out")
.arg(exe.join("CargoGroup.wxs"))
.arg("-t")
.arg(etc.join("msi/remove-duplicates.xsl")),
);
builder.run(
Command::new(&heat)
.current_dir(&exe)
.arg("dir")
.arg("rust-std")
.args(&heat_flags)
.arg("-cg")
.arg("StdGroup")
.arg("-dr")
.arg("Std")
.arg("-var")
.arg("var.StdDir")
.arg("-out")
.arg(exe.join("StdGroup.wxs")),
);
if rls_installer.is_some() {
builder.run(
Command::new(&heat)
.current_dir(&exe)
.arg("dir")
.arg("rls")
.args(&heat_flags)
.arg("-cg")
.arg("RlsGroup")
.arg("-dr")
.arg("Rls")
.arg("-var")
.arg("var.RlsDir")
.arg("-out")
.arg(exe.join("RlsGroup.wxs"))
.arg("-t")
.arg(etc.join("msi/remove-duplicates.xsl")),
);
}
if rust_analyzer_installer.is_some() {
builder.run(
Command::new(&heat)
.current_dir(&exe)
.arg("dir")
.arg("rust-analyzer")
.args(&heat_flags)
.arg("-cg")
.arg("RustAnalyzerGroup")
.arg("-dr")
.arg("RustAnalyzer")
.arg("-var")
.arg("var.RustAnalyzerDir")
.arg("-out")
.arg(exe.join("RustAnalyzerGroup.wxs"))
.arg("-t")
.arg(etc.join("msi/remove-duplicates.xsl")),
);
}
builder.run(
Command::new(&heat)
.current_dir(&exe)
.arg("dir")
.arg("clippy")
.args(&heat_flags)
.arg("-cg")
.arg("ClippyGroup")
.arg("-dr")
.arg("Clippy")
.arg("-var")
.arg("var.ClippyDir")
.arg("-out")
.arg(exe.join("ClippyGroup.wxs"))
.arg("-t")
.arg(etc.join("msi/remove-duplicates.xsl")),
);
if miri_installer.is_some() {
builder.run(
Command::new(&heat)
.current_dir(&exe)
.arg("dir")
.arg("miri")
.args(&heat_flags)
.arg("-cg")
.arg("MiriGroup")
.arg("-dr")
.arg("Miri")
.arg("-var")
.arg("var.MiriDir")
.arg("-out")
.arg(exe.join("MiriGroup.wxs"))
.arg("-t")
.arg(etc.join("msi/remove-duplicates.xsl")),
);
}
builder.run(
Command::new(&heat)
.current_dir(&exe)
.arg("dir")
.arg("rust-analysis")
.args(&heat_flags)
.arg("-cg")
.arg("AnalysisGroup")
.arg("-dr")
.arg("Analysis")
.arg("-var")
.arg("var.AnalysisDir")
.arg("-out")
.arg(exe.join("AnalysisGroup.wxs"))
.arg("-t")
.arg(etc.join("msi/remove-duplicates.xsl")),
);
if target.contains("windows-gnu") {
builder.run(
Command::new(&heat)
.current_dir(&exe)
.arg("dir")
.arg("rust-mingw")
.args(&heat_flags)
.arg("-cg")
.arg("GccGroup")
.arg("-dr")
.arg("Gcc")
.arg("-var")
.arg("var.GccDir")
.arg("-out")
.arg(exe.join("GccGroup.wxs")),
);
}
let candle = |input: &Path| {
let output = exe.join(input.file_stem().unwrap()).with_extension("wixobj");
let arch = if target.contains("x86_64") { "x64" } else { "x86" };
let mut cmd = Command::new(&candle);
cmd.current_dir(&exe)
.arg("-nologo")
.arg("-dRustcDir=rustc")
.arg("-dDocsDir=rust-docs")
.arg("-dCargoDir=cargo")
.arg("-dStdDir=rust-std")
.arg("-dAnalysisDir=rust-analysis")
.arg("-dClippyDir=clippy")
.arg("-arch")
.arg(&arch)
.arg("-out")
.arg(&output)
.arg(&input);
add_env(builder, &mut cmd, target);
if rls_installer.is_some() {
cmd.arg("-dRlsDir=rls");
}
if rust_analyzer_installer.is_some() {
cmd.arg("-dRustAnalyzerDir=rust-analyzer");
}
if miri_installer.is_some() {
cmd.arg("-dMiriDir=miri");
}
if target.contains("windows-gnu") {
cmd.arg("-dGccDir=rust-mingw");
}
builder.run(&mut cmd);
};
candle(&xform(&etc.join("msi/rust.wxs")));
candle(&etc.join("msi/ui.wxs"));
candle(&etc.join("msi/rustwelcomedlg.wxs"));
candle("RustcGroup.wxs".as_ref());
candle("DocsGroup.wxs".as_ref());
candle("CargoGroup.wxs".as_ref());
candle("StdGroup.wxs".as_ref());
candle("ClippyGroup.wxs".as_ref());
if rls_installer.is_some() {
candle("RlsGroup.wxs".as_ref());
}
if rust_analyzer_installer.is_some() {
candle("RustAnalyzerGroup.wxs".as_ref());
}
if miri_installer.is_some() {
candle("MiriGroup.wxs".as_ref());
}
candle("AnalysisGroup.wxs".as_ref());
if target.contains("windows-gnu") {
candle("GccGroup.wxs".as_ref());
}
builder.create(&exe.join("LICENSE.rtf"), &rtf);
builder.install(&etc.join("gfx/banner.bmp"), &exe, 0o644);
builder.install(&etc.join("gfx/dialogbg.bmp"), &exe, 0o644);
builder.info(&format!("building `msi` installer with {:?}", light));
let filename = format!("{}-{}.msi", pkgname(builder, "rust"), target.triple);
let mut cmd = Command::new(&light);
cmd.arg("-nologo")
.arg("-ext")
.arg("WixUIExtension")
.arg("-ext")
.arg("WixUtilExtension")
.arg("-out")
.arg(exe.join(&filename))
.arg("rust.wixobj")
.arg("ui.wixobj")
.arg("rustwelcomedlg.wixobj")
.arg("RustcGroup.wixobj")
.arg("DocsGroup.wixobj")
.arg("CargoGroup.wixobj")
.arg("StdGroup.wixobj")
.arg("AnalysisGroup.wixobj")
.arg("ClippyGroup.wixobj")
.current_dir(&exe);
if rls_installer.is_some() {
cmd.arg("RlsGroup.wixobj");
}
if rust_analyzer_installer.is_some() {
cmd.arg("RustAnalyzerGroup.wixobj");
}
if miri_installer.is_some() {
cmd.arg("MiriGroup.wixobj");
}
if target.contains("windows-gnu") {
cmd.arg("GccGroup.wixobj");
}
// ICE57 wrongly complains about the shortcuts
cmd.arg("-sice:ICE57");
let _time = timeit(builder);
builder.run(&mut cmd);
if !builder.config.dry_run {
t!(fs::rename(exe.join(&filename), distdir(builder).join(&filename)));
}
}
}
}
fn add_env(builder: &Builder<'_>, cmd: &mut Command, target: TargetSelection) {
let mut parts = channel::CFG_RELEASE_NUM.split('.');
cmd.env("CFG_RELEASE_INFO", builder.rust_version())
.env("CFG_RELEASE_NUM", channel::CFG_RELEASE_NUM)
.env("CFG_RELEASE", builder.rust_release())
.env("CFG_VER_MAJOR", parts.next().unwrap())
.env("CFG_VER_MINOR", parts.next().unwrap())
.env("CFG_VER_PATCH", parts.next().unwrap())
.env("CFG_VER_BUILD", "0") // just needed to build
.env("CFG_PACKAGE_VERS", builder.rust_package_vers())
.env("CFG_PACKAGE_NAME", pkgname(builder, "rust"))
.env("CFG_BUILD", target.triple)
.env("CFG_CHANNEL", &builder.config.channel);
if target.contains("windows-gnu") {
cmd.env("CFG_MINGW", "1").env("CFG_ABI", "GNU");
} else {
cmd.env("CFG_MINGW", "0").env("CFG_ABI", "MSVC");
}
if target.contains("x86_64") {
cmd.env("CFG_PLATFORM", "x64");
} else {
cmd.env("CFG_PLATFORM", "x86");
}
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct HashSign;
impl Step for HashSign {
type Output = ();
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("hash-and-sign")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(HashSign);
}
fn run(self, builder: &Builder<'_>) {
// This gets called by `promote-release`
// (https://github.com/rust-lang/rust-central-station/tree/master/promote-release).
let mut cmd = builder.tool_cmd(Tool::BuildManifest);
if builder.config.dry_run {
return;
}
let sign = builder.config.dist_sign_folder.as_ref().unwrap_or_else(|| {
panic!("\n\nfailed to specify `dist.sign-folder` in `config.toml`\n\n")
});
let addr = builder.config.dist_upload_addr.as_ref().unwrap_or_else(|| {
panic!("\n\nfailed to specify `dist.upload-addr` in `config.toml`\n\n")
});
let pass = if env::var("BUILD_MANIFEST_DISABLE_SIGNING").is_err() {
let file = builder.config.dist_gpg_password_file.as_ref().unwrap_or_else(|| {
panic!("\n\nfailed to specify `dist.gpg-password-file` in `config.toml`\n\n")
});
t!(fs::read_to_string(&file))
} else {
String::new()
};
let today = output(Command::new("date").arg("+%Y-%m-%d"));
cmd.arg(sign);
cmd.arg(distdir(builder));
cmd.arg(today.trim());
cmd.arg(builder.rust_package_vers());
cmd.arg(addr);
cmd.arg(builder.package_vers(&builder.release_num("cargo")));
cmd.arg(builder.package_vers(&builder.release_num("rls")));
cmd.arg(builder.package_vers(&builder.release_num("rust-analyzer/crates/rust-analyzer")));
cmd.arg(builder.package_vers(&builder.release_num("clippy")));
cmd.arg(builder.package_vers(&builder.release_num("miri")));
cmd.arg(builder.package_vers(&builder.release_num("rustfmt")));
cmd.arg(builder.llvm_tools_package_vers());
builder.create_dir(&distdir(builder));
let mut child = t!(cmd.stdin(Stdio::piped()).spawn());
t!(child.stdin.take().unwrap().write_all(pass.as_bytes()));
let status = t!(child.wait());
assert!(status.success());
}
}
/// Maybe add libLLVM.so to the given destination lib-dir. It will only have
/// been built if LLVM tools are linked dynamically.
///
/// Note: This function does not yet support Windows, but we also don't support
/// linking LLVM tools dynamically on Windows yet.
fn maybe_install_llvm(builder: &Builder<'_>, target: TargetSelection, dst_libdir: &Path) {
let src_libdir = builder.llvm_out(target).join("lib");
if target.contains("apple-darwin") {
let llvm_dylib_path = src_libdir.join("libLLVM.dylib");
if llvm_dylib_path.exists() {
builder.install(&llvm_dylib_path, dst_libdir, 0o644);
}
return;
}
// Usually libLLVM.so is a symlink to something like libLLVM-6.0.so.
// Since tools link to the latter rather than the former, we have to
// follow the symlink to find out what to distribute.
let llvm_dylib_path = src_libdir.join("libLLVM.so");
if llvm_dylib_path.exists() {
let llvm_dylib_path = llvm_dylib_path.canonicalize().unwrap_or_else(|e| {
panic!("dist: Error calling canonicalize path `{}`: {}", llvm_dylib_path.display(), e);
});
builder.install(&llvm_dylib_path, dst_libdir, 0o644);
}
}
/// Maybe add libLLVM.so to the target lib-dir for linking.
pub fn maybe_install_llvm_target(builder: &Builder<'_>, target: TargetSelection, sysroot: &Path) {
let dst_libdir = sysroot.join("lib/rustlib").join(&*target.triple).join("lib");
maybe_install_llvm(builder, target, &dst_libdir);
}
/// Maybe add libLLVM.so to the runtime lib-dir for rustc itself.
pub fn maybe_install_llvm_runtime(builder: &Builder<'_>, target: TargetSelection, sysroot: &Path) {
let dst_libdir =
sysroot.join(builder.sysroot_libdir_relative(Compiler { stage: 1, host: target }));
maybe_install_llvm(builder, target, &dst_libdir);
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct LlvmTools {
pub target: TargetSelection,
}
impl Step for LlvmTools {
type Output = Option<PathBuf>;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("llvm-tools")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(LlvmTools { target: run.target });
}
fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
let target = self.target;
assert!(builder.config.extended);
/* run only if llvm-config isn't used */
if let Some(config) = builder.config.target_config.get(&target) {
if let Some(ref _s) = config.llvm_config {
builder.info(&format!("Skipping LlvmTools ({}): external LLVM", target));
return None;
}
}
builder.info(&format!("Dist LlvmTools ({})", target));
let _time = timeit(builder);
let src = builder.src.join("src/llvm-project/llvm");
let name = pkgname(builder, "llvm-tools");
let tmp = tmpdir(builder);
let image = tmp.join("llvm-tools-image");
drop(fs::remove_dir_all(&image));
// Prepare the image directory
let src_bindir = builder.llvm_out(target).join("bin");
let dst_bindir = image.join("lib/rustlib").join(&*target.triple).join("bin");
t!(fs::create_dir_all(&dst_bindir));
for tool in LLVM_TOOLS {
let exe = src_bindir.join(exe(tool, target));
builder.install(&exe, &dst_bindir, 0o755);
}
// Copy libLLVM.so to the target lib dir as well, so the RPATH like
// `$ORIGIN/../lib` can find it. It may also be used as a dependency
// of `rustc-dev` to support the inherited `-lLLVM` when using the
// compiler libraries.
maybe_install_llvm_target(builder, target, &image);
// Prepare the overlay
let overlay = tmp.join("llvm-tools-overlay");
drop(fs::remove_dir_all(&overlay));
builder.create_dir(&overlay);
builder.install(&src.join("README.txt"), &overlay, 0o644);
builder.install(&src.join("LICENSE.TXT"), &overlay, 0o644);
builder.create(&overlay.join("version"), &builder.llvm_tools_vers());
// Generate the installer tarball
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=llvm-tools-installed.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--component-name=llvm-tools-preview");
builder.run(&mut cmd);
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
}
}
// Tarball intended for internal consumption to ease rustc/std development.
//
// Should not be considered stable by end users.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct RustDev {
pub target: TargetSelection,
}
impl Step for RustDev {
type Output = Option<PathBuf>;
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("rust-dev")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(RustDev { target: run.target });
}
fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
let target = self.target;
builder.info(&format!("Dist RustDev ({})", target));
let _time = timeit(builder);
let src = builder.src.join("src/llvm-project/llvm");
let name = pkgname(builder, "rust-dev");
let tmp = tmpdir(builder);
let image = tmp.join("rust-dev-image");
drop(fs::remove_dir_all(&image));
// Prepare the image directory
let dst_bindir = image.join("bin");
t!(fs::create_dir_all(&dst_bindir));
let exe = builder.llvm_out(target).join("bin").join(exe("llvm-config", target));
builder.install(&exe, &dst_bindir, 0o755);
builder.install(&builder.llvm_filecheck(target), &dst_bindir, 0o755);
// Copy the include directory as well; needed mostly to build
// librustc_llvm properly (e.g., llvm-config.h is in here). But also
// just broadly useful to be able to link against the bundled LLVM.
builder.cp_r(&builder.llvm_out(target).join("include"), &image.join("include"));
// Copy libLLVM.so to the target lib dir as well, so the RPATH like
// `$ORIGIN/../lib` can find it. It may also be used as a dependency
// of `rustc-dev` to support the inherited `-lLLVM` when using the
// compiler libraries.
maybe_install_llvm(builder, target, &image.join("lib"));
// Prepare the overlay
let overlay = tmp.join("rust-dev-overlay");
drop(fs::remove_dir_all(&overlay));
builder.create_dir(&overlay);
builder.install(&src.join("README.txt"), &overlay, 0o644);
builder.install(&src.join("LICENSE.TXT"), &overlay, 0o644);
builder.create(&overlay.join("version"), &builder.rust_version());
// Generate the installer tarball
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=rust-dev-installed.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--component-name=rust-dev");
builder.run(&mut cmd);
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
}
}
| {
run.builder
.ensure(Rustc { compiler: run.builder.compiler(run.builder.top_stage, run.target) });
} |
authenticateUser.ts | import { NextFunction, Request, Response } from "express";
import jwt from "jsonwebtoken";
import { promisify } from "util";
import { asyncMiddleware } from "./asyncMiddleware";
import { ErrorResponse } from "./globalError";
import { User } from "../models/User";
export const authenticateUser = asyncMiddleware(
async (req: Request, _: Response, next: NextFunction) => {
// 1) check that an auth token is provided in the Authorization header
const token = req.headers.authorization;
if (!token) {
return next(
new ErrorResponse("you're not logged in, please login !", 401)
);
}
// 2) verify the token
const decoded = await promisify(jwt.verify)(
token,
// @ts-ignore
process.env.SECRET || "jwt-secret"
);
// 3) Check user still exists
// @ts-ignore
const currentUser = await User.findById(decoded.id);
if (!currentUser) {
return next(
new ErrorResponse("token expired or user doesn't exists", 401)
);
}
// 4) check whether the password was changed after sign-in
// @ts-ignore
if (currentUser.isPasswordChangedAfterSignIn(decoded.iat)) {
return next( | )
);
}
req.user = currentUser;
next();
}
); | new ErrorResponse(
"you've changed password !, Enter latest password",
400 |
eng.js | export default {
'ADD_PREFIX': 'Add',
'ADD_PREFIX_2': 'Add another',
'ADD_SUFFIX': '',
'ADD_TO_PREFIX': 'Add to',
'ADD_TO_SUFFIX': '',
'less 10MB': '< 10MB',
'more 1GB': '> 1GB',
'SEARCH_PLACEHOLDER': 'Search',
'PHAIDRA_IS': 'Phaidra is the repository for the permanent secure storage of digital assets at the University of Vienna.',
'rdam:P30004': 'Alternative identifier', | 'bf:ParallelTitle': 'Parallel title',
'bf:Note': 'Description',
'bf:Summary': 'Abstract',
'phaidra:Remark': 'Remark',
'arm:ConditionAssessment': 'Condition',
'phaidra:ReproductionNote': 'Reproduction note',
'phaidra:DigitizationNote': 'Digitization note',
'bf:tableOfContents': 'Table of contents',
'dce:subject': 'Keywords',
'dcterms:subject': 'Subject',
'dcterms:language': 'Language',
'schema:subtitleLanguage': 'Subtitle language',
'dcterms:type': 'Resource type',
'edm:hasType': 'Type / Genre',
'bf:physicalLocation': 'Physical location',
'bf:shelfMark': 'Call number',
'edm:rights': 'License',
'dce:rights': 'Rights statement',
'oaire:version': 'Version type',
'dcterms:accessRights': 'Access right',
'dcterms:date': 'Date',
'dcterms:created': 'Date created',
'dcterms:modified': 'Date modified',
'dcterms:available': 'Date available',
'dcterms:issued': 'Date issued',
'dcterms:valid': 'Date valid',
'dcterms:dateAccepted': 'Date accepted',
'dcterms:dateCopyrighted': 'Date copyrighted',
'dcterms:dateSubmitted': 'Date submitted',
'rdau:P60071': 'Date of production',
'phaidra:dateAccessioned': 'Date accessioned',
'schema:pageStart': 'From page',
'schema:pageEnd': 'To page',
'rdau:P60193': 'Is in series',
'rdau:P60101': 'Is contained in',
'rdau:P60101_rdau:P60193': 'Series',
'rdau:P60101_bf:provisionActivity': 'Publisher',
'bf:instanceOf': 'Instance of',
'rdau:P60227': 'Is motion picture adaptation of',
'bf:provisionActivity': 'Provision activity: Publication',
'cito:cites': 'Cites',
'cito:isCitedBy': 'Is cited by',
'rdfs:seeAlso': 'See also',
'dcterms:spatial': 'Depicted / Represented place',
'vra:placeOfCreation': 'Place of creation',
'vra:placeOfSite': 'Place of site',
'ebucore:hasRelatedEvent': 'Event',
'frapo:isOutputOf': 'Project',
'frapo:hasFundingAgency': 'Funder',
'rdax:P00009': 'Association',
'dcterms:provenance': 'Provenance',
'schema:numberOfPages': 'Number of pages',
'bf:soundCharacteristic': 'Sound characteristic',
'bf:supplementaryContent': 'Supplementary content',
'bf:awards': 'Award',
'dcterms:audience': 'Audience',
'rdau:P60059': 'Regional encoding',
'ebucore:filename': 'Filename',
'ebucore:hasMimeType': 'MIME Type',
'opaque:cco_accessionNumber': 'Accession number',
'schema:width': 'Width',
'schema:height': 'Height',
'schema:depth': 'Depth',
'schema:weight': 'Weight',
'vra:diameter': 'Diameter',
'schema:duration': 'Duration',
'bf:scale': 'Scale',
'schema:genre': 'Genre',
'dcterms:temporal': 'Temporal coverage',
'vra:hasTechnique': 'Technique',
'dce:format': 'Format',
'rdau:P60048': 'Carrier type',
'vra:material': 'Material',
'vra:hasInscription': 'Inscription',
'phaidra:systemTag': 'System tag',
'MEMBERS_DELETE_ALERT_CONTAINER': 'This object is a Container with {nrmembers} members. Only an empty Container can be deleted. If you want to delete this object, you need to remove all members from this Container or delete them first.',
'DELETE_OBJECT': 'Here you can delete object {pid}.',
'DELETE_OBJECT_CONFIRM': 'Are you sure you want to permanently delete object {pid}?',
'PUBLISHER_VERLAG': 'Publisher',
'SELECTION_LIMIT': 'Cannot select more than {limit} results.',
'OTHER_FEMININE': 'Other',
'SUBJECT_SECTION': 'Subject',
uwm_etheses: 'E-Theses',
uwm_etheses_note_2: 'Note 2',
uwm_etheses_note_1: 'Note 1',
uwm_etheses_show_abstract: 'Show abstract',
uwm_etheses_pdf_identical: 'PDF identical',
uwm_etheses_title_page_identical: 'Title page identical',
uwm_etheses_title_page: 'Title page',
uwm_etheses_consent_form: 'Consent form',
uwm_digitalbook: 'Digital Book',
uwm_digitalbook_alephurl: 'Aleph-URL',
uwm_digitalbook_medium: 'Medium',
uwm_digitalbook_release_notes: 'Publication Dates',
uwm_digitalbook_edition: 'Edition/ Print Run',
uwm_digitalbook_releaseyear: 'Publication Date',
uwm_digitalbook_publisher: 'Publisher',
uwm_digitalbook_publisherlocation: 'Publishing Address',
uwm_digitalbook_name_collection: 'Name of Collection/Monograph',
uwm_digitalbook_to_page: 'To Page',
uwm_digitalbook_from_page: 'From Page',
uwm_digitalbook_booklet: 'Number',
uwm_digitalbook_volume: 'Volume',
uwm_digitalbook_reihentitel: 'Series Title',
uwm_digitalbook_pagination: 'Pages or Volume',
uwm_digitalbook_name_magazine: 'Name of Publication',
uwm_provenience: 'Provenience',
uwm_provenience_contribute: 'Details about the source',
uwm_provenience_contribute_location: 'Location',
uwm_provenience_contribute_chronological: 'Time Coverage',
uwm_provenience_contribute_date_to: 'Date until',
uwm_provenience_contribute_date_from: 'Date from',
uwm_provenience_contribute_entity: 'Personal or Institutional Data',
uwm_provenience_contribute_entity_institution: 'Institution',
uwm_provenience_contribute_entity_type: 'Type',
uwm_provenience_contribute_entity_title2: 'Title',
uwm_provenience_contribute_entity_title1: 'Title',
uwm_provenience_contribute_entity_lastname: 'Lastname',
uwm_provenience_contribute_entity_firstname: 'Firstname',
uwm_provenience_contribute_role: 'Role',
uwm_provenience_contribute_comment: 'Details about the Source',
uwm_provenience_contribute_resource: 'Digital or other source',
uwm_histkult: 'Contextual Allegation',
uwm_histkult_gps: 'Geograph. Coordinates',
uwm_histkult_note: 'Note',
uwm_histkult_stamp: 'Stamp',
uwm_histkult_reference_number: 'Reference Number',
uwm_histkult_reference_number_number: 'Number',
uwm_histkult_reference_number_reference: 'Reference',
uwm_histkult_dimensions: 'Dimensions',
uwm_histkult_dimensions_diameter: 'Circumference',
uwm_histkult_dimensions_height: 'Height',
uwm_histkult_dimensions_width: 'Width',
uwm_histkult_dimensions_length: 'Length',
uwm_histkult_dimensions_dimension_unit: 'Measuring Unit',
uwm_histkult_dimensions_resource: 'Source',
uwm_histkult_inscription: 'Inscription',
uwm_organization: 'Association',
uwm_organization_further_allocation: 'additional allocation',
uwm_organization_curriculum: 'Study',
uwm_organization_curriculum_kennzahl: 'Study ID',
uwm_organization_curriculum_spl: 'Study Program Direction',
uwm_organization_orgassignment: 'Organization Association',
uwm_organization_orgassignment_department: 'Institute',
uwm_organization_orgassignment_faculty: 'Faculty',
uwm_organization_approbation_period: 'Date of approbation period',
uwm_organization_hoschtyp: 'Type of publication',
uwm_classification: 'Classification',
uwm_classification_keyword: 'Keywords',
uwm_classification_description: 'Description or Additional Data',
uwm_classification_taxonpath: 'Classifications (Classes, Subclasses)',
uwm_classification_taxonpath_taxon: 'Path',
uwm_classification_taxonpath_source: 'Source',
uwm_classification_purpose: 'Purpose',
uwm_annotation: 'Comments',
uwm_annotation_annotations: 'Comments',
uwm_annotation_annotations_description: 'Description or Additional Data',
uwm_annotation_annotations_date: 'Date',
uwm_annotation_annotations_entity: 'Entity / Personal data',
uwm_annotation_annotations_entity_type: 'Type',
uwm_annotation_annotations_entity_title2: 'Title',
uwm_annotation_annotations_entity_title1: 'Title',
uwm_annotation_annotations_entity_institution: 'Institution',
uwm_annotation_annotations_entity_lastname: 'Lastname',
uwm_annotation_annotations_entity_firstname: 'Firstname',
uwm_rights: 'Rights & Licences',
uwm_rights_infoeurepoembargo: 'OpenAIRE Embargo End',
uwm_rights_infoeurepoaccess: 'OpenAIRE Access Rights',
uwm_rights_description: 'Description or Additional Data',
uwm_rights_license: 'Licence Selected',
uwm_rights_copyright: 'Copyright and Other Restrictions',
uwm_rights_cost: 'Costs',
uwm_educational: 'Educational',
uwm_educational_educationals: 'Educational',
uwm_educational_educationals_language: 'Language',
uwm_educational_educationals_description: 'Description',
uwm_educational_educationals_learningtime: 'Typical Learning Time',
uwm_educational_educationals_difficulty: 'Difficulty Level',
uwm_educational_educationals_agerange: 'Typical Age Range',
uwm_educational_educationals_context: 'Didactic Context of Use',
uwm_educational_educationals_enduserrole: 'Intended End User Role',
uwm_educational_educationals_interactivitylevel: 'Interactivity Level',
uwm_educational_educationals_interactivitytype: 'Interactivity Type',
uwm_educational_educationals_learningresourcetype: 'Type of Teaching and Educational Resource',
uwm_technical: 'Technical Data',
uwm_technical_duration: 'Duration',
uwm_technical_otherrequirements: 'Requirements for the Use of the Object',
uwm_technical_installremarks: 'Installation Guide',
uwm_technical_requirement: 'Requirements',
uwm_technical_requirement_orcomposite: 'Or-Composite',
uwm_technical_requirement_orcomposite_maxversion: 'Maximum version',
uwm_technical_requirement_orcomposite_minversion: 'Minimum version',
uwm_technical_requirement_orcomposite_name: 'Name',
uwm_technical_requirement_orcomposite_type: 'Type',
uwm_technical_location: 'Permanent Link',
uwm_technical_size: 'Size',
uwm_technical_format: 'Format',
uwm_lifecycle: 'Lifecycle',
uwm_lifecycle_metadataqualitycheck: 'Metadata quality check',
uwm_lifecycle_infoeurepoversion: 'OpenAIRE Version Type',
uwm_lifecycle_contribute: 'Contribute',
uwm_lifecycle_contribute_date: 'Date',
uwm_lifecycle_contribute_entity: 'Entity / Personal data',
uwm_lifecycle_contribute_entity_isni: 'ISNI',
uwm_lifecycle_contribute_entity_lcnaf: 'LCNAF',
uwm_lifecycle_contribute_entity_gnd: 'GND',
uwm_lifecycle_contribute_entity_wdq: 'WDQ',
uwm_lifecycle_contribute_entity_viaf: 'VIAF',
uwm_lifecycle_contribute_entity_orcid: 'ORCID',
uwm_lifecycle_contribute_entity_student_id: 'Student ID',
uwm_lifecycle_contribute_entity_type: 'Type',
uwm_lifecycle_contribute_entity_title2: 'Title',
uwm_lifecycle_contribute_entity_title1: 'Title',
uwm_lifecycle_contribute_entity_institution: 'Institution',
uwm_lifecycle_contribute_entity_lastname: 'Lastname',
uwm_lifecycle_contribute_entity_firstname: 'Firstname',
uwm_lifecycle_contribute_ext_role: 'Different role',
uwm_lifecycle_contribute_role: 'Role',
uwm_lifecycle_peer_reviewed: 'Peer Reviewed',
uwm_lifecycle_status: 'Status',
uwm_lifecycle_version: 'Version',
uwm_lifecycle_upload_date: 'Phaidra Upload Date',
uwm_general: 'General',
uwm_general_identifiers: 'Identifiers',
uwm_general_identifiers_identifier: 'Identifier',
uwm_general_identifiers_resource: 'Resource',
uwm_general_irdata: 'Institutional Repository',
uwm_general_coverage: 'Coverage',
uwm_general_keyword: 'Keywords',
uwm_general_description: 'Description',
uwm_general_language: 'Language',
uwm_general_alt_title: 'Alternative Title',
uwm_general_subtitle: 'Subtitle',
uwm_general_title: 'Title',
uwm_general_identifier: 'Identifier',
lang_aa: 'Afar',
lang_ab: 'Abkhazian',
lang_ae: 'Avestan',
lang_af: 'Afrikaans',
lang_ak: 'Akan',
lang_am: 'Amharic',
lang_an: 'Aragonese',
lang_ar: 'Arabic',
lang_as: 'Assamese',
lang_av: 'Avaric',
lang_ay: 'Aymara',
lang_az: 'Azerbaijani',
lang_ba: 'Bashkir',
lang_be: 'Byelorussian',
lang_bg: 'Bulgarian',
lang_bh: 'Bihari',
lang_bi: 'Bislama',
lang_bm: 'Bambara',
lang_bn: 'Bengali',
lang_bo: 'Tibetan',
lang_br: 'Breton',
lang_bs: 'Bosnian',
lang_ca: 'Catalan',
lang_ce: 'Chechen',
lang_ch: 'Chamorro',
lang_co: 'Corsican',
lang_cr: 'Cree',
lang_cs: 'Czech',
lang_cu: 'Church Slavic; Old Slavonic;',
lang_cv: 'Chuvash',
lang_cy: 'Welsh',
lang_da: 'Danish',
lang_de: 'German',
lang_dv: 'Divehi; Dhivehi; Maldivian',
lang_dz: 'Bhutani',
lang_ee: 'Ewe',
lang_el: 'Greek',
lang_en: 'English',
lang_eo: 'Esperanto',
lang_es: 'Spanish',
lang_et: 'Estonian',
lang_eu: 'Basque',
lang_fa: 'Persian',
lang_ff: 'Fulah',
lang_fi: 'Finnish',
lang_fj: 'Fiji',
lang_fo: 'Faeroese',
lang_fr: 'French',
lang_fy: 'Frisian',
lang_ga: 'Irish',
lang_gd: 'Scots Gaelic',
lang_gl: 'Galician',
lang_gn: 'Guarani',
lang_gu: 'Gujarati',
lang_gv: 'Manx',
lang_ha: 'Hausa',
lang_he: 'Hebrew',
lang_hi: 'Hindi',
lang_hr: 'Croatian',
lang_ht: 'Haitian',
lang_hu: 'Hungarian',
lang_hy: 'Armenian',
lang_hz: 'Herero',
lang_ia: 'Interlingua',
lang_id: 'Indonesian',
lang_ie: 'Interlingue',
lang_ig: 'Igbo',
lang_ii: 'Sichuan Yi; Nuosu',
lang_ik: 'Inupiak',
lang_in: 'former Indonesian',
lang_io: 'Ido',
lang_is: 'Icelandic',
lang_it: 'Italian',
lang_iu: 'Inuktitut (Eskimo)',
lang_iw: 'former Hebrew',
lang_ja: 'Japanese',
lang_ji: 'former Yiddish',
lang_jv: 'Javanese',
lang_jw: 'Javanese',
lang_ka: 'Georgian',
lang_kg: 'Kongo',
lang_ki: 'Kikuyu; Gikuyu',
lang_kj: 'Kuanyama; Kwanyama',
lang_kk: 'Kazakh',
lang_kl: 'Greenlandic',
lang_km: 'Cambodian',
lang_kn: 'Kannada',
lang_ko: 'Korean',
lang_kr: 'Kanuri',
lang_ks: 'Kashmiri',
lang_ku: 'Kurdish',
lang_kv: 'Komi',
lang_kw: 'Cornish',
lang_ky: 'Kirghiz',
lang_la: 'Latin',
lang_lb: 'Luxembourgish; Letzeburgesch',
lang_lg: 'Ganda',
lang_li: 'Limburgan; Limburger; Limburgish',
lang_ln: 'Lingala',
lang_lo: 'Laothian',
lang_lt: 'Lithuanian',
lang_lu: 'Luba-Katanga',
lang_lv: 'Latvian, Lettish',
lang_mg: 'Malagasy',
lang_mh: 'Marshallese',
lang_mi: 'Maori',
lang_mk: 'Macedonian',
lang_ml: 'Malayalam',
lang_mn: 'Mongolian',
lang_mo: 'Moldavian',
lang_mr: 'Marathi',
lang_ms: 'Malay',
lang_mt: 'Maltese',
lang_my: 'Burmese',
lang_na: 'Nauru',
lang_nb: 'Bokmål, Norwegian; Norwegian Bokmål',
lang_nd: 'Ndebele, North; North Ndebele',
lang_ne: 'Nepali',
lang_ng: 'Ndonga',
lang_nl: 'Dutch',
lang_nn: 'Norwegian Nynorsk; Nynorsk, Norwegian',
lang_no: 'Norwegian',
lang_nr: 'Ndebele, South; South Ndebele',
lang_nv: 'Navajo; Navaho',
lang_ny: 'Chichewa; Chewa; Nyanja',
lang_oc: 'Occitan',
lang_oj: 'Ojibwa',
lang_om: '(Afan) Oromo',
lang_or: 'Oriya',
lang_os: 'Ossetian; Ossetic',
lang_pa: 'Punjabi',
lang_pi: 'Pali',
lang_pl: 'Polish',
lang_ps: 'Pashto, Pushto',
lang_pt: 'Portuguese',
lang_qu: 'Quechua',
lang_rm: 'Rhaeto-Romance',
lang_rn: 'Kirundi',
lang_ro: 'Romanian',
lang_ru: 'Russian',
lang_rw: 'Kinyarwanda',
lang_sa: 'Sanskrit',
lang_sc: 'Sardinian',
lang_sd: 'Sindhi',
lang_se: 'Northern Sami',
lang_sg: 'Sango',
lang_sh: 'Serbo-Croatian',
lang_si: 'Singhalese',
lang_sk: 'Slovak',
lang_sl: 'Slovenian',
lang_sm: 'Samoan',
lang_sn: 'Shona',
lang_so: 'Somali',
lang_sq: 'Albanian',
lang_sr: 'Serbian',
lang_ss: 'Siswati',
lang_st: 'Sesotho',
lang_su: 'Sundanese',
lang_sv: 'Swedish',
lang_sw: 'Swahili',
lang_ta: 'Tamil',
lang_te: 'Telugu',
lang_tg: 'Tajik',
lang_th: 'Thai',
lang_ti: 'Tigrinya',
lang_tk: 'Turkmen',
lang_tl: 'Tagalog',
lang_tn: 'Setswana',
lang_to: 'Tonga',
lang_tr: 'Turkish',
lang_ts: 'Tsonga',
lang_tt: 'Tatar',
lang_tw: 'Twi',
lang_ty: 'Tahitian',
lang_ug: 'Uigur',
lang_uk: 'Ukrainian',
lang_ur: 'Urdu',
lang_uz: 'Uzbek',
lang_vi: 'Vietnamese',
lang_vo: 'Volapuk',
lang_wa: 'Walloon',
lang_wo: 'Wolof',
lang_xh: 'Xhosa',
lang_xx: 'Not applicable',
lang_yi: 'Yiddish',
lang_yo: 'Yoruba',
lang_za: 'Zhuang',
lang_zh: 'Chinese',
lang_zu: 'Zulu'
} |
'bf:Title': 'Title', |
dataframe.rs | use super::ColumnType;
use super::DataFrame;
use super::Series;
use crate::series::floats::SeriesF64;
use crate::series::integers::SeriesI32;
use crate::series::strings::SeriesSTR;
use std::collections::HashMap;
use wasm_bindgen::prelude::*;
impl DataFrame {
pub fn new_rs(
index: Vec<String>,
data: HashMap<String, Series>,
num_rows: usize,
num_cols: usize,
) -> DataFrame {
DataFrame {
index,
data,
num_rows,
num_cols,
}
}
}
#[wasm_bindgen]
impl DataFrame {
#[wasm_bindgen(constructor)]
pub fn new(vec_series: Vec<JsValue>) -> DataFrame {
// Get the length of the first Series to establish the expected row count
let mut num_rows = 0;
let first_series_int: Result<SeriesI32, serde_wasm_bindgen::Error> =
serde_wasm_bindgen::from_value(vec_series[0].clone());
if let Ok(series_int) = first_series_int {
num_rows = series_int.len()
}
let first_series_float: Result<SeriesF64, serde_wasm_bindgen::Error> =
serde_wasm_bindgen::from_value(vec_series[0].clone());
if let Ok(series_float) = first_series_float {
num_rows = series_float.len()
}
let first_series_str: Result<SeriesSTR, serde_wasm_bindgen::Error> =
serde_wasm_bindgen::from_value(vec_series[0].clone());
if let Ok(series_str) = first_series_str {
num_rows = series_str.len()
}
let mut series_data: HashMap<String, Series> = HashMap::new();
let mut index: Vec<String> = Vec::new();
let mut num_cols: usize = 0;
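// Try to deserialize each incoming JsValue as an integer, float, or string
// Series (in that order); every column must match the row count of the
// first Series or we panic.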
for ser in &vec_series {
let as_int: Result<SeriesI32, serde_wasm_bindgen::Error> =
serde_wasm_bindgen::from_value(ser.clone());
if let Ok(x) = as_int {
let col_name = x.name();
if x.len() == num_rows {
series_data
.entry(col_name.clone())
.or_insert(Series::Integers(x));
index.push(col_name);
num_cols += 1;
continue;
} else {
panic!("Series length does not match");
}
}
let as_float: Result<SeriesF64, serde_wasm_bindgen::Error> =
serde_wasm_bindgen::from_value(ser.clone());
if let Ok(x) = as_float {
let col_name = x.name();
if x.len() == num_rows {
series_data
.entry(col_name.clone())
.or_insert(Series::Floats(x));
index.push(col_name);
num_cols += 1;
continue;
} else {
panic!("Series length does not match");
}
}
let as_str: Result<SeriesSTR, serde_wasm_bindgen::Error> =
serde_wasm_bindgen::from_value(ser.clone());
if let Ok(x) = as_str {
let col_name = x.name();
if x.len() == num_rows {
series_data
.entry(col_name.clone())
.or_insert(Series::Strings(x));
index.push(col_name);
num_cols += 1;
continue;
} else {
panic!("Series length does not match");
}
}
}
DataFrame {
data: series_data,
index,
num_rows,
num_cols,
}
}
#[wasm_bindgen(js_name = columns)]
pub fn show_columns(&self) -> JsValue {
let res: Vec<String> = self.index.clone();
serde_wasm_bindgen::to_value(&res).unwrap()
}
#[wasm_bindgen(js_name = dataTypes)]
pub fn get_datatypes(&self) -> JsValue {
let mut res: HashMap<String, ColumnType> = HashMap::new();
for (name, ser) in &self.data {
match ser {
Series::Floats(_value) => {
res.entry(name.clone()).or_insert(ColumnType::FLOAT);
}
Series::Integers(_value) => {
res.entry(name.clone()).or_insert(ColumnType::INTEGER);
}
Series::Strings(_value) => {
res.entry(name.clone()).or_insert(ColumnType::STR);
}
}
}
serde_wasm_bindgen::to_value(&res).unwrap()
}
#[wasm_bindgen(js_name = append)]
pub fn add_column(&mut self, datatype: ColumnType, series: JsValue) {
match datatype {
ColumnType::FLOAT => {
let ser: SeriesF64 = serde_wasm_bindgen::from_value(series).unwrap();
self.data.entry(ser.name()).or_insert(Series::Floats(ser));
}
ColumnType::INTEGER => {
let ser: SeriesI32 = serde_wasm_bindgen::from_value(series).unwrap();
self.data.entry(ser.name()).or_insert(Series::Integers(ser));
}
ColumnType::STR => {
let ser: SeriesSTR = serde_wasm_bindgen::from_value(series).unwrap();
self.data.entry(ser.name()).or_insert(Series::Strings(ser));
}
}
}
#[wasm_bindgen(js_name = rowsCount)]
pub fn num_rows(&self) -> usize {
self.num_rows
}
#[wasm_bindgen(js_name = columnsCount)]
pub fn num_cols(&self) -> usize {
self.num_cols
}
#[wasm_bindgen(js_name = size)]
pub fn dataframe_size(&self) -> usize |
pub fn loc(&self, column_name: String) -> String {
let res;
let map = &self.data;
let ser = &map[&column_name];
match ser {
Series::Integers(x) => {
res = x.show().to_string();
}
Series::Floats(x) => {
res = x.show().to_string();
}
Series::Strings(x) => {
res = x.show().to_string();
}
};
res
}
pub fn ilocr(&self, row: usize) -> js_sys::Array {
let array = js_sys::Array::new();
let map = &self.data;
self.index.iter().for_each(|f| {
let ser = &map[f];
match ser {
Series::Integers(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(row)).unwrap();
array.push(&val);
}
Series::Floats(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(row)).unwrap();
array.push(&val);
}
Series::Strings(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(row)).unwrap();
array.push(&val);
}
};
});
array
}
pub fn ilocc(&self, col: usize) -> JsValue {
let val: JsValue;
let map = &self.data;
let col_idx_name = &self.index[col];
let ser = &map[col_idx_name];
match ser {
Series::Integers(x) => {
val = x.data();
}
Series::Floats(x) => {
val = x.data();
}
Series::Strings(x) => {
val = x.data();
}
};
val
}
#[wasm_bindgen(getter,js_name = display)]
pub fn show(&self) -> String {
let map = &self.data;
let mut res: String = String::from("");
self.index.iter().for_each(|f| {
let ser = &map[f];
match &ser {
Series::Integers(value) => {
res.push_str(&value.show());
}
Series::Floats(value) => {
res.push_str(&value.show());
}
Series::Strings(value) => {
res.push_str(&value.show());
}
}
});
res
}
#[wasm_bindgen(js_name = head)]
pub fn head(&self, value: Option<usize>) -> js_sys::Array {
let df = js_sys::Array::new();
let map = &self.data;
for i in 0..value.unwrap_or(5) {
let array_row = js_sys::Array::new();
self.index.iter().for_each(|f| {
let ser = &map[f];
match ser {
Series::Integers(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(i)).unwrap();
array_row.push(&val);
}
Series::Floats(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(i)).unwrap();
array_row.push(&val);
}
Series::Strings(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(i)).unwrap();
array_row.push(&val);
}
};
});
df.push(&array_row);
}
df
}
#[wasm_bindgen(js_name = tail)]
pub fn tail(&self, value: Option<usize>) -> js_sys::Array {
let n = self.num_rows();
let df = js_sys::Array::new();
let map = &self.data;
for i in (n - value.unwrap_or(5)..n).rev() {
let array_row = js_sys::Array::new();
self.index.iter().for_each(|f| {
let ser = &map[f];
match ser {
Series::Integers(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(i)).unwrap();
array_row.push(&val);
}
Series::Floats(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(i)).unwrap();
array_row.push(&val);
}
Series::Strings(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(i)).unwrap();
array_row.push(&val);
}
};
});
df.push(&array_row);
}
df
}
#[wasm_bindgen(getter,js_name = displayTable)]
pub fn show_table(&self) -> js_sys::Array {
let n = self.num_rows();
let array_col = js_sys::Array::new();
let map = &self.data;
for i in 0..n {
let array_row = js_sys::Array::new();
self.index.iter().for_each(|f| {
let ser = &map[f];
match ser {
Series::Integers(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(i)).unwrap();
array_row.push(&val);
}
Series::Floats(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(i)).unwrap();
array_row.push(&val);
}
Series::Strings(x) => {
let val = serde_wasm_bindgen::to_value(&x.get(i)).unwrap();
array_row.push(&val);
}
};
});
array_col.push(&array_row);
}
array_col
}
}
#[wasm_bindgen(js_name = readcsv)]
pub async fn read_csv(data: web_sys::File) -> Result<DataFrame, JsValue> {
let jsdata = wasm_bindgen_futures::JsFuture::from(data.text())
.await
.unwrap_throw();
let data = jsdata.as_string().unwrap_throw();
let mut reader = csv::Reader::from_reader(data.as_bytes());
let headers: Vec<String> = reader
.headers()
.unwrap_throw()
.clone()
.into_iter()
.map(|x| x.to_string())
.collect();
let mut data_map: HashMap<String, Series> = HashMap::new();
let mut rtc_map: HashMap<usize, Vec<String>> = HashMap::new();
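// Collect each CSV column's raw values keyed by column index, then infer
// the column type from its first value: i32 first, then f64, falling back
// to String. Note that this inference and the column rebuild below run once
// per CSV record, over the values accumulated so far.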
reader.records().for_each(|row| {
let row_res = row.unwrap_throw();
row_res.iter().enumerate().for_each(|(index, value)| {
rtc_map
.entry(index)
.or_insert(Vec::new())
.push(value.to_string());
});
for key in rtc_map.keys() {
let col_name = headers[*key].clone();
let as_int = rtc_map[key][0].parse::<i32>();
if let Ok(_x) = as_int {
let int_data: Vec<i32> = rtc_map[key]
.iter()
.map(|value| value.parse::<i32>().unwrap())
.collect();
data_map.insert(
headers[*key].clone(),
Series::Integers(SeriesI32::new(
serde_wasm_bindgen::to_value(&col_name).unwrap(),
serde_wasm_bindgen::to_value(&int_data).unwrap(),
)),
);
continue;
}
let as_float = rtc_map[key][0].parse::<f64>();
if let Ok(_x) = as_float {
let float_data: Vec<f64> = rtc_map[key]
.iter()
.map(|value| value.parse::<f64>().unwrap())
.collect();
data_map.insert(
headers[*key].clone(),
Series::Floats(SeriesF64::new(
serde_wasm_bindgen::to_value(&col_name).unwrap(),
serde_wasm_bindgen::to_value(&float_data).unwrap(),
)),
);
continue;
}
let as_string = rtc_map[key][0].parse::<String>();
if let Ok(_x) = as_string {
data_map.insert(
headers[*key].clone(),
Series::Strings(SeriesSTR::new(
serde_wasm_bindgen::to_value(&col_name).unwrap(),
serde_wasm_bindgen::to_value(&rtc_map[key]).unwrap(),
)),
);
continue;
}
}
});
let num_cols = data_map.keys().len();
let num_rows = rtc_map[&0].len();
Ok(DataFrame {
data: data_map,
index: headers,
num_rows,
num_cols,
})
}
| {
self.data.iter().count()
} |
App.tsx | import { FC } from 'react'
import GlobalStyle from './App.styles'
export const App: FC = ({ children }) => {
return (
<main>
{children}
<GlobalStyle />
</main>
) | }
export default App |
|
bitmex.py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitmex(Exchange):
def describe(self):
return self.deep_extend(super(bitmex, self).describe(), {
'id': 'bitmex',
'name': 'BitMEX',
'countries': ['SC'], # Seychelles
'version': 'v1',
'userAgent': None,
'rateLimit': 2000,
'pro': True,
'has': {
'CORS': None,
'spot': False,
'margin': False,
'swap': None, # has but not fully implemented
'future': None, # has but not fully implemented
'option': None, # has but not fully implemented
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchIndexOHLCV': False,
'fetchLedger': True,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': 'emulated',
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'1h': '1h',
'1d': '1d',
},
'urls': {
'test': {
'public': 'https://testnet.bitmex.com',
'private': 'https://testnet.bitmex.com',
},
'logo': 'https://user-images.githubusercontent.com/1294454/27766319-f653c6e6-5ed4-11e7-933d-f0bc3699ae8f.jpg',
'api': {
'public': 'https://www.bitmex.com',
'private': 'https://www.bitmex.com',
},
'www': 'https://www.bitmex.com',
'doc': [
'https://www.bitmex.com/app/apiOverview',
'https://github.com/BitMEX/api-connectors/tree/master/official-http',
],
'fees': 'https://www.bitmex.com/app/fees',
'referral': 'https://www.bitmex.com/register/upZpOX',
},
'api': {
'public': {
'get': [
'announcement',
'announcement/urgent',
'funding',
'instrument',
'instrument/active',
'instrument/activeAndIndices',
'instrument/activeIntervals',
'instrument/compositeIndex',
'instrument/indices',
'insurance',
'leaderboard',
'liquidation',
'orderBook',
'orderBook/L2',
'quote',
'quote/bucketed',
'schema',
'schema/websocketHelp',
'settlement',
'stats',
'stats/history',
'trade',
'trade/bucketed',
],
},
'private': {
'get': [
'apiKey',
'chat',
'chat/channels',
'chat/connected',
'execution',
'execution/tradeHistory',
'notification',
'order',
'position',
'user',
'user/affiliateStatus',
'user/checkReferralCode',
'user/commission',
'user/depositAddress',
'user/executionHistory',
'user/margin',
'user/minWithdrawalFee',
'user/wallet',
'user/walletHistory',
'user/walletSummary',
],
'post': [
'apiKey',
'apiKey/disable',
'apiKey/enable',
'chat',
'order',
'order/bulk',
'order/cancelAllAfter',
'order/closePosition',
'position/isolate',
'position/leverage',
'position/riskLimit',
'position/transferMargin',
'user/cancelWithdrawal',
'user/confirmEmail',
'user/confirmEnableTFA',
'user/confirmWithdrawal',
'user/disableTFA',
'user/logout',
'user/logoutAll',
'user/preferences',
'user/requestEnableTFA',
'user/requestWithdrawal',
],
'put': [
'order',
'order/bulk',
'user',
],
'delete': [
'apiKey',
'order',
'order/all',
],
},
},
'exceptions': {
'exact': {
'Invalid API Key.': AuthenticationError,
'This key is disabled.': PermissionDenied,
'Access Denied': PermissionDenied,
'Duplicate clOrdID': InvalidOrder,
'orderQty is invalid': InvalidOrder,
'Invalid price': InvalidOrder,
'Invalid stopPx for ordType': InvalidOrder,
},
'broad': {
'Signature not valid': AuthenticationError,
'overloaded': ExchangeNotAvailable,
'Account has insufficient Available Balance': InsufficientFunds,
'Service unavailable': ExchangeNotAvailable, # {"error":{"message":"Service unavailable","name":"HTTPError"}}
'Server Error': ExchangeError, # {"error":{"message":"Server Error","name":"HTTPError"}}
'Unable to cancel order due to existing state': InvalidOrder,
},
},
'precisionMode': TICK_SIZE,
'options': {
# https://blog.bitmex.com/api_announcement/deprecation-of-api-nonce-header/
# https://github.com/ccxt/ccxt/issues/4789
'api-expires': 5, # in seconds
'fetchOHLCVOpenTimestamp': True,
},
'commonCurrencies': {
'USDt': 'USDT',
'XBt': 'BTC',
'XBT': 'BTC',
},
})
def fetch_markets(self, params={}):
response = self.publicGetInstrumentActiveAndIndices(params)
#
# {
# "symbol": "LTCUSDT",
# "rootSymbol": "LTC",
# "state": "Open",
# "typ": "FFWCSX",
# "listing": "2021-11-10T04:00:00.000Z",
# "front": "2021-11-10T04:00:00.000Z",
# "expiry": null,
# "settle": null,
# "listedSettle": null,
# "relistInterval": null,
# "inverseLeg": "",
# "sellLeg": "",
# "buyLeg": "",
# "optionStrikePcnt": null,
# "optionStrikeRound": null,
# "optionStrikePrice": null,
# "optionMultiplier": null,
# "positionCurrency": "LTC",
# "underlying": "LTC",
# "quoteCurrency": "USDT",
# "underlyingSymbol": "LTCT=",
# "reference": "BMEX",
# "referenceSymbol": ".BLTCT",
# "calcInterval": null,
# "publishInterval": null,
# "publishTime": null,
# "maxOrderQty": 1000000000,
# "maxPrice": 1000000,
# "lotSize": 1000,
# "tickSize": 0.01,
# "multiplier": 100,
# "settlCurrency": "USDt",
# "underlyingToPositionMultiplier": 10000,
# "underlyingToSettleMultiplier": null,
# "quoteToSettleMultiplier": 1000000,
# "isQuanto": False,
# "isInverse": False,
# "initMargin": 0.03,
# "maintMargin": 0.015,
# "riskLimit": 1000000000000,
# "riskStep": 1000000000000,
# "limit": null,
# "capped": False,
# "taxed": True,
# "deleverage": True,
# "makerFee": -0.0001,
# "takerFee": 0.0005,
# "settlementFee": 0,
# "insuranceFee": 0,
# "fundingBaseSymbol": ".LTCBON8H",
# "fundingQuoteSymbol": ".USDTBON8H",
# "fundingPremiumSymbol": ".LTCUSDTPI8H",
# "fundingTimestamp": "2022-01-14T20:00:00.000Z",
# "fundingInterval": "2000-01-01T08:00:00.000Z",
# "fundingRate": 0.0001,
# "indicativeFundingRate": 0.0001,
# "rebalanceTimestamp": null,
# "rebalanceInterval": null,
# "openingTimestamp": "2022-01-14T17:00:00.000Z",
# "closingTimestamp": "2022-01-14T18:00:00.000Z",
# "sessionInterval": "2000-01-01T01:00:00.000Z",
# "prevClosePrice": 138.511,
# "limitDownPrice": null,
# "limitUpPrice": null,
# "bankruptLimitDownPrice": null,
# "bankruptLimitUpPrice": null,
# "prevTotalVolume": 12699024000,
# "totalVolume": 12702160000,
# "volume": 3136000,
# "volume24h": 114251000,
# "prevTotalTurnover": 232418052349000,
# "totalTurnover": 232463353260000,
# "turnover": 45300911000,
# "turnover24h": 1604331340000,
# "homeNotional24h": 11425.1,
# "foreignNotional24h": 1604331.3400000003,
# "prevPrice24h": 135.48,
# "vwap": 140.42165,
# "highPrice": 146.42,
# "lowPrice": 135.08,
# "lastPrice": 144.36,
# "lastPriceProtected": 144.36,
# "lastTickDirection": "MinusTick",
# "lastChangePcnt": 0.0655,
# "bidPrice": 143.75,
# "midPrice": 143.855,
# "askPrice": 143.96,
# "impactBidPrice": 143.75,
# "impactMidPrice": 143.855,
# "impactAskPrice": 143.96,
# "hasLiquidity": True,
# "openInterest": 38103000,
# "openValue": 547963053300,
# "fairMethod": "FundingRate",
# "fairBasisRate": 0.1095,
# "fairBasis": 0.004,
# "fairPrice": 143.811,
# "markMethod": "FairPrice",
# "markPrice": 143.811,
# "indicativeTaxRate": null,
# "indicativeSettlePrice": 143.807,
# "optionUnderlyingPrice": null,
# "settledPriceAdjustmentRate": null,
# "settledPrice": null,
# "timestamp": "2022-01-14T17:49:55.000Z"
# }
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'symbol')
baseId = self.safe_string(market, 'underlying')
quoteId = self.safe_string(market, 'quoteCurrency')
settleId = self.safe_string(market, 'settlCurrency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
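        # a perpetual swap is detected when the instrument id is just the
        # concatenation of the base and quote currency ids, e.g. XBTUSD == XBT + USD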
basequote = baseId + quoteId
swap = (id == basequote)
# 'positionCurrency' may be empty("", as Bitmex currently returns for ETHUSD)
# so let's take the quote currency first and then adjust if needed
type = None
future = False
prediction = False
index = False
symbol = base + '/' + quote + ':' + settle
expiryDatetime = self.safe_string(market, 'expiry')
expiry = self.parse8601(expiryDatetime)
inverse = self.safe_value(market, 'isInverse')
status = self.safe_string(market, 'state')
active = status != 'Unlisted'
if swap:
type = 'swap'
elif id.find('B_') >= 0:
prediction = True
type = 'prediction'
symbol = id
elif expiry is not None:
future = True
type = 'future'
symbol = symbol + '-' + self.yymmdd(expiry)
else:
index = True
type = 'index'
symbol = id
active = False
positionId = self.safe_string_2(market, 'positionCurrency', 'quoteCurrency')
position = self.safe_currency_code(positionId)
positionIsQuote = (position == quote)
maxOrderQty = self.safe_number(market, 'maxOrderQty')
contract = not index
initMargin = self.safe_string(market, 'initMargin', '1')
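        # the maximum leverage is the inverse of the initial margin requirement,
        # e.g. an initMargin of 0.03 allows roughly 33x leverage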
maxLeverage = self.parse_number(Precise.string_div('1', initMargin))
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': type,
'spot': False,
'margin': False,
'swap': swap,
'future': future,
'option': False,
'prediction': prediction,
'index': index,
'active': active,
'contract': contract,
'linear': not inverse if contract else None,
'inverse': inverse if contract else None,
'taker': self.safe_number(market, 'takerFee'),
'maker': self.safe_number(market, 'makerFee'),
'contractSize': self.safe_number(market, 'multiplier'),
'expiry': expiry,
'expiryDatetime': expiryDatetime,
'strike': self.safe_number(market, 'optionStrikePrice'),
'optionType': None,
'precision': {
'amount': self.safe_number(market, 'lotSize'),
'price': self.safe_number(market, 'tickSize'),
},
'limits': {
'leverage': {
'min': self.parse_number('1') if contract else None,
'max': maxLeverage if contract else None,
},
'amount': {
'min': None,
'max': None if positionIsQuote else maxOrderQty,
},
'price': {
'min': None,
'max': self.safe_number(market, 'maxPrice'),
},
'cost': {
'min': None,
'max': maxOrderQty if positionIsQuote else None,
},
},
'info': market,
})
return result
def parse_balance(self, response):
#
# [
# {
# "account":1455728,
# "currency":"XBt",
# "riskLimit":1000000000000,
# "prevState":"",
# "state":"",
# "action":"",
# "amount":263542,
# "pendingCredit":0,
# "pendingDebit":0,
# "confirmedDebit":0,
# "prevRealisedPnl":0,
# "prevUnrealisedPnl":0,
# "grossComm":0,
# "grossOpenCost":0,
# "grossOpenPremium":0,
# "grossExecCost":0,
# "grossMarkValue":0,
# "riskValue":0,
# "taxableMargin":0,
# "initMargin":0,
# "maintMargin":0,
# "sessionMargin":0,
# "targetExcessMargin":0,
# "varMargin":0,
# "realisedPnl":0,
# "unrealisedPnl":0,
# "indicativeTax":0,
# "unrealisedProfit":0,
# "syntheticMargin":null,
# "walletBalance":263542,
# "marginBalance":263542,
# "marginBalancePcnt":1,
# "marginLeverage":0,
# "marginUsedPcnt":0,
# "excessMargin":263542,
# "excessMarginPcnt":1,
# "availableMargin":263542,
# "withdrawableMargin":263542,
# "timestamp":"2020-08-03T12:01:01.246Z",
# "grossLastValue":0,
# "commission":null
# }
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
free = self.safe_string(balance, 'availableMargin')
total = self.safe_string(balance, 'marginBalance')
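            # BitMEX reports BTC balances in XBt(satoshis), scale them down to BTC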
if code == 'BTC':
free = Precise.string_div(free, '1e8')
total = Precise.string_div(total, '1e8')
account['free'] = free
account['total'] = total
result[code] = account
return self.safe_balance(result)
def | (self, params={}):
self.load_markets()
request = {
'currency': 'all',
}
response = self.privateGetUserMargin(self.extend(request, params))
#
# [
# {
# "account":1455728,
# "currency":"XBt",
# "riskLimit":1000000000000,
# "prevState":"",
# "state":"",
# "action":"",
# "amount":263542,
# "pendingCredit":0,
# "pendingDebit":0,
# "confirmedDebit":0,
# "prevRealisedPnl":0,
# "prevUnrealisedPnl":0,
# "grossComm":0,
# "grossOpenCost":0,
# "grossOpenPremium":0,
# "grossExecCost":0,
# "grossMarkValue":0,
# "riskValue":0,
# "taxableMargin":0,
# "initMargin":0,
# "maintMargin":0,
# "sessionMargin":0,
# "targetExcessMargin":0,
# "varMargin":0,
# "realisedPnl":0,
# "unrealisedPnl":0,
# "indicativeTax":0,
# "unrealisedProfit":0,
# "syntheticMargin":null,
# "walletBalance":263542,
# "marginBalance":263542,
# "marginBalancePcnt":1,
# "marginLeverage":0,
# "marginUsedPcnt":0,
# "excessMargin":263542,
# "excessMarginPcnt":1,
# "availableMargin":263542,
# "withdrawableMargin":263542,
# "timestamp":"2020-08-03T12:01:01.246Z",
# "grossLastValue":0,
# "commission":null
# }
# ]
#
return self.parse_balance(response)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['depth'] = limit
response = self.publicGetOrderBookL2(self.extend(request, params))
result = {
'symbol': symbol,
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
}
for i in range(0, len(response)):
order = response[i]
side = 'asks' if (order['side'] == 'Sell') else 'bids'
amount = self.safe_number(order, 'size')
price = self.safe_number(order, 'price')
# https://github.com/ccxt/ccxt/issues/4926
# https://github.com/ccxt/ccxt/issues/4927
# the exchange sometimes returns null price in the orderbook
if price is not None:
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def fetch_order(self, id, symbol=None, params={}):
filter = {
'filter': {
'orderID': id,
},
}
response = self.fetch_orders(symbol, None, None, self.deep_extend(filter, params))
numResults = len(response)
if numResults == 1:
return response[0]
raise OrderNotFound(self.id + ': The order ' + id + ' not found.')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
        # Bitmex doesn't like that. Hence resorting to this hack.
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetOrder(request)
return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'filter': {
'open': True,
},
}
return self.fetch_orders(symbol, since, limit, self.deep_extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# Bitmex barfs if you set 'open': False in the filter...
orders = self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
        # Bitmex doesn't like that. Hence resorting to this hack.
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetExecutionTradeHistory(request)
#
# [
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'Withdrawal': 'transaction',
'RealisedPNL': 'margin',
'UnrealisedPNL': 'margin',
'Deposit': 'transaction',
'Transfer': 'transfer',
'AffiliatePayout': 'referral',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# transactID: "69573da3-7744-5467-3207-89fd6efe7a47",
# account: 24321,
# currency: "XBt",
# transactType: "Withdrawal", # "AffiliatePayout", "Transfer", "Deposit", "RealisedPNL", ...
# amount: -1000000,
# fee: 300000,
# transactStatus: "Completed", # "Canceled", ...
# address: "1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9",
# tx: "3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA",
# text: "",
# transactTime: "2017-03-21T20:05:14.388Z",
# walletBalance: 0, # balance after
# marginBalance: null,
# timestamp: "2017-03-22T13:09:23.514Z"
# }
#
        # BitMEX returns the unrealized pnl from the wallet history endpoint.
# The unrealized pnl transaction has an empty timestamp.
        # It is not related to historical pnl, its status is set to "Pending".
# Therefore it's not a part of the history at all.
# https://github.com/ccxt/ccxt/issues/6047
#
# {
# "transactID":"00000000-0000-0000-0000-000000000000",
# "account":121210,
# "currency":"XBt",
# "transactType":"UnrealisedPNL",
# "amount":-5508,
# "fee":0,
# "transactStatus":"Pending",
# "address":"XBTUSD",
# "tx":"",
# "text":"",
        #         "transactTime":null,  # ←---------------------------- null
# "walletBalance":139198767,
# "marginBalance":139193259,
        #         "timestamp":null  # ←---------------------------- null
# }
#
id = self.safe_string(item, 'transactID')
account = self.safe_string(item, 'account')
referenceId = self.safe_string(item, 'tx')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'transactType'))
currencyId = self.safe_string(item, 'currency')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(item, 'amount')
if amount is not None:
amount = amount / 100000000
timestamp = self.parse8601(self.safe_string(item, 'transactTime'))
if timestamp is None:
# https://github.com/ccxt/ccxt/issues/6047
# set the timestamp to zero, 1970 Jan 1 00:00:00
# for unrealized pnl and other transactions without a timestamp
timestamp = 0 # see comments above
feeCost = self.safe_number(item, 'fee', 0)
if feeCost is not None:
feeCost = feeCost / 100000000
fee = {
'cost': feeCost,
'currency': code,
}
after = self.safe_number(item, 'walletBalance')
if after is not None:
after = after / 100000000
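        # walletBalance is the balance after the transaction, so the balance
        # before it is recovered by subtracting the signed amount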
before = self.sum(after, -amount)
direction = None
if amount < 0:
direction = 'out'
amount = abs(amount)
else:
direction = 'in'
status = self.parse_transaction_status(self.safe_string(item, 'transactStatus'))
return {
'id': id,
'info': item,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
# 'start': 123,
}
#
# if since is not None:
# # date-based pagination not supported
# }
#
if limit is not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
#
# [
# {
# transactID: "69573da3-7744-5467-3207-89fd6efe7a47",
# account: 24321,
# currency: "XBt",
# transactType: "Withdrawal", # "AffiliatePayout", "Transfer", "Deposit", "RealisedPNL", ...
# amount: -1000000,
# fee: 300000,
# transactStatus: "Completed", # "Canceled", ...
# address: "1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9",
# tx: "3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA",
# text: "",
# transactTime: "2017-03-21T20:05:14.388Z",
# walletBalance: 0, # balance after
# marginBalance: null,
# timestamp: "2017-03-22T13:09:23.514Z"
# }
# ]
#
return self.parse_ledger(response, currency, since, limit)
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'start': 123,
}
#
# if since is not None:
# # date-based pagination not supported
# }
#
if limit is not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
transactions = self.filter_by_array(response, 'transactType', ['Withdrawal', 'Deposit'], False)
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'Canceled': 'canceled',
'Completed': 'ok',
'Pending': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# {
# 'transactID': 'ffe699c2-95ee-4c13-91f9-0faf41daec25',
# 'account': 123456,
# 'currency': 'XBt',
# 'transactType': 'Withdrawal',
# 'amount': -100100000,
# 'fee': 100000,
# 'transactStatus': 'Completed',
# 'address': '385cR5DM96n1HvBDMzLHPYcw89fZAXULJP',
# 'tx': '3BMEXabcdefghijklmnopqrstuvwxyz123',
# 'text': '',
# 'transactTime': '2019-01-02T01:00:00.000Z',
# 'walletBalance': 99900000,
# 'marginBalance': None,
# 'timestamp': '2019-01-02T13:00:00.000Z'
# }
#
id = self.safe_string(transaction, 'transactID')
# For deposits, transactTime == timestamp
# For withdrawals, transactTime is submission, timestamp is processed
transactTime = self.parse8601(self.safe_string(transaction, 'transactTime'))
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
type = self.safe_string_lower(transaction, 'transactType')
# Deposits have no from address or to address, withdrawals have both
address = None
addressFrom = None
addressTo = None
if type == 'withdrawal':
address = self.safe_string(transaction, 'address')
addressFrom = self.safe_string(transaction, 'tx')
addressTo = address
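        # amounts and fees come back in satoshis, scale them down to BTC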
amountString = self.safe_string(transaction, 'amount')
amountString = Precise.string_div(Precise.string_abs(amountString), '1e8')
feeCostString = self.safe_string(transaction, 'fee')
feeCostString = Precise.string_div(feeCostString, '1e8')
fee = {
'cost': self.parse_number(feeCostString),
'currency': 'BTC',
}
status = self.safe_string(transaction, 'transactStatus')
if status is not None:
status = self.parse_transaction_status(status)
return {
'info': transaction,
'id': id,
'txid': None,
'timestamp': transactTime,
'datetime': self.iso8601(transactTime),
'network': None,
'addressFrom': addressFrom,
'address': address,
'addressTo': addressTo,
'tagFrom': None,
'tag': None,
'tagTo': None,
'type': type,
'amount': self.parse_number(amountString),
# BTC is the only currency on Bitmex
'currency': 'BTC',
'status': status,
'updated': timestamp,
'comment': None,
'fee': fee,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
if not market['active']:
raise BadSymbol(self.id + ' fetchTicker() symbol ' + symbol + ' is not tradable')
tickers = self.fetch_tickers([market['symbol']], params)
ticker = self.safe_value(tickers, market['symbol'])
if ticker is None:
raise BadSymbol(self.id + ' fetchTicker() symbol ' + symbol + ' not found')
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetInstrumentActiveAndIndices(params)
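        # the endpoint returns all active instruments and indices at once,
        # parse them all and filter by the requested symbols afterwards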
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = self.safe_string(ticker, 'symbol')
if symbol is not None:
result[symbol] = ticker
uniformSymbols = []
if symbols is not None:
for i in range(0, len(symbols)):
symbol = symbols[i]
market = self.market(symbol)
uniformSymbols.append(market['symbol'])
return self.filter_by_array(result, 'symbol', uniformSymbols)
def parse_ticker(self, ticker, market=None):
#
# { symbol: "ETHH19",
# rootSymbol: "ETH",
# state: "Open",
# typ: "FFCCSX",
# listing: "2018-12-17T04:00:00.000Z",
# front: "2019-02-22T12:00:00.000Z",
# expiry: "2019-03-29T12:00:00.000Z",
# settle: "2019-03-29T12:00:00.000Z",
# relistInterval: null,
# inverseLeg: "",
# sellLeg: "",
# buyLeg: "",
# optionStrikePcnt: null,
# optionStrikeRound: null,
# optionStrikePrice: null,
# optionMultiplier: null,
# positionCurrency: "ETH",
# underlying: "ETH",
# quoteCurrency: "XBT",
# underlyingSymbol: "ETHXBT=",
# reference: "BMEX",
# referenceSymbol: ".BETHXBT30M",
# calcInterval: null,
# publishInterval: null,
# publishTime: null,
# maxOrderQty: 100000000,
# maxPrice: 10,
# lotSize: 1,
# tickSize: 0.00001,
# multiplier: 100000000,
# settlCurrency: "XBt",
# underlyingToPositionMultiplier: 1,
# underlyingToSettleMultiplier: null,
# quoteToSettleMultiplier: 100000000,
# isQuanto: False,
# isInverse: False,
# initMargin: 0.02,
# maintMargin: 0.01,
# riskLimit: 5000000000,
# riskStep: 5000000000,
# limit: null,
# capped: False,
# taxed: True,
# deleverage: True,
# makerFee: -0.0005,
# takerFee: 0.0025,
# settlementFee: 0,
# insuranceFee: 0,
# fundingBaseSymbol: "",
# fundingQuoteSymbol: "",
# fundingPremiumSymbol: "",
# fundingTimestamp: null,
# fundingInterval: null,
# fundingRate: null,
# indicativeFundingRate: null,
# rebalanceTimestamp: null,
# rebalanceInterval: null,
# openingTimestamp: "2019-02-13T08:00:00.000Z",
# closingTimestamp: "2019-02-13T09:00:00.000Z",
# sessionInterval: "2000-01-01T01:00:00.000Z",
# prevClosePrice: 0.03347,
# limitDownPrice: null,
# limitUpPrice: null,
# bankruptLimitDownPrice: null,
# bankruptLimitUpPrice: null,
# prevTotalVolume: 1386531,
# totalVolume: 1387062,
# volume: 531,
# volume24h: 17118,
# prevTotalTurnover: 4741294246000,
# totalTurnover: 4743103466000,
# turnover: 1809220000,
# turnover24h: 57919845000,
# homeNotional24h: 17118,
# foreignNotional24h: 579.19845,
# prevPrice24h: 0.03349,
# vwap: 0.03383564,
# highPrice: 0.03458,
# lowPrice: 0.03329,
# lastPrice: 0.03406,
# lastPriceProtected: 0.03406,
# lastTickDirection: "ZeroMinusTick",
# lastChangePcnt: 0.017,
# bidPrice: 0.03406,
# midPrice: 0.034065,
# askPrice: 0.03407,
# impactBidPrice: 0.03406,
# impactMidPrice: 0.034065,
# impactAskPrice: 0.03407,
# hasLiquidity: True,
# openInterest: 83679,
# openValue: 285010674000,
# fairMethod: "ImpactMidPrice",
# fairBasisRate: 0,
# fairBasis: 0,
# fairPrice: 0.03406,
# markMethod: "FairPrice",
# markPrice: 0.03406,
# indicativeTaxRate: 0,
# indicativeSettlePrice: 0.03406,
# optionUnderlyingPrice: null,
# settledPrice: null,
# timestamp: "2019-02-13T08:40:30.000Z",
# }
#
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
open = self.safe_string(ticker, 'prevPrice24h')
last = self.safe_string(ticker, 'lastPrice')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'highPrice'),
'low': self.safe_string(ticker, 'lowPrice'),
'bid': self.safe_string(ticker, 'bidPrice'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'askPrice'),
'askVolume': None,
'vwap': self.safe_string(ticker, 'vwap'),
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'homeNotional24h'),
'quoteVolume': self.safe_string(ticker, 'foreignNotional24h'),
'info': ticker,
}, market, False)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "timestamp":"2015-09-25T13:38:00.000Z",
# "symbol":"XBTUSD",
# "open":237.45,
# "high":237.45,
# "low":237.45,
# "close":237.45,
# "trades":0,
# "volume":0,
# "vwap":null,
# "lastSize":null,
# "turnover":0,
# "homeNotional":0,
# "foreignNotional":0
# }
#
return [
self.parse8601(self.safe_string(ohlcv, 'timestamp')),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
# send JSON key/value pairs, such as {"key": "value"}
# filter by individual fields and do advanced queries on timestamps
# filter = {'key': 'value'}
# send a bare series(e.g. XBU) to nearest expiring contract in that series
# you can also send a timeframe, e.g. XBU:monthly
# timeframes: daily, weekly, monthly, quarterly, and biquarterly
market = self.market(symbol)
request = {
'symbol': market['id'],
'binSize': self.timeframes[timeframe],
'partial': True, # True == include yet-incomplete current bins
# 'filter': filter, # filter by individual fields and do advanced queries
# 'columns': [], # will return all columns if omitted
# 'start': 0, # starting point for results(wtf?)
# 'reverse': False, # True == newest first
# 'endTime': '', # ending date filter for results
}
if limit is not None:
request['count'] = limit # default 100, max 500
duration = self.parse_timeframe(timeframe) * 1000
fetchOHLCVOpenTimestamp = self.safe_value(self.options, 'fetchOHLCVOpenTimestamp', True)
# if since is not set, they will return candles starting from 2017-01-01
if since is not None:
timestamp = since
if fetchOHLCVOpenTimestamp:
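                # the exchange labels each candle with its close timestamp(see below),
                # so shift the requested start forward by one duration to compensate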
timestamp = self.sum(timestamp, duration)
ymdhms = self.ymdhms(timestamp)
request['startTime'] = ymdhms # starting date filter for results
else:
request['reverse'] = True
response = self.publicGetTradeBucketed(self.extend(request, params))
#
# [
# {"timestamp":"2015-09-25T13:38:00.000Z","symbol":"XBTUSD","open":237.45,"high":237.45,"low":237.45,"close":237.45,"trades":0,"volume":0,"vwap":null,"lastSize":null,"turnover":0,"homeNotional":0,"foreignNotional":0},
# {"timestamp":"2015-09-25T13:39:00.000Z","symbol":"XBTUSD","open":237.45,"high":237.45,"low":237.45,"close":237.45,"trades":0,"volume":0,"vwap":null,"lastSize":null,"turnover":0,"homeNotional":0,"foreignNotional":0},
# {"timestamp":"2015-09-25T13:40:00.000Z","symbol":"XBTUSD","open":237.45,"high":237.45,"low":237.45,"close":237.45,"trades":0,"volume":0,"vwap":null,"lastSize":null,"turnover":0,"homeNotional":0,"foreignNotional":0}
# ]
#
result = self.parse_ohlcvs(response, market, timeframe, since, limit)
if fetchOHLCVOpenTimestamp:
# bitmex returns the candle's close timestamp - https://github.com/ccxt/ccxt/issues/4446
# we can emulate the open timestamp by shifting all the timestamps one place
# so the previous close becomes the current open, and we drop the first candle
for i in range(0, len(result)):
result[i][0] = result[i][0] - duration
return result
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# }
#
# fetchMyTrades(private)
#
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'timestamp'))
priceString = self.safe_string_2(trade, 'avgPx', 'price')
amountString = self.safe_string_2(trade, 'size', 'lastQty')
execCost = self.safe_string(trade, 'execCost')
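        # execCost is denominated in satoshis of the settlement currency and its
        # sign depends on the side of the trade, hence abs() and the division by 1e8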
costString = Precise.string_div(Precise.string_abs(execCost), '1e8')
id = self.safe_string(trade, 'trdMatchID')
order = self.safe_string(trade, 'orderID')
side = self.safe_string_lower(trade, 'side')
# price * amount doesn't work for all symbols(e.g. XBT, ETH)
fee = None
feeCostString = Precise.string_div(self.safe_string(trade, 'execComm'), '1e8')
if feeCostString is not None:
currencyId = self.safe_string(trade, 'settlCurrency')
feeCurrencyCode = self.safe_currency_code(currencyId)
feeRateString = self.safe_string(trade, 'commission')
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
'rate': feeRateString,
}
# Trade or Funding
execType = self.safe_string(trade, 'execType')
takerOrMaker = None
if feeCostString is not None and execType == 'Trade':
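            # a negative fee is a maker rebate, so the trade was made as a maker,
            # otherwise it was a taker trade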
takerOrMaker = 'maker' if Precise.string_lt(feeCostString, '0') else 'taker'
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market)
type = self.safe_string_lower(trade, 'ordType')
return self.safe_trade({
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': order,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': priceString,
'cost': costString,
'amount': amountString,
'fee': fee,
}, market)
def parse_order_status(self, status):
statuses = {
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'DoneForDay': 'open',
'Canceled': 'canceled',
'PendingCancel': 'open',
'PendingNew': 'open',
'Rejected': 'rejected',
'Expired': 'expired',
'Stopped': 'open',
'Untriggered': 'open',
'Triggered': 'open',
}
return self.safe_string(statuses, status, status)
def parse_time_in_force(self, timeInForce):
timeInForces = {
'Day': 'Day',
'GoodTillCancel': 'GTC',
'ImmediateOrCancel': 'IOC',
'FillOrKill': 'FOK',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
def parse_order(self, order, market=None):
#
# {
# "orderID":"56222c7a-9956-413a-82cf-99f4812c214b",
# "clOrdID":"",
# "clOrdLinkID":"",
# "account":1455728,
# "symbol":"XBTUSD",
# "side":"Sell",
# "simpleOrderQty":null,
# "orderQty":1,
# "price":40000,
# "displayQty":null,
# "stopPx":null,
# "pegOffsetValue":null,
# "pegPriceType":"",
# "currency":"USD",
# "settlCurrency":"XBt",
# "ordType":"Limit",
# "timeInForce":"GoodTillCancel",
# "execInst":"",
# "contingencyType":"",
# "exDestination":"XBME",
# "ordStatus":"New",
# "triggered":"",
# "workingIndicator":true,
# "ordRejReason":"",
# "simpleLeavesQty":null,
# "leavesQty":1,
# "simpleCumQty":null,
# "cumQty":0,
# "avgPx":null,
# "multiLegReportingType":"SingleSecurity",
# "text":"Submitted via API.",
# "transactTime":"2021-01-02T21:38:49.246Z",
# "timestamp":"2021-01-02T21:38:49.246Z"
# }
#
status = self.parse_order_status(self.safe_string(order, 'ordStatus'))
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
lastTradeTimestamp = self.parse8601(self.safe_string(order, 'transactTime'))
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'orderQty')
filled = self.safe_string(order, 'cumQty', 0.0)
average = self.safe_string(order, 'avgPx')
id = self.safe_string(order, 'orderID')
type = self.safe_string_lower(order, 'ordType')
side = self.safe_string_lower(order, 'side')
clientOrderId = self.safe_string(order, 'clOrdID')
timeInForce = self.parse_time_in_force(self.safe_string(order, 'timeInForce'))
stopPrice = self.safe_number(order, 'stopPx')
execInst = self.safe_string(order, 'execInst')
postOnly = (execInst == 'ParticipateDoNotInitiate')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': None,
'average': average,
'filled': filled,
'remaining': None,
'status': status,
'fee': None,
'trades': None,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = self.iso8601(since)
else:
# by default reverse=false, i.e. trades are fetched since the time of market inception(year 2015 for XBTUSD)
request['reverse'] = True
if limit is not None:
request['count'] = limit
response = self.publicGetTrade(self.extend(request, params))
#
# [
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# },
# {
# timestamp: '2018-08-28T00:00:03.778Z',
# symbol: 'XBTUSD',
# side: 'Sell',
# size: 1000,
# price: 6906,
# tickDirection: 'MinusTick',
# trdMatchID: '0d4f1682-5270-a800-569b-4a0eb92db97c',
# grossValue: 14480000,
# homeNotional: 0.1448,
# foreignNotional: 1000
# },
# ]
#
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
orderType = self.capitalize(type)
request = {
'symbol': market['id'],
'side': self.capitalize(side),
'orderQty': float(self.amount_to_precision(symbol, amount)),
'ordType': orderType,
}
if (orderType == 'Stop') or (orderType == 'StopLimit') or (orderType == 'MarketIfTouched') or (orderType == 'LimitIfTouched'):
stopPrice = self.safe_number_2(params, 'stopPx', 'stopPrice')
if stopPrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPx or stopPrice parameter for the ' + orderType + ' order type')
else:
request['stopPx'] = float(self.price_to_precision(symbol, stopPrice))
params = self.omit(params, ['stopPx', 'stopPrice'])
if (orderType == 'Limit') or (orderType == 'StopLimit') or (orderType == 'LimitIfTouched'):
request['price'] = float(self.price_to_precision(symbol, price))
clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['clOrdID', 'clientOrderId'])
response = self.privatePostOrder(self.extend(request, params))
return self.parse_order(response, market)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
self.load_markets()
request = {}
origClOrdID = self.safe_string_2(params, 'origClOrdID', 'clientOrderId')
if origClOrdID is not None:
request['origClOrdID'] = origClOrdID
            clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['origClOrdID', 'clOrdID', 'clientOrderId'])
else:
request['orderID'] = id
if amount is not None:
request['orderQty'] = amount
if price is not None:
request['price'] = price
response = self.privatePutOrder(self.extend(request, params))
return self.parse_order(response)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
# https://github.com/ccxt/ccxt/issues/6507
clientOrderId = self.safe_value_2(params, 'clOrdID', 'clientOrderId')
request = {}
if clientOrderId is None:
request['orderID'] = id
else:
request['clOrdID'] = clientOrderId
params = self.omit(params, ['clOrdID', 'clientOrderId'])
response = self.privateDeleteOrder(self.extend(request, params))
order = self.safe_value(response, 0, {})
error = self.safe_string(order, 'error')
if error is not None:
if error.find('Unable to cancel order due to existing state') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() failed: ' + error)
return self.parse_order(order)
def cancel_orders(self, ids, symbol=None, params={}):
return self.cancel_order(ids, symbol, params)
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privateDeleteOrderAll(self.extend(request, params))
#
# [
# {
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "multiLegReportingType": "string",
# "text": "string",
# "transactTime": "2020-06-01T09:36:35.290Z",
# "timestamp": "2020-06-01T09:36:35.290Z"
# }
# ]
#
return self.parse_orders(response, market)
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
response = self.privateGetPosition(params)
# [
# {
# "account": 0,
# "symbol": "string",
# "currency": "string",
# "underlying": "string",
# "quoteCurrency": "string",
# "commission": 0,
# "initMarginReq": 0,
# "maintMarginReq": 0,
# "riskLimit": 0,
# "leverage": 0,
# "crossMargin": True,
# "deleveragePercentile": 0,
# "rebalancedPnl": 0,
# "prevRealisedPnl": 0,
# "prevUnrealisedPnl": 0,
# "prevClosePrice": 0,
# "openingTimestamp": "2020-11-09T06:53:59.892Z",
# "openingQty": 0,
# "openingCost": 0,
# "openingComm": 0,
# "openOrderBuyQty": 0,
# "openOrderBuyCost": 0,
# "openOrderBuyPremium": 0,
# "openOrderSellQty": 0,
# "openOrderSellCost": 0,
# "openOrderSellPremium": 0,
# "execBuyQty": 0,
# "execBuyCost": 0,
# "execSellQty": 0,
# "execSellCost": 0,
# "execQty": 0,
# "execCost": 0,
# "execComm": 0,
# "currentTimestamp": "2020-11-09T06:53:59.893Z",
# "currentQty": 0,
# "currentCost": 0,
# "currentComm": 0,
# "realisedCost": 0,
# "unrealisedCost": 0,
# "grossOpenCost": 0,
# "grossOpenPremium": 0,
# "grossExecCost": 0,
# "isOpen": True,
# "markPrice": 0,
# "markValue": 0,
# "riskValue": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "posState": "string",
# "posCost": 0,
# "posCost2": 0,
# "posCross": 0,
# "posInit": 0,
# "posComm": 0,
# "posLoss": 0,
# "posMargin": 0,
# "posMaint": 0,
# "posAllowance": 0,
# "taxableMargin": 0,
# "initMargin": 0,
# "maintMargin": 0,
# "sessionMargin": 0,
# "targetExcessMargin": 0,
# "varMargin": 0,
# "realisedGrossPnl": 0,
# "realisedTax": 0,
# "realisedPnl": 0,
# "unrealisedGrossPnl": 0,
# "longBankrupt": 0,
# "shortBankrupt": 0,
# "taxBase": 0,
# "indicativeTaxRate": 0,
# "indicativeTax": 0,
# "unrealisedTax": 0,
# "unrealisedPnl": 0,
# "unrealisedPnlPcnt": 0,
# "unrealisedRoePcnt": 0,
# "simpleQty": 0,
# "simpleCost": 0,
# "simpleValue": 0,
# "simplePnl": 0,
# "simplePnlPcnt": 0,
# "avgCostPrice": 0,
# "avgEntryPrice": 0,
# "breakEvenPrice": 0,
# "marginCallPrice": 0,
# "liquidationPrice": 0,
# "bankruptPrice": 0,
# "timestamp": "2020-11-09T06:53:59.894Z",
# "lastPrice": 0,
# "lastValue": 0
# }
# ]
#
# todo unify parsePosition/parsePositions
return response
def is_fiat(self, currency):
if currency == 'EUR':
return True
if currency == 'PLN':
return True
return False
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
# currency = self.currency(code)
if code != 'BTC':
            raise ExchangeError(self.id + ' supports BTC withdrawals only, other currencies coming soon...')
request = {
'currency': 'XBt', # temporarily
'amount': amount,
'address': address,
# 'otpToken': '123456', # requires if two-factor auth(OTP) is enabled
# 'fee': 0.001, # bitcoin network fee
}
response = self.privatePostUserRequestWithdrawal(self.extend(request, params))
return {
'info': response,
'id': self.safe_string(response, 'transactID'),
}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
if code >= 400:
error = self.safe_value(response, 'error', {})
message = self.safe_string(error, 'message')
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if code == 400:
raise BadRequest(feedback)
raise ExchangeError(feedback) # unknown message
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = '/api/' + self.version + '/' + path
if method == 'GET':
if params:
query += '?' + self.urlencode(params)
else:
format = self.safe_string(params, '_format')
if format is not None:
query += '?' + self.urlencode({'_format': format})
params = self.omit(params, '_format')
url = self.urls['api'][api] + query
if api == 'private':
self.check_required_credentials()
auth = method + query
expires = self.safe_integer(self.options, 'api-expires')
headers = {
'Content-Type': 'application/json',
'api-key': self.apiKey,
}
expires = self.sum(self.seconds(), expires)
expires = str(expires)
auth += expires
headers['api-expires'] = expires
if method == 'POST' or method == 'PUT' or method == 'DELETE':
if params:
body = self.json(params)
auth += body
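            # the api-signature is the hex HMAC-SHA256 of verb + path + expires + body,
            # keyed with the API secret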
headers['api-signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
return {'url': url, 'method': method, 'body': body, 'headers': headers}
| fetch_balance |
automate_event.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.15.6
// source: interservice/ingest/automate_event.proto
package ingest
import (
context "context"
event "github.com/chef/automate/api/interservice/event"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type ProjectUpdateStatusReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *ProjectUpdateStatusReq) Reset() {
*x = ProjectUpdateStatusReq{}
if protoimpl.UnsafeEnabled {
mi := &file_interservice_ingest_automate_event_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ProjectUpdateStatusReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ProjectUpdateStatusReq) ProtoMessage() {}
func (x *ProjectUpdateStatusReq) ProtoReflect() protoreflect.Message {
mi := &file_interservice_ingest_automate_event_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ProjectUpdateStatusReq.ProtoReflect.Descriptor instead.
func (*ProjectUpdateStatusReq) Descriptor() ([]byte, []int) {
return file_interservice_ingest_automate_event_proto_rawDescGZIP(), []int{0}
}
type ProjectUpdateStatusResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
State string `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty" toml:"state,omitempty" mapstructure:"state,omitempty"`
EstimatedTimeComplete *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=estimated_time_complete,json=estimatedTimeComplete,proto3" json:"estimated_time_complete,omitempty" toml:"estimated_time_complete,omitempty" mapstructure:"estimated_time_complete,omitempty"`
PercentageComplete float32 `protobuf:"fixed32,3,opt,name=percentage_complete,json=percentageComplete,proto3" json:"percentage_complete,omitempty" toml:"percentage_complete,omitempty" mapstructure:"percentage_complete,omitempty"`
}
func (x *ProjectUpdateStatusResp) Reset() {
*x = ProjectUpdateStatusResp{}
if protoimpl.UnsafeEnabled {
mi := &file_interservice_ingest_automate_event_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ProjectUpdateStatusResp) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ProjectUpdateStatusResp) ProtoMessage() {}
func (x *ProjectUpdateStatusResp) ProtoReflect() protoreflect.Message {
mi := &file_interservice_ingest_automate_event_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ProjectUpdateStatusResp.ProtoReflect.Descriptor instead.
func (*ProjectUpdateStatusResp) Descriptor() ([]byte, []int) {
return file_interservice_ingest_automate_event_proto_rawDescGZIP(), []int{1}
}
func (x *ProjectUpdateStatusResp) GetState() string {
if x != nil {
return x.State
}
return ""
}
func (x *ProjectUpdateStatusResp) GetEstimatedTimeComplete() *timestamppb.Timestamp {
if x != nil {
return x.EstimatedTimeComplete
}
return nil
}
func (x *ProjectUpdateStatusResp) GetPercentageComplete() float32 {
if x != nil {
return x.PercentageComplete
}
return 0
}
var File_interservice_ingest_automate_event_proto protoreflect.FileDescriptor
var file_interservice_ingest_automate_event_proto_rawDesc = []byte{
0x0a, 0x28, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x69,
0x6e, 0x67, 0x65, 0x73, 0x74, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x5f, 0x65,
0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x63, 0x68, 0x65, 0x66,
0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
0x2e, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2f, 0x65, 0x76, 0x65,
0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x18, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6a,
0x65, 0x63, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
0x65, 0x71, 0x22, 0xb4, 0x01, 0x0a, 0x17, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x55, 0x70,
0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x14,
0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
0x74, 0x61, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x17, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65,
0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x52, 0x15, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65,
0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x70, 0x65, 0x72, 0x63,
0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18,
0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67,
0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x32, 0x80, 0x02, 0x0a, 0x13, 0x45, 0x76,
0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x12, 0x66, 0x0a, 0x0b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74,
0x12, 0x28, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65,
0x2e, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x61, 0x70,
0x69, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x2d, 0x2e, 0x63, 0x68, 0x65,
0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x64, 0x6f, 0x6d, 0x61, 0x69,
0x6e, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x45, 0x76, 0x65, 0x6e,
0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x13, 0x50, 0x72,
0x6f, 0x6a, 0x65, 0x63, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75,
0x73, 0x12, 0x33, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74,
0x65, 0x2e, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x2e,
0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61,
0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x34, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75,
0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x69, 0x6e,
0x67, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x55, 0x70, 0x64, 0x61,
0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x42, 0x32, 0x5a, 0x30,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x65, 0x66, 0x2f,
0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x69, 0x6e, 0x74,
0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74,
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_interservice_ingest_automate_event_proto_rawDescOnce sync.Once
file_interservice_ingest_automate_event_proto_rawDescData = file_interservice_ingest_automate_event_proto_rawDesc
)
func file_interservice_ingest_automate_event_proto_rawDescGZIP() []byte {
file_interservice_ingest_automate_event_proto_rawDescOnce.Do(func() {
file_interservice_ingest_automate_event_proto_rawDescData = protoimpl.X.CompressGZIP(file_interservice_ingest_automate_event_proto_rawDescData)
})
return file_interservice_ingest_automate_event_proto_rawDescData
}
var file_interservice_ingest_automate_event_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_interservice_ingest_automate_event_proto_goTypes = []interface{}{
(*ProjectUpdateStatusReq)(nil), // 0: chef.automate.domain.ingest.ProjectUpdateStatusReq
(*ProjectUpdateStatusResp)(nil), // 1: chef.automate.domain.ingest.ProjectUpdateStatusResp
(*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
(*event.EventMsg)(nil), // 3: chef.automate.domain.event.api.EventMsg
(*event.EventResponse)(nil), // 4: chef.automate.domain.event.api.EventResponse
}
var file_interservice_ingest_automate_event_proto_depIdxs = []int32{
2, // 0: chef.automate.domain.ingest.ProjectUpdateStatusResp.estimated_time_complete:type_name -> google.protobuf.Timestamp
3, // 1: chef.automate.domain.ingest.EventHandlerService.HandleEvent:input_type -> chef.automate.domain.event.api.EventMsg
0, // 2: chef.automate.domain.ingest.EventHandlerService.ProjectUpdateStatus:input_type -> chef.automate.domain.ingest.ProjectUpdateStatusReq
4, // 3: chef.automate.domain.ingest.EventHandlerService.HandleEvent:output_type -> chef.automate.domain.event.api.EventResponse
1, // 4: chef.automate.domain.ingest.EventHandlerService.ProjectUpdateStatus:output_type -> chef.automate.domain.ingest.ProjectUpdateStatusResp
3, // [3:5] is the sub-list for method output_type
1, // [1:3] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_interservice_ingest_automate_event_proto_init() }
func file_interservice_ingest_automate_event_proto_init() {
if File_interservice_ingest_automate_event_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_interservice_ingest_automate_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ProjectUpdateStatusReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_interservice_ingest_automate_event_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ProjectUpdateStatusResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_interservice_ingest_automate_event_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_interservice_ingest_automate_event_proto_goTypes,
DependencyIndexes: file_interservice_ingest_automate_event_proto_depIdxs,
MessageInfos: file_interservice_ingest_automate_event_proto_msgTypes,
}.Build()
File_interservice_ingest_automate_event_proto = out.File
file_interservice_ingest_automate_event_proto_rawDesc = nil
file_interservice_ingest_automate_event_proto_goTypes = nil
file_interservice_ingest_automate_event_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// EventHandlerServiceClient is the client API for EventHandlerService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type EventHandlerServiceClient interface {
HandleEvent(ctx context.Context, in *event.EventMsg, opts ...grpc.CallOption) (*event.EventResponse, error)
ProjectUpdateStatus(ctx context.Context, in *ProjectUpdateStatusReq, opts ...grpc.CallOption) (*ProjectUpdateStatusResp, error)
}
type eventHandlerServiceClient struct {
cc grpc.ClientConnInterface
}
func | (cc grpc.ClientConnInterface) EventHandlerServiceClient {
return &eventHandlerServiceClient{cc}
}
func (c *eventHandlerServiceClient) HandleEvent(ctx context.Context, in *event.EventMsg, opts ...grpc.CallOption) (*event.EventResponse, error) {
out := new(event.EventResponse)
err := c.cc.Invoke(ctx, "/chef.automate.domain.ingest.EventHandlerService/HandleEvent", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *eventHandlerServiceClient) ProjectUpdateStatus(ctx context.Context, in *ProjectUpdateStatusReq, opts ...grpc.CallOption) (*ProjectUpdateStatusResp, error) {
out := new(ProjectUpdateStatusResp)
err := c.cc.Invoke(ctx, "/chef.automate.domain.ingest.EventHandlerService/ProjectUpdateStatus", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// EventHandlerServiceServer is the server API for EventHandlerService service.
type EventHandlerServiceServer interface {
HandleEvent(context.Context, *event.EventMsg) (*event.EventResponse, error)
ProjectUpdateStatus(context.Context, *ProjectUpdateStatusReq) (*ProjectUpdateStatusResp, error)
}
// UnimplementedEventHandlerServiceServer can be embedded to have forward compatible implementations.
type UnimplementedEventHandlerServiceServer struct {
}
func (*UnimplementedEventHandlerServiceServer) HandleEvent(context.Context, *event.EventMsg) (*event.EventResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method HandleEvent not implemented")
}
func (*UnimplementedEventHandlerServiceServer) ProjectUpdateStatus(context.Context, *ProjectUpdateStatusReq) (*ProjectUpdateStatusResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method ProjectUpdateStatus not implemented")
}
func RegisterEventHandlerServiceServer(s *grpc.Server, srv EventHandlerServiceServer) {
s.RegisterService(&_EventHandlerService_serviceDesc, srv)
}
func _EventHandlerService_HandleEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(event.EventMsg)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(EventHandlerServiceServer).HandleEvent(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/chef.automate.domain.ingest.EventHandlerService/HandleEvent",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EventHandlerServiceServer).HandleEvent(ctx, req.(*event.EventMsg))
}
return interceptor(ctx, in, info, handler)
}
func _EventHandlerService_ProjectUpdateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ProjectUpdateStatusReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(EventHandlerServiceServer).ProjectUpdateStatus(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/chef.automate.domain.ingest.EventHandlerService/ProjectUpdateStatus",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EventHandlerServiceServer).ProjectUpdateStatus(ctx, req.(*ProjectUpdateStatusReq))
}
return interceptor(ctx, in, info, handler)
}
var _EventHandlerService_serviceDesc = grpc.ServiceDesc{
ServiceName: "chef.automate.domain.ingest.EventHandlerService",
HandlerType: (*EventHandlerServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "HandleEvent",
Handler: _EventHandlerService_HandleEvent_Handler,
},
{
MethodName: "ProjectUpdateStatus",
Handler: _EventHandlerService_ProjectUpdateStatus_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "interservice/ingest/automate_event.proto",
}
| NewEventHandlerServiceClient |
util.py | import asyncio
import sys
from urllib.parse import urlparse, parse_qsl
from .log import logger
_NOTSET = object()
IS_PY38 = sys.version_info >= (3, 8)
# NOTE: never put anything else here;
# just these basic types
_converters = {
bytes: lambda val: val,
bytearray: lambda val: val,
str: lambda val: val.encode(),
int: lambda val: b'%d' % val,
float: lambda val: b'%r' % val,
}
def encode_command(*args, buf=None):
"""Encodes arguments into redis bulk-strings array.
    Raises TypeError if any of the args is not of bytearray, bytes, float, int,
    or str type.
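
    For example (illustrative), encode_command('SET', 'key', 1) returns a
    bytearray whose contents are b'*3\\r\\n$3\\r\\nSET\\r\\n$3\\r\\nkey\\r\\n$1\\r\\n1\\r\\n'.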
"""
if buf is None:
buf = bytearray()
buf.extend(b'*%d\r\n' % len(args))
try:
for arg in args:
barg = _converters[type(arg)](arg)
buf.extend(b'$%d\r\n%s\r\n' % (len(barg), barg))
except KeyError:
raise TypeError("Argument {!r} expected to be of bytearray, bytes,"
" float, int, or str type".format(arg))
return buf
def decode(obj, encoding, errors):
if errors is None:
errors = 'strict'
if isinstance(obj, bytes):
return obj.decode(encoding, errors)
elif isinstance(obj, list):
return [decode(o, encoding, errors) for o in obj]
return obj
async def wait_ok(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
return res in (b'OK', 'OK')
async def wait_convert(fut, type_, **kwargs):
result = await fut
if result in (b'QUEUED', 'QUEUED'):
return result
return type_(result, **kwargs)
async def wait_make_dict(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
it = iter(res)
return dict(zip(it, it))
class coerced_keys_dict(dict):
def __getitem__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__getitem__(self, other)
def __contains__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__contains__(self, other)
class _ScanIter:
__slots__ = ('_scan', '_cur', '_ret')
def __init__(self, scan):
self._scan = scan
self._cur = b'0'
self._ret = []
def __aiter__(self):
return self
async def __anext__(self):
while not self._ret and self._cur:
self._cur, self._ret = await self._scan(self._cur)
if not self._cur and not self._ret:
raise StopAsyncIteration # noqa
else:
ret = self._ret.pop(0)
return ret
def _set_result(fut, result, *info):
if fut.done():
logger.debug("Waiter future is already done %r %r", fut, info)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, result, info)
else:
fut.set_result(result)
def _set_exception(fut, exception):
if fut.done():
logger.debug("Waiter future is already done %r", fut)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, exception)
else:
fut.set_exception(exception)
def parse_url(url):
"""Parse Redis connection URI.
Parse according to IANA specs:
* https://www.iana.org/assignments/uri-schemes/prov/redis
* https://www.iana.org/assignments/uri-schemes/prov/rediss
    Additional rules also apply:
    * an empty scheme is treated as a unix socket path; no further parsing is done.
    * the 'unix://' scheme is treated as a unix socket path and parsed.
    * multiple query parameter values and blank values are considered an error.
    * a DB number specified both as path and as query parameter is considered an error.
    * a password specified both in userinfo and as query parameter is
      considered an error.
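
    Example (illustrative):
        >>> parse_url('redis://localhost:6379/0?timeout=1')
        (('localhost', 6379), {'db': 0, 'timeout': 1.0})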
"""
r = urlparse(url)
assert r.scheme in ('', 'redis', 'rediss', 'unix'), (
"Unsupported URI scheme", r.scheme)
if r.scheme == '':
return url, {}
query = {}
for p, v in parse_qsl(r.query, keep_blank_values=True):
assert p not in query, ("Multiple parameters are not allowed", p, v)
assert v, ("Empty parameters are not allowed", p, v)
query[p] = v
if r.scheme == 'unix':
assert r.path, ("Empty path is not allowed", url)
assert not r.netloc, (
"Netlocation is not allowed for unix scheme", r.netloc)
return r.path, _parse_uri_options(query, '', r.password)
address = (r.hostname or 'localhost', int(r.port or 6379))
path = r.path
if path.startswith('/'):
path = r.path[1:]
options = _parse_uri_options(query, path, r.password)
if r.scheme == 'rediss':
options['ssl'] = True
return address, options
def _parse_uri_options(params, path, password):
def parse_db_num(val):
if not val:
return
assert val.isdecimal(), ("Invalid decimal integer", val)
assert val == '0' or not val.startswith('0'), (
"Expected integer without leading zeroes", val)
return int(val)
options = {}
db1 = parse_db_num(path)
db2 = parse_db_num(params.get('db'))
assert db1 is None or db2 is None, (
"Single DB value expected, got path and query", db1, db2)
if db1 is not None:
options['db'] = db1
elif db2 is not None:
options['db'] = db2
password2 = params.get('password')
assert not password or not password2, (
"Single password value is expected, got in net location and query")
if password:
options['password'] = password
elif password2:
options['password'] = password2
if 'encoding' in params: | if 'errors' in params:
options['errors'] = params['errors']
if 'ssl' in params:
assert params['ssl'] in ('true', 'false'), (
"Expected 'ssl' param to be 'true' or 'false' only",
params['ssl'])
options['ssl'] = params['ssl'] == 'true'
if 'timeout' in params:
options['timeout'] = float(params['timeout'])
return options
class CloseEvent:
def __init__(self, on_close):
self._close_init = asyncio.Event()
self._close_done = asyncio.Event()
self._on_close = on_close
async def wait(self):
await self._close_init.wait()
await self._close_done.wait()
def is_set(self):
return self._close_done.is_set() or self._close_init.is_set()
def set(self):
if self._close_init.is_set():
return
task = asyncio.ensure_future(self._on_close())
task.add_done_callback(self._cleanup)
self._close_init.set()
def _cleanup(self, task):
self._on_close = None
self._close_done.set()
get_event_loop = getattr(asyncio, 'get_running_loop', asyncio.get_event_loop) | options['encoding'] = params['encoding'] |
rpc_util_test.go | /*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"bytes"
"compress/gzip"
"io"
"math"
"reflect"
"testing"
"github.com/sunnogo/protobuf/proto"
"github.com/sunnogo/grpc-go/codes"
"github.com/sunnogo/grpc-go/encoding"
protoenc "github.com/sunnogo/grpc-go/encoding/proto"
"github.com/sunnogo/grpc-go/internal/transport"
"github.com/sunnogo/grpc-go/status"
perfpb "github.com/sunnogo/grpc-go/test/codec_perf"
)
type fullReader struct {
reader io.Reader
}
func (f fullReader) Read(p []byte) (int, error) {
return io.ReadFull(f.reader, p)
}
var _ CallOption = EmptyCallOption{} // ensure EmptyCallOption implements the interface
func TestSimpleParsing(t *testing.T) {
bigMsg := bytes.Repeat([]byte{'x'}, 1<<24)
for _, test := range []struct {
// input
p []byte
// outputs
err error
b []byte
pt payloadFormat
}{
{nil, io.EOF, nil, compressionNone},
{[]byte{0, 0, 0, 0, 0}, nil, nil, compressionNone},
{[]byte{0, 0, 0, 0, 1, 'a'}, nil, []byte{'a'}, compressionNone},
{[]byte{1, 0}, io.ErrUnexpectedEOF, nil, compressionNone},
{[]byte{0, 0, 0, 0, 10, 'a'}, io.ErrUnexpectedEOF, nil, compressionNone},
// Check that messages with length >= 2^24 are parsed.
{append([]byte{0, 1, 0, 0, 0}, bigMsg...), nil, bigMsg, compressionNone},
} {
buf := fullReader{bytes.NewReader(test.p)}
parser := &parser{r: buf}
pt, b, err := parser.recvMsg(math.MaxInt32)
if err != test.err || !bytes.Equal(b, test.b) || pt != test.pt {
t.Fatalf("parser{%v}.recvMsg(_) = %v, %v, %v\nwant %v, %v, %v", test.p, pt, b, err, test.pt, test.b, test.err)
}
}
}
func TestMultipleParsing(t *testing.T) {
	// Set up a byte stream consisting of 3 messages with their headers.
p := []byte{0, 0, 0, 0, 1, 'a', 0, 0, 0, 0, 2, 'b', 'c', 0, 0, 0, 0, 1, 'd'}
b := fullReader{bytes.NewReader(p)}
parser := &parser{r: b}
wantRecvs := []struct {
pt payloadFormat
data []byte
}{
{compressionNone, []byte("a")},
{compressionNone, []byte("bc")},
{compressionNone, []byte("d")},
}
for i, want := range wantRecvs {
pt, data, err := parser.recvMsg(math.MaxInt32)
if err != nil || pt != want.pt || !reflect.DeepEqual(data, want.data) {
t.Fatalf("after %d calls, parser{%v}.recvMsg(_) = %v, %v, %v\nwant %v, %v, <nil>",
i, p, pt, data, err, want.pt, want.data)
}
}
pt, data, err := parser.recvMsg(math.MaxInt32)
if err != io.EOF {
t.Fatalf("after %d recvMsgs calls, parser{%v}.recvMsg(_) = %v, %v, %v\nwant _, _, %v",
len(wantRecvs), p, pt, data, err, io.EOF)
}
}
func TestEncode(t *testing.T) {
for _, test := range []struct {
// input
msg proto.Message
// outputs
hdr []byte
data []byte
err error
}{
{nil, []byte{0, 0, 0, 0, 0}, []byte{}, nil},
} {
data, err := encode(encoding.GetCodec(protoenc.Name), test.msg)
if err != test.err || !bytes.Equal(data, test.data) {
t.Errorf("encode(_, %v) = %v, %v; want %v, %v", test.msg, data, err, test.data, test.err)
continue
}
if hdr, _ := msgHeader(data, nil); !bytes.Equal(hdr, test.hdr) {
t.Errorf("msgHeader(%v, false) = %v; want %v", data, hdr, test.hdr)
}
}
}
func TestCompress(t *testing.T) {
bestCompressor, err := NewGZIPCompressorWithLevel(gzip.BestCompression)
if err != nil {
t.Fatalf("Could not initialize gzip compressor with best compression.")
}
bestSpeedCompressor, err := NewGZIPCompressorWithLevel(gzip.BestSpeed)
if err != nil {
t.Fatalf("Could not initialize gzip compressor with best speed compression.")
}
	defaultCompressor, err := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
if err != nil {
t.Fatalf("Could not initialize gzip compressor with default compression.")
}
level5, err := NewGZIPCompressorWithLevel(5)
if err != nil {
t.Fatalf("Could not initialize gzip compressor with level 5 compression.")
}
for _, test := range []struct {
// input
data []byte
cp Compressor
dc Decompressor
// outputs
err error
}{
{make([]byte, 1024), NewGZIPCompressor(), NewGZIPDecompressor(), nil},
{make([]byte, 1024), bestCompressor, NewGZIPDecompressor(), nil},
{make([]byte, 1024), bestSpeedCompressor, NewGZIPDecompressor(), nil},
{make([]byte, 1024), defaultCompressor, NewGZIPDecompressor(), nil},
{make([]byte, 1024), level5, NewGZIPDecompressor(), nil},
} {
b := new(bytes.Buffer)
if err := test.cp.Do(b, test.data); err != test.err {
t.Fatalf("Compressor.Do(_, %v) = %v, want %v", test.data, err, test.err)
}
if b.Len() >= len(test.data) {
t.Fatalf("The compressor fails to compress data.")
}
if p, err := test.dc.Do(b); err != nil || !bytes.Equal(test.data, p) {
t.Fatalf("Decompressor.Do(%v) = %v, %v, want %v, <nil>", b, p, err, test.data)
}
}
}
func TestToRPCErr(t *testing.T) {
for _, test := range []struct {
// input
errIn error
// outputs
errOut error
}{
{transport.ErrConnClosing, status.Error(codes.Unavailable, transport.ErrConnClosing.Desc)},
{io.ErrUnexpectedEOF, status.Error(codes.Internal, io.ErrUnexpectedEOF.Error())},
} {
err := toRPCErr(test.errIn)
if _, ok := status.FromError(err); !ok {
t.Fatalf("toRPCErr{%v} returned type %T, want %T", test.errIn, err, status.Error(codes.Unknown, ""))
}
if !reflect.DeepEqual(err, test.errOut) {
t.Fatalf("toRPCErr{%v} = %v \nwant %v", test.errIn, err, test.errOut)
}
}
}
func TestParseDialTarget(t *testing.T) {
for _, test := range []struct {
target, wantNet, wantAddr string
}{
{"unix:etcd:0", "unix", "etcd:0"},
{"unix:///tmp/unix-3", "unix", "/tmp/unix-3"},
{"unix://domain", "unix", "domain"},
{"unix://etcd:0", "unix", "etcd:0"},
{"unix:///etcd:0", "unix", "/etcd:0"},
{"passthrough://unix://domain", "tcp", "passthrough://unix://domain"},
{"https://google.com:443", "tcp", "https://google.com:443"},
{"dns:///google.com", "tcp", "dns:///google.com"},
{"/unix/socket/address", "tcp", "/unix/socket/address"},
} {
gotNet, gotAddr := parseDialTarget(test.target)
if gotNet != test.wantNet || gotAddr != test.wantAddr {
t.Errorf("parseDialTarget(%q) = %s, %s want %s, %s", test.target, gotNet, gotAddr, test.wantNet, test.wantAddr)
}
}
}
// bmEncode benchmarks encoding a Protocol Buffer message containing mSize
// bytes.
func bmEncode(b *testing.B, mSize int) {
cdc := encoding.GetCodec(protoenc.Name)
msg := &perfpb.Buffer{Body: make([]byte, mSize)}
encodeData, _ := encode(cdc, msg)
encodedSz := int64(len(encodeData))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
encode(cdc, msg)
}
b.SetBytes(encodedSz)
}
func BenchmarkEncode1B(b *testing.B) {
bmEncode(b, 1)
}
func BenchmarkEncode1KiB(b *testing.B) {
bmEncode(b, 1024)
}
func BenchmarkEncode8KiB(b *testing.B) {
bmEncode(b, 8*1024)
}
func BenchmarkEncode64KiB(b *testing.B) {
bmEncode(b, 64*1024)
}
func BenchmarkEncode512KiB(b *testing.B) {
bmEncode(b, 512*1024)
}
func BenchmarkEncode1MiB(b *testing.B) {
bmEncode(b, 1024*1024)
}
// bmCompressor benchmarks a compressor of a Protocol Buffer message containing
// mSize bytes.
func bmCompressor(b *testing.B, mSize int, cp Compressor) {
payload := make([]byte, mSize)
cBuf := bytes.NewBuffer(make([]byte, mSize))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
cp.Do(cBuf, payload)
cBuf.Reset()
}
}
func BenchmarkGZIPCompressor1B(b *testing.B) {
bmCompressor(b, 1, NewGZIPCompressor())
}
func BenchmarkGZIPCompressor1KiB(b *testing.B) {
bmCompressor(b, 1024, NewGZIPCompressor())
}
func BenchmarkGZIPCompressor8KiB(b *testing.B) {
bmCompressor(b, 8*1024, NewGZIPCompressor())
}
func BenchmarkGZIPCompressor64KiB(b *testing.B) {
bmCompressor(b, 64*1024, NewGZIPCompressor())
}
func | (b *testing.B) {
bmCompressor(b, 512*1024, NewGZIPCompressor())
}
func BenchmarkGZIPCompressor1MiB(b *testing.B) {
bmCompressor(b, 1024*1024, NewGZIPCompressor())
}
| BenchmarkGZIPCompressor512KiB |
definition.go | package generated |
//go:generate go-bindata -pkg generated -o ./bindata.go files/ |
|
Footer.js | import React from "react";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faFacebook, faGithub, faLinkedin } from "@fortawesome/free-brands-svg-icons";
import "./Footer.scss";
const Footer = () => {
return (
<div className="footer d-flex flex-column justify-content-center align-items-center">
<div className="get_In_touch">Get in touch</div>
<div className="contact d-flex flex-row justify-content-center align-items-center">
<div className="fb">
<a href="https://www.facebook.com/profile.php?id=100007399969441">
<FontAwesomeIcon icon={faFacebook} />
</a>
</div>
<div className="lin">
<a href="https://www.linkedin.com/in/kananmikayilov/">
<FontAwesomeIcon icon={faLinkedin} />
</a>
</div>
<div className="git"> | </div>
</div>
</div>
);
};
export default Footer; | <a href="https://github.com/kmikayilov">
<FontAwesomeIcon icon={faGithub} />
</a> |
kmall_player.test.py | import unittest
import pandas as pd
import os
from kmall_player import *
class KmallPlayerTest(unittest.TestCase):
def setUp(self) -> None:
file_name = "data/MRZ_LARGE_SIZE.kmall"
self.f = open(file_name, "rb")
self.file_size = os.fstat(self.f.fileno()).st_size
self.player = KmallPlayer()
k = KMALL.kmall(file_name)
k.index_file()
# Panda DataFrame type | self.f.close()
def test_packet(self):
self.assertEqual(self.index.shape[0], 1)
self.assertTrue(self.mrz_pack['MessageSize'] > self.player.MAX_DATAGRAM_SIZE)
self.assertTrue('#MRZ' in self.mrz_pack['MessageType'])
def test_raw_header_reading(self):
header_dict = self.player.read_header_raw(self.f.read(self.player.HEADER_STRUCT_SIZE))
# Our test file contains only one packet
self.assertEqual(header_dict['numBytesDgm'], self.file_size)
self.assertTrue('#MRZ' in str(header_dict['dgmType']))
    def test_partitioning(self):
msgs = self.player.partition_msg(self.f.read(self.mrz_pack['MessageSize']))
# Expecting 2 partitions
self.assertEqual(len(msgs), 2)
# Let's check the newly generated header content for our splits :
# First split should be of maximum size
self.assertEqual(self.player.read_header_raw(msgs[0])['numBytesDgm'], self.player.MAX_DATAGRAM_SIZE)
# Second and last split should take up the rest
last_packet_content_size = (self.file_size - self.player.HEADER_AND_PART_SIZE - 4)\
% self.player.MAX_DATA_SIZE
last_packet_size = last_packet_content_size + self.player.HEADER_AND_PART_SIZE + 4
self.assertEqual(self.player.read_header_raw(msgs[1])['numBytesDgm'], last_packet_size)
# Run tests
unittest.main() | self.index: pd.DataFrame = k.Index
self.mrz_pack = self.index.iloc[0]
def tearDown(self) -> None: |
Matching.ts | import { GraphAlgorithm, Step } from "../../GraphAlgorithm";
import { AdjacencyList, hasMultipleEdges, hasSelfLoop, Edge, Graph, Node, NodeEdgeList } from "../../GraphStructure";
import { Queue } from "../../utils/DataStructure";
import { EdgeRenderHint, NodeRenderHint } from "@/pages/graph-editor/ui/CanvasGraphRenderer";
class | extends GraphAlgorithm {
// constructor() {
// super("Gabow", "Gabow algorithm for Maximum Matching in General Graph");
// }
id() {
return "mm_gabow";
}
nodeRenderPatcher(): Partial<NodeRenderHint> {
return {
borderColor: node => (node.datum.label === 0 ? "#333333" : node.datum.label === 1 ? "#77ff77" : "#7777ff"),
fillingColor: node => (node.datum.label === 0 ? "#cccccc" : "#ffffff"),
floatingData: node => node.id.toString()
};
}
edgeRenderPatcher(): Partial<EdgeRenderHint> {
return {
thickness: edge => (edge.datum.matched || edge.datum.marked ? 5 : 3),
color: edge => {
if (edge.datum.matched) return "#333333";
if (edge.datum.marked) return "#ff3333";
return "#cccccc";
}
};
}
private n: number = 0;
private nodes: Node[] = [];
private edges: Edge[] = [];
private adjlist: AdjacencyList;
private matched: number = 0;
private mark: number[] = [];
private match: number[] = [];
private label: number[] = [];
private path: number[][] = [];
private first: number[] = [];
private visit: boolean[] = [];
private que: Queue<number> = new Queue<number>();
clear(buf: any[], val: any = -1, cnt: number = this.n) {
for (let _ = 0; _ < cnt; ++_) buf[_] = val;
}
reverse(buf: any[], l: number = 0, r: number = buf.length) {
for (let i = l, j = r - 1; i < j; ++i, --j) {
let tmp = buf[i];
(buf[i] = buf[j]), (buf[j] = tmp);
}
}
gen1(p: number, x: number, z: number) {
this.path[z] = [-1];
this.path[z].push(z);
this.path[z].push(this.match[z]);
for (let i = 1; ; ++i) {
this.path[z].push(this.path[x][i]);
if (this.path[x][i] === p) break;
}
}
gen2(p: number, y: number, z: number, t: number) {
this.path[t] = [-1];
for (let i = 1; ; ++i) {
this.path[t].push(this.path[y][i]);
if (this.path[y][i] === t) break;
}
this.reverse(this.path[t], 1);
for (let i = 1; ; ++i) {
this.path[t].push(this.path[z][i]);
if (this.path[z][i] === p) break;
}
}
is_matched(e: Edge): boolean {
return this.match[e.source] === e.target;
}
is_marked(e: Edge): boolean {
return this.mark[e.source] === e.target;
}
report(): NodeEdgeList {
this.nodes.forEach((node, i) =>
Object.assign(node.datum, {
match: this.match[i],
label: this.label[i],
first: this.first[i]
})
);
this.edges.forEach(edge =>
Object.assign(edge.datum, {
marked: this.is_marked(edge),
matched: this.is_matched(edge)
})
);
this.clear(this.mark);
return new NodeEdgeList(this.nodes, this.edges);
}
getStep(lineId: number): Step {
return {
graph: this.report(),
codePosition: new Map<string, number>([["pseudo", lineId]]),
extraData: [
["$matched$", "number", this.matched],
["$first$", "array", this.first]
]
};
}
*rematch(p: number, x: number, y: number) {
this.path[x][0] = y;
    // path[x] is the augmenting path to be flipped
for (let i = 0; ; ++i) {
this.mark[this.path[x][i]] = this.path[x][i ^ 1];
if (this.path[x][i] === p) break;
}
yield this.getStep(25); // found augmenting path
for (let i = 0; ; ++i) {
this.match[this.path[x][i]] = this.path[x][i ^ 1];
if (this.path[x][i] === p) break;
}
yield this.getStep(27); // augmented
}
next(pos: number): number {
return this.first[this.path[this.match[pos]][3]];
}
*check(pos: number) {
this.clear(this.label, 0), this.clear(this.first);
this.clear(this.path, []);
this.que.clear();
this.que.push(pos);
(this.path[pos] = [-1]), this.path[pos].push(pos);
this.label[pos] = 2;
while (!this.que.empty()) {
let x = this.que.front();
this.que.pop();
for (let e of this.adjlist.adjacentEdges(x)) {
let y = e.target;
if (this.label[y] === 0) {
if (this.match[y] === -1) {
// find an augmenting path
yield* this.rematch(pos, x, y);
return true;
}
let z = this.match[y];
(this.label[y] = 1), (this.label[z] = 2);
this.first[z] = y;
this.que.push(z);
this.gen1(pos, x, z);
} else if (this.label[y] === 2) {
if (this.first[x] === this.first[y]) continue;
let t = -1;
this.clear(this.visit, false);
for (let j = this.first[x]; j !== -1; j = this.next(j)) this.visit[j] = true;
for (let j = this.first[y]; j !== -1; j = this.next(j)) {
if (this.visit[j]) {
t = j;
break;
}
}
for (let j = this.first[x]; j !== t; j = this.next(j)) {
this.gen2(pos, x, y, j);
this.label[j] = 2;
this.que.push(j);
this.first[j] = t;
}
for (let j = this.first[y]; j !== t; j = this.next(j)) {
this.gen2(pos, y, x, j);
this.label[j] = 2;
this.que.push(j);
this.first[j] = t;
}
for (let j = 0; j < this.n; ++j)
if (this.label[j] === 2 && this.label[this.first[j]] === 2) this.first[j] = t;
}
}
}
return false;
}
*run(graph: Graph): Generator<Step> {
if (hasSelfLoop(graph)) throw new Error("algo Gabow : self loop");
this.adjlist = AdjacencyList.from(graph, false);
    if (hasMultipleEdges(this.adjlist)) throw new Error("algo Gabow : multiple edges");
(this.edges = graph.edges()), (this.nodes = graph.nodes()), (this.n = this.nodes.length);
this.matched = 0;
yield this.getStep(23); // inited
this.clear(this.match), this.clear(this.mark);
for (let i = 0; i < this.n; ++i) if (this.match[i] === -1 && (yield* this.check(i))) ++this.matched;
//console.log(`algo Gabow : {matched: ${res}}`);
yield this.getStep(28); // return
return { matched: this.matched };
}
}
export { Gabow };
/*
Reference:
void PROC_GEN1(int u,int x,int z){
    path[z][1] = z;
    path[z][2] = mate[z];
    for(int i = 1;;++i){
        path[z][i + 2] = path[x][i];
        if(path[x][i] == u) return;
    }
}// the first node of path(z) is z, the second is mate(z); the rest is copied from path(x)
void PROC_GEN2(int u,int y,int z,int p){
    int i,j;
    for(i = 1;;++i){
        path[p][i] = path[y][i];
        if(path[y][i] == p) break;
    }
    for(j = 1;j < i + 1 - j;++j) swap(path[p][j],path[p][i + 1 - j]);
    // first copy the y~p segment of the path, then reverse it
    for(j = 1;;++j){
        path[p][j + i] = path[z][j];
        if(path[z][j] == u) return;
    }
}// the first half of path(p) is the y~p segment of path(y) reversed; the second half is path(z)
void PROC_REMATCH(int u,int x,int y){
    path[x][0] = y;// path(x), indexed from 0, now holds an augmenting path from y to u
    for(int i = 0;;++i){
        mate[path[x][i]] = path[x][i ^ 1];
        // this statement pairs up adjacent nodes along the augmenting path:
        // path[x][0] with path[x][1], path[x][2] with path[x][3], ...
        if(path[x][i] == u) return;
    }
}
bool PROC_FIND(int u){
    int i,j,x,y,z,join;
    for(i = 1;i <= n;++i) label[i] = path[i][0] = path[i][1] = path[i][2] = path[i][3] = first[i] = 0;
    h = t = 0;// initialization
    queue[++t] = u;path[u][1] = u;label[u] = 2;
    while(h < t){// search for augmenting paths via BFS
        x = queue[++h];
        for(i = fir[x];i;i = e[i].nxt){
            y = e[i].to;// visit edge (x,y)
            if(!label[y]){
                if(!mate[y]){// case 1: y is unsaturated
                    PROC_REMATCH(u,x,y);// an augmenting path is found, augment immediately
                    return 1;
                }
                // case 2: y is a saturated node that has not been explored yet
                // temporarily label y as an odd node and its mate z as an even node
                label[y] = 1;
                z = mate[y];
                queue[++t] = z;label[z] = 2;first[z] = y;
                PROC_GEN1(u,x,z);// generate the even alternating path from z to the root
            }
            else if(label[y] == 2){// case 3: y is an explored even node, so a blossom has been found
                if(first[x] == first[y]) continue;// in this case the blossom contains no odd node and can be skipped
                join = 0;
                for(j = first[x];j;j = first[path[mate[j]][3]]) visit[j] = 1;
                for(j = first[y];j;j = first[path[mate[j]][3]]) if(visit[j]){
                    join = j;break;
                }
                for(j = first[x];j;j = first[path[mate[j]][3]]) visit[j] = 0;
                // the code above finds join:
                // starting from x, walk the even alternating path and mark each successive odd node;
                // then walk from y the same way; the first marked node encountered is join
                // turn the odd nodes on the x~join segment into even nodes and generate their even alternating paths
                for(j = first[x];j != join;j = first[path[mate[j]][3]]){
                    PROC_GEN2(u,x,y,j);
                    label[j] = 2;queue[++t] = j;first[j] = join;
                }
                // turn the odd nodes on the y~join segment into even nodes and generate their even alternating paths
                for(j = first[y];j != join;j = first[path[mate[j]][3]]){
                    PROC_GEN2(u,y,x,j);
                    label[j] = 2;queue[++t] = j;first[j] = join;
                }
                // update first: if an even node's first is now an even node, change it to join
                for(j = 1;j <= n;++j) if(label[j] == 2 && label[first[j]] == 2) first[j] = join;
            }
        }
    }
    return 0;
}
*/
| Gabow |
FeedbackBlock.tsx | import React from 'react';
import { Grid, Typography, makeStyles } from '@material-ui/core';
import { Favorite as FavoriteIcon } from '@material-ui/icons';
import UserAvatar from 'app/components/Avatar/Avatar';
import { mockTutor } from 'types/Tutor';
import { getUserName } from 'helpers';
import RatingBlock from './RatingBlock';
function | (): JSX.Element {
const classes = useStyles();
const tutor = mockTutor;
const name = getUserName(tutor);
return (
<Grid container className={classes.container}>
<Grid item className={classes.avatar}>
<UserAvatar
size={7}
user={tutor}
icon={<FavoriteIcon className={classes.icon} />}
/>
</Grid>
<Grid container direction="column" className={classes.comment}>
<Typography component="p" className={classes.name}>
{name}
</Typography>
<Typography component="p">
I enjoyed my first session very much. I found Mr Rudi is kind and
sincere. Hopefully I can continue learning from him.
</Typography>
<Grid item container justify="space-around" className={classes.rating}>
<RatingBlock />
</Grid>
</Grid>
</Grid>
);
}
export default FeedbackBlock;
const useStyles = makeStyles(theme => ({
container: {
marginTop: theme.spacing(2),
flexWrap: 'nowrap',
},
avatar: {
marginRight: theme.spacing(2),
},
comment: {
backgroundColor: theme.palette.background.white,
padding: 12,
},
name: {
fontWeight: 'bold',
marginBottom: theme.spacing(1),
},
rating: {
textTransform: 'uppercase',
marginTop: theme.spacing(2),
},
icon: {
color: 'red',
},
}));
| FeedbackBlock |
shared.rs | // adapted from https://github.com/bodil/im-rs/blob/10.2.0/src/shared.rs
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//! Automatic `Arc` wrapping.
use alloc::sync::Arc;
/// # Automatic `Arc` wrapping
///
/// The `Shared` trait provides automatic wrapping for things which
/// take [`Arc`][alloc::sync::Arc]s, meaning that anything which takes
/// an argument of type `Shared<A>` will accept either an `A` or an
/// `Arc<A>`.
///
/// Because everything stored in `im`'s persistent data structures is
/// wrapped in [`Arc`][alloc::sync::Arc]s, `Shared` makes you have to
/// worry less about whether what you've got is an `A` or an `Arc<A>`
/// or a reference to such - the compiler will just figure it out for
/// you, which is as it should be.
///
/// [alloc::sync::Arc]: https://doc.rust-lang.org/alloc/sync/struct.Arc.html
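///
/// A minimal usage sketch (illustrative only; `takes_shared` is a hypothetical
/// caller, not part of this crate):
///
/// ```ignore
/// fn takes_shared<A>(value: impl Shared<A>) -> Arc<A> {
///     value.shared()
/// }
///
/// let from_value: Arc<u32> = takes_shared(1u32);
/// let from_arc: Arc<u32> = takes_shared(Arc::new(2u32));
/// ```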
pub trait Shared<A> {
/// Get a new Arc pointer for this value
fn shared(self) -> Arc<A>;
}
impl<A> Shared<A> for A {
fn shared(self) -> Arc<A> { Arc::new(self) }
} |
impl<'a, A> Shared<A> for &'a A
where A: Clone
{
fn shared(self) -> Arc<A> { Arc::new(self.clone()) }
}
impl<A> Shared<A> for Arc<A> {
fn shared(self) -> Arc<A> { self }
}
impl<'a, A> Shared<A> for &'a Arc<A> {
fn shared(self) -> Arc<A> { self.clone() }
} | |
run_align.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Modifications copyright (C) 2020 Zi-Yi Dou
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
import itertools
import os
import numpy as np
import torch
from tqdm import trange
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, SequentialSampler
import modeling
from configuration_bert import BertConfig
from modeling import BertForMaskedLM
from tokenization_bert import BertTokenizer
from tokenization_utils import PreTrainedTokenizer
from modeling_utils import PreTrainedModel
def | (args):
if args.seed >= 0:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
class LineByLineTextDataset(Dataset):
def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path):
assert os.path.isfile(file_path)
print('Loading the dataset...')
self.examples = []
with open(file_path, encoding="utf-8") as f:
for idx, line in enumerate(f.readlines()):
if len(line) == 0 or line.isspace() or not len(line.split(' ||| ')) == 2:
raise ValueError(f'Line {idx+1} is not in the correct format!')
src, tgt = line.split(' ||| ')
if src.rstrip() == '' or tgt.rstrip() == '':
raise ValueError(f'Line {idx+1} is not in the correct format!')
sent_src, sent_tgt = src.strip().split(), tgt.strip().split()
token_src, token_tgt = [tokenizer.tokenize(word) for word in sent_src], [tokenizer.tokenize(word) for word in sent_tgt]
wid_src, wid_tgt = [tokenizer.convert_tokens_to_ids(x) for x in token_src], [tokenizer.convert_tokens_to_ids(x) for x in token_tgt]
ids_src, ids_tgt = tokenizer.prepare_for_model(list(itertools.chain(*wid_src)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids'], tokenizer.prepare_for_model(list(itertools.chain(*wid_tgt)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids']
bpe2word_map_src = []
for i, word_list in enumerate(token_src):
bpe2word_map_src += [i for x in word_list]
bpe2word_map_tgt = []
for i, word_list in enumerate(token_tgt):
bpe2word_map_tgt += [i for x in word_list]
self.examples.append( (ids_src[0], ids_tgt[0], bpe2word_map_src, bpe2word_map_tgt) )
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return self.examples[i]
def word_align(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, output_word_alignments = False):
def collate(examples):
ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = zip(*examples)
ids_src = pad_sequence(ids_src, batch_first=True, padding_value=tokenizer.pad_token_id)
ids_tgt = pad_sequence(ids_tgt, batch_first=True, padding_value=tokenizer.pad_token_id)
return ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt
dataset = LineByLineTextDataset(tokenizer, args, file_path=args.data_file)
sampler = SequentialSampler(dataset)
dataloader = DataLoader(
dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate
)
model.to(args.device)
model.eval()
    tqdm_iterator = trange(len(dataset), desc="Extracting")
with open(args.output_file, 'w') as writer:
for batch in dataloader:
with torch.no_grad():
ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = batch
word_aligns_list = model.get_aligned_word(ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt, args.device, 0, 0, align_layer=args.align_layer, extraction=args.extraction, softmax_threshold=args.softmax_threshold, test=True)
for word_aligns in word_aligns_list:
output_str = []
for word_align in word_aligns:
output_str.append(f'{word_align[0]}-{word_align[1]}')
writer.write(' '.join(output_str)+'\n')
tqdm_iterator.update(len(ids_src))
if output_word_alignments:
with open(args.output_file, 'r') as fh:
outputf = (fh.read()).split("\n")
with open(args.data_file, 'r') as fh:
datalines = (fh.read()).split("\n")
with open(args.output_file+".outtxt", 'w') as fwriter:
for indices, line in zip(outputf, datalines):
srcline, tgtline = line.split(' ||| ')
indices = indices.split()
srcwrds = srcline.split()
tgtwrds = tgtline.split()
output_wrds = []
for wrd in indices:
srcix,tgtix = wrd.split("-")
srcix, tgtix = int(srcix), int(tgtix)
output_wrds.append(f"{srcwrds[srcix]}-{tgtwrds[tgtix]}")
fwriter.write(' '.join(output_wrds)+'\n')
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_file", default=None, type=str, required=True, help="The input data file (a text file)."
)
parser.add_argument(
"--output_file",
type=str,
required=True,
        help="The output file where the extracted word alignments will be written.",
)
parser.add_argument("--align_layer", type=int, default=8, help="layer for alignment extraction")
parser.add_argument(
"--extraction", default='softmax', type=str, help='softmax or entmax15'
)
parser.add_argument(
"--softmax_threshold", type=float, default=0.001
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
help="The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.",
)
parser.add_argument(
"--config_name",
default=None,
type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.",
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path. If both are None, initialize a new tokenizer.",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument(
"--cache_dir",
default='cache_dir',
type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.device = device
# Set seed
set_seed(args)
config_class, model_class, tokenizer_class = BertConfig, BertForMaskedLM, BertTokenizer
if args.config_name:
config = config_class.from_pretrained(args.config_name, cache_dir=args.cache_dir)
elif args.model_name_or_path:
config = config_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
else:
config = config_class()
if args.tokenizer_name:
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)
elif args.model_name_or_path:
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
else:
raise ValueError(
"You are instantiating a new {} tokenizer. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name".format(tokenizer_class.__name__)
)
modeling.PAD_ID = tokenizer.pad_token_id
modeling.CLS_ID = tokenizer.cls_token_id
modeling.SEP_ID = tokenizer.sep_token_id
if args.model_name_or_path:
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
else:
model = model_class(config=config)
word_align(args, model, tokenizer)
if __name__ == "__main__":
main()
| set_seed |
getLoggingUnifiedAgentConfiguration.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package oci
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// This data source provides details about a specific Unified Agent Configuration resource in Oracle Cloud Infrastructure Logging service.
//
// Get the unified agent configuration for an ID.
//
// ## Example Usage
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-oci/sdk/go/oci"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := oci.GetLoggingUnifiedAgentConfiguration(ctx, &GetLoggingUnifiedAgentConfigurationArgs{
// UnifiedAgentConfigurationId: oci_logging_unified_agent_configuration.Test_unified_agent_configuration.Id,
// }, nil)
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
func | (ctx *pulumi.Context, args *LookupLoggingUnifiedAgentConfigurationArgs, opts ...pulumi.InvokeOption) (*LookupLoggingUnifiedAgentConfigurationResult, error) {
var rv LookupLoggingUnifiedAgentConfigurationResult
err := ctx.Invoke("oci:index/getLoggingUnifiedAgentConfiguration:GetLoggingUnifiedAgentConfiguration", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
}
// A collection of arguments for invoking GetLoggingUnifiedAgentConfiguration.
type LookupLoggingUnifiedAgentConfigurationArgs struct {
// The OCID of the Unified Agent configuration.
UnifiedAgentConfigurationId string `pulumi:"unifiedAgentConfigurationId"`
}
// A collection of values returned by GetLoggingUnifiedAgentConfiguration.
type LookupLoggingUnifiedAgentConfigurationResult struct {
// The OCID of the compartment that the resource belongs to.
CompartmentId string `pulumi:"compartmentId"`
// State of unified agent service configuration.
ConfigurationState string `pulumi:"configurationState"`
// Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
DefinedTags map[string]interface{} `pulumi:"definedTags"`
// Description for this resource.
Description string `pulumi:"description"`
// The user-friendly display name. This must be unique within the enclosing resource, and it's changeable. Avoid entering confidential information.
DisplayName string `pulumi:"displayName"`
// Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
FreeformTags map[string]interface{} `pulumi:"freeformTags"`
// Groups using the configuration.
GroupAssociation GetLoggingUnifiedAgentConfigurationGroupAssociation `pulumi:"groupAssociation"`
// The OCID of the resource.
Id string `pulumi:"id"`
// Whether or not this resource is currently enabled.
IsEnabled bool `pulumi:"isEnabled"`
// Top level Unified Agent service configuration object.
ServiceConfiguration GetLoggingUnifiedAgentConfigurationServiceConfiguration `pulumi:"serviceConfiguration"`
// The pipeline state.
State string `pulumi:"state"`
// Time the resource was created.
TimeCreated string `pulumi:"timeCreated"`
// Time the resource was last modified.
TimeLastModified string `pulumi:"timeLastModified"`
UnifiedAgentConfigurationId string `pulumi:"unifiedAgentConfigurationId"`
}
| LookupLoggingUnifiedAgentConfiguration |
check_button.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
use crate::Accessible;
use crate::AccessibleRole;
use crate::Actionable;
use crate::Align;
use crate::Buildable;
use crate::ConstraintTarget;
use crate::LayoutManager;
use crate::Overflow;
use crate::Widget;
use glib::object::Cast;
use glib::object::IsA;
#[cfg(any(feature = "v4_2", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
use glib::object::ObjectExt;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::StaticType;
use glib::ToValue;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::wrapper! {
pub struct CheckButton(Object<ffi::GtkCheckButton, ffi::GtkCheckButtonClass>) @extends Widget, @implements Accessible, Buildable, ConstraintTarget, Actionable;
match fn {
type_ => || ffi::gtk_check_button_get_type(),
}
}
impl CheckButton {
#[doc(alias = "gtk_check_button_new")]
pub fn new() -> CheckButton {
assert_initialized_main_thread!();
unsafe { Widget::from_glib_none(ffi::gtk_check_button_new()).unsafe_cast() }
}
#[doc(alias = "gtk_check_button_new_with_label")]
#[doc(alias = "new_with_label")]
pub fn with_label(label: &str) -> CheckButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new_with_label(label.to_glib_none().0))
.unsafe_cast()
}
}
#[doc(alias = "gtk_check_button_new_with_mnemonic")]
#[doc(alias = "new_with_mnemonic")]
pub fn with_mnemonic(label: &str) -> CheckButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new_with_mnemonic(
label.to_glib_none().0,
))
.unsafe_cast()
}
}
// rustdoc-stripper-ignore-next
/// Creates a new builder-style object to construct a [`CheckButton`]
/// This method returns an instance of [`CheckButtonBuilder`] which can be used to create a [`CheckButton`].
pub fn builder() -> CheckButtonBuilder {
CheckButtonBuilder::default()
}
}
impl Default for CheckButton {
fn default() -> Self {
Self::new()
}
}
#[derive(Clone, Default)]
// rustdoc-stripper-ignore-next
/// A builder for generating a [`CheckButton`].
pub struct CheckButtonBuilder {
active: Option<bool>,
group: Option<CheckButton>,
inconsistent: Option<bool>,
label: Option<String>,
use_underline: Option<bool>,
can_focus: Option<bool>,
can_target: Option<bool>,
css_classes: Option<Vec<String>>,
css_name: Option<String>,
cursor: Option<gdk::Cursor>,
focus_on_click: Option<bool>,
focusable: Option<bool>,
halign: Option<Align>,
has_tooltip: Option<bool>,
height_request: Option<i32>,
hexpand: Option<bool>,
hexpand_set: Option<bool>,
layout_manager: Option<LayoutManager>,
margin_bottom: Option<i32>,
margin_end: Option<i32>,
margin_start: Option<i32>,
margin_top: Option<i32>,
name: Option<String>,
opacity: Option<f64>,
overflow: Option<Overflow>,
receives_default: Option<bool>,
sensitive: Option<bool>,
tooltip_markup: Option<String>,
tooltip_text: Option<String>,
valign: Option<Align>,
vexpand: Option<bool>,
vexpand_set: Option<bool>,
visible: Option<bool>,
width_request: Option<i32>,
accessible_role: Option<AccessibleRole>,
action_name: Option<String>,
action_target: Option<glib::Variant>,
}
impl CheckButtonBuilder {
// rustdoc-stripper-ignore-next
/// Create a new [`CheckButtonBuilder`].
pub fn new() -> Self {
Self::default()
}
// rustdoc-stripper-ignore-next
/// Build the [`CheckButton`].
pub fn build(self) -> CheckButton {
let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
if let Some(ref active) = self.active {
properties.push(("active", active));
}
if let Some(ref group) = self.group {
properties.push(("group", group));
}
if let Some(ref inconsistent) = self.inconsistent {
properties.push(("inconsistent", inconsistent));
}
if let Some(ref label) = self.label {
properties.push(("label", label));
}
if let Some(ref use_underline) = self.use_underline {
properties.push(("use-underline", use_underline));
}
if let Some(ref can_focus) = self.can_focus {
properties.push(("can-focus", can_focus));
}
if let Some(ref can_target) = self.can_target {
properties.push(("can-target", can_target));
}
if let Some(ref css_classes) = self.css_classes {
properties.push(("css-classes", css_classes));
}
if let Some(ref css_name) = self.css_name {
properties.push(("css-name", css_name));
}
if let Some(ref cursor) = self.cursor {
properties.push(("cursor", cursor));
}
if let Some(ref focus_on_click) = self.focus_on_click {
properties.push(("focus-on-click", focus_on_click));
}
if let Some(ref focusable) = self.focusable {
properties.push(("focusable", focusable));
}
if let Some(ref halign) = self.halign {
properties.push(("halign", halign));
}
if let Some(ref has_tooltip) = self.has_tooltip {
properties.push(("has-tooltip", has_tooltip));
}
if let Some(ref height_request) = self.height_request {
properties.push(("height-request", height_request));
}
if let Some(ref hexpand) = self.hexpand {
properties.push(("hexpand", hexpand));
}
if let Some(ref hexpand_set) = self.hexpand_set {
properties.push(("hexpand-set", hexpand_set));
}
if let Some(ref layout_manager) = self.layout_manager {
properties.push(("layout-manager", layout_manager));
}
if let Some(ref margin_bottom) = self.margin_bottom {
properties.push(("margin-bottom", margin_bottom));
}
if let Some(ref margin_end) = self.margin_end {
properties.push(("margin-end", margin_end));
}
if let Some(ref margin_start) = self.margin_start {
properties.push(("margin-start", margin_start));
}
if let Some(ref margin_top) = self.margin_top {
properties.push(("margin-top", margin_top));
}
if let Some(ref name) = self.name {
properties.push(("name", name));
}
if let Some(ref opacity) = self.opacity {
properties.push(("opacity", opacity));
}
if let Some(ref overflow) = self.overflow {
properties.push(("overflow", overflow));
}
if let Some(ref receives_default) = self.receives_default {
properties.push(("receives-default", receives_default));
}
if let Some(ref sensitive) = self.sensitive {
properties.push(("sensitive", sensitive));
}
if let Some(ref tooltip_markup) = self.tooltip_markup {
properties.push(("tooltip-markup", tooltip_markup));
}
if let Some(ref tooltip_text) = self.tooltip_text {
properties.push(("tooltip-text", tooltip_text));
}
if let Some(ref valign) = self.valign {
properties.push(("valign", valign));
}
if let Some(ref vexpand) = self.vexpand {
properties.push(("vexpand", vexpand));
}
if let Some(ref vexpand_set) = self.vexpand_set {
properties.push(("vexpand-set", vexpand_set));
}
if let Some(ref visible) = self.visible {
properties.push(("visible", visible));
}
if let Some(ref width_request) = self.width_request {
properties.push(("width-request", width_request));
}
if let Some(ref accessible_role) = self.accessible_role {
properties.push(("accessible-role", accessible_role));
}
if let Some(ref action_name) = self.action_name {
properties.push(("action-name", action_name));
}
if let Some(ref action_target) = self.action_target {
properties.push(("action-target", action_target));
}
glib::Object::new::<CheckButton>(&properties)
.expect("Failed to create an instance of CheckButton")
}
pub fn active(mut self, active: bool) -> Self {
self.active = Some(active);
self
}
pub fn group<P: IsA<CheckButton>>(mut self, group: &P) -> Self {
self.group = Some(group.clone().upcast());
self
}
pub fn inconsistent(mut self, inconsistent: bool) -> Self {
self.inconsistent = Some(inconsistent);
self | self.label = Some(label.to_string());
self
}
pub fn use_underline(mut self, use_underline: bool) -> Self {
self.use_underline = Some(use_underline);
self
}
pub fn can_focus(mut self, can_focus: bool) -> Self {
self.can_focus = Some(can_focus);
self
}
pub fn can_target(mut self, can_target: bool) -> Self {
self.can_target = Some(can_target);
self
}
pub fn css_classes(mut self, css_classes: Vec<String>) -> Self {
self.css_classes = Some(css_classes);
self
}
pub fn css_name(mut self, css_name: &str) -> Self {
self.css_name = Some(css_name.to_string());
self
}
pub fn cursor(mut self, cursor: &gdk::Cursor) -> Self {
self.cursor = Some(cursor.clone());
self
}
pub fn focus_on_click(mut self, focus_on_click: bool) -> Self {
self.focus_on_click = Some(focus_on_click);
self
}
pub fn focusable(mut self, focusable: bool) -> Self {
self.focusable = Some(focusable);
self
}
pub fn halign(mut self, halign: Align) -> Self {
self.halign = Some(halign);
self
}
pub fn has_tooltip(mut self, has_tooltip: bool) -> Self {
self.has_tooltip = Some(has_tooltip);
self
}
pub fn height_request(mut self, height_request: i32) -> Self {
self.height_request = Some(height_request);
self
}
pub fn hexpand(mut self, hexpand: bool) -> Self {
self.hexpand = Some(hexpand);
self
}
pub fn hexpand_set(mut self, hexpand_set: bool) -> Self {
self.hexpand_set = Some(hexpand_set);
self
}
pub fn layout_manager<P: IsA<LayoutManager>>(mut self, layout_manager: &P) -> Self {
self.layout_manager = Some(layout_manager.clone().upcast());
self
}
pub fn margin_bottom(mut self, margin_bottom: i32) -> Self {
self.margin_bottom = Some(margin_bottom);
self
}
pub fn margin_end(mut self, margin_end: i32) -> Self {
self.margin_end = Some(margin_end);
self
}
pub fn margin_start(mut self, margin_start: i32) -> Self {
self.margin_start = Some(margin_start);
self
}
pub fn margin_top(mut self, margin_top: i32) -> Self {
self.margin_top = Some(margin_top);
self
}
pub fn name(mut self, name: &str) -> Self {
self.name = Some(name.to_string());
self
}
pub fn opacity(mut self, opacity: f64) -> Self {
self.opacity = Some(opacity);
self
}
pub fn overflow(mut self, overflow: Overflow) -> Self {
self.overflow = Some(overflow);
self
}
pub fn receives_default(mut self, receives_default: bool) -> Self {
self.receives_default = Some(receives_default);
self
}
pub fn sensitive(mut self, sensitive: bool) -> Self {
self.sensitive = Some(sensitive);
self
}
pub fn tooltip_markup(mut self, tooltip_markup: &str) -> Self {
self.tooltip_markup = Some(tooltip_markup.to_string());
self
}
pub fn tooltip_text(mut self, tooltip_text: &str) -> Self {
self.tooltip_text = Some(tooltip_text.to_string());
self
}
pub fn valign(mut self, valign: Align) -> Self {
self.valign = Some(valign);
self
}
pub fn vexpand(mut self, vexpand: bool) -> Self {
self.vexpand = Some(vexpand);
self
}
pub fn vexpand_set(mut self, vexpand_set: bool) -> Self {
self.vexpand_set = Some(vexpand_set);
self
}
pub fn visible(mut self, visible: bool) -> Self {
self.visible = Some(visible);
self
}
pub fn width_request(mut self, width_request: i32) -> Self {
self.width_request = Some(width_request);
self
}
pub fn accessible_role(mut self, accessible_role: AccessibleRole) -> Self {
self.accessible_role = Some(accessible_role);
self
}
pub fn action_name(mut self, action_name: &str) -> Self {
self.action_name = Some(action_name.to_string());
self
}
pub fn action_target(mut self, action_target: &glib::Variant) -> Self {
self.action_target = Some(action_target.clone());
self
}
}
pub const NONE_CHECK_BUTTON: Option<&CheckButton> = None;
pub trait CheckButtonExt: 'static {
#[doc(alias = "gtk_check_button_get_active")]
#[doc(alias = "get_active")]
fn is_active(&self) -> bool;
#[doc(alias = "gtk_check_button_get_inconsistent")]
#[doc(alias = "get_inconsistent")]
fn is_inconsistent(&self) -> bool;
#[doc(alias = "gtk_check_button_get_label")]
#[doc(alias = "get_label")]
fn label(&self) -> Option<glib::GString>;
#[doc(alias = "gtk_check_button_get_use_underline")]
#[doc(alias = "get_use_underline")]
fn uses_underline(&self) -> bool;
#[doc(alias = "gtk_check_button_set_active")]
fn set_active(&self, setting: bool);
#[doc(alias = "gtk_check_button_set_group")]
fn set_group<P: IsA<CheckButton>>(&self, group: Option<&P>);
#[doc(alias = "gtk_check_button_set_inconsistent")]
fn set_inconsistent(&self, inconsistent: bool);
#[doc(alias = "gtk_check_button_set_label")]
fn set_label(&self, label: Option<&str>);
#[doc(alias = "gtk_check_button_set_use_underline")]
fn set_use_underline(&self, setting: bool);
#[cfg(any(feature = "v4_2", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
#[doc(alias = "activate")]
fn connect_activate<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[cfg(any(feature = "v4_2", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
fn emit_activate(&self);
#[doc(alias = "toggled")]
fn connect_toggled<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "active")]
fn connect_active_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "group")]
fn connect_group_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "inconsistent")]
fn connect_inconsistent_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "label")]
fn connect_label_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "use-underline")]
fn connect_use_underline_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<CheckButton>> CheckButtonExt for O {
fn is_active(&self) -> bool {
unsafe {
from_glib(ffi::gtk_check_button_get_active(
self.as_ref().to_glib_none().0,
))
}
}
fn is_inconsistent(&self) -> bool {
unsafe {
from_glib(ffi::gtk_check_button_get_inconsistent(
self.as_ref().to_glib_none().0,
))
}
}
fn label(&self) -> Option<glib::GString> {
unsafe {
from_glib_none(ffi::gtk_check_button_get_label(
self.as_ref().to_glib_none().0,
))
}
}
fn uses_underline(&self) -> bool {
unsafe {
from_glib(ffi::gtk_check_button_get_use_underline(
self.as_ref().to_glib_none().0,
))
}
}
fn set_active(&self, setting: bool) {
unsafe {
ffi::gtk_check_button_set_active(self.as_ref().to_glib_none().0, setting.into_glib());
}
}
fn set_group<P: IsA<CheckButton>>(&self, group: Option<&P>) {
unsafe {
ffi::gtk_check_button_set_group(
self.as_ref().to_glib_none().0,
group.map(|p| p.as_ref()).to_glib_none().0,
);
}
}
fn set_inconsistent(&self, inconsistent: bool) {
unsafe {
ffi::gtk_check_button_set_inconsistent(
self.as_ref().to_glib_none().0,
inconsistent.into_glib(),
);
}
}
fn set_label(&self, label: Option<&str>) {
unsafe {
ffi::gtk_check_button_set_label(self.as_ref().to_glib_none().0, label.to_glib_none().0);
}
}
fn set_use_underline(&self, setting: bool) {
unsafe {
ffi::gtk_check_button_set_use_underline(
self.as_ref().to_glib_none().0,
setting.into_glib(),
);
}
}
#[cfg(any(feature = "v4_2", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
#[doc(alias = "activate")]
fn connect_activate<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn activate_trampoline<P: IsA<CheckButton>, F: Fn(&P) + 'static>(
this: *mut ffi::GtkCheckButton,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&CheckButton::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"activate\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
activate_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[cfg(any(feature = "v4_2", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
fn emit_activate(&self) {
let _ = unsafe {
glib::Object::from_glib_borrow(self.as_ptr() as *mut glib::gobject_ffi::GObject)
.emit_by_name("activate", &[])
.unwrap()
};
}
#[doc(alias = "toggled")]
fn connect_toggled<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn toggled_trampoline<P: IsA<CheckButton>, F: Fn(&P) + 'static>(
this: *mut ffi::GtkCheckButton,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&CheckButton::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"toggled\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
toggled_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[doc(alias = "active")]
fn connect_active_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_active_trampoline<P: IsA<CheckButton>, F: Fn(&P) + 'static>(
this: *mut ffi::GtkCheckButton,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&CheckButton::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::active\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_active_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[doc(alias = "group")]
fn connect_group_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_group_trampoline<P: IsA<CheckButton>, F: Fn(&P) + 'static>(
this: *mut ffi::GtkCheckButton,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&CheckButton::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::group\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_group_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[doc(alias = "inconsistent")]
fn connect_inconsistent_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_inconsistent_trampoline<
P: IsA<CheckButton>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::GtkCheckButton,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&CheckButton::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::inconsistent\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_inconsistent_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[doc(alias = "label")]
fn connect_label_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_label_trampoline<P: IsA<CheckButton>, F: Fn(&P) + 'static>(
this: *mut ffi::GtkCheckButton,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&CheckButton::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::label\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_label_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[doc(alias = "use-underline")]
fn connect_use_underline_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_use_underline_trampoline<
P: IsA<CheckButton>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::GtkCheckButton,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&CheckButton::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::use-underline\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_use_underline_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
impl fmt::Display for CheckButton {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("CheckButton")
}
} | }
pub fn label(mut self, label: &str) -> Self { |
selectionsort.py | # Algorithm used to sort a list (selection sort).
# On each pass it scans the whole list and moves the smallest element into position.
def encontraMenor(lista):
    # store the value at index 0 in the variable
    menorValor = lista[0]
    # assume index zero holds the smallest value
    menorIndex = 0
    # scan the list from index 1 to the last element
    for i in range(1, len(lista)):
        # compare lista[i] with the current minimum and,
        # if it is smaller, update menorValor and menorIndex
        if lista[i] < menorValor:
            menorValor = lista[i]
            menorIndex = i
    # return the index of the smallest value found
    return menorIndex
# function that uses encontraMenor
# to build a new, sorted list
def ordenaS | :
    # list that will receive the sorted items
    ordLista = []
    # iterate over every element of the list
    for x in range(len(lista)):
        # on each iteration find the smallest item and insert it
        # into the new list; pop() returns the item for the new list
        # and removes it from the old one at the same time.
        menor = encontraMenor(lista)
        ordLista.append(lista.pop(menor))
    # return the new, sorted list
    return ordLista
# test program
lista = [3,1,13,5,0,100]
print(ordenaSelecao(lista))
| elecao(lista) |
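# Worked note (illustrative, mirrors the test above): because ordenaSelecao pops each
# minimum out of its argument, the input list is left empty after the call.
valores = [3, 1, 13, 5, 0, 100]
print(ordenaSelecao(valores))  # [0, 1, 3, 5, 13, 100]
print(valores)                 # [] - the original list was consumed by pop()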
success.dto.ts | import { Expose } from 'class-transformer';
export class | {
@Expose()
success: boolean;
}
| SuccessDto |
index.tsx | import { ReactText } from "react";
interface Props{
intervalMs: number,
getData: (url:string)=>Promise<any>
}
class NetworkCheck extends React.Component<{
enqueueSnackbar: (message: SnackbarMessage, options?: OptionsObject) => SnackbarKey;
closeSnackbar: (key?: SnackbarKey) => void;
intervalMs: number,
getData: (url:string)=>Promise<any>
},{}>{
constructor(props:any) {
super(props);
if (this.props.intervalMs) setInterval(this.checkNetwork, this.props.intervalMs);
}
    isOfflineMessage: ReactText | null = null;
checkNetwork = ()=>{
this.props.getData('/me?fields=id').then(()=>{
if (this.isOfflineMessage) {
this.props.closeSnackbar(this.isOfflineMessage);
this.isOfflineMessage = null;
this.props.enqueueSnackbar(`You're back online`, {variant:'success'});
}
}).catch(()=>{
if (!this.isOfflineMessage) this.isOfflineMessage = this.props.enqueueSnackbar(`You're now offline`, {variant:'error', persist: true});
})
}
render(){
return null;
}
}
export default withSnackbar(NetworkCheck) as any as React.ElementType<Props>; | import * as React from 'react'
import { OptionsObject, SnackbarKey, SnackbarMessage, withSnackbar} from "notistack"; |
|
memtest.py | """
Membrane test routines for voltage clamp experiments.
creates abf.MTs[sweep]={} #with keys like Ih, Ra, Rm, etc
Example usage:
abf=swhlab.ABF('../abfs/group/16701010.abf')
swhlab.memtest.memtest(abf) #performs memtest on all sweeps
swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did
pylab.show()
"""
import os
import sys
import pylab
import numpy as np
import time
import swhlab
import swhlab.core.common as cm
exampleABF=swhlab.ABF()
def memtestSweepVC(abf=exampleABF):
"""
perform memtest on current sweep in VC mode. Return Ih, Ra, Rm, etc.
All variable names are explained in /swhlab/docs/memtest.ppt
"""
if abf.protoSeqY[1]>abf.protoSeqY[0] or len(abf.protoSeqY)<3:
return "protocol doesn't step down and back up"
TA,TB=int(abf.protoSeqX[1]),int(abf.protoSeqX[2])
dT=int(TB-TA)
T1A=int(TA+.5*dT)
T1B=int(TA+.9*dT)
T2A=T1A+dT
T2B=T1B+dT
P1=np.average(abf.dataY[T1A:T1B])
P2=np.average(abf.dataY[T2A:T2B])
dI=P2-P1
dV=abf.protoSeqY[2]-abf.protoSeqY[1]
PP=np.max(abf.dataY[TB:TB+100])# peak found within first 100 points
TP=np.where(abf.dataY[TB:TB+150]==PP)[0][0]+TB
dP=PP-P1
dTC=PP-P2
PCA=P2+.9*dTC # upper fraction for Cm detection
PCB=P2+.1*dTC # upper fraction for Cm detection
PCtau=P2+.37*dTC # crossing point of theoretical tau
TCA=np.where(abf.dataY[TP:T2A]<PCA)[0][0]+TP
TCB=np.where(abf.dataY[TP:T2A]<PCB)[0][0]+TP
dTCT=TCB-TCA #number of points available for fitting
Ih=P2
Ra=(dV*10**3)/(PP-P2) #MOhm=uV/pA
Rm=(dV*10**3)/(P2-P1) #MOhm=uV/pA
fitM,fitT,fitB,fitTau=cm.fit_exp(abf.dataY[TCA:TCB]) #same units as given
fitTau=fitTau*1000/abf.rate #time constant convert to ms units
Tv=fitTau #time constant of extrinsic voltage clamp
Cm=Tv/Ra*1000 #us/MOhm is pF
Tm=Rm*Cm/1000 #time constant of cell membrane (intrinsic voltage clamp)
del abf
return locals()
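# Worked unit check for the formulas above (illustrative numbers, not measured data):
# a 10 mV step with a 500 pA capacitive peak above steady state gives
# Ra = 10*10**3 uV / 500 pA = 20 MOhm; a 100 pA steady-state change gives
# Rm = 10*10**3 / 100 = 100 MOhm; and with Tv = 2 ms, Cm = 2/20*1000 = 100 pF.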
def memtestIC(abf=exampleABF):
"""
IC memtest is different. Make an average sweep, then curve fit it.
This only RETURNS the memtest, it does not assign it.
"""
if abf.protoSeqY[1]>abf.protoSeqY[0] or len(abf.protoSeqY)<3:
return "protocol doesn't step down and back up"
abf.baseline=[abf.protoSeqX[1]/abf.rate*.75,abf.protoSeqX[1]/abf.rate]
T1A,T1B=np.array(abf.baseline)*abf.rate
Xs,Ys,Er=abf.average_sweep()
T2A=abf.protoSeqX[2]-abf.protoSeqX[1]
T2B=abf.protoSeqX[2]
M2=np.average(Ys[T2A:T2B])
MCA=.1*M2 # set 90% here
MCB=.9*M2 # set 10% here
TCA=np.where(Ys<MCA)[0][0]
TCB=np.where(Ys<MCB)[0][0]
m,t,b,tc=cm.fit_exp(Ys[TCA:TCB]) #do the fit!
dI=abs(abf.protoSeqY[2]-abf.protoSeqY[1]) #pA
dV=abs(M2) #mV
Rm=dV/dI*1000 #uV/pA = MOhm
Cm=tc/Rm #ms/MOhm
del abf,Ys,Xs,Er
return locals() #convert to structured array
def memtest(abf=exampleABF,firstSweepOnly=False,plotToo=False,saveToo=True):
"""perform memtest on all sweeps."""
timeStart=time.clock()
if abf.units=="mV":
abf.MTs = memtestIC(abf)
else:
abf.MTs=[None]*abf.sweeps
for sweep in range(abf.sweeps):
abf.setSweep(sweep)
result=memtestSweepVC(abf)
if type(result) is dict:
abf.MTs[abf.currentSweep]=result
else:
print("MEMTEST FAILED - sweep %d -"%sweep,result)
if firstSweepOnly:
return
abf.MTs = cm.matrixfromDicts(abf.MTs) #convert to structured array
took=time.clock()-timeStart
print(" -- memtest performed on %d sweeps in %.02f ms"%(abf.sweeps,took*1000))
if saveToo:
abf.saveThing(abf.MTs,"MTs")
def plot_standard4(abf=exampleABF):
"""make a standard memtest plot showing Ih, Ra, etc. with time."""
if abf.sweeps<2:
return
swhlab.plot.new(abf)
Xs=np.arange(abf.sweeps)*abf.sweepInterval/60
subplots=[221,222,223,224]
features=['Ih','Ra','Rm','Cm']
units=['pA','MOhm','MOhm','pF']
for subplot,feature,unit in zip(subplots,features,units):
pylab.subplot(subplot)
pylab.grid(alpha=.5)
#pylab.title(feature)
pylab.plot(Xs,cm.dictVals(abf.MTs,feature),'.-',alpha=.5)
pylab.xlabel(None)
pylab.ylabel("%s (%s)"%(feature,unit))
swhlab.plot.comments(abf,True)
pylab.margins(0,.1)
def checkSweepIC(abf=exampleABF,sweep=0):
"""Produce an eyeball-ready indication how the MT was calculated in IC."""
_keys = abf.MTs.dtype.names
for key in _keys:
globals()[key]=abf.MTs[key] # only global for this module, that's fine
fitted=cm.algo_exp(np.arange(TCB-TCA),m,t,b)
swhlab.plot.new(abf,forceNewFigure=True)
Xs,Ys,Er=abf.average_sweep()
for subplot in [121,122]:
pylab.subplot(subplot)
pylab.axhline(0,color='b',lw=2,alpha=.5,ls="--")
pylab.axhline(M2,color='b',lw=2,alpha=.5,ls="--")
swhlab.plot.sweep(abf,'all',rainbow=False,color='#CCCCCC',alpha=.5)
pylab.plot(Xs,Ys,color='k',alpha=.5)
pylab.plot(Xs[T1A:T1B],Ys[T1A:T1B],color='b',lw=2)
pylab.plot(Xs[T2A:T2B],Ys[T2A:T2B],color='b',lw=2)
pylab.plot(abf.dataX[TCA:TCB],fitted,color='r',lw=2,ls='--')
pylab.axis([(TCA-100)/abf.rate,(TCB+100)/abf.rate,None,None])
pylab.tight_layout()
msg="tau: %.02f ms\n"%(tc/abf.rate*1000)
msg+="Rm: %.02f MOhm\n"%(Rm)
msg+="Cm: %.02f pF"%(Cm)
pylab.annotate(msg,(.75,.95),ha='left',va='top',weight='bold',family='monospace',
xycoords='figure fraction',size=12,color='g')
swhlab.plot.annotate(abf)
return
def checkSweep(abf=exampleABF,sweep=0):
"""Produce an eyeball-ready indication how the MT was calculated in VC."""
if abf.units=="mV":
return checkSweepIC(abf,sweep)
if abf.MTs[sweep] is None:
return False #no memtest data even found
_keys = abf.MTs[sweep].dtype.names
for key in _keys:
globals()[key]=abf.MTs[sweep][key] # only global for this module, that's fine.
_msg2="Average (n=%d)\n"%abf.sweeps
_msg=""
for i in range(len(_keys)):
_msg+="%s=%s\n"%(_keys[i],abf.MTs[sweep][i])
if _keys[i] in ['Ih','Ra','Rm','Cm','Tv','Tm']:
_msg2+="%s=%.02f\n"%(_keys[i],abf.MTs[sweep][i])
fitted=cm.algo_exp(np.arange(TCB-TCA),fitM,fitT,fitB)
pylab.figure(figsize=(8,8))
for subplot in [211,212]:
pylab.subplot(subplot)
#pylab.plot(abf.dataX,abf.dataY,alpha=.2,color='k',lw=2)
pylab.plot(abf.dataX[:TCA],abf.dataY[:TCA],alpha=.2,color='k',lw=2)
pylab.plot(abf.dataX[TCB:],abf.dataY[TCB:],alpha=.2,color='k',lw=2)
pylab.plot(abf.dataX[TCA:TCB],abf.dataY[TCA:TCB],'o',alpha=.5,lw=4,mfc='none',mec='r')
pylab.plot(abf.dataX[T1A:T1B],abf.dataY[T1A:T1B],alpha=.4,color='b')
pylab.plot(abf.dataX[T2A:T2B],abf.dataY[T2A:T2B],alpha=.4,color='b')
pylab.plot(abf.dataX[TCA:TCB],fitted,color='k',lw=2,ls="--")
for i in [TA, TB]:
pylab.axvline(i/abf.rate,color='k',ls='--',alpha=.4)
for i in [P1,P2]:
pylab.axhline(i,color='b',ls="--",alpha=.5)
for i in [PCA,PCB,PP]:
pylab.axhline(i,color='g',ls="--",alpha=.5)
pylab.tight_layout()
pylab.subplots_adjust(right=0.75)
pylab.annotate(_msg,(.8,.75),ha='left',va='top',alpha=.5,
xycoords='figure fraction',family='monospace',size=10)
pylab.annotate(_msg2,(.8,.95),ha='left',va='top',weight='bold',family='monospace',
xycoords='figure fraction',size=12,color='g')
pylab.subplot(211)
pylab.axis([None,abf.dataX[T2B]+.05,None,None])
pylab.subplot(212)
pylab.axis([(TB-20)/abf.rate,(TCB+20)/abf.rate,P1-20,PP+20])
swhlab.plot.annotate(abf)
for key in _keys:
del key #be clean about screwing with globals()
return
def test():
"""voltage clamp MT."""
abf=swhlab.ABF(r'C:\Apps\pythonModules\abfs\16701010.abf')
swhlab.memtest.memtest(abf) #performs memtest on all sweeps
swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did
pylab.show()
def test2():
|
if __name__=="__main__":
#test()
#test2()
test3()
print("DONE") | """current clamp MT."""
abf=swhlab.ABF(r'C:\Apps\pythonModules\abfs\16701006.abf')
swhlab.memtest.memtest(abf) #performs memtest on all sweeps
swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did
pylab.show() |
standalone.py | """Standalone Authenticator."""
import collections
import errno
import logging
import socket
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from OpenSSL import crypto
from acme import challenges
from acme import standalone as acme_standalone
from certbot import achallenges
from certbot import errors
from certbot import interfaces
from certbot.display import util as display_util
from certbot.plugins import common
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
ServedType = DefaultDict[
acme_standalone.BaseDualNetworkedServers,
Set[achallenges.KeyAuthorizationAnnotatedChallenge]
]
class ServerManager:
"""Standalone servers manager.
Manager for `ACMEServer` and `ACMETLSServer` instances.
`certs` and `http_01_resources` correspond to
`acme.crypto_util.SSLSocket.certs` and
`acme.crypto_util.SSLSocket.http_01_resources` respectively. All
created servers share the same certificates and resources, so if
you're running both TLS and non-TLS instances, HTTP01 handlers
will serve the same URLs!
"""
def __init__(self, certs: Mapping[bytes, Tuple[crypto.PKey, crypto.X509]],
http_01_resources: Set[acme_standalone.HTTP01RequestHandler.HTTP01Resource]
) -> None:
self._instances: Dict[int, acme_standalone.HTTP01DualNetworkedServers] = {}
self.certs = certs
self.http_01_resources = http_01_resources
def run(self, port: int, challenge_type: Type[challenges.Challenge],
listenaddr: str = "") -> acme_standalone.HTTP01DualNetworkedServers:
"""Run ACME server on specified ``port``.
This method is idempotent, i.e. all calls with the same pair of
``(port, challenge_type)`` will reuse the same server.
:param int port: Port to run the server on.
:param challenge_type: Subclass of `acme.challenges.Challenge`,
            currently only `acme.challenges.HTTP01`.
:param str listenaddr: (optional) The address to listen on. Defaults to all addrs.
:returns: DualNetworkedServers instance.
:rtype: ACMEServerMixin
"""
assert challenge_type == challenges.HTTP01
if port in self._instances:
return self._instances[port]
address = (listenaddr, port)
try:
servers = acme_standalone.HTTP01DualNetworkedServers(
address, self.http_01_resources)
except socket.error as error:
raise errors.StandaloneBindError(error, port)
servers.serve_forever()
# if port == 0, then random free port on OS is taken
# both servers, if they exist, have the same port
real_port = servers.getsocknames()[0][1]
self._instances[real_port] = servers
return servers
def stop(self, port: int) -> None:
"""Stop ACME server running on the specified ``port``.
:param int port:
"""
instance = self._instances[port]
for sockname in instance.getsocknames():
logger.debug("Stopping server at %s:%d...",
*sockname[:2])
instance.shutdown_and_server_close()
del self._instances[port]
def running(self) -> Dict[int, acme_standalone.HTTP01DualNetworkedServers]:
"""Return all running instances.
Once the server is stopped using `stop`, it will not be
returned.
:returns: Mapping from ``port`` to ``servers``.
:rtype: tuple
"""
return self._instances.copy()
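# A minimal sketch of how ServerManager is meant to be driven (illustrative only;
# certbot itself calls it through the Authenticator below). run() is idempotent per
# port, so a second request for the same port returns the same servers object.
def _server_manager_usage_sketch() -> None:
    manager = ServerManager(certs={}, http_01_resources=set())
    servers = manager.run(8080, challenges.HTTP01, listenaddr="127.0.0.1")
    # Same port => the exact same DualNetworkedServers instance comes back.
    assert manager.run(8080, challenges.HTTP01) is servers
    manager.stop(8080)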
class Authenticator(common.Plugin, interfaces.Authenticator):
"""Standalone Authenticator.
This authenticator creates its own ephemeral TCP listener on the
necessary port in order to respond to incoming http-01
challenges from the certificate authority. Therefore, it does not
rely on any existing server program.
"""
description = "Spin up a temporary webserver"
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.served: ServedType = collections.defaultdict(set)
# Stuff below is shared across threads (i.e. servers read
# values, main thread writes). Due to the nature of CPython's
# GIL, the operations are safe, c.f.
# https://docs.python.org/2/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
self.certs: Mapping[bytes, Tuple[crypto.PKey, crypto.X509]] = {}
self.http_01_resources: Set[acme_standalone.HTTP01RequestHandler.HTTP01Resource] = set()
self.servers = ServerManager(self.certs, self.http_01_resources)
@classmethod
def add_parser_arguments(cls, add: Callable[..., None]) -> None:
pass # No additional argument for the standalone plugin parser
def more_info(self) -> str: # pylint: disable=missing-function-docstring
return("This authenticator creates its own ephemeral TCP listener "
"on the necessary port in order to respond to incoming "
"http-01 challenges from the certificate authority. Therefore, "
"it does not rely on any existing server program.")
def prepare(self) -> None: # pylint: disable=missing-function-docstring
pass
def get_chall_pref(self, domain: str) -> Iterable[Type[challenges.Challenge]]:
# pylint: disable=unused-argument,missing-function-docstring
return [challenges.HTTP01]
def perform(self, achalls: Iterable[achallenges.AnnotatedChallenge]
) -> List[challenges.ChallengeResponse]: # pylint: disable=missing-function-docstring
return [self._try_perform_single(achall) for achall in achalls]
def _try_perform_single(self,
achall: achallenges.AnnotatedChallenge) -> challenges.ChallengeResponse:
while True: | try:
return self._perform_single(achall)
except errors.StandaloneBindError as error:
_handle_perform_error(error)
def _perform_single(self,
achall: achallenges.AnnotatedChallenge) -> challenges.ChallengeResponse:
servers, response = self._perform_http_01(achall)
self.served[servers].add(achall)
return response
def _perform_http_01(self, achall: achallenges.AnnotatedChallenge
) -> Tuple[acme_standalone.HTTP01DualNetworkedServers,
challenges.ChallengeResponse]:
port = self.config.http01_port
addr = self.config.http01_address
servers = self.servers.run(port, challenges.HTTP01, listenaddr=addr)
response, validation = achall.response_and_validation()
resource = acme_standalone.HTTP01RequestHandler.HTTP01Resource(
chall=achall.chall, response=response, validation=validation)
self.http_01_resources.add(resource)
return servers, response
def cleanup(self, achalls: Iterable[achallenges.AnnotatedChallenge]) -> None: # pylint: disable=missing-function-docstring
# reduce self.served and close servers if no challenges are served
for unused_servers, server_achalls in self.served.items():
for achall in achalls:
if achall in server_achalls:
server_achalls.remove(achall)
for port, servers in self.servers.running().items():
if not self.served[servers]:
self.servers.stop(port)
def auth_hint(self, failed_achalls: List[achallenges.AnnotatedChallenge]) -> str:
port, addr = self.config.http01_port, self.config.http01_address
neat_addr = f"{addr}:{port}" if addr else f"port {port}"
return ("The Certificate Authority failed to download the challenge files from "
f"the temporary standalone webserver started by Certbot on {neat_addr}. "
"Ensure that the listed domains point to this machine and that it can "
"accept inbound connections from the internet.")
def _handle_perform_error(error: errors.StandaloneBindError) -> None:
if error.socket_error.errno == errno.EACCES:
raise errors.PluginError(
"Could not bind TCP port {0} because you don't have "
"the appropriate permissions (for example, you "
"aren't running this program as "
"root).".format(error.port))
if error.socket_error.errno == errno.EADDRINUSE:
msg = (
"Could not bind TCP port {0} because it is already in "
"use by another process on this system (such as a web "
"server). Please stop the program in question and "
"then try again.".format(error.port))
should_retry = display_util.yesno(msg, "Retry", "Cancel", default=False)
if not should_retry:
raise errors.PluginError(msg)
else:
raise error | |
table.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package dynamodb
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Provides a DynamoDB table resource
//
// > **Note:** It is recommended to use [`ignoreChanges`](https://www.pulumi.com/docs/intro/concepts/programming-model/#ignorechanges) for `readCapacity` and/or `writeCapacity` if there's `autoscaling policy` attached to the table.
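//
// A short sketch of that recommendation (illustrative; the resource name and argument
// details are assumed, not taken from this file):
//
// ```go
// _, err := dynamodb.NewTable(ctx, "autoscaledTable", &dynamodb.TableArgs{ /* ... */ },
// 	pulumi.IgnoreChanges([]string{"readCapacity", "writeCapacity"}))
// if err != nil {
// 	return err
// }
// ```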
//
// ## Example Usage
//
// The following dynamodb table description models the table and GSI shown
// in the [AWS SDK example documentation](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.html)
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-aws/sdk/v4/go/aws/dynamodb"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := dynamodb.NewTable(ctx, "basic_dynamodb_table", &dynamodb.TableArgs{
// Attributes: dynamodb.TableAttributeArray{
// &dynamodb.TableAttributeArgs{
// Name: pulumi.String("UserId"),
// Type: pulumi.String("S"),
// },
// &dynamodb.TableAttributeArgs{
// Name: pulumi.String("GameTitle"),
// Type: pulumi.String("S"),
// },
// &dynamodb.TableAttributeArgs{
// Name: pulumi.String("TopScore"),
// Type: pulumi.String("N"),
// },
// },
// BillingMode: pulumi.String("PROVISIONED"),
// GlobalSecondaryIndexes: dynamodb.TableGlobalSecondaryIndexArray{
// &dynamodb.TableGlobalSecondaryIndexArgs{
// HashKey: pulumi.String("GameTitle"),
// Name: pulumi.String("GameTitleIndex"),
// NonKeyAttributes: pulumi.StringArray{
// pulumi.String("UserId"),
// },
// ProjectionType: pulumi.String("INCLUDE"),
// RangeKey: pulumi.String("TopScore"),
// ReadCapacity: pulumi.Int(10),
// WriteCapacity: pulumi.Int(10),
// },
// },
// HashKey: pulumi.String("UserId"),
// RangeKey: pulumi.String("GameTitle"),
// ReadCapacity: pulumi.Int(20),
// Tags: pulumi.StringMap{
// "Environment": pulumi.String("production"),
// "Name": pulumi.String("dynamodb-table-1"),
// },
// Ttl: &dynamodb.TableTtlArgs{
// AttributeName: pulumi.String("TimeToExist"),
// Enabled: pulumi.Bool(false),
// },
// WriteCapacity: pulumi.Int(20),
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
// ### Global Tables
//
// This resource implements support for [DynamoDB Global Tables V2 (version 2019.11.21)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) via `replica` configuration blocks. For working with [DynamoDB Global Tables V1 (version 2017.11.29)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html), see the `dynamodb.GlobalTable` resource.
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-aws/sdk/v4/go/aws/dynamodb"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := dynamodb.NewTable(ctx, "example", &dynamodb.TableArgs{
// Attributes: dynamodb.TableAttributeArray{
// &dynamodb.TableAttributeArgs{
// Name: pulumi.String("TestTableHashKey"),
// Type: pulumi.String("S"),
// },
// },
// BillingMode: pulumi.String("PAY_PER_REQUEST"),
// HashKey: pulumi.String("TestTableHashKey"),
// Replicas: dynamodb.TableReplicaArray{
// &dynamodb.TableReplicaArgs{
// RegionName: pulumi.String("us-east-2"),
// },
// &dynamodb.TableReplicaArgs{
// RegionName: pulumi.String("us-west-2"),
// },
// },
// StreamEnabled: pulumi.Bool(true),
// StreamViewType: pulumi.String("NEW_AND_OLD_IMAGES"),
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// ## Import
//
// DynamoDB tables can be imported using the `name`, e.g.
//
// ```sh
// $ pulumi import aws:dynamodb/table:Table basic-dynamodb-table GameScores
// ```
type Table struct {
pulumi.CustomResourceState
// The arn of the table
Arn pulumi.StringOutput `pulumi:"arn"`
// List of nested attribute definitions. Only required for `hashKey` and `rangeKey` attributes. Each attribute has two properties:
Attributes TableAttributeArrayOutput `pulumi:"attributes"`
// Controls how you are charged for read and write throughput and how you manage capacity. The valid values are `PROVISIONED` and `PAY_PER_REQUEST`. Defaults to `PROVISIONED`.
BillingMode pulumi.StringPtrOutput `pulumi:"billingMode"`
// Describe a GSI for the table;
// subject to the normal limits on the number of GSIs, projected
// attributes, etc.
GlobalSecondaryIndexes TableGlobalSecondaryIndexArrayOutput `pulumi:"globalSecondaryIndexes"`
// The name of the hash key in the index; must be
// defined as an attribute in the resource.
HashKey pulumi.StringOutput `pulumi:"hashKey"`
// Describe an LSI on the table;
// these can only be allocated *at creation* so you cannot change this
// definition after you have created the resource.
LocalSecondaryIndexes TableLocalSecondaryIndexArrayOutput `pulumi:"localSecondaryIndexes"`
// The name of the index
Name pulumi.StringOutput `pulumi:"name"`
// Point-in-time recovery options.
PointInTimeRecovery TablePointInTimeRecoveryOutput `pulumi:"pointInTimeRecovery"`
// The name of the range key; must be defined
RangeKey pulumi.StringPtrOutput `pulumi:"rangeKey"`
// The number of read units for this index. Must be set if billingMode is set to PROVISIONED.
ReadCapacity pulumi.IntPtrOutput `pulumi:"readCapacity"`
// Configuration block(s) with [DynamoDB Global Tables V2 (version 2019.11.21)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) replication configurations. Detailed below.
Replicas TableReplicaArrayOutput `pulumi:"replicas"`
// Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS owned Customer Master Key if this argument isn't specified.
ServerSideEncryption TableServerSideEncryptionOutput `pulumi:"serverSideEncryption"`
// The ARN of the Table Stream. Only available when `streamEnabled = true`
StreamArn pulumi.StringOutput `pulumi:"streamArn"`
// Indicates whether Streams are to be enabled (true) or disabled (false).
StreamEnabled pulumi.BoolPtrOutput `pulumi:"streamEnabled"`
// A timestamp, in ISO 8601 format, for this stream. Note that this timestamp is not
// a unique identifier for the stream on its own. However, the combination of AWS customer ID,
// table name and this field is guaranteed to be unique.
// It can be used for creating CloudWatch Alarms. Only available when `streamEnabled = true`
StreamLabel pulumi.StringOutput `pulumi:"streamLabel"`
// When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are `KEYS_ONLY`, `NEW_IMAGE`, `OLD_IMAGE`, `NEW_AND_OLD_IMAGES`.
StreamViewType pulumi.StringOutput `pulumi:"streamViewType"`
	// A map of tags to populate on the created table. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Tags pulumi.StringMapOutput `pulumi:"tags"`
	// A map of tags assigned to the resource, including those inherited from the provider.
TagsAll pulumi.StringMapOutput `pulumi:"tagsAll"`
// Defines ttl, has two properties, and can only be specified once:
Ttl TableTtlPtrOutput `pulumi:"ttl"`
// The number of write units for this index. Must be set if billingMode is set to PROVISIONED.
WriteCapacity pulumi.IntPtrOutput `pulumi:"writeCapacity"`
}
// NewTable registers a new resource with the given unique name, arguments, and options.
func NewTable(ctx *pulumi.Context,
name string, args *TableArgs, opts ...pulumi.ResourceOption) (*Table, error) |
// GetTable gets an existing Table resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetTable(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *TableState, opts ...pulumi.ResourceOption) (*Table, error) {
var resource Table
err := ctx.ReadResource("aws:dynamodb/table:Table", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering Table resources.
type tableState struct {
// The arn of the table
Arn *string `pulumi:"arn"`
// List of nested attribute definitions. Only required for `hashKey` and `rangeKey` attributes. Each attribute has two properties:
Attributes []TableAttribute `pulumi:"attributes"`
// Controls how you are charged for read and write throughput and how you manage capacity. The valid values are `PROVISIONED` and `PAY_PER_REQUEST`. Defaults to `PROVISIONED`.
BillingMode *string `pulumi:"billingMode"`
// Describe a GSI for the table;
// subject to the normal limits on the number of GSIs, projected
// attributes, etc.
GlobalSecondaryIndexes []TableGlobalSecondaryIndex `pulumi:"globalSecondaryIndexes"`
// The name of the hash key in the index; must be
// defined as an attribute in the resource.
HashKey *string `pulumi:"hashKey"`
// Describe an LSI on the table;
// these can only be allocated *at creation* so you cannot change this
// definition after you have created the resource.
LocalSecondaryIndexes []TableLocalSecondaryIndex `pulumi:"localSecondaryIndexes"`
// The name of the index
Name *string `pulumi:"name"`
// Point-in-time recovery options.
PointInTimeRecovery *TablePointInTimeRecovery `pulumi:"pointInTimeRecovery"`
// The name of the range key; must be defined
RangeKey *string `pulumi:"rangeKey"`
// The number of read units for this index. Must be set if billingMode is set to PROVISIONED.
ReadCapacity *int `pulumi:"readCapacity"`
// Configuration block(s) with [DynamoDB Global Tables V2 (version 2019.11.21)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) replication configurations. Detailed below.
Replicas []TableReplica `pulumi:"replicas"`
// Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS owned Customer Master Key if this argument isn't specified.
ServerSideEncryption *TableServerSideEncryption `pulumi:"serverSideEncryption"`
// The ARN of the Table Stream. Only available when `streamEnabled = true`
StreamArn *string `pulumi:"streamArn"`
// Indicates whether Streams are to be enabled (true) or disabled (false).
StreamEnabled *bool `pulumi:"streamEnabled"`
// A timestamp, in ISO 8601 format, for this stream. Note that this timestamp is not
// a unique identifier for the stream on its own. However, the combination of AWS customer ID,
// table name and this field is guaranteed to be unique.
// It can be used for creating CloudWatch Alarms. Only available when `streamEnabled = true`
StreamLabel *string `pulumi:"streamLabel"`
// When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are `KEYS_ONLY`, `NEW_IMAGE`, `OLD_IMAGE`, `NEW_AND_OLD_IMAGES`.
StreamViewType *string `pulumi:"streamViewType"`
	// A map of tags to populate on the created table. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Tags map[string]string `pulumi:"tags"`
	// A map of tags assigned to the resource, including those inherited from the provider.
TagsAll map[string]string `pulumi:"tagsAll"`
// Defines ttl, has two properties, and can only be specified once:
Ttl *TableTtl `pulumi:"ttl"`
// The number of write units for this index. Must be set if billingMode is set to PROVISIONED.
WriteCapacity *int `pulumi:"writeCapacity"`
}
type TableState struct {
// The arn of the table
Arn pulumi.StringPtrInput
// List of nested attribute definitions. Only required for `hashKey` and `rangeKey` attributes. Each attribute has two properties:
Attributes TableAttributeArrayInput
// Controls how you are charged for read and write throughput and how you manage capacity. The valid values are `PROVISIONED` and `PAY_PER_REQUEST`. Defaults to `PROVISIONED`.
BillingMode pulumi.StringPtrInput
// Describe a GSI for the table;
// subject to the normal limits on the number of GSIs, projected
// attributes, etc.
GlobalSecondaryIndexes TableGlobalSecondaryIndexArrayInput
// The name of the hash key in the index; must be
// defined as an attribute in the resource.
HashKey pulumi.StringPtrInput
// Describe an LSI on the table;
// these can only be allocated *at creation* so you cannot change this
// definition after you have created the resource.
LocalSecondaryIndexes TableLocalSecondaryIndexArrayInput
// The name of the index
Name pulumi.StringPtrInput
// Point-in-time recovery options.
PointInTimeRecovery TablePointInTimeRecoveryPtrInput
// The name of the range key; must be defined
RangeKey pulumi.StringPtrInput
// The number of read units for this index. Must be set if billingMode is set to PROVISIONED.
ReadCapacity pulumi.IntPtrInput
// Configuration block(s) with [DynamoDB Global Tables V2 (version 2019.11.21)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) replication configurations. Detailed below.
Replicas TableReplicaArrayInput
// Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS owned Customer Master Key if this argument isn't specified.
ServerSideEncryption TableServerSideEncryptionPtrInput
// The ARN of the Table Stream. Only available when `streamEnabled = true`
StreamArn pulumi.StringPtrInput
// Indicates whether Streams are to be enabled (true) or disabled (false).
StreamEnabled pulumi.BoolPtrInput
// A timestamp, in ISO 8601 format, for this stream. Note that this timestamp is not
// a unique identifier for the stream on its own. However, the combination of AWS customer ID,
// table name and this field is guaranteed to be unique.
// It can be used for creating CloudWatch Alarms. Only available when `streamEnabled = true`
StreamLabel pulumi.StringPtrInput
// When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are `KEYS_ONLY`, `NEW_IMAGE`, `OLD_IMAGE`, `NEW_AND_OLD_IMAGES`.
StreamViewType pulumi.StringPtrInput
	// A map of tags to populate on the created table. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Tags pulumi.StringMapInput
	// A map of tags assigned to the resource, including those inherited from the provider.
TagsAll pulumi.StringMapInput
// Defines ttl, has two properties, and can only be specified once:
Ttl TableTtlPtrInput
// The number of write units for this index. Must be set if billingMode is set to PROVISIONED.
WriteCapacity pulumi.IntPtrInput
}
func (TableState) ElementType() reflect.Type {
return reflect.TypeOf((*tableState)(nil)).Elem()
}
type tableArgs struct {
// List of nested attribute definitions. Only required for `hashKey` and `rangeKey` attributes. Each attribute has two properties:
Attributes []TableAttribute `pulumi:"attributes"`
// Controls how you are charged for read and write throughput and how you manage capacity. The valid values are `PROVISIONED` and `PAY_PER_REQUEST`. Defaults to `PROVISIONED`.
BillingMode *string `pulumi:"billingMode"`
// Describe a GSI for the table;
// subject to the normal limits on the number of GSIs, projected
// attributes, etc.
GlobalSecondaryIndexes []TableGlobalSecondaryIndex `pulumi:"globalSecondaryIndexes"`
// The name of the hash key in the index; must be
// defined as an attribute in the resource.
HashKey string `pulumi:"hashKey"`
// Describe an LSI on the table;
// these can only be allocated *at creation* so you cannot change this
// definition after you have created the resource.
LocalSecondaryIndexes []TableLocalSecondaryIndex `pulumi:"localSecondaryIndexes"`
// The name of the index
Name *string `pulumi:"name"`
// Point-in-time recovery options.
PointInTimeRecovery *TablePointInTimeRecovery `pulumi:"pointInTimeRecovery"`
// The name of the range key; must be defined
RangeKey *string `pulumi:"rangeKey"`
// The number of read units for this index. Must be set if billingMode is set to PROVISIONED.
ReadCapacity *int `pulumi:"readCapacity"`
// Configuration block(s) with [DynamoDB Global Tables V2 (version 2019.11.21)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) replication configurations. Detailed below.
Replicas []TableReplica `pulumi:"replicas"`
// Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS owned Customer Master Key if this argument isn't specified.
ServerSideEncryption *TableServerSideEncryption `pulumi:"serverSideEncryption"`
// Indicates whether Streams are to be enabled (true) or disabled (false).
StreamEnabled *bool `pulumi:"streamEnabled"`
// When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are `KEYS_ONLY`, `NEW_IMAGE`, `OLD_IMAGE`, `NEW_AND_OLD_IMAGES`.
StreamViewType *string `pulumi:"streamViewType"`
	// A map of tags to populate on the created table. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Tags map[string]string `pulumi:"tags"`
// Defines ttl, has two properties, and can only be specified once:
Ttl *TableTtl `pulumi:"ttl"`
// The number of write units for this index. Must be set if billingMode is set to PROVISIONED.
WriteCapacity *int `pulumi:"writeCapacity"`
}
// The set of arguments for constructing a Table resource.
type TableArgs struct {
// List of nested attribute definitions. Only required for `hashKey` and `rangeKey` attributes. Each attribute has two properties:
Attributes TableAttributeArrayInput
// Controls how you are charged for read and write throughput and how you manage capacity. The valid values are `PROVISIONED` and `PAY_PER_REQUEST`. Defaults to `PROVISIONED`.
BillingMode pulumi.StringPtrInput
// Describe a GSI for the table;
// subject to the normal limits on the number of GSIs, projected
// attributes, etc.
GlobalSecondaryIndexes TableGlobalSecondaryIndexArrayInput
// The name of the hash key in the index; must be
// defined as an attribute in the resource.
HashKey pulumi.StringInput
// Describe an LSI on the table;
// these can only be allocated *at creation* so you cannot change this
// definition after you have created the resource.
LocalSecondaryIndexes TableLocalSecondaryIndexArrayInput
// The name of the index
Name pulumi.StringPtrInput
// Point-in-time recovery options.
PointInTimeRecovery TablePointInTimeRecoveryPtrInput
// The name of the range key; must be defined
RangeKey pulumi.StringPtrInput
// The number of read units for this index. Must be set if billingMode is set to PROVISIONED.
ReadCapacity pulumi.IntPtrInput
// Configuration block(s) with [DynamoDB Global Tables V2 (version 2019.11.21)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) replication configurations. Detailed below.
Replicas TableReplicaArrayInput
// Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS owned Customer Master Key if this argument isn't specified.
ServerSideEncryption TableServerSideEncryptionPtrInput
// Indicates whether Streams are to be enabled (true) or disabled (false).
StreamEnabled pulumi.BoolPtrInput
// When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are `KEYS_ONLY`, `NEW_IMAGE`, `OLD_IMAGE`, `NEW_AND_OLD_IMAGES`.
StreamViewType pulumi.StringPtrInput
	// A map of tags to populate on the created table. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Tags pulumi.StringMapInput
// Defines ttl, has two properties, and can only be specified once:
Ttl TableTtlPtrInput
// The number of write units for this index. Must be set if billingMode is set to PROVISIONED.
WriteCapacity pulumi.IntPtrInput
}
func (TableArgs) ElementType() reflect.Type {
return reflect.TypeOf((*tableArgs)(nil)).Elem()
}
type TableInput interface {
pulumi.Input
ToTableOutput() TableOutput
ToTableOutputWithContext(ctx context.Context) TableOutput
}
func (*Table) ElementType() reflect.Type {
return reflect.TypeOf((*Table)(nil))
}
func (i *Table) ToTableOutput() TableOutput {
return i.ToTableOutputWithContext(context.Background())
}
func (i *Table) ToTableOutputWithContext(ctx context.Context) TableOutput {
return pulumi.ToOutputWithContext(ctx, i).(TableOutput)
}
func (i *Table) ToTablePtrOutput() TablePtrOutput {
return i.ToTablePtrOutputWithContext(context.Background())
}
func (i *Table) ToTablePtrOutputWithContext(ctx context.Context) TablePtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(TablePtrOutput)
}
type TablePtrInput interface {
pulumi.Input
ToTablePtrOutput() TablePtrOutput
ToTablePtrOutputWithContext(ctx context.Context) TablePtrOutput
}
type tablePtrType TableArgs
func (*tablePtrType) ElementType() reflect.Type {
return reflect.TypeOf((**Table)(nil))
}
func (i *tablePtrType) ToTablePtrOutput() TablePtrOutput {
return i.ToTablePtrOutputWithContext(context.Background())
}
func (i *tablePtrType) ToTablePtrOutputWithContext(ctx context.Context) TablePtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(TablePtrOutput)
}
// TableArrayInput is an input type that accepts TableArray and TableArrayOutput values.
// You can construct a concrete instance of `TableArrayInput` via:
//
// TableArray{ TableArgs{...} }
type TableArrayInput interface {
pulumi.Input
ToTableArrayOutput() TableArrayOutput
ToTableArrayOutputWithContext(context.Context) TableArrayOutput
}
type TableArray []TableInput
func (TableArray) ElementType() reflect.Type {
return reflect.TypeOf((*[]*Table)(nil)).Elem()
}
func (i TableArray) ToTableArrayOutput() TableArrayOutput {
return i.ToTableArrayOutputWithContext(context.Background())
}
func (i TableArray) ToTableArrayOutputWithContext(ctx context.Context) TableArrayOutput {
return pulumi.ToOutputWithContext(ctx, i).(TableArrayOutput)
}
// TableMapInput is an input type that accepts TableMap and TableMapOutput values.
// You can construct a concrete instance of `TableMapInput` via:
//
// TableMap{ "key": TableArgs{...} }
type TableMapInput interface {
pulumi.Input
ToTableMapOutput() TableMapOutput
ToTableMapOutputWithContext(context.Context) TableMapOutput
}
type TableMap map[string]TableInput
func (TableMap) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]*Table)(nil)).Elem()
}
func (i TableMap) ToTableMapOutput() TableMapOutput {
return i.ToTableMapOutputWithContext(context.Background())
}
func (i TableMap) ToTableMapOutputWithContext(ctx context.Context) TableMapOutput {
return pulumi.ToOutputWithContext(ctx, i).(TableMapOutput)
}
type TableOutput struct{ *pulumi.OutputState }
func (TableOutput) ElementType() reflect.Type {
return reflect.TypeOf((*Table)(nil))
}
func (o TableOutput) ToTableOutput() TableOutput {
return o
}
func (o TableOutput) ToTableOutputWithContext(ctx context.Context) TableOutput {
return o
}
func (o TableOutput) ToTablePtrOutput() TablePtrOutput {
return o.ToTablePtrOutputWithContext(context.Background())
}
func (o TableOutput) ToTablePtrOutputWithContext(ctx context.Context) TablePtrOutput {
return o.ApplyTWithContext(ctx, func(_ context.Context, v Table) *Table {
return &v
}).(TablePtrOutput)
}
type TablePtrOutput struct{ *pulumi.OutputState }
func (TablePtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**Table)(nil))
}
func (o TablePtrOutput) ToTablePtrOutput() TablePtrOutput {
return o
}
func (o TablePtrOutput) ToTablePtrOutputWithContext(ctx context.Context) TablePtrOutput {
return o
}
func (o TablePtrOutput) Elem() TableOutput {
return o.ApplyT(func(v *Table) Table {
if v != nil {
return *v
}
var ret Table
return ret
}).(TableOutput)
}
type TableArrayOutput struct{ *pulumi.OutputState }
func (TableArrayOutput) ElementType() reflect.Type {
return reflect.TypeOf((*[]Table)(nil))
}
func (o TableArrayOutput) ToTableArrayOutput() TableArrayOutput {
return o
}
func (o TableArrayOutput) ToTableArrayOutputWithContext(ctx context.Context) TableArrayOutput {
return o
}
func (o TableArrayOutput) Index(i pulumi.IntInput) TableOutput {
return pulumi.All(o, i).ApplyT(func(vs []interface{}) Table {
return vs[0].([]Table)[vs[1].(int)]
}).(TableOutput)
}
type TableMapOutput struct{ *pulumi.OutputState }
func (TableMapOutput) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]Table)(nil))
}
func (o TableMapOutput) ToTableMapOutput() TableMapOutput {
return o
}
func (o TableMapOutput) ToTableMapOutputWithContext(ctx context.Context) TableMapOutput {
return o
}
func (o TableMapOutput) MapIndex(k pulumi.StringInput) TableOutput {
return pulumi.All(o, k).ApplyT(func(vs []interface{}) Table {
return vs[0].(map[string]Table)[vs[1].(string)]
}).(TableOutput)
}
func init() {
pulumi.RegisterInputType(reflect.TypeOf((*TableInput)(nil)).Elem(), &Table{})
pulumi.RegisterInputType(reflect.TypeOf((*TablePtrInput)(nil)).Elem(), &Table{})
pulumi.RegisterInputType(reflect.TypeOf((*TableArrayInput)(nil)).Elem(), TableArray{})
pulumi.RegisterInputType(reflect.TypeOf((*TableMapInput)(nil)).Elem(), TableMap{})
pulumi.RegisterOutputType(TableOutput{})
pulumi.RegisterOutputType(TablePtrOutput{})
pulumi.RegisterOutputType(TableArrayOutput{})
pulumi.RegisterOutputType(TableMapOutput{})
}
| {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.Attributes == nil {
return nil, errors.New("invalid value for required argument 'Attributes'")
}
if args.HashKey == nil {
return nil, errors.New("invalid value for required argument 'HashKey'")
}
var resource Table
err := ctx.RegisterResource("aws:dynamodb/table:Table", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
} |
test_nomad_hook.py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import patch |
from airflow import configuration
from airflow.contrib.hooks.nomad_hook import NomadHook
class TestNomadHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@patch("airflow.contrib.hooks.nomad_hook.NomadHook.get_nomad_client")
def test_nomad_client_connection(self, get_nomad_client):
NomadHook(nomad_conn_id='nomad_default')
        get_nomad_client.assert_called_once()
if __name__ == '__main__':
unittest.main() | |
stan-client.providers.ts | import { StanClientModuleOptions } from './interfaces'; | ): any[] {
return [{ provide: STAN_CLIENT_MODULE_OPTIONS, useValue: options || {} }];
} | import { STAN_CLIENT_MODULE_OPTIONS } from './stan-client.constants';
export function createStanClientProvider(
options: StanClientModuleOptions |
SkimAnalyzerCount.py | from __future__ import print_function
import itertools
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.HeppyCore.framework.event import Event
from PhysicsTools.HeppyCore.statistics.counter import Counter, Counters
from DataFormats.FWLite import Events, Handle,Lumis
class SkimAnalyzerCount( Analyzer ):
#---------------------------------------------
    # TO FIND THE INITIAL EVENTS BEFORE THE SKIM
#---------------------------------------------
def __init__(self, cfg_ana, cfg_comp, looperName):
super(SkimAnalyzerCount, self).__init__(cfg_ana, cfg_comp, looperName)
self.useLumiBlocks = self.cfg_ana.useLumiBlocks if (hasattr(self.cfg_ana,'useLumiBlocks')) else False
self.verbose = getattr(self.cfg_ana, 'verbose', False)
def declareHandles(self):
super(SkimAnalyzerCount, self).declareHandles()
self.counterHandle = Handle("edm::MergeableCounter")
self.mchandles['GenInfo'] = AutoHandle( ('generator','',''), 'GenEventInfoProduct' )
def beginLoop(self, setup):
super(SkimAnalyzerCount,self).beginLoop(setup)
self.counters.addCounter('SkimReport')
self.count = self.counters.counter('SkimReport')
self.count.register('All Events')
if self.cfg_comp.isMC:
self.count.register('Sum Weights')
if not self.useLumiBlocks:
#print 'Will actually count events instead of accessing lumi blocks'
return True
print('Counting the total events before the skim by accessing luminosity blocks')
lumis = Lumis(self.cfg_comp.files)
totalEvents=0
for lumi in lumis:
if lumi.getByLabel('prePathCounter',self.counterHandle):
totalEvents+=self.counterHandle.product().value
else:
self.useLumiBlocks = False
break
if self.useLumiBlocks: | print('Done -> proceeding with the analysis')
else:
print('Failed -> will have to actually count events (this can happen if the input dataset is not a CMG one)')
def process(self, event):
if self.verbose:
print("\nProcessing run:lumi:event %d:%d:%d" % (
event.input.eventAuxiliary().id().run(),
event.input.eventAuxiliary().id().luminosityBlock(),
event.input.eventAuxiliary().id().event()))
if not self.useLumiBlocks:
self.readCollections( event.input )
self.count.inc('All Events')
if self.cfg_comp.isMC:
self.count.inc('Sum Weights', self.mchandles['GenInfo'].product().weight())
return True | self.count.inc('All Events',totalEvents)
if self.cfg_comp.isMC:
self.count.inc('Sum Weights',totalEvents) |
state.go | package state
import (
"bytes"
"encoding/gob"
"fmt"
"io"
"os"
"sync"
)
var lock sync.Mutex
var once sync.Once
//singleton reference
var fileTypeInstance *fileType
//fileType is the main struct for file database
type fileType struct {
path string
isRegistered bool
}
//getFileTypeInstance returns the singleton instance of the filedb object
func getFileTypeInstance() (*fileType, error) {
if fileTypeInstance == nil {
return nil, fmt.Errorf("state DB nil")
}
return fileTypeInstance, nil
}
//InitiateFileTypeInstance initiates the instance and sets the path for the DB
func InitiateFileTypeInstance(filePath string) {
if fileTypeInstance == nil {
once.Do(func() {
fileTypeInstance = &fileType{path: filePath}
})
}
}
// Save saves a representation of v to the file at path.
func Save(v interface{}) error {
lock.Lock()
defer lock.Unlock()
fileType, err := getFileTypeInstance()
if err != nil {
return err
}
if !fileType.isRegistered {
gob.Register(v)
fileType.isRegistered = true
}
f, err := os.Create(fileType.path)
if err != nil {
return err
}
defer f.Close()
r, err := marshal(v)
if err != nil {
return err
}
_, err = io.Copy(f, r)
return err
}
// Load loads the file at path into v.
func Load(v interface{}) error {
fileType, err := getFileTypeInstance()
if err != nil {
return err
}
if fileExists(fileType.path) {
lock.Lock()
defer lock.Unlock()
f, err := os.Open(fileType.path)
if err != nil |
defer f.Close()
return unmarshal(f, v)
}
// fmt.Printf("Db file %v not found\n", fileType.path)
return nil
}
// marshal is a function that marshals the object into an
// io.Reader.
var marshal = func(v interface{}) (io.Reader, error) {
var buf bytes.Buffer
e := gob.NewEncoder(&buf)
err := e.Encode(v)
if err != nil {
return nil, err
}
return bytes.NewReader(buf.Bytes()), nil
}
// unmarshal is a function that unmarshals the data from the
// reader into the specified value.
var unmarshal = func(r io.Reader, v interface{}) error {
d := gob.NewDecoder(r)
err := d.Decode(v)
if err != nil {
return err
}
return nil
}
// fileExists checks if a file exists and is not a directory before we
// try using it to prevent further errors.
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
| {
return err
} |
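// Usage sketch (illustrative, not part of the package): the DB path is fixed once via
// InitiateFileTypeInstance, after which Save and Load round-trip any gob-encodable value.
// The import path "example.com/app/state" and the settings struct are assumed here.
package main

import (
	"fmt"

	"example.com/app/state"
)

type settings struct {
	Name  string
	Count int
}

func main() {
	state.InitiateFileTypeInstance("/tmp/app-state.gob")
	if err := state.Save(settings{Name: "demo", Count: 3}); err != nil {
		fmt.Println("save failed:", err)
		return
	}
	var loaded settings
	if err := state.Load(&loaded); err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Printf("restored: %+v\n", loaded) // prints: restored: {Name:demo Count:3}
}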
error.py | """
Ethereum Virtual Machine (EVM) Errors
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
Errors which cause the EVM to halt exceptionally.
""" | class StackUnderflowError(Exception):
"""
Occurs when a pop is executed on an empty stack.
"""
pass
class StackOverflowError(Exception):
"""
Occurs when a push is executed on a stack at max capacity.
"""
pass
class OutOfGasError(Exception):
"""
Occurs when an operation costs more than the amount of gas left in the
frame.
"""
pass
class InvalidOpcode(Exception):
"""
Raised when an invalid opcode is encountered.
"""
pass
class InvalidJumpDestError(Exception):
"""
Occurs when the destination of a jump operation doesn't meet any of the
following criteria:
* The jump destination is less than the length of the code.
* The jump destination should have the `JUMPDEST` opcode (0x5B).
* The jump destination shouldn't be part of the data corresponding to
`PUSH-N` opcodes.
"""
class StackDepthLimitError(Exception):
"""
Raised when the message depth is greater than `1024`
"""
pass | |
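# Illustrative sketch (not part of this module) of the check behind InvalidJumpDestError:
# a JUMP target is valid only if it lies inside the code, is the JUMPDEST opcode (0x5B)
# and is not a byte of PUSH-N immediate data. The helper below is an assumption about
# how such a scan is typically written, not this package's implementation.
def _valid_jump_destinations(code: bytes) -> set:
    valid = set()
    pc = 0
    while pc < len(code):
        op = code[pc]
        if op == 0x5B:  # JUMPDEST
            valid.add(pc)
        elif 0x60 <= op <= 0x7F:  # PUSH1..PUSH32 carry 1..32 bytes of immediate data
            pc += op - 0x5F  # skip the pushed bytes so they can never be destinations
        pc += 1
    return valid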
seaborn.py | from typing import Tuple
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def heatmap(
elements: pd.DataFrame,
prop: str,
style: str = "whitegrid",
figsize: Tuple[int] = (16, 10),
cmap: str = "RdBu_r",
lw: int = 1,
output: str = None,
**kwargs
):
""" | prop : Name of the attribute of Element object that is available from the
elements table
    style : Seaborn style option, default='whitegrid'
figsize : Size of the plot, default=(16, 10)
cmap : Colormap to use, default='RdBu_r'
    lw : Seaborn heatmap `linewidths` argument, default=1,
see http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.heatmap.html
output : File name to save the plot, by default nothing is saved
"""
# add lanthanides and actinides
keys = ["period", "group_id", prop]
els = elements[keys].dropna()
elements_rect = els.pivot(*keys)
sns.set(font_scale=1.5, style=style, rc={"figure.figsize": figsize})
mask = np.asarray(elements_rect.isnull())
ax = sns.heatmap(elements_rect, cmap=cmap, mask=mask, linewidths=lw, **kwargs)
n = len(ax.xaxis.get_ticklabels())
ax.set_yticklabels(elements_rect.index[::-1], rotation=0)
ax.set_xticklabels(list(range(1, n + 1)))
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
ax.set_xlabel("Group")
ax.set_ylabel("Period")
if output is not None:
plt.savefig(output)
return ax | Plot a heatmap of the given property
Args:
elements: DataFrame with data about elements |
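# Usage sketch (illustrative data, not part of the module): heatmap() only needs a
# DataFrame with 'period', 'group_id' and the plotted property as columns, so a tiny
# hand-made frame is enough to exercise it; the radius values below are approximate.
import pandas as pd

demo = pd.DataFrame({
    "period": [1, 1, 2, 2],
    "group_id": [1, 18, 1, 2],
    "covalent_radius": [31.0, 28.0, 128.0, 96.0],  # picometres
})
ax = heatmap(demo, "covalent_radius", figsize=(6, 4))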
rule_30.py | """Generate an animation of the cellular automaton Rule 30."""
import json
import os
import pathlib
import shutil
import subprocess
import tempfile
import colour
import cv2
import imageio
import numpy as np
import scipy.signal as sg
import tqdm
# Global parameters
CONFIG_PATH = 'config/full.json'
FFMPEG_PATH = '/usr/bin/ffmpeg'
# Load configuration
with open(CONFIG_PATH) as f:
config = json.load(f)
VIDEO_WIDTH = config['video_width']
VIDEO_HEIGHT = config['video_height']
SECS = config['secs']
FPS = config['fps']
PIXEL_SIZE = config['pixel_size']
OUTPUT_PATH = config['output_path']
# Trade-off speed and temp storage requirements
COMP_LEVEL = config['comp_level']
COLOURS = map(colour.Color, config['colours'])
# Constants
STATE_WIDTH = VIDEO_WIDTH // PIXEL_SIZE
STATE_HEIGHT = VIDEO_HEIGHT // PIXEL_SIZE
NUM_FRAMES = SECS * FPS
class Rule30:
"""A class for generating Rule 30."""
neighbours = np.array([[1, 2, 4]], np.uint8)
kernel = np.array([0, 1, 2, 3, 4, 0, 0, 0])
colours = np.array([
list(map(lambda x: round(255 * x), c.rgb)) for c in COLOURS
], np.uint8)
def __init__(self, width, height):
"""Initialise the Rule 30 generator and set initial state.
Args:
width (int): State width
height(int): State height
"""
self.width = width
self.height = height
self.state = np.zeros((self.height, self.width), np.uint8)
self.peak_height = 1
self.state[-1, self.width // 2] = 2
self.rgb = None
self._update_rgb()
def step(self):
"""Update the state and RGB representation."""
self._update_state()
self._update_rgb()
def _update_state(self):
"""Update the state by applying Rule 30."""
conv_row_alive = (self.state[-1, None, :] > 0).astype(np.uint8)
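        # convolve2d flips the [1, 2, 4] filter, so each cell receives 4*left + 2*centre
        # + 1*right; Rule 30 switches a cell on exactly for neighbourhood indices 1-4
        # (001, 010, 011, 100), which is what the kernel lookup below encodes, and the
        # non-zero kernel values double as colour indices for the RGB conversion.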
rule_index = sg.convolve2d(conv_row_alive, self.neighbours,
mode='same', boundary='wrap')
new_row = self.kernel[rule_index]
self.state = np.concatenate((self.state[1:], new_row))
if self.peak_height < self.height:
self.peak_height += 1
self.state[-self.peak_height, self.width // 2] = 2
def _update_rgb(self):
"""Convert the state to an RGB array."""
self.rgb = self.colours[self.state]
class VideoConverter:
"""A class for converting frames of NumPy arrays to a video."""
def __init__(self, fps=30):
"""Initialise the converter and create a temporary directory.
Args:
fps (int): Frames per second for the converted video
"""
self.fps = fps
self.tmp_dir = tempfile.TemporaryDirectory()
self.curr_frame = 0
def add_frame(self, frame):
"""Adds a new frame to the video.
Args:
frame (uint8 NumPy array of shape: (video_height, video_width, 3))
Data of the new frame as RGB. All frames must have the same
dimensions.
"""
frame_path = os.path.join(self.tmp_dir.name, f'{self.curr_frame}.png')
imageio.imwrite(frame_path, frame, compress_level=COMP_LEVEL)
self.curr_frame += 1
def write(self, output_path):
|
def main():
converter = VideoConverter(fps=FPS)
animation = Rule30(STATE_WIDTH, STATE_HEIGHT + 1)
for __ in tqdm.trange(NUM_FRAMES // PIXEL_SIZE,
desc='Generating frames'):
small_frame = animation.rgb
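# Upscale with nearest-neighbour so cell edges stay sharp, then emit
# PIXEL_SIZE frames whose crop window shifts by one pixel each, giving a
# smooth per-pixel scroll between automaton steps.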
enlarged_frame = cv2.resize(small_frame,
(VIDEO_WIDTH, VIDEO_HEIGHT + PIXEL_SIZE),
interpolation=cv2.INTER_NEAREST)
for i in range(PIXEL_SIZE):
converter.add_frame(enlarged_frame[i:(-PIXEL_SIZE + i)])
animation.step()
converter.write(OUTPUT_PATH)
if __name__ == '__main__':
main()
| """Converts the accumulated frames to video and writes the result.
Args:
output_path: (string) Path where to save the video file
"""
abs_tmp_dir_path = pathlib.Path(self.tmp_dir.name).absolute()
abs_output_path = pathlib.Path(output_path).absolute()
os.makedirs(os.path.dirname(abs_output_path), exist_ok=True)
if output_path[-4:] == '.mp4':
subprocess.call([FFMPEG_PATH,
'-framerate', f'{self.fps}',
'-i', f'{abs_tmp_dir_path}/%d.png',
'-vcodec', 'libx264',
'-pix_fmt', 'yuv420p',
# Video quality, lower is better, but zero
# (lossless) doesn't work.
'-crf', '1',
'-y', # overwrite output files without asking
abs_output_path
])
elif output_path[-4:] == '.gif':
subprocess.call([FFMPEG_PATH,
'-i', f'{abs_tmp_dir_path}/%d.png',
'-y', # overwrite output files without asking
abs_output_path
])
else:
raise NotImplementedError(
"Output filetype not supported (expected .mp4 or .gif)"
)
self.tmp_dir.cleanup()
print(f"Video written to: {abs_output_path}") |
SocialIcon.tsx | import React from 'react';
import {
View,
StyleSheet,
Platform,
Pressable,
ActivityIndicator,
ViewStyle,
StyleProp,
TextStyle,
} from 'react-native';
import Icon from '../Icon';
import Text from '../Text';
import fonts from '../config/fonts';
import {
androidRipple,
InlinePressableProps,
RneFunctionComponent,
} from '../helpers';
const colors = {
'github-alt': '#000000',
'google-plus-official': '#dd4b39',
google: '#dd4b39',
'reddit-alien': '#fc461e',
'stack-overflow': '#f27f33',
angellist: '#1c4082',
codepen: '#000000',
envelope: '#000000',
etsy: '#f2581e',
facebook: '#4267B2',
'facebook-messenger': '#0084ff',
flickr: '#ff0084',
foursquare: '#0072b1',
github: '#000000',
gitlab: '#e14329',
instagram: '#517fa4',
linkedin: '#007bb6',
medium: '#02b875',
pinterest: '#cb2027',
quora: '#a82400',
soundcloud: '#f50',
steam: '#c6c3c1',
stumbleupon: '#EB4823',
tumblr: '#32506d',
twitch: '#6441A5',
twitter: '#00aced',
vimeo: '#aad450',
vk: '#5181b8',
wechat: '#7bb32e',
weibo: '#e6162d',
whatsapp: '#075e54',
wordpress: '#21759b',
youtube: '#bb0000',
microsoft: '#46A4F2',
reddit: '#ed452f',
};
export type SocialMediaType =
| 'facebook'
| 'facebook-messenger' | | 'pinterest'
| 'linkedin'
| 'youtube'
| 'vimeo'
| 'tumblr'
| 'instagram'
| 'quora'
| 'flickr'
| 'foursquare'
| 'wordpress'
| 'stumbleupon'
| 'github'
| 'github-alt'
| 'twitch'
| 'medium'
| 'soundcloud'
| 'stack-overflow'
| 'gitlab'
| 'angellist'
| 'codepen'
| 'weibo'
| 'vk'
| 'microsoft'
| 'reddit';
export type SocialIconProps = {
/** Type of button. */
Component?: typeof React.Component;
/** Social media type. */
type?: SocialMediaType;
/** Creates button with a social icon. */
button?: boolean;
/** Type of icon set. [Supported sets here](icon#available-icon-sets). */
iconType?: string;
/** Extra styling for icon component. */
iconStyle?: StyleProp<ViewStyle>;
/** Adds styling to the button. */
style?: StyleProp<ViewStyle>;
/** Specify the color of the icon. */
iconColor?: string;
/** Add Underlay color. */
underlayColor?: string;
/** Title if made into a button. */
title?: string;
/** Raised adds a drop shadow, set to false to remove. */
raised?: boolean;
/** Disables the button, if true. */
disabled?: boolean;
/** Shows loading indicator. */
loading?: boolean;
/** Style to render when in loading state. */
activityIndicatorStyle?: StyleProp<ViewStyle>;
/** Decides the size of the activity indicator. */
small?: string;
/** Specify the size of the icon. */
iconSize?: number;
/** Reverses icon color scheme, setting background to white and icon to primary color. */
light?: boolean;
/** Specify font weight of title if set as a button with a title. */
fontWeight?: string;
/** Specify text styling. */
fontStyle?: StyleProp<TextStyle>;
/** Specify different font family. */
fontFamily?: string;
} & InlinePressableProps;
/** SocialIcons are visual cues to online and social media networks. We offer a varied range of social icons. */
export const SocialIcon: RneFunctionComponent<SocialIconProps> = ({
activityIndicatorStyle,
button = false,
disabled,
fontFamily,
fontStyle,
fontWeight,
iconType = 'font-awesome',
iconColor = 'white',
iconSize = 24,
iconStyle,
light,
loading,
onLongPress,
onPress,
onPressOut,
onPressIn,
Component = onPress || onLongPress || onPressIn || onPressOut
? Pressable
: View,
raised = true,
small,
style,
title,
type,
underlayColor,
pressableProps,
...attributes
}) => {
const shouldShowExpandedButton = button && title;
return (
<Component
{...{
onLongPress,
onPress,
onPressOut,
onPressIn,
android_ripple: androidRipple(
light ? 'white' : underlayColor || (type && colors[type])
),
...pressableProps,
...attributes,
}}
testID="RNE_SocialIcon"
underlayColor={light ? 'white' : underlayColor || (type && colors[type])}
disabled={disabled}
style={StyleSheet.flatten([
raised && styles.raised,
styles.container,
button && styles.button,
!button && raised && styles.icon,
!button &&
!light &&
!raised && {
width: iconSize * 2 + 4,
height: iconSize * 2 + 4,
borderRadius: iconSize * 2,
},
{ backgroundColor: type && colors[type] },
{
width: iconSize * 2 + 4,
height: iconSize * 2 + 4,
borderRadius: iconSize * 2,
},
light && { backgroundColor: 'white' },
style && style,
])}
>
<View style={styles.wrapper}>
{(shouldShowExpandedButton || !loading) && (
<Icon
testID="RNE_Icon"
iconStyle={StyleSheet.flatten([iconStyle && iconStyle])}
color={light ? type && colors[type] : iconColor}
name={type as SocialMediaType}
size={iconSize}
type={iconType}
/>
)}
{shouldShowExpandedButton && (
<Text
style={
StyleSheet.flatten([
styles.title,
light && { color: type && colors[type] },
fontFamily && { fontFamily },
fontWeight && { fontWeight },
fontStyle && fontStyle,
]) as TextStyle
}
>
{title}
</Text>
)}
{loading && (
<ActivityIndicator
testID="RNE_ActivityIndicator"
animating
style={StyleSheet.flatten([
styles.activityIndicatorStyle,
activityIndicatorStyle,
])}
color={light ? type && colors[type] : iconColor || 'white'}
size={(small && 'small') || 'large'}
/>
)}
</View>
</Component>
);
};
const styles = StyleSheet.create({
container: {
margin: 7,
borderRadius: 30,
flexDirection: 'row',
justifyContent: 'center',
alignItems: 'center',
},
button: {
paddingTop: 14,
paddingBottom: 14,
},
raised: {
...Platform.select({
android: {
elevation: 2,
},
default: {
shadowColor: 'rgba(0,0,0, .4)',
shadowOffset: { height: 1, width: 1 },
shadowOpacity: 1,
shadowRadius: 1,
},
}),
},
wrapper: {
flexDirection: 'row',
justifyContent: 'center',
alignItems: 'center',
},
title: {
color: 'white',
marginLeft: 15,
...Platform.select({
android: {
...fonts.android.black,
},
default: {
fontWeight: 'bold',
},
}),
},
icon: {
height: 52,
width: 52,
},
activityIndicatorStyle: {
marginHorizontal: 10,
height: 0,
},
});
SocialIcon.displayName = 'SocialIcon'; | | 'whatsapp'
| 'twitter'
| 'google-plus-official'
| 'google' |
escrow_upgradeable.rs | #[allow(dead_code)]
pub mod escrow_upgradeable {
#[rustfmt::skip] use ethcontract as ethcontract;
#[doc = "Generated by `ethcontract`"]
#[derive(Clone)]
pub struct Contract {
methods: Methods,
}
impl Contract {
#[doc = r" Retrieves the raw contract instance used to generate the type safe"]
#[doc = r" API for this contract."]
pub fn raw_contract() -> &'static self::ethcontract::Contract {
use self::ethcontract::common::artifact::truffle::TruffleLoader;
use self::ethcontract::private::lazy_static;
use self::ethcontract::Contract;
lazy_static! {
pub static ref CONTRACT: Contract = {
# [allow (unused_mut)] let mut contract = TruffleLoader :: new () . load_contract_from_str ("{\"contractName\":\"EscrowUpgradeable\",\"abi\":[{\"type\":\"function\",\"name\":\"initialize\",\"inputs\":[],\"outputs\":[],\"constant\":false,\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"depositsOf\",\"inputs\":[{\"name\":\"payee\",\"type\":\"address\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"constant\":false,\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"owner\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"constant\":false,\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"renounceOwnership\",\"inputs\":[],\"outputs\":[],\"constant\":false,\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"withdraw\",\"inputs\":[{\"name\":\"payee\",\"type\":\"address\"}],\"outputs\":[],\"constant\":false,\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"deposit\",\"inputs\":[{\"name\":\"payee\",\"type\":\"address\"}],\"outputs\":[],\"constant\":false,\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"transferOwnership\",\"inputs\":[{\"name\":\"newOwner\",\"type\":\"address\"}],\"outputs\":[],\"constant\":false,\"stateMutability\":\"nonpayable\"},{\"type\":\"event\",\"name\":\"Withdrawn\",\"inputs\":[{\"name\":\"payee\",\"type\":\"address\",\"indexed\":true},{\"name\":\"weiAmount\",\"type\":\"uint256\",\"indexed\":false}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Deposited\",\"inputs\":[{\"name\":\"payee\",\"type\":\"address\",\"indexed\":true},{\"name\":\"weiAmount\",\"type\":\"uint256\",\"indexed\":false}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"OwnershipTransferred\",\"inputs\":[{\"name\":\"previousOwner\",\"type\":\"address\",\"indexed\":true},{\"name\":\"newOwner\",\"type\":\"address\",\"indexed\":true}],\"anonymous\":false}],\"bytecode\":\"608060405234801561001057600080fd5b50610797806100206000396000f3fe6080604052600436106100705760003560e01c80638da5cb5b1161004e5780638da5cb5b146100c1578063e3a9db1a146100ee578063f2fde38b14610132578063f340fa011461015257610070565b806351cff8d914610075578063715018a6146100975780638129fc1c146100ac575b600080fd5b34801561008157600080fd5b50610095610090366004610682565b610165565b005b3480156100a357600080fd5b50610095610207565b3480156100b857600080fd5b5061009561023d565b3480156100cd57600080fd5b506033546040516001600160a01b0390911681526020015b60405180910390f35b3480156100fa57600080fd5b50610124610109366004610682565b6001600160a01b031660009081526065602052604090205490565b6040519081526020016100e5565b34801561013e57600080fd5b5061009561014d366004610682565b6102b1565b610095610160366004610682565b610349565b6033546001600160a01b031633146101985760405162461bcd60e51b815260040161018f906106f3565b60405180910390fd5b6001600160a01b03811660008181526065602052604081208054919055906101c090826103dd565b816001600160a01b03167f7084f5476618d8e60b11ef0d7d3f06914655adb8793e28ff7f018d4c76d505d5826040516101fb91815260200190565b60405180910390a25050565b6033546001600160a01b031633146102315760405162461bcd60e51b815260040161018f906106f3565b61023b60006104fb565b565b600054610100900460ff1680610256575060005460ff16155b6102725760405162461bcd60e51b815260040161018f906106a5565b600054610100900460ff16158015610294576000805461ffff19166101011790555b61029c61054d565b80156102ae576000805461ff00191690555b50565b6033546001600160a01b031633146102db5760405162461bcd60e51b815260040161018f906106f3565b6001600160a01b0381166103405760405162461bcd60e51b815260206004820152602660248201527f4f776e61626c653a206e65772
06f776e657220697320746865207a65726f206160448201526564647265737360d01b606482015260840161018f565b6102ae816104fb565b6033546001600160a01b031633146103735760405162461bcd60e51b815260040161018f906106f3565b6001600160a01b03811660009081526065602052604081208054349283929161039d908490610728565b90915550506040518181526001600160a01b038316907f2da466a7b24304f47e87fa2e1e5a81b9831ce54fec19055ce277ca2f39ba42c4906020016101fb565b8047101561042d5760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a20696e73756666696369656e742062616c616e6365000000604482015260640161018f565b6000826001600160a01b03168260405160006040518083038185875af1925050503d806000811461047a576040519150601f19603f3d011682016040523d82523d6000602084013e61047f565b606091505b50509050806104f65760405162461bcd60e51b815260206004820152603a60248201527f416464726573733a20756e61626c6520746f2073656e642076616c75652c207260448201527f6563697069656e74206d61792068617665207265766572746564000000000000606482015260840161018f565b505050565b603380546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b600054610100900460ff1680610566575060005460ff16155b6105825760405162461bcd60e51b815260040161018f906106a5565b600054610100900460ff161580156105a4576000805461ffff19166101011790555b6105ac6105b8565b6105b4610622565b61029c5b600054610100900460ff16806105d1575060005460ff16155b6105ed5760405162461bcd60e51b815260040161018f906106a5565b600054610100900460ff1615801561029c576000805461ffff191661010117905580156102ae576000805461ff001916905550565b600054610100900460ff168061063b575060005460ff16155b6106575760405162461bcd60e51b815260040161018f906106a5565b600054610100900460ff16158015610679576000805461ffff19166101011790555b61029c336104fb565b600060208284031215610693578081fd5b813561069e8161074c565b9392505050565b6020808252602e908201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160408201526d191e481a5b9a5d1a585b1a5e995960921b606082015260800190565b6020808252818101527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604082015260600190565b6000821982111561074757634e487b7160e01b81526011600452602481fd5b500190565b6001600160a01b03811681146102ae57600080fdfea26469706673582212209d575624978f1587a60baaaf3185655693db396e8e26251e40f337fa52271e8664736f6c63430008030033\",\"networks\":{},\"devdoc\":{\"details\":null,\"methods\":{}},\"userdoc\":{\"details\":null,\"methods\":{}}}") . expect ("valid contract JSON") ;
contract
};
}
&CONTRACT
}
#[doc = r" Creates a new contract instance with the specified `web3`"]
#[doc = r" provider at the given `Address`."]
#[doc = r""]
#[doc = r" Note that this does not verify that a contract with a matching"]
#[doc = r" `Abi` is actually deployed at the given address."]
pub fn at<F, B, T>(
web3: &self::ethcontract::web3::api::Web3<T>,
address: self::ethcontract::Address,
) -> Self
where
F: std::future::Future<
Output = Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>,
> + Send
+ 'static,
B: std::future::Future<
Output = Result<
Vec<Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>>,
self::ethcontract::web3::Error,
>,
> + Send
+ 'static,
T: self::ethcontract::web3::Transport<Out = F>
+ self::ethcontract::web3::BatchTransport<Batch = B>
+ Send
+ Sync
+ 'static,
{
Contract::with_deployment_info(web3, address, None)
}
#[doc = r" Creates a new contract instance with the specified `web3` provider with"]
#[doc = r" the given `Abi` at the given `Address` and an optional transaction hash."]
#[doc = r" This hash is used to retrieve contract related information such as the"]
#[doc = r" creation block (which is useful for fetching all historic events)."]
#[doc = r""]
#[doc = r" Note that this does not verify that a contract with a matching `Abi` is"]
#[doc = r" actually deployed at the given address nor that the transaction hash,"]
#[doc = r" when provided, is actually for this contract deployment."]
pub fn with_deployment_info<F, B, T>(
web3: &self::ethcontract::web3::api::Web3<T>,
address: self::ethcontract::Address,
deployment_information: Option<ethcontract::common::DeploymentInformation>,
) -> Self
where
F: std::future::Future<
Output = Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>,
> + Send
+ 'static,
B: std::future::Future<
Output = Result<
Vec<Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>>,
self::ethcontract::web3::Error,
>,
> + Send
+ 'static,
T: self::ethcontract::web3::Transport<Out = F>
+ self::ethcontract::web3::BatchTransport<Batch = B>
+ Send
+ Sync
+ 'static,
{
use self::ethcontract::transport::DynTransport;
use self::ethcontract::web3::api::Web3;
use self::ethcontract::Instance;
let transport = DynTransport::new(web3.transport().clone());
let web3 = Web3::new(transport);
let abi = Self::raw_contract().abi.clone();
let instance = Instance::with_deployment_info(web3, abi, address, deployment_information);
Contract::from_raw(instance)
}
#[doc = r" Creates a contract from a raw instance."]
fn from_raw(instance: self::ethcontract::dyns::DynInstance) -> Self {
let methods = Methods { instance };
Contract { methods }
}
#[doc = r" Returns the contract address being used by this instance."]
pub fn address(&self) -> self::ethcontract::Address {
self.raw_instance().address()
}
#[doc = r" Returns the deployment information of the contract"]
#[doc = r" if it is known, `None` otherwise."]
pub fn deployment_information(&self) -> Option<ethcontract::common::DeploymentInformation> {
self.raw_instance().deployment_information()
}
#[doc = r" Returns a reference to the default method options used by this"]
#[doc = r" contract."]
pub fn defaults(&self) -> &self::ethcontract::contract::MethodDefaults {
&self.raw_instance().defaults
}
#[doc = r" Returns a mutable reference to the default method options used"]
#[doc = r" by this contract."]
pub fn defaults_mut(&mut self) -> &mut self::ethcontract::contract::MethodDefaults {
&mut self.raw_instance_mut().defaults
}
#[doc = r" Returns a reference to the raw runtime instance used by this"]
#[doc = r" contract."]
pub fn raw_instance(&self) -> &self::ethcontract::dyns::DynInstance {
&self.methods.instance
}
#[doc = r" Returns a mutable reference to the raw runtime instance used by"]
#[doc = r" this contract."]
fn raw_instance_mut(&mut self) -> &mut self::ethcontract::dyns::DynInstance {
&mut self.methods.instance
}
}
impl std::fmt::Debug for Contract {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_tuple(stringify!(EscrowUpgradeable))
.field(&self.address())
.finish()
}
}
impl Contract {
#[doc = "Generated by `ethcontract`"]
#[allow(clippy::too_many_arguments)]
pub fn builder<F, B, T>(
web3: &self::ethcontract::web3::api::Web3<T>,
) -> self::ethcontract::dyns::DynDeployBuilder<Self>
where
F: std::future::Future<
Output = Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>,
> + Send
+ 'static,
B: std::future::Future<
Output = Result<
Vec<Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>>,
self::ethcontract::web3::Error,
>,
> + Send
+ 'static,
T: self::ethcontract::web3::Transport<Out = F>
+ self::ethcontract::web3::BatchTransport<Batch = B>
+ Send
+ Sync
+ 'static,
{
use self::ethcontract::contract::DeployBuilder;
use self::ethcontract::dyns::DynTransport;
use self::ethcontract::web3::api::Web3;
let transport = DynTransport::new(web3.transport().clone());
let web3 = Web3::new(transport);
let bytecode = Self::raw_contract().bytecode.clone();
DeployBuilder::new(web3, bytecode, ()).expect("valid deployment args")
}
}
impl self::ethcontract::contract::Deploy<self::ethcontract::dyns::DynTransport> for Contract {
type Context = self::ethcontract::common::Bytecode;
fn bytecode(cx: &Self::Context) -> &self::ethcontract::common::Bytecode {
cx
}
fn abi(_: &Self::Context) -> &self::ethcontract::common::Abi {
&Self::raw_contract().abi
}
fn from_deployment(
web3: self::ethcontract::dyns::DynWeb3,
address: self::ethcontract::Address,
transaction_hash: self::ethcontract::H256,
_: Self::Context,
) -> Self {
Self::with_deployment_info(&web3, address, Some(transaction_hash.into()))
}
}
impl Contract {
#[doc = r" Returns an object that allows accessing typed method signatures."]
pub fn signatures() -> Signatures {
Signatures
}
#[doc = r" Retrieves a reference to type containing all the generated"]
#[doc = r" contract methods. This can be used for methods where the name"]
#[doc = r" would collide with a common method (like `at` or `deployed`)."]
pub fn methods(&self) -> &Methods {
&self.methods
}
}
#[doc = r" Type containing signatures for all methods for generated contract type."]
#[derive(Clone, Copy)]
pub struct Signatures;
impl Signatures {
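// Each signature wraps the method's 4-byte ABI selector (the first four
// bytes of keccak256 over the canonical signature), e.g. `initialize()`
// corresponds to selector 0x8129fc1c.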
#[doc = "Returns signature for method `initialize()`."]
#[allow(clippy::type_complexity)]
pub fn initialize(&self) -> self::ethcontract::contract::Signature<(), ()> {
self::ethcontract::contract::Signature::new([129, 41, 252, 28])
}
#[doc = "Returns signature for method `depositsOf(address):(uint256)`."]
#[allow(clippy::type_complexity)]
pub fn deposits_of(
&self,
) -> self::ethcontract::contract::Signature<
(self::ethcontract::Address,),
self::ethcontract::U256,
> {
self::ethcontract::contract::Signature::new([227, 169, 219, 26])
}
#[doc = "Returns signature for method `owner():(address)`."]
#[allow(clippy::type_complexity)]
pub fn owner(&self) -> self::ethcontract::contract::Signature<(), self::ethcontract::Address> {
self::ethcontract::contract::Signature::new([141, 165, 203, 91])
}
#[doc = "Returns signature for method `renounceOwnership()`."]
#[allow(clippy::type_complexity)]
pub fn renounce_ownership(&self) -> self::ethcontract::contract::Signature<(), ()> {
self::ethcontract::contract::Signature::new([113, 80, 24, 166])
}
#[doc = "Returns signature for method `withdraw(address)`."]
#[allow(clippy::type_complexity)]
pub fn withdraw(
&self,
) -> self::ethcontract::contract::Signature<(self::ethcontract::Address,), ()> {
self::ethcontract::contract::Signature::new([81, 207, 248, 217])
}
#[doc = "Returns signature for method `deposit(address)`."]
#[allow(clippy::type_complexity)]
pub fn deposit(
&self,
) -> self::ethcontract::contract::Signature<(self::ethcontract::Address,), ()> {
self::ethcontract::contract::Signature::new([243, 64, 250, 1])
}
#[doc = "Returns signature for method `transferOwnership(address)`."]
#[allow(clippy::type_complexity)]
pub fn transfer_ownership(
&self,
) -> self::ethcontract::contract::Signature<(self::ethcontract::Address,), ()> {
self::ethcontract::contract::Signature::new([242, 253, 227, 139])
}
}
#[doc = r" Type containing all contract methods for generated contract type."]
#[derive(Clone)]
pub struct Methods {
instance: self::ethcontract::dyns::DynInstance,
}
#[allow(clippy::too_many_arguments, clippy::type_complexity)]
impl Methods {
#[doc = "Generated by `ethcontract`"]
pub fn initialize(&self) -> self::ethcontract::dyns::DynMethodBuilder<()> {
self
.instance
.method([129, 41, 252, 28], ())
.expect("generated call")
}
#[doc = "Generated by `ethcontract`"]
pub fn deposits_of(
&self,
payee: self::ethcontract::Address,
) -> self::ethcontract::dyns::DynViewMethodBuilder<self::ethcontract::U256> {
self
.instance
.view_method([227, 169, 219, 26], (payee,))
.expect("generated call")
}
#[doc = "Generated by `ethcontract`"]
pub fn owner(
&self,
) -> self::ethcontract::dyns::DynViewMethodBuilder<self::ethcontract::Address> {
self
.instance
.view_method([141, 165, 203, 91], ())
.expect("generated call")
}
#[doc = "Generated by `ethcontract`"]
pub fn renounce_ownership(&self) -> self::ethcontract::dyns::DynMethodBuilder<()> {
self
.instance
.method([113, 80, 24, 166], ())
.expect("generated call")
}
#[doc = "Generated by `ethcontract`"]
pub fn withdraw(
&self,
payee: self::ethcontract::Address,
) -> self::ethcontract::dyns::DynMethodBuilder<()> {
self
.instance
.method([81, 207, 248, 217], (payee,))
.expect("generated call")
}
#[doc = "Generated by `ethcontract`"]
pub fn deposit(
&self,
payee: self::ethcontract::Address,
) -> self::ethcontract::dyns::DynMethodBuilder<()> {
self
.instance
.method([243, 64, 250, 1], (payee,))
.expect("generated call")
}
#[doc = "Generated by `ethcontract`"]
pub fn transfer_ownership(
&self,
new_owner: self::ethcontract::Address,
) -> self::ethcontract::dyns::DynMethodBuilder<()> {
self
.instance
.method([242, 253, 227, 139], (new_owner,))
.expect("generated call")
}
}
impl std::ops::Deref for Contract {
type Target = Methods;
fn deref(&self) -> &Self::Target {
&self.methods
}
}
#[doc = r" Module containing all generated data models for this contract's"]
#[doc = r" events."]
pub mod event_data {
use super::ethcontract;
#[derive(Clone, Debug, Default, Eq, PartialEq, serde :: Deserialize, serde :: Serialize)]
pub struct Withdrawn {
pub payee: self::ethcontract::Address,
pub wei_amount: self::ethcontract::U256,
}
impl Withdrawn {
#[doc = r" Retrieves the signature for the event this data corresponds to."]
#[doc = r" This signature is the Keccak-256 hash of the ABI signature of"]
#[doc = r" this event."]
pub fn signature() -> self::ethcontract::H256 {
self::ethcontract::H256([
112, 132, 245, 71, 102, 24, 216, 230, 11, 17, 239, 13, 125, 63, 6, 145, 70, 85, 173, 184,
121, 62, 40, 255, 127, 1, 141, 76, 118, 213, 5, 213,
])
}
#[doc = r" Retrieves the ABI signature for the event this data corresponds"]
#[doc = r" to. For this event the value should always be:"]
#[doc = r""]
#[doc = "`Withdrawn(address,uint256)`"]
pub fn abi_signature() -> &'static str {
"Withdrawn(address,uint256)"
}
}
impl self::ethcontract::tokens::Tokenize for Withdrawn {
fn from_token(
token: self::ethcontract::common::abi::Token,
) -> Result<Self, self::ethcontract::tokens::Error> {
let (payee, wei_amount) = self::ethcontract::tokens::Tokenize::from_token(token)?;
Ok(Withdrawn { payee, wei_amount })
}
fn into_token(self) -> self::ethcontract::common::abi::Token {
unimplemented!("events are only decoded, not encoded")
}
}
#[derive(Clone, Debug, Default, Eq, PartialEq, serde :: Deserialize, serde :: Serialize)]
pub struct Deposited {
pub payee: self::ethcontract::Address,
pub wei_amount: self::ethcontract::U256,
}
impl Deposited {
#[doc = r" Retrieves the signature for the event this data corresponds to."]
#[doc = r" This signature is the Keccak-256 hash of the ABI signature of"]
#[doc = r" this event."]
pub fn signature() -> self::ethcontract::H256 {
self::ethcontract::H256([
45, 164, 102, 167, 178, 67, 4, 244, 126, 135, 250, 46, 30, 90, 129, 185, 131, 28, 229,
79, 236, 25, 5, 92, 226, 119, 202, 47, 57, 186, 66, 196,
])
}
#[doc = r" Retrieves the ABI signature for the event this data corresponds"]
#[doc = r" to. For this event the value should always be:"]
#[doc = r""]
#[doc = "`Deposited(address,uint256)`"]
pub fn abi_signature() -> &'static str {
"Deposited(address,uint256)"
}
}
impl self::ethcontract::tokens::Tokenize for Deposited {
fn from_token(
token: self::ethcontract::common::abi::Token,
) -> Result<Self, self::ethcontract::tokens::Error> {
let (payee, wei_amount) = self::ethcontract::tokens::Tokenize::from_token(token)?;
Ok(Deposited { payee, wei_amount })
}
fn into_token(self) -> self::ethcontract::common::abi::Token {
unimplemented!("events are only decoded, not encoded")
}
}
#[derive(Clone, Debug, Default, Eq, PartialEq, serde :: Deserialize, serde :: Serialize)]
pub struct OwnershipTransferred {
pub previous_owner: self::ethcontract::Address,
pub new_owner: self::ethcontract::Address,
}
impl OwnershipTransferred {
#[doc = r" Retrieves the signature for the event this data corresponds to."]
#[doc = r" This signature is the Keccak-256 hash of the ABI signature of"]
#[doc = r" this event."]
pub fn signature() -> self::ethcontract::H256 {
self::ethcontract::H256([
139, 224, 7, 156, 83, 22, 89, 20, 19, 68, 205, 31, 208, 164, 242, 132, 25, 73, 127, 151,
34, 163, 218, 175, 227, 180, 24, 111, 107, 100, 87, 224,
])
}
#[doc = r" Retrieves the ABI signature for the event this data corresponds"]
#[doc = r" to. For this event the value should always be:"]
#[doc = r""]
#[doc = "`OwnershipTransferred(address,address)`"]
pub fn abi_signature() -> &'static str {
"OwnershipTransferred(address,address)"
}
}
impl self::ethcontract::tokens::Tokenize for OwnershipTransferred {
fn from_token(
token: self::ethcontract::common::abi::Token,
) -> Result<Self, self::ethcontract::tokens::Error> {
let (previous_owner, new_owner) = self::ethcontract::tokens::Tokenize::from_token(token)?;
Ok(OwnershipTransferred {
previous_owner,
new_owner,
})
}
fn into_token(self) -> self::ethcontract::common::abi::Token {
unimplemented!("events are only decoded, not encoded")
}
}
}
impl Contract {
#[doc = r" Retrieves a handle to a type containing for creating event"]
#[doc = r" streams for all the contract events."]
pub fn events(&self) -> Events<'_> {
Events {
instance: self.raw_instance(),
}
}
}
pub struct Events<'a> {
instance: &'a self::ethcontract::dyns::DynInstance,
}
impl Events<'_> {
#[doc = r" Generated by `ethcontract`."]
pub fn withdrawn(&self) -> self::event_builders::WithdrawnBuilder {
self::event_builders::WithdrawnBuilder(
self
.instance
.event(self::ethcontract::H256([
112, 132, 245, 71, 102, 24, 216, 230, 11, 17, 239, 13, 125, 63, 6, 145, 70, 85, 173,
184, 121, 62, 40, 255, 127, 1, 141, 76, 118, 213, 5, 213,
]))
.expect("generated event filter"),
)
}
#[doc = r" Generated by `ethcontract`."]
pub fn deposited(&self) -> self::event_builders::DepositedBuilder {
self::event_builders::DepositedBuilder(
self
.instance
.event(self::ethcontract::H256([
45, 164, 102, 167, 178, 67, 4, 244, 126, 135, 250, 46, 30, 90, 129, 185, 131, 28, 229,
79, 236, 25, 5, 92, 226, 119, 202, 47, 57, 186, 66, 196,
]))
.expect("generated event filter"),
)
}
#[doc = r" Generated by `ethcontract`."]
pub fn ownership_transferred(&self) -> self::event_builders::OwnershipTransferredBuilder {
self::event_builders::OwnershipTransferredBuilder(
self
.instance
.event(self::ethcontract::H256([
139, 224, 7, 156, 83, 22, 89, 20, 19, 68, 205, 31, 208, 164, 242, 132, 25, 73, 127,
151, 34, 163, 218, 175, 227, 180, 24, 111, 107, 100, 87, 224,
]))
.expect("generated event filter"),
)
}
}
#[doc = r" Module containing the generated event stream builders with type safe"]
#[doc = r" filter methods for this contract's events."]
pub mod event_builders {
use super::ethcontract;
use super::event_data;
#[doc = "A builder for creating a filtered stream of `Withdrawn` events."]
pub struct WithdrawnBuilder(
#[doc = r" The inner event builder."]
pub self::ethcontract::dyns::DynEventBuilder<self::event_data::Withdrawn>,
);
impl WithdrawnBuilder {
#[doc = r" Sets the starting block from which to stream logs for."]
#[doc = r""]
#[doc = r" If left unset defaults to the latest block."]
#[allow(clippy::wrong_self_convention)]
pub fn from_block(mut self, block: self::ethcontract::BlockNumber) -> Self {
self.0 = (self.0).from_block(block);
self
}
#[doc = r" Sets the last block from which to stream logs for."]
#[doc = r""]
#[doc = r" If left unset defaults to the streaming until the end of days."]
#[allow(clippy::wrong_self_convention)]
pub fn to_block(mut self, block: self::ethcontract::BlockNumber) -> Self {
self.0 = (self.0).to_block(block);
self
}
#[doc = r" Limits the number of events that can be retrieved by this filter."]
#[doc = r""]
#[doc = r" Note that this parameter is non-standard."]
pub fn limit(mut self, value: usize) -> Self {
self.0 = (self.0).limit(value);
self
}
#[doc = r" Sets the polling interval. This is used as the interval between"]
#[doc = r" consecutive `eth_getFilterChanges` calls to get filter updates."]
pub fn poll_interval(mut self, value: std::time::Duration) -> Self {
self.0 = (self.0).poll_interval(value);
self
}
#[doc = "Adds a filter for the payee event parameter."]
pub fn payee(mut self, topic: self::ethcontract::Topic<self::ethcontract::Address>) -> Self {
self.0 = (self.0).topic0(topic);
self
}
#[doc = r" Returns a future that resolves with a collection of all existing"]
#[doc = r" logs matching the builder parameters."]
pub async fn query(
self,
) -> std::result::Result<
std::vec::Vec<self::ethcontract::Event<self::event_data::Withdrawn>>,
self::ethcontract::errors::EventError,
> {
(self.0).query().await
}
#[doc = r" Creates an event stream from the current event builder."]
pub fn stream(
self,
) -> impl self::ethcontract::futures::stream::Stream<
Item = std::result::Result<
self::ethcontract::StreamEvent<self::event_data::Withdrawn>,
self::ethcontract::errors::EventError,
>,
> {
(self.0).stream()
}
}
#[doc = "A builder for creating a filtered stream of `Deposited` events."]
pub struct DepositedBuilder(
#[doc = r" The inner event builder."]
pub self::ethcontract::dyns::DynEventBuilder<self::event_data::Deposited>,
);
impl DepositedBuilder {
#[doc = r" Sets the starting block from which to stream logs for."]
#[doc = r""]
#[doc = r" If left unset defaults to the latest block."]
#[allow(clippy::wrong_self_convention)]
pub fn from_block(mut self, block: self::ethcontract::BlockNumber) -> Self {
self.0 = (self.0).from_block(block);
self
}
#[doc = r" Sets the last block from which to stream logs for."]
#[doc = r""]
#[doc = r" If left unset defaults to the streaming until the end of days."]
#[allow(clippy::wrong_self_convention)]
pub fn to_block(mut self, block: self::ethcontract::BlockNumber) -> Self {
self.0 = (self.0).to_block(block);
self
}
#[doc = r" Limits the number of events that can be retrieved by this filter."]
#[doc = r""]
#[doc = r" Note that this parameter is non-standard."]
pub fn limit(mut self, value: usize) -> Self {
self.0 = (self.0).limit(value);
self
}
#[doc = r" Sets the polling interval. This is used as the interval between"]
#[doc = r" consecutive `eth_getFilterChanges` calls to get filter updates."]
pub fn poll_interval(mut self, value: std::time::Duration) -> Self {
self.0 = (self.0).poll_interval(value);
self
}
#[doc = "Adds a filter for the payee event parameter."]
pub fn payee(mut self, topic: self::ethcontract::Topic<self::ethcontract::Address>) -> Self {
self.0 = (self.0).topic0(topic);
self
}
#[doc = r" Returns a future that resolves with a collection of all existing"]
#[doc = r" logs matching the builder parameters."]
pub async fn query(
self,
) -> std::result::Result<
std::vec::Vec<self::ethcontract::Event<self::event_data::Deposited>>,
self::ethcontract::errors::EventError,
> {
(self.0).query().await
}
#[doc = r" Creates an event stream from the current event builder."]
pub fn stream(
self,
) -> impl self::ethcontract::futures::stream::Stream<
Item = std::result::Result<
self::ethcontract::StreamEvent<self::event_data::Deposited>,
self::ethcontract::errors::EventError,
>,
> {
(self.0).stream()
}
}
#[doc = "A builder for creating a filtered stream of `OwnershipTransferred` events."]
pub struct OwnershipTransferredBuilder(
#[doc = r" The inner event builder."]
pub self::ethcontract::dyns::DynEventBuilder<self::event_data::OwnershipTransferred>,
);
impl OwnershipTransferredBuilder {
#[doc = r" Sets the starting block from which to stream logs for."]
#[doc = r""]
#[doc = r" If left unset defaults to the latest block."]
#[allow(clippy::wrong_self_convention)]
pub fn from_block(mut self, block: self::ethcontract::BlockNumber) -> Self {
self.0 = (self.0).from_block(block);
self
}
#[doc = r" Sets the last block from which to stream logs for."]
#[doc = r""]
#[doc = r" If left unset defaults to the streaming until the end of days."]
#[allow(clippy::wrong_self_convention)]
pub fn to_block(mut self, block: self::ethcontract::BlockNumber) -> Self {
self.0 = (self.0).to_block(block);
self
}
#[doc = r" Limits the number of events that can be retrieved by this filter."]
#[doc = r""]
#[doc = r" Note that this parameter is non-standard."]
pub fn limit(mut self, value: usize) -> Self {
self.0 = (self.0).limit(value);
self
}
#[doc = r" Sets the polling interval. This is used as the interval between"]
#[doc = r" consecutive `eth_getFilterChanges` calls to get filter updates."]
pub fn poll_interval(mut self, value: std::time::Duration) -> Self {
self.0 = (self.0).poll_interval(value);
self
}
#[doc = "Adds a filter for the previousOwner event parameter."] | mut self,
topic: self::ethcontract::Topic<self::ethcontract::Address>,
) -> Self {
self.0 = (self.0).topic0(topic);
self
}
#[doc = "Adds a filter for the newOwner event parameter."]
pub fn new_owner(
mut self,
topic: self::ethcontract::Topic<self::ethcontract::Address>,
) -> Self {
self.0 = (self.0).topic1(topic);
self
}
#[doc = r" Returns a future that resolves with a collection of all existing"]
#[doc = r" logs matching the builder parameters."]
pub async fn query(
self,
) -> std::result::Result<
std::vec::Vec<self::ethcontract::Event<self::event_data::OwnershipTransferred>>,
self::ethcontract::errors::EventError,
> {
(self.0).query().await
}
#[doc = r" Creates an event stream from the current event builder."]
pub fn stream(
self,
) -> impl self::ethcontract::futures::stream::Stream<
Item = std::result::Result<
self::ethcontract::StreamEvent<self::event_data::OwnershipTransferred>,
self::ethcontract::errors::EventError,
>,
> {
(self.0).stream()
}
}
}
impl Contract {
#[doc = r" Returns a log stream with all events."]
pub fn all_events(&self) -> self::ethcontract::dyns::DynAllEventsBuilder<Event> {
self::ethcontract::dyns::DynAllEventsBuilder::new(
self.raw_instance().web3(),
self.address(),
self.deployment_information(),
)
}
}
#[doc = r" A contract event."]
#[derive(Clone, Debug, Eq, PartialEq, serde :: Deserialize, serde :: Serialize)]
pub enum Event {
Deposited(self::event_data::Deposited),
OwnershipTransferred(self::event_data::OwnershipTransferred),
Withdrawn(self::event_data::Withdrawn),
}
impl self::ethcontract::contract::ParseLog for Event {
fn parse_log(
log: self::ethcontract::RawLog,
) -> Result<Self, self::ethcontract::errors::ExecutionError> {
let standard_event = log . topics . get (0) . copied () . map (| topic | match topic { self :: ethcontract :: H256 ([45 , 164 , 102 , 167 , 178 , 67 , 4 , 244 , 126 , 135 , 250 , 46 , 30 , 90 , 129 , 185 , 131 , 28 , 229 , 79 , 236 , 25 , 5 , 92 , 226 , 119 , 202 , 47 , 57 , 186 , 66 , 196]) => Ok (Event :: Deposited (log . clone () . decode (Contract :: raw_contract () . abi . event ("Deposited") . expect ("generated event decode")) ?)) , self :: ethcontract :: H256 ([139 , 224 , 7 , 156 , 83 , 22 , 89 , 20 , 19 , 68 , 205 , 31 , 208 , 164 , 242 , 132 , 25 , 73 , 127 , 151 , 34 , 163 , 218 , 175 , 227 , 180 , 24 , 111 , 107 , 100 , 87 , 224]) => Ok (Event :: OwnershipTransferred (log . clone () . decode (Contract :: raw_contract () . abi . event ("OwnershipTransferred") . expect ("generated event decode")) ?)) , self :: ethcontract :: H256 ([112 , 132 , 245 , 71 , 102 , 24 , 216 , 230 , 11 , 17 , 239 , 13 , 125 , 63 , 6 , 145 , 70 , 85 , 173 , 184 , 121 , 62 , 40 , 255 , 127 , 1 , 141 , 76 , 118 , 213 , 5 , 213]) => Ok (Event :: Withdrawn (log . clone () . decode (Contract :: raw_contract () . abi . event ("Withdrawn") . expect ("generated event decode")) ?)) , _ => Err (self :: ethcontract :: errors :: ExecutionError :: from (self :: ethcontract :: common :: abi :: Error :: InvalidData)) , }) ;
if let Some(Ok(data)) = standard_event {
return Ok(data);
}
Err(self::ethcontract::errors::ExecutionError::from(
self::ethcontract::common::abi::Error::InvalidData,
))
}
}
}
pub use self::escrow_upgradeable::Contract as EscrowUpgradeable; | pub fn previous_owner( |
gateway.go | package gateway
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/distribution/reference"
gogotypes "github.com/gogo/protobuf/types"
"github.com/golang/protobuf/ptypes/any"
apitypes "github.com/moby/buildkit/api/types"
"github.com/moby/buildkit/cache"
cacheutil "github.com/moby/buildkit/cache/util"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/frontend"
pb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
opspb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/tracing"
"github.com/moby/buildkit/worker"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/http2"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
"google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
)
const (
keySource = "source"
keyDevel = "gateway-devel"
)
func NewGatewayFrontend(w frontend.WorkerInfos) frontend.Frontend {
return &gatewayFrontend{
workers: w,
}
}
type gatewayFrontend struct {
workers frontend.WorkerInfos
}
func filterPrefix(opts map[string]string, pfx string) map[string]string {
m := map[string]string{}
for k, v := range opts {
if strings.HasPrefix(k, pfx) {
m[strings.TrimPrefix(k, pfx)] = v
}
}
return m
}
func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition) (*frontend.Result, error) {
source, ok := opts[keySource]
if !ok {
return nil, errors.Errorf("no source specified for gateway")
}
sid := session.FromContext(ctx)
_, isDevel := opts[keyDevel]
var img specs.Image
var rootFS cache.ImmutableRef
var readonly bool // TODO: try to switch to read-only by default.
if isDevel {
devRes, err := llbBridge.Solve(session.NewContext(ctx, "gateway:"+sid),
frontend.SolveRequest{
Frontend: source,
FrontendOpt: filterPrefix(opts, "gateway-"),
FrontendInputs: inputs,
})
if err != nil {
return nil, err
}
defer func() {
devRes.EachRef(func(ref solver.ResultProxy) error {
return ref.Release(context.TODO())
})
}()
if devRes.Ref == nil {
return nil, errors.Errorf("development gateway didn't return default result")
}
res, err := devRes.Ref.Result(ctx)
if err != nil {
return nil, err
}
workerRef, ok := res.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid ref: %T", res.Sys())
}
rootFS = workerRef.ImmutableRef
config, ok := devRes.Metadata[exptypes.ExporterImageConfigKey]
if ok {
if err := json.Unmarshal(config, &img); err != nil {
return nil, err
}
}
} else {
sourceRef, err := reference.ParseNormalizedNamed(source)
if err != nil {
return nil, err
}
dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), llb.ResolveImageConfigOpt{})
if err != nil {
return nil, err
}
if err := json.Unmarshal(config, &img); err != nil {
return nil, err
}
if dgst != "" {
sourceRef, err = reference.WithDigest(sourceRef, dgst)
if err != nil {
return nil, err
}
}
src := llb.Image(sourceRef.String(), &markTypeFrontend{})
def, err := src.Marshal(ctx)
if err != nil {
return nil, err
}
res, err := llbBridge.Solve(ctx, frontend.SolveRequest{
Definition: def.ToPB(),
})
if err != nil {
return nil, err
}
defer func() {
res.EachRef(func(ref solver.ResultProxy) error {
return ref.Release(context.TODO())
})
}()
if res.Ref == nil {
return nil, errors.Errorf("gateway source didn't return default result")
}
r, err := res.Ref.Result(ctx)
if err != nil {
return nil, err
}
workerRef, ok := r.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid ref: %T", r.Sys())
}
rootFS = workerRef.ImmutableRef
}
lbf, ctx, err := newLLBBridgeForwarder(ctx, llbBridge, gf.workers, inputs)
defer lbf.conn.Close()
if err != nil {
return nil, err
}
args := []string{"/run"}
env := []string{}
cwd := "/"
if img.Config.Env != nil {
env = img.Config.Env
}
if img.Config.Entrypoint != nil {
args = img.Config.Entrypoint
}
if img.Config.WorkingDir != "" {
cwd = img.Config.WorkingDir
}
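// Forward every frontend option to the frontend process as
// BUILDKIT_FRONTEND_OPT_<n>=key=value environment variables.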
i := 0
for k, v := range opts {
env = append(env, fmt.Sprintf("BUILDKIT_FRONTEND_OPT_%d", i)+"="+k+"="+v)
i++
}
env = append(env, "BUILDKIT_SESSION_ID="+sid)
dt, err := json.Marshal(gf.workers.WorkerInfos())
if err != nil {
return nil, errors.Wrap(err, "failed to marshal workers array")
}
env = append(env, "BUILDKIT_WORKERS="+string(dt))
defer lbf.Discard()
env = append(env, "BUILDKIT_EXPORTEDPRODUCT="+apicaps.ExportedProduct)
meta := executor.Meta{
Env: env,
Args: args,
Cwd: cwd,
ReadonlyRootFS: readonly,
}
if v, ok := img.Config.Labels["moby.buildkit.frontend.network.none"]; ok {
if ok, _ := strconv.ParseBool(v); ok {
meta.NetMode = opspb.NetMode_NONE
}
}
err = llbBridge.Exec(ctx, meta, rootFS, lbf.Stdin, lbf.Stdout, os.Stderr)
if err != nil {
if errors.Is(err, context.Canceled) && lbf.isErrServerClosed {
err = errors.Errorf("frontend grpc server closed unexpectedly")
}
// An existing error (set via Return rpc) takes
// precedence over this error, which in turn takes
// precedence over a success reported via Return.
lbf.mu.Lock()
if lbf.err == nil {
lbf.result = nil
lbf.err = err
}
lbf.mu.Unlock()
}
return lbf.Result()
}
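// Discard releases every ref created on behalf of the frontend except the
// ones that made it into the final result.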
func (lbf *llbBridgeForwarder) Discard() {
lbf.mu.Lock()
defer lbf.mu.Unlock()
for id, r := range lbf.refs {
if lbf.err == nil && lbf.result != nil {
keep := false
lbf.result.EachRef(func(r2 solver.ResultProxy) error {
if r == r2 {
keep = true
}
return nil
})
if keep {
continue
}
}
r.Release(context.TODO())
delete(lbf.refs, id)
}
}
func (lbf *llbBridgeForwarder) Done() <-chan struct{} {
return lbf.doneCh
}
func (lbf *llbBridgeForwarder) setResult(r *frontend.Result, err error) (*pb.ReturnResponse, error) {
lbf.mu.Lock()
defer lbf.mu.Unlock()
if (r == nil) == (err == nil) {
return nil, errors.New("gateway return must be either result or err")
}
if lbf.result != nil || lbf.err != nil {
return nil, errors.New("gateway result is already set")
}
lbf.result = r
lbf.err = err
close(lbf.doneCh)
return &pb.ReturnResponse{}, nil
}
func (lbf *llbBridgeForwarder) Result() (*frontend.Result, error) {
lbf.mu.Lock()
defer lbf.mu.Unlock()
if lbf.result == nil && lbf.err == nil {
return nil, errors.New("no result for incomplete build")
}
if lbf.err != nil {
return nil, lbf.err
}
return lbf.result, nil
}
func NewBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition) *llbBridgeForwarder {
lbf := &llbBridgeForwarder{
callCtx: ctx,
llbBridge: llbBridge,
refs: map[string]solver.ResultProxy{},
doneCh: make(chan struct{}),
pipe: newPipe(),
workers: workers,
inputs: inputs,
}
return lbf
}
func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition) (*llbBridgeForwarder, context.Context, error) {
ctx, cancel := context.WithCancel(ctx)
lbf := NewBridgeForwarder(ctx, llbBridge, workers, inputs)
server := grpc.NewServer(grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor), grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor))
grpc_health_v1.RegisterHealthServer(server, health.NewServer())
pb.RegisterLLBBridgeServer(server, lbf)
go func() {
serve(ctx, server, lbf.conn)
select {
case <-ctx.Done():
default:
lbf.isErrServerClosed = true
}
cancel()
}()
return lbf, ctx, nil
}
type pipe struct {
Stdin io.ReadCloser
Stdout io.WriteCloser
conn net.Conn
}
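// newPipe wires two OS pipe pairs into a duplex stream: Stdin and Stdout are
// handed to the frontend container as its stdio, while conn exposes the
// opposite ends as a net.Conn so the bridge's gRPC/HTTP2 server can be served
// over that same stdio channel.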
func newPipe() *pipe {
pr1, pw1, _ := os.Pipe()
pr2, pw2, _ := os.Pipe()
return &pipe{
Stdin: pr1,
Stdout: pw2,
conn: &conn{
Reader: pr2,
Writer: pw1,
Closer: pw2,
},
}
}
type conn struct {
io.Reader
io.Writer
io.Closer
}
func (s *conn) LocalAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) RemoteAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) SetDeadline(t time.Time) error {
return nil
}
func (s *conn) SetReadDeadline(t time.Time) error {
return nil
}
func (s *conn) SetWriteDeadline(t time.Time) error {
return nil
}
type dummyAddr struct {
}
func (d dummyAddr) Network() string {
return "pipe"
}
func (d dummyAddr) String() string {
return "localhost"
}
type LLBBridgeForwarder interface {
pb.LLBBridgeServer
Done() <-chan struct{}
Result() (*frontend.Result, error)
}
type llbBridgeForwarder struct {
mu sync.Mutex
callCtx context.Context
llbBridge frontend.FrontendLLBBridge
refs map[string]solver.ResultProxy
// lastRef solver.CachedResult
// lastRefs map[string]solver.CachedResult
// err error
doneCh chan struct{} // closed when result or err become valid through a call to a Return
result *frontend.Result
err error
exporterAttr map[string][]byte
workers frontend.WorkerInfos
inputs map[string]*opspb.Definition
isErrServerClosed bool
*pipe
}
func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.ResolveImageConfigRequest) (*pb.ResolveImageConfigResponse, error) {
ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
var platform *specs.Platform
if p := req.Platform; p != nil {
platform = &specs.Platform{
OS: p.OS,
Architecture: p.Architecture,
Variant: p.Variant,
OSVersion: p.OSVersion,
OSFeatures: p.OSFeatures,
}
}
dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, llb.ResolveImageConfigOpt{
Platform: platform,
ResolveMode: req.ResolveMode,
LogName: req.LogName,
})
if err != nil {
return nil, err
}
return &pb.ResolveImageConfigResponse{
Digest: dgst,
Config: dt,
}, nil
}
func translateLegacySolveRequest(req *pb.SolveRequest) error {
// translates ImportCacheRefs to new CacheImports (v0.4.0)
for _, legacyImportRef := range req.ImportCacheRefsDeprecated {
im := &pb.CacheOptionsEntry{
Type: "registry",
Attrs: map[string]string{"ref": legacyImportRef},
}
// FIXME(AkihiroSuda): skip append if already exists
req.CacheImports = append(req.CacheImports, im)
}
req.ImportCacheRefsDeprecated = nil
return nil
}
func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) (*pb.SolveResponse, error) {
if err := translateLegacySolveRequest(req); err != nil {
return nil, err
}
var cacheImports []frontend.CacheOptionsEntry
for _, e := range req.CacheImports {
cacheImports = append(cacheImports, frontend.CacheOptionsEntry{
Type: e.Type,
Attrs: e.Attrs,
})
}
ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
res, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{
Definition: req.Definition,
Frontend: req.Frontend,
FrontendOpt: req.FrontendOpt,
FrontendInputs: req.FrontendInputs,
CacheImports: cacheImports,
})
if err != nil {
return nil, err
}
if len(res.Refs) > 0 && !req.AllowResultReturn {
// this should never happen because old client shouldn't make a map request
return nil, errors.Errorf("solve did not return default result")
}
pbRes := &pb.Result{
Metadata: res.Metadata,
}
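// Register each returned ref under a fresh id so later ReadFile/ReadDir/
// StatFile calls can refer back to it; clients that cannot handle the
// array/map result shape get the deprecated single-id form instead.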
var defaultID string
lbf.mu.Lock()
if res.Refs != nil {
ids := make(map[string]string, len(res.Refs))
defs := make(map[string]*opspb.Definition, len(res.Refs))
for k, ref := range res.Refs {
id := identity.NewID()
if ref == nil {
id = ""
} else {
lbf.refs[id] = ref
defs[k] = ref.Definition()
}
ids[k] = id
}
if req.AllowResultArrayRef {
refMap := make(map[string]*pb.Ref, len(res.Refs))
for k, id := range ids {
refMap[k] = &pb.Ref{Id: id, Def: defs[k]}
}
pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: refMap}}
} else {
pbRes.Result = &pb.Result_RefsDeprecated{RefsDeprecated: &pb.RefMapDeprecated{Refs: ids}}
}
} else {
ref := res.Ref
id := identity.NewID()
var def *opspb.Definition
if ref == nil {
id = ""
} else {
def = ref.Definition()
lbf.refs[id] = ref
}
defaultID = id
if req.AllowResultArrayRef {
pbRes.Result = &pb.Result_Ref{Ref: &pb.Ref{Id: id, Def: def}}
} else {
pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: id}
}
}
lbf.mu.Unlock()
// compatibility mode for older clients
if req.Final {
exp := map[string][]byte{}
if err := json.Unmarshal(req.ExporterAttr, &exp); err != nil {
return nil, err
}
for k, v := range res.Metadata {
exp[k] = v
}
lbf.mu.Lock()
lbf.result = &frontend.Result{
Ref: lbf.refs[defaultID],
Metadata: exp,
}
lbf.mu.Unlock()
}
resp := &pb.SolveResponse{
Result: pbRes,
}
if !req.AllowResultReturn {
resp.Ref = defaultID
}
return resp, nil
}
func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileRequest) (*pb.ReadFileResponse, error) {
ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
lbf.mu.Lock()
ref, ok := lbf.refs[req.Ref]
lbf.mu.Unlock()
if !ok {
return nil, errors.Errorf("no such ref: %v", req.Ref)
}
if ref == nil {
return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.FilePath)
}
r, err := ref.Result(ctx)
if err != nil {
return nil, err
}
workerRef, ok := r.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid ref: %T", r.Sys())
}
newReq := cacheutil.ReadRequest{
Filename: req.FilePath,
}
if r := req.Range; r != nil {
newReq.Range = &cacheutil.FileRange{
Offset: int(r.Offset),
Length: int(r.Length),
}
}
dt, err := cacheutil.ReadFile(ctx, workerRef.ImmutableRef, newReq)
if err != nil {
return nil, err
}
return &pb.ReadFileResponse{Data: dt}, nil
}
func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirRequest) (*pb.ReadDirResponse, error) {
ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
lbf.mu.Lock()
ref, ok := lbf.refs[req.Ref]
lbf.mu.Unlock()
if !ok {
return nil, errors.Errorf("no such ref: %v", req.Ref)
}
if ref == nil {
return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.DirPath)
}
r, err := ref.Result(ctx)
if err != nil {
return nil, err
}
workerRef, ok := r.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid ref: %T", r.Sys())
}
newReq := cacheutil.ReadDirRequest{
Path: req.DirPath,
IncludePattern: req.IncludePattern,
}
entries, err := cacheutil.ReadDir(ctx, workerRef.ImmutableRef, newReq)
if err != nil {
return nil, err
}
return &pb.ReadDirResponse{Entries: entries}, nil
}
func (lbf *llbBridgeForwarder) StatFile(ctx context.Context, req *pb.StatFileRequest) (*pb.StatFileResponse, error) {
ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
lbf.mu.Lock()
ref, ok := lbf.refs[req.Ref]
lbf.mu.Unlock()
if !ok {
return nil, errors.Errorf("no such ref: %v", req.Ref)
}
if ref == nil {
return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.Path)
}
r, err := ref.Result(ctx)
if err != nil {
return nil, err
}
workerRef, ok := r.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid ref: %T", r.Sys())
}
st, err := cacheutil.StatFile(ctx, workerRef.ImmutableRef, req.Path)
if err != nil {
return nil, err
}
return &pb.StatFileResponse{Stat: st}, nil
}
func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) {
workers := lbf.workers.WorkerInfos()
pbWorkers := make([]*apitypes.WorkerRecord, 0, len(workers))
for _, w := range workers {
pbWorkers = append(pbWorkers, &apitypes.WorkerRecord{
ID: w.ID,
Labels: w.Labels,
Platforms: opspb.PlatformsFromSpec(w.Platforms),
})
}
return &pb.PongResponse{
FrontendAPICaps: pb.Caps.All(),
Workers: pbWorkers,
LLBCaps: opspb.Caps.All(),
}, nil
}
func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest) (*pb.ReturnResponse, error) {
if in.Error != nil {
return lbf.setResult(nil, grpcerrors.FromGRPC(status.ErrorProto(&spb.Status{
Code: in.Error.Code,
Message: in.Error.Message,
Details: convertGogoAny(in.Error.Details),
})))
} else {
r := &frontend.Result{
Metadata: in.Result.Metadata,
}
switch res := in.Result.Result.(type) {
case *pb.Result_RefDeprecated:
ref, err := lbf.convertRef(res.RefDeprecated)
if err != nil {
return nil, err
}
r.Ref = ref
case *pb.Result_RefsDeprecated:
m := map[string]solver.ResultProxy{}
for k, id := range res.RefsDeprecated.Refs {
ref, err := lbf.convertRef(id)
if err != nil {
return nil, err
}
m[k] = ref
}
r.Refs = m
case *pb.Result_Ref:
ref, err := lbf.convertRef(res.Ref.Id)
if err != nil {
return nil, err
}
r.Ref = ref
case *pb.Result_Refs:
m := map[string]solver.ResultProxy{}
for k, ref := range res.Refs.Refs {
ref, err := lbf.convertRef(ref.Id)
if err != nil {
return nil, err
}
m[k] = ref
}
r.Refs = m
}
return lbf.setResult(r, nil)
}
}
func (lbf *llbBridgeForwarder) Inputs(ctx context.Context, in *pb.InputsRequest) (*pb.InputsResponse, error) {
return &pb.InputsResponse{
Definitions: lbf.inputs,
}, nil
}
func (lbf *llbBridgeForwarder) convertRef(id string) (solver.ResultProxy, error) {
if id == "" {
return nil, nil
}
lbf.mu.Lock()
defer lbf.mu.Unlock()
r, ok := lbf.refs[id]
if !ok {
return nil, errors.Errorf("return reference %s not found", id)
}
return r, nil
}
func | (ctx context.Context, grpcServer *grpc.Server, conn net.Conn) {
go func() {
<-ctx.Done()
conn.Close()
}()
logrus.Debugf("serving grpc connection")
(&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer})
}
type markTypeFrontend struct{}
func (*markTypeFrontend) SetImageOption(ii *llb.ImageInfo) {
ii.RecordType = string(client.UsageRecordTypeFrontend)
}
func convertGogoAny(in []*gogotypes.Any) []*any.Any {
out := make([]*any.Any, len(in))
for i := range in {
out[i] = &any.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value}
}
return out
}
| serve |
sum_test.go | package main
import (
"io/ioutil"
"testing"
| var (
wasmBytes []byte
)
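// wasmFilename and fnName are assumed to be declared elsewhere in this
// package (e.g. alongside the code that builds or embeds the wasm module).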
func init() {
var err error
wasmBytes, err = ioutil.ReadFile(wasmFilename)
if err != nil {
panic(err)
}
}
func TestSum(t *testing.T) {
runtime := wasm3.NewRuntime(&wasm3.Config{
Environment: wasm3.NewEnvironment(),
StackSize: 64 * 1024,
})
defer runtime.Destroy()
_, err := runtime.Load(wasmBytes)
if err != nil {
t.Fatal(err)
}
fn, err := runtime.FindFunction(fnName)
if err != nil {
t.Fatal(err)
}
result, _ := fn(1, 1)
if result != 2 {
t.Fatal("Result doesn't match")
}
}
func BenchmarkSum(b *testing.B) {
for n := 0; n < b.N; n++ {
runtime := wasm3.NewRuntime(&wasm3.Config{
Environment: wasm3.NewEnvironment(),
StackSize: 64 * 1024,
})
defer runtime.Destroy()
_, err := runtime.Load(wasmBytes)
if err != nil {
b.Fatal(err)
}
fn, err := runtime.FindFunction(fnName)
if err != nil {
b.Fatal(err)
}
fn(1, 2)
}
}
func BenchmarkSumReentrant(b *testing.B) {
runtime := wasm3.NewRuntime(&wasm3.Config{
Environment: wasm3.NewEnvironment(),
StackSize: 64 * 1024,
})
defer runtime.Destroy()
_, err := runtime.Load(wasmBytes)
if err != nil {
b.Fatal(err)
}
fn, err := runtime.FindFunction(fnName)
if err != nil {
b.Fatal(err)
}
for n := 0; n < b.N; n++ {
fn(1, 2)
}
} | wasm3 "github.com/matiasinsaurralde/go-wasm3"
)
|
jqxgrid.export.js | /* | Copyright (c) 2011-2016 jQWidgets.
License: http://jqwidgets.com/license/
*/
(function(a){a.extend(a.jqx._jqxGrid.prototype,{exportdata:function(q,z,y,o,r,t,f){if(!a.jqx.dataAdapter.ArrayExporter){throw"jqxGrid: Missing reference to jqxdata.export.js!"}if(y==undefined){y=true}var H=this;if(o==undefined){var o=this.getrows();if(o.length==0){throw"No data to export."}}this.exporting=true;if(!this.pageable){this.loadondemand=true}if(this.altrows){this._renderrows(this.virtualsizeinfo)}var F=r!=undefined?r:false;var E={};var n={};var v=[];var l=this.host.find(".jqx-grid-cell:first");var w=this.host.find(".jqx-grid-cell-alt:first");l.removeClass(this.toThemeProperty("jqx-grid-cell-selected"));l.removeClass(this.toThemeProperty("jqx-fill-state-pressed"));w.removeClass(this.toThemeProperty("jqx-grid-cell-selected"));w.removeClass(this.toThemeProperty("jqx-fill-state-pressed"));l.removeClass(this.toThemeProperty("jqx-grid-cell-hover"));l.removeClass(this.toThemeProperty("jqx-fill-state-hover"));w.removeClass(this.toThemeProperty("jqx-grid-cell-hover"));w.removeClass(this.toThemeProperty("jqx-fill-state-hover"));var g="cell";var e=1;var G="column";var c=1;var h=[];for(var B=0;B<this.columns.records.length;B++){var d=this.columns.records[B];if(d.cellclassname!=""){d.customCellStyles=new Array();if(typeof d.cellclassname=="string"){for(var C=0;C<o.length;C++){d.customCellStyles[C]=d.cellclassname}}else{for(var C=0;C<o.length;C++){var u=this.getrowboundindex(C);var b=d.cellclassname(u,d.displayfield,o[C][d.displayfield],o[C]);if(b){d.customCellStyles[C]=b}}}}}var x=new Array();a.each(this.columns.records,function(K){var N=a(H.table[0].rows[0].cells[K]);if(H.table[0].rows.length>1){var j=a(H.table[0].rows[1].cells[K])}var J=this;var L=function(P){P.removeClass(H.toThemeProperty("jqx-grid-cell-selected"));P.removeClass(H.toThemeProperty("jqx-fill-state-pressed"));P.removeClass(H.toThemeProperty("jqx-grid-cell-hover"));P.removeClass(H.toThemeProperty("jqx-fill-state-hover"));if(J.customCellStyles){for(var Q in J.customCellStyles){P.removeClass(J.customCellStyles[Q])}}};L(N);if(j){L(j)}if(this.displayfield==null){return true}if(H.showaggregates){if(H.getcolumnaggregateddata){h.push(H.getcolumnaggregateddata(this.displayfield,this.aggregates,true,o))}}var M=H._getexportcolumntype(this);if(this.exportable&&(!this.hidden||F)){E[this.displayfield]={};E[this.displayfield].text=this.text;E[this.displayfield].width=parseInt(this.width);if(isNaN(E[this.displayfield].width)){E[this.displayfield].width=60}E[this.displayfield].formatString=this.cellsformat;E[this.displayfield].localization=H.gridlocalization;E[this.displayfield].type=M;E[this.displayfield].cellsAlign=this.cellsalign;E[this.displayfield].hidden=!y;E[this.displayfield].displayfield=this.displayfield;x.push(E[this.displayfield])}g="cell"+e;var O=a(this.element);if(this.element==undefined){O=a(this.uielement)}G="column"+c;if(q=="html"||q=="xls"||q=="pdf"){var 
i=function(P,X,W,Q,V,S,R,T,U){n[P]={};if(X==undefined){return}n[P]["font-size"]=X.css("font-size");n[P]["font-weight"]=X.css("font-weight");n[P]["font-style"]=X.css("font-style");n[P]["background-color"]=S._getexportcolor(X.css("background-color"));n[P]["color"]=S._getexportcolor(X.css("color"));n[P]["border-color"]=S._getexportcolor(X.css("border-top-color"));if(W){n[P]["text-align"]=V.align}else{n[P]["text-align"]=V.cellsalign;n[P]["formatString"]=V.cellsformat;n[P]["dataType"]=M}if(q=="html"||q=="pdf"){n[P]["border-top-width"]=X.css("border-top-width");n[P]["border-left-width"]=X.css("border-left-width");n[P]["border-right-width"]=X.css("border-right-width");n[P]["border-bottom-width"]=X.css("border-bottom-width");n[P]["border-top-style"]=X.css("border-top-style");n[P]["border-left-style"]=X.css("border-left-style");n[P]["border-right-style"]=X.css("border-right-style");n[P]["border-bottom-style"]=X.css("border-bottom-style");if(W){if(R==0){n[P]["border-left-width"]=X.css("border-right-width")}n[P]["border-top-width"]=X.css("border-right-width");n[P]["border-bottom-width"]=X.css("border-bottom-width")}else{if(R==0){n[P]["border-left-width"]=X.css("border-right-width")}}n[P]["height"]=X.css("height")}if(V.exportable&&(!V.hidden||F)){if(T==true){if(!E[V.displayfield].customCellStyles){E[V.displayfield].customCellStyles=new Array()}E[V.displayfield].customCellStyles[U]=P}else{if(W){E[V.displayfield].style=P}else{if(!Q){E[V.displayfield].cellStyle=P}else{E[V.displayfield].cellAltStyle=P}}}}};i(G,O,true,false,this,H,K);c++;i(g,N,false,false,this,H,K);if(H.altrows){g="cellalt"+e;i(g,j,false,true,this,H,K)}if(this.customCellStyles){for(var I in J.customCellStyles){N.removeClass(J.customCellStyles[I])}for(var I in J.customCellStyles){N.addClass(J.customCellStyles[I]);i(g+J.customCellStyles[I],N,false,false,this,H,K,true,I);N.removeClass(J.customCellStyles[I])}}e++}});a.each(this.columns.records,function(i){if(E[this.displayfield]){E[this.displayfield].columnsDataFields=x}});if(this.showaggregates){var D=[];var A=q=="xls"?"_AG":"";var k=this.groupable?this.groups.length:0;if(this.rowdetails){k++}if(h.length>0){a.each(this.columns.records,function(j){if(this.aggregates){for(var J=0;J<this.aggregates.length;J++){if(!D[J]){D[J]={}}if(D[J]){var K=H._getaggregatename(this.aggregates[J]);var L=H._getaggregatetype(this.aggregates[J]);var I=h[j-k];if(I){D[J][this.displayfield]=A+K+": "+I[L]}}}}});a.each(this.columns.records,function(j){for(var I=0;I<D.length;I++){if(D[I][this.displayfield]==undefined){D[I][this.displayfield]=A}}})}a.each(D,function(){o.push(this)})}var m=this;var s=a.jqx.dataAdapter.ArrayExporter(o,E,n);if(z==undefined){this._renderrows(this.virtualsizeinfo);var p=s.exportTo(q);if(this.showaggregates){a.each(D,function(){o.pop(this)})}setTimeout(function(){m.exporting=false},50);return p}else{s.exportToFile(q,z,t,f)}if(this.showaggregates){a.each(D,function(){o.pop(this)})}this._renderrows(this.virtualsizeinfo);setTimeout(function(){m.exporting=false},50)},_getexportcolor:function(l){var f=l;if(l=="transparent"){f="#FFFFFF"}if(!f||!f.toString()){f="#FFFFFF"}if(f.toString().indexOf("rgb")!=-1){var i=f.split(",");if(f.toString().indexOf("rgba")!=-1){var d=parseInt(i[0].substring(5));var h=parseInt(i[1]);var j=parseInt(i[2]);var k=parseInt(i[3].substring(1,4));var m={r:d,g:h,b:j};var e=this._rgbToHex(m);if(d==0&&h==0&&j==0&&k==0){return"#ffffff"}return"#"+e}var d=parseInt(i[0].substring(4));var h=parseInt(i[1]);var j=parseInt(i[2].substring(1,4));var m={r:d,g:h,b:j};var 
e=this._rgbToHex(m);return"#"+e}else{if(f.toString().indexOf("#")!=-1){if(f.toString().length==4){var c=f.toString().substring(1,4);f+=c}}}return f},_rgbToHex:function(b){return this._intToHex(b.r)+this._intToHex(b.g)+this._intToHex(b.b)},_intToHex:function(c){var b=(parseInt(c).toString(16));if(b.length==1){b=("0"+b)}return b.toUpperCase()},_getexportcolumntype:function(f){var g=this;var e="string";var d=g.source.datafields||((g.source._source)?g.source._source.datafields:null);if(d){var i="";a.each(d,function(){if(this.name==f.displayfield){if(this.type){i=this.type}return false}});if(i){return i}}if(f!=null){if(this.dataview.cachedrecords==undefined){return e}var b=null;if(!this.virtualmode){if(this.dataview.cachedrecords.length==0){return e}b=this.dataview.cachedrecords[0][f.displayfield];if(b!=null&&b.toString()==""){return"string"}}else{a.each(this.dataview.cachedrecords,function(){b=this[f.displayfield];return false})}if(b!=null){if(f.cellsformat.indexOf("c")!=-1){return"number"}if(f.cellsformat.indexOf("n")!=-1){return"number"}if(f.cellsformat.indexOf("p")!=-1){return"number"}if(f.cellsformat.indexOf("d")!=-1){return"date"}if(f.cellsformat.indexOf("y")!=-1){return"date"}if(f.cellsformat.indexOf("M")!=-1){return"date"}if(f.cellsformat.indexOf("m")!=-1){return"date"}if(f.cellsformat.indexOf("t")!=-1){return"date"}if(typeof b=="boolean"){e="boolean"}else{if(a.jqx.dataFormat.isNumber(b)){e="number"}else{var h=new Date(b);if(h.toString()=="NaN"||h.toString()=="Invalid Date"){if(a.jqx.dataFormat){h=a.jqx.dataFormat.tryparsedate(b);if(h!=null){if(h&&h.getFullYear()){if(h.getFullYear()==1970&&h.getMonth()==0&&h.getDate()==1){var c=new Number(b);if(!isNaN(c)){return"number"}return"string"}}return"date"}else{e="string"}}else{e="string"}}else{e="date"}}}}}return e}})})(jqxBaseFramework); | jQWidgets v4.0.0 (2016-Jan) |
dataTree.ts | /* eslint-disable max-classes-per-file */
/*
This file is part of Astarte.
Copyright 2020 Ispirata Srl
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import _ from 'lodash';
import { AstarteInterface } from '../models';
import type {
AstarteDataType,
AstarteDataValue,
AstarteDataTuple,
AstartePropertyData,
AstarteDatastreamData,
AstarteDatastreamIndividualData,
AstarteDatastreamObjectData,
AstarteInterfaceValues,
AstarteIndividualDatastreamInterfaceValue,
AstarteAggregatedDatastreamInterfaceValue,
} from '../types';
const getEndpointDataType = (iface: AstarteInterface, endpoint: string): AstarteDataType => {
const matchedMapping = AstarteInterface.findEndpointMapping(iface, endpoint);
if (matchedMapping == null) {
throw new Error(`Could not find an interface mapping for the endpoint ${endpoint}`);
}
return matchedMapping.type;
};
const isAstarteDataValue = (value: unknown): value is AstarteDataValue =>
!_.isUndefined(value) && (!_.isPlainObject(value) || _.isNull(value));
const isPropertiesInterfaceValue = (value: unknown): value is AstarteDataValue =>
isAstarteDataValue(value);
const isIndividualDatastreamInterfaceValue = (
value: unknown,
): value is AstarteIndividualDatastreamInterfaceValue => isAstarteDataValue(_.get(value, 'value'));
const isIndividualDatastreamInterfaceValues = (
value: unknown,
): value is AstarteIndividualDatastreamInterfaceValue[] =>
_.isArray(value) && value.every(isIndividualDatastreamInterfaceValue);
const isAggregatedDatastreamInterfaceValue = (
value: unknown,
): value is AstarteAggregatedDatastreamInterfaceValue => Array.isArray(value);
type AstarteDataTreeKind = 'properties' | 'datastream_object' | 'datastream_individual';
const getDataTreeKind = (iface: AstarteInterface): AstarteDataTreeKind => {
if (iface.type === 'properties') {
return 'properties';
}
if (iface.aggregation === 'object') {
return 'datastream_object';
}
return 'datastream_individual';
};
type JSON<Value> = Value | { [prop: string]: JSON<Value> };
type Equals<T, S> = [T] extends [S] ? ([S] extends [T] ? true : false) : false;
interface AstarteDataTreeNode<
Data extends AstartePropertyData | AstarteDatastreamIndividualData | AstarteDatastreamObjectData
> {
dataKind: AstarteDataTreeKind;
name: string;
endpoint: string;
getParentNode: () => AstarteDataTreeNode<Data> | null;
getNode: (endpoint: string) => AstarteDataTreeNode<Data> | null;
getLeaves: () => AstarteDataTreeNode<Data>[];
toData: () => Equals<Data, AstarteDatastreamObjectData> extends true
? AstarteDatastreamObjectData[]
: Equals<Data, AstarteDatastreamIndividualData> extends true
? AstarteDatastreamIndividualData[]
: AstartePropertyData[];
toLinearizedData: () => Equals<Data, AstarteDatastreamObjectData> extends true
? AstarteDatastreamData[]
: Equals<Data, AstarteDatastreamIndividualData> extends true
? AstarteDatastreamData[]
: AstartePropertyData[];
toLastValue: () => JSON<AstarteDataValue>;
}
interface AstarteDataTreeLeafNodeParams<
Data extends AstartePropertyData | AstarteDatastreamIndividualData | AstarteDatastreamObjectData
> {
interface: AstarteInterface;
data: Equals<Data, AstarteDatastreamObjectData> extends true
? AstarteDatastreamObjectData[]
: Equals<Data, AstarteDatastreamIndividualData> extends true
? AstarteDatastreamIndividualData[]
: AstartePropertyData;
endpoint?: string;
parentNode?: AstarteDataTreeBranchNode<Data> | null;
}
class | <
Data extends AstartePropertyData | AstarteDatastreamIndividualData | AstarteDatastreamObjectData
> implements AstarteDataTreeNode<Data> {
readonly dataKind: AstarteDataTreeKind;
readonly endpoint: string;
private readonly parent: AstarteDataTreeBranchNode<Data> | null;
private readonly data: Equals<Data, AstarteDatastreamObjectData> extends true
? AstarteDatastreamObjectData[]
: Equals<Data, AstarteDatastreamIndividualData> extends true
? AstarteDatastreamIndividualData[]
: AstartePropertyData;
private readonly linearizedData: Equals<Data, AstarteDatastreamObjectData> extends true
? AstarteDatastreamData[]
: Equals<Data, AstarteDatastreamIndividualData> extends true
? AstarteDatastreamData[]
: AstartePropertyData;
constructor({
interface: iface,
data,
endpoint = '',
parentNode = null,
}: AstarteDataTreeLeafNodeParams<Data>) {
this.endpoint = endpoint;
this.parent = parentNode;
this.dataKind = getDataTreeKind(iface);
this.data = data;
if (iface.type === 'properties') {
// @ts-expect-error cannot correctly infer from generics
this.linearizedData = data as AstartePropertyData;
} else if (iface.type === 'datastream' && iface.aggregation === 'individual') {
const interfaceData = data as AstarteDatastreamIndividualData[];
// @ts-expect-error cannot correctly infer from generics
this.linearizedData = interfaceData.map((obj) => ({
endpoint: obj.endpoint,
timestamp: obj.timestamp,
...({ type: obj.type, value: obj.value } as AstarteDataTuple),
})) as AstarteDatastreamData[];
} else {
const interfaceData = data as AstarteDatastreamObjectData[];
// @ts-expect-error cannot correctly infer from generics
this.linearizedData = interfaceData
.map((obj) =>
Object.entries(obj.value).map(([prop, propValue]) => ({
endpoint: `${obj.endpoint}/${prop}`,
timestamp: obj.timestamp,
...propValue,
})),
)
.flat() as AstarteDatastreamData[];
}
}
getParentNode(): AstarteDataTreeBranchNode<Data> | null {
return this.parent;
}
getNode(endpoint: string): AstarteDataTreeLeafNode<Data> | null {
const sanitizedEndpoint = endpoint.replace(/\/$/, '');
if (sanitizedEndpoint === this.endpoint) {
return this;
}
return null;
}
getLeaves(): AstarteDataTreeLeafNode<Data>[] {
return [this];
}
toData(): Equals<Data, AstarteDatastreamObjectData> extends true
? AstarteDatastreamObjectData[]
: Equals<Data, AstarteDatastreamIndividualData> extends true
? AstarteDatastreamIndividualData[]
: [AstartePropertyData] {
// @ts-expect-error cannot correctly infer from generics
return _.isArray(this.data) ? this.data : [this.data];
}
toLinearizedData(): Equals<Data, AstarteDatastreamObjectData> extends true
? AstarteDatastreamData[]
: Equals<Data, AstarteDatastreamIndividualData> extends true
? AstarteDatastreamData[]
: [AstartePropertyData] {
// @ts-expect-error cannot correctly infer from generics
return _.isArray(this.linearizedData) ? this.linearizedData : [this.linearizedData];
}
toLastValue(): JSON<AstarteDataValue> {
if (this.dataKind === 'properties') {
const data = this.data as AstartePropertyData;
return data.value;
}
if (this.dataKind === 'datastream_individual') {
const data = this.data as AstarteDatastreamIndividualData[];
const lastData: AstarteDatastreamIndividualData | undefined = _.last(
_.orderBy(data, ['timestamp'], ['asc']),
);
return lastData ? lastData.value : null;
}
const data = this.data as AstarteDatastreamObjectData[];
const lastData: AstarteDatastreamObjectData | undefined = _.last(
_.orderBy(data, ['timestamp'], ['asc']),
);
return lastData ? _.mapValues(lastData.value, (valueTuple) => valueTuple.value) : null;
}
get name(): string {
return this.parent != null ? this.endpoint.replace(`${this.parent.endpoint}/`, '') : '';
}
}
interface AstarteDataTreeBranchNodeParams<
Data extends AstartePropertyData | AstarteDatastreamIndividualData | AstarteDatastreamObjectData
> {
interface: AstarteInterface;
data: AstarteInterfaceValues;
endpoint?: string;
parentNode?: AstarteDataTreeBranchNode<Data> | null;
}
class AstarteDataTreeBranchNode<
Data extends AstartePropertyData | AstarteDatastreamIndividualData | AstarteDatastreamObjectData
> implements AstarteDataTreeNode<Data> {
readonly dataKind: AstarteDataTreeKind;
readonly endpoint: string;
private readonly parent: AstarteDataTreeBranchNode<Data> | null;
private readonly children: Array<AstarteDataTreeBranchNode<Data> | AstarteDataTreeLeafNode<Data>>;
constructor({
interface: iface,
data,
endpoint = '',
parentNode = null,
}: AstarteDataTreeBranchNodeParams<Data>) {
this.endpoint = endpoint;
this.parent = parentNode;
this.dataKind = getDataTreeKind(iface);
if (iface.type === 'properties') {
// @ts-expect-error cannot correctly infer from generics
this.children = Object.entries(data).map(([prop, propValue]) =>
toPropertiesTreeNode({
interface: iface,
data: propValue,
endpoint: `${endpoint}/${prop}`,
// @ts-expect-error cannot correctly infer from generics
parentNode: this as AstarteDataTreeBranchNode<AstartePropertyData>,
}),
) as Array<AstarteDataTreeBranchNode<Data> | AstarteDataTreeLeafNode<Data>>;
} else if (iface.type === 'datastream' && iface.aggregation === 'individual') {
// @ts-expect-error cannot correctly infer from generics
this.children = Object.entries(data).map(([prop, propValue]) =>
toDatastreamIndividualTreeNode({
interface: iface,
data: propValue,
endpoint: `${endpoint}/${prop}`,
// @ts-expect-error cannot correctly infer from generics
parentNode: this as AstarteDataTreeBranchNode<AstarteDatastreamIndividualData>,
}),
) as Array<AstarteDataTreeBranchNode<Data> | AstarteDataTreeLeafNode<Data>>;
} else {
// @ts-expect-error cannot correctly infer from generics
this.children = Object.entries(data).map(([prop, propValue]) =>
toDatastreamObjectTreeNode({
interface: iface,
data: propValue,
endpoint: `${endpoint}/${prop}`,
// @ts-expect-error cannot correctly infer from generics
parentNode: this as AstarteDataTreeBranchNode<AstarteDatastreamObjectData>,
}),
) as Array<AstarteDataTreeBranchNode<Data> | AstarteDataTreeLeafNode<Data>>;
}
}
getParentNode(): AstarteDataTreeBranchNode<Data> | null {
return this.parent;
}
getNode(
endpoint: string,
): AstarteDataTreeBranchNode<Data> | AstarteDataTreeLeafNode<Data> | null {
const sanitizedEndpoint = endpoint.replace(/\/$/, '');
if (sanitizedEndpoint === this.endpoint) {
return this;
}
if (this.children.length === 0) {
return null;
}
let foundNode: AstarteDataTreeBranchNode<Data> | AstarteDataTreeLeafNode<Data> | null = null;
this.children.forEach((child) => {
const node = child.getNode(sanitizedEndpoint);
if (node != null) {
foundNode = node;
}
});
return foundNode;
}
getLeaves(): AstarteDataTreeLeafNode<Data>[] {
return this.children.map((child) => child.getLeaves()).flat();
}
toData(): Equals<Data, AstarteDatastreamObjectData> extends true
? AstarteDatastreamObjectData[]
: Equals<Data, AstarteDatastreamIndividualData> extends true
? AstarteDatastreamIndividualData[]
: AstartePropertyData[] {
// @ts-expect-error cannot correctly infer from generics
return this.getLeaves()
.map((leaf) => leaf.toData())
.flat();
}
toLinearizedData(): Equals<Data, AstarteDatastreamObjectData> extends true
? AstarteDatastreamData[]
: Equals<Data, AstarteDatastreamIndividualData> extends true
? AstarteDatastreamData[]
: AstartePropertyData[] {
// @ts-expect-error cannot correctly infer from generics
return this.getLeaves()
.map((leaf) => leaf.toLinearizedData())
.flat();
}
toLastValue(): JSON<AstarteDataValue> {
return this.children.reduce(
(acc, child) => ({
...acc,
[child.name]: child.toLastValue(),
}),
{},
);
}
get name(): string {
return this.parent != null ? this.endpoint.replace(`${this.parent.endpoint}/`, '') : '';
}
}
function toAstarteDataTree(params: {
interface: AstarteInterface;
data: AstarteInterfaceValues;
endpoint?: string;
}):
| AstarteDataTreeNode<AstartePropertyData>
| AstarteDataTreeNode<AstarteDatastreamIndividualData>
| AstarteDataTreeNode<AstarteDatastreamObjectData> {
if (params.interface.type === 'properties') {
return toPropertiesTreeNode({
interface: params.interface,
data: params.data,
endpoint: params.endpoint || '',
parentNode: null,
});
}
if (params.interface.type === 'datastream' && params.interface.aggregation === 'individual') {
return toDatastreamIndividualTreeNode({
interface: params.interface,
data: params.data,
endpoint: params.endpoint || '',
parentNode: null,
});
}
return toDatastreamObjectTreeNode({
interface: params.interface,
data: params.data,
endpoint: params.endpoint || '',
parentNode: null,
});
}
function toPropertiesTreeNode(params: {
interface: AstarteInterface;
data: AstarteInterfaceValues;
endpoint: string;
parentNode: AstarteDataTreeBranchNode<AstartePropertyData> | null;
}): AstarteDataTreeBranchNode<AstartePropertyData> | AstarteDataTreeLeafNode<AstartePropertyData> {
if (isPropertiesInterfaceValue(params.data)) {
return new AstarteDataTreeLeafNode<AstartePropertyData>({
interface: params.interface,
data: {
endpoint: params.endpoint,
...({
value: params.data,
type: getEndpointDataType(params.interface, params.endpoint),
} as AstarteDataTuple),
},
endpoint: params.endpoint,
parentNode: params.parentNode,
});
}
return new AstarteDataTreeBranchNode<AstartePropertyData>({
interface: params.interface,
data: params.data,
endpoint: params.endpoint,
parentNode: params.parentNode,
});
}
function toDatastreamIndividualTreeNode(params: {
interface: AstarteInterface;
data: AstarteInterfaceValues;
endpoint: string;
parentNode: AstarteDataTreeBranchNode<AstarteDatastreamIndividualData> | null;
}):
| AstarteDataTreeBranchNode<AstarteDatastreamIndividualData>
| AstarteDataTreeLeafNode<AstarteDatastreamIndividualData> {
if (isIndividualDatastreamInterfaceValues(params.data)) {
const leafData: AstarteDatastreamIndividualData[] = params.data.map((dataValue) => ({
endpoint: params.endpoint,
timestamp: dataValue.timestamp,
...({
value: dataValue.value,
type: getEndpointDataType(params.interface, params.endpoint),
} as AstarteDataTuple),
}));
return new AstarteDataTreeLeafNode<AstarteDatastreamIndividualData>({
interface: params.interface,
data: leafData,
endpoint: params.endpoint,
parentNode: params.parentNode,
});
}
if (isIndividualDatastreamInterfaceValue(params.data)) {
const leafData: AstarteDatastreamIndividualData[] = [
{
endpoint: params.endpoint,
timestamp: params.data.timestamp,
...({
value: params.data.value,
type: getEndpointDataType(params.interface, params.endpoint),
} as AstarteDataTuple),
},
];
return new AstarteDataTreeLeafNode<AstarteDatastreamIndividualData>({
interface: params.interface,
data: leafData,
endpoint: params.endpoint,
parentNode: params.parentNode,
});
}
return new AstarteDataTreeBranchNode<AstarteDatastreamIndividualData>({
interface: params.interface,
data: params.data,
endpoint: params.endpoint,
parentNode: params.parentNode,
});
}
function toDatastreamObjectTreeNode(params: {
interface: AstarteInterface;
data: AstarteInterfaceValues;
endpoint: string;
parentNode: AstarteDataTreeBranchNode<AstarteDatastreamObjectData> | null;
}):
| AstarteDataTreeBranchNode<AstarteDatastreamObjectData>
| AstarteDataTreeLeafNode<AstarteDatastreamObjectData> {
if (isAggregatedDatastreamInterfaceValue(params.data)) {
const leafData: AstarteDatastreamObjectData[] = params.data.map((obj) => ({
endpoint: params.endpoint,
timestamp: obj.timestamp,
value: Object.entries(_.omit(obj, 'timestamp')).reduce(
(acc, [objProp, objPropValue]) => ({
...acc,
[objProp]: {
value: objPropValue,
type: getEndpointDataType(params.interface, `${params.endpoint}/${objProp}`),
} as AstarteDataTuple,
}),
{},
),
}));
return new AstarteDataTreeLeafNode<AstarteDatastreamObjectData>({
interface: params.interface,
data: leafData,
endpoint: params.endpoint,
parentNode: params.parentNode,
});
}
return new AstarteDataTreeBranchNode<AstarteDatastreamObjectData>({
interface: params.interface,
data: params.data,
endpoint: params.endpoint,
parentNode: params.parentNode,
});
}
export { toAstarteDataTree };
export type { AstarteDataTreeNode, AstarteDataTreeKind };
| AstarteDataTreeLeafNode |
s3.py | """Amazon S3 Module."""
import concurrent.futures
import csv
import logging
import time
import uuid
from itertools import repeat
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import boto3 # type: ignore
import botocore.exceptions # type: ignore
import pandas as pd # type: ignore
import pandas.io.parsers # type: ignore
import pyarrow as pa # type: ignore
import pyarrow.lib # type: ignore
import pyarrow.parquet # type: ignore
import s3fs # type: ignore
from boto3.s3.transfer import TransferConfig # type: ignore
from pandas.io.common import infer_compression # type: ignore
from awswrangler import _data_types, _utils, catalog, exceptions
_COMPRESSION_2_EXT: Dict[Optional[str], str] = {None: "", "gzip": ".gz", "snappy": ".snappy"}
_logger: logging.Logger = logging.getLogger(__name__)
def get_bucket_region(bucket: str, boto3_session: Optional[boto3.Session] = None) -> str:
"""Get bucket region name.
Parameters
----------
bucket : str
Bucket name.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
str
Region code (e.g. 'us-east-1').
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> region = wr.s3.get_bucket_region('bucket-name')
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> region = wr.s3.get_bucket_region('bucket-name', boto3_session=boto3.Session())
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
_logger.debug(f"bucket: {bucket}")
region: str = client_s3.get_bucket_location(Bucket=bucket)["LocationConstraint"]
region = "us-east-1" if region is None else region
_logger.debug(f"region: {region}")
return region
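# Editorial sketch: S3's GetBucketLocation returns a null LocationConstraint for buckets
# created in us-east-1, which is why get_bucket_region() above maps None to "us-east-1".
# Minimal standalone illustration of the same idea (the bucket name is an assumption):
def _example_resolve_region(bucket: str = "my-bucket") -> str:
    raw = boto3.client("s3").get_bucket_location(Bucket=bucket)["LocationConstraint"]
    # AWS reports None for us-east-1 buckets and a non-empty region code otherwise.
    return raw if raw is not None else "us-east-1"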
def does_object_exist(path: str, boto3_session: Optional[boto3.Session] = None) -> bool:
"""Check if object exists on S3.
Parameters
----------
path: str
S3 path (e.g. s3://bucket/key).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
bool
True if exists, False otherwise.
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> wr.s3.does_object_exist('s3://bucket/key_real')
True
>>> wr.s3.does_object_exist('s3://bucket/key_unreal')
False
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> wr.s3.does_object_exist('s3://bucket/key_real', boto3_session=boto3.Session())
True
>>> wr.s3.does_object_exist('s3://bucket/key_unreal', boto3_session=boto3.Session())
False
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
bucket: str
key: str
bucket, key = path.replace("s3://", "").split("/", 1)
try:
client_s3.head_object(Bucket=bucket, Key=key)
return True
except botocore.exceptions.ClientError as ex:
if ex.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
return False
raise ex # pragma: no cover
def list_objects(path: str, boto3_session: Optional[boto3.Session] = None) -> List[str]:
"""List Amazon S3 objects from a prefix.
Parameters
----------
path : str
S3 path (e.g. s3://bucket/prefix).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
List[str]
List of objects paths.
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> wr.s3.list_objects('s3://bucket/prefix')
['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> wr.s3.list_objects('s3://bucket/prefix', boto3_session=boto3.Session())
['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
paginator = client_s3.get_paginator("list_objects_v2")
bucket: str
prefix: str
bucket, prefix = _utils.parse_path(path=path)
response_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix, PaginationConfig={"PageSize": 1000})
paths: List[str] = []
for page in response_iterator:
contents: Optional[List] = page.get("Contents")
if contents is not None:
for content in contents:
if (content is not None) and ("Key" in content):
key: str = content["Key"]
paths.append(f"s3://{bucket}/{key}")
return paths
def _path2list(path: Union[str, List[str]], boto3_session: Optional[boto3.Session]) -> List[str]:
if isinstance(path, str): # prefix
paths: List[str] = list_objects(path=path, boto3_session=boto3_session)
elif isinstance(path, list):
paths = path
else:
raise exceptions.InvalidArgumentType(f"{type(path)} is not a valid path type. Please, use str or List[str].")
return paths
def delete_objects(
path: Union[str, List[str]], use_threads: bool = True, boto3_session: Optional[boto3.Session] = None
) -> None:
"""Delete Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.delete_objects(['s3://bucket/key0', 's3://bucket/key1']) # Delete both objects
>>> wr.s3.delete_objects('s3://bucket/prefix') # Delete all objects under the received prefix
"""
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if len(paths) < 1:
return
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
buckets: Dict[str, List[str]] = _split_paths_by_bucket(paths=paths)
for bucket, keys in buckets.items():
chunks: List[List[str]] = _utils.chunkify(lst=keys, max_length=1_000)
if use_threads is False:
for chunk in chunks:
_delete_objects(bucket=bucket, keys=chunk, client_s3=client_s3)
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
executor.map(_delete_objects, repeat(bucket), chunks, repeat(client_s3))
def _split_paths_by_bucket(paths: List[str]) -> Dict[str, List[str]]:
buckets: Dict[str, List[str]] = {}
bucket: str
key: str
for path in paths:
bucket, key = _utils.parse_path(path=path)
if bucket not in buckets:
buckets[bucket] = []
buckets[bucket].append(key)
return buckets
def _delete_objects(bucket: str, keys: List[str], client_s3: boto3.client) -> None:
_logger.debug(f"len(keys): {len(keys)}")
batch: List[Dict[str, str]] = [{"Key": key} for key in keys]
client_s3.delete_objects(Bucket=bucket, Delete={"Objects": batch})
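# Editorial sketch: delete_objects() above batches keys per bucket because the S3
# DeleteObjects API accepts at most 1,000 keys per request. This is the same chunking
# idea as _utils.chunkify(lst=keys, max_length=1_000), shown here as an assumed,
# standalone equivalent:
def _example_chunkify(keys: List[str], max_length: int = 1_000) -> List[List[str]]:
    # e.g. 2,500 keys -> three delete requests of 1,000 + 1,000 + 500 keys.
    return [keys[i : i + max_length] for i in range(0, len(keys), max_length)]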
def describe_objects(
path: Union[str, List[str]],
wait_time: Optional[Union[int, float]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Dict[str, Any]]:
"""Describe Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Fetch attributes like ContentLength, DeleteMarker, LastModified, ContentType, etc
The full list of attributes can be explored under the boto3 head_object documentation:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
wait_time : Union[int,float], optional
How much time (in seconds) Wrangler should keep trying to reach these objects.
Very useful to overcome eventual consistency issues.
`None` means only a single try will be done.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Dict[str, Dict[str, Any]]
Return a dictionary of objects returned from head_objects where the key is the object path.
The response object can be explored here:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
Examples
--------
>>> import awswrangler as wr
>>> descs0 = wr.s3.describe_objects(['s3://bucket/key0', 's3://bucket/key1']) # Describe both objects
>>> descs1 = wr.s3.describe_objects('s3://bucket/prefix') # Describe all objects under the prefix
>>> descs2 = wr.s3.describe_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistency issues
"""
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if len(paths) < 1:
return {}
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
resp_list: List[Tuple[str, Dict[str, Any]]]
if use_threads is False:
resp_list = [_describe_object(path=p, wait_time=wait_time, client_s3=client_s3) for p in paths]
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
resp_list = list(executor.map(_describe_object, paths, repeat(wait_time), repeat(client_s3)))
desc_list: Dict[str, Dict[str, Any]] = dict(resp_list)
return desc_list
def _describe_object(
path: str, wait_time: Optional[Union[int, float]], client_s3: boto3.client
) -> Tuple[str, Dict[str, Any]]:
wait_time = int(wait_time) if isinstance(wait_time, float) else wait_time
tries: int = wait_time if (wait_time is not None) and (wait_time > 0) else 1
bucket: str
key: str
bucket, key = _utils.parse_path(path=path)
desc: Dict[str, Any] = {}
for i in range(tries, 0, -1):
try:
desc = client_s3.head_object(Bucket=bucket, Key=key)
break
except botocore.exceptions.ClientError as e: # pragma: no cover
if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404: # Not Found
_logger.debug(f"Object not found. {i} seconds remaining to wait.")
if i == 1: # Last try, there is no more need to sleep
break
time.sleep(1)
else:
raise e
return path, desc
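# Editorial sketch: the wait_time handling above retries head_object roughly once per
# second, up to int(wait_time) attempts, to tolerate objects that are briefly invisible
# right after a write (eventual consistency). Assumed usage, with illustrative names:
#
#   >>> import awswrangler as wr
#   >>> wr.s3.to_json(df=my_df, path='s3://bucket/new_key.json')
#   >>> desc = wr.s3.describe_objects('s3://bucket/new_key.json', wait_time=10)
#
# With wait_time=10 the freshly written object is re-checked for up to ~10 seconds;
# if it never appears, its description stays an empty dict.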
def size_objects(
path: Union[str, List[str]],
wait_time: Optional[Union[int, float]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Optional[int]]:
"""Get the size (ContentLength) in bytes of Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
wait_time : Union[int,float], optional
How much time (in seconds) Wrangler should keep trying to reach these objects.
Very useful to overcome eventual consistency issues.
`None` means only a single try will be done.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Dict[str, Optional[int]]
Dictionary where the key is the object path and the value is the object size.
Examples
--------
>>> import awswrangler as wr
>>> sizes0 = wr.s3.size_objects(['s3://bucket/key0', 's3://bucket/key1']) # Get the sizes of both objects
>>> sizes1 = wr.s3.size_objects('s3://bucket/prefix') # Get the sizes of all objects under the received prefix
>>> sizes2 = wr.s3.size_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistency issues
"""
desc_list: Dict[str, Dict[str, Any]] = describe_objects(
path=path, wait_time=wait_time, use_threads=use_threads, boto3_session=boto3_session
)
size_list: Dict[str, Optional[int]] = {k: d.get("ContentLength", None) for k, d in desc_list.items()}
return size_list
def to_csv( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
sep: str = ",",
index: bool = True,
columns: Optional[List[str]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
mode: Optional[str] = None,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write CSV file or dataset on Amazon S3.
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
The table name and all column names will be automatically sanitized using
`wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
Amazon S3 path (e.g. s3://bucket/filename.csv).
sep : str
String of length 1. Field delimiter for the output file.
index : bool
Write row names (index).
columns : List[str], optional
Columns to write.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 Session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
dataset: bool
If True, store a CSV dataset instead of a single file and enable all of the
following arguments: partition_cols, mode, database, table, description,
parameters, columns_comments.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype: Dict[str, str], optional
Dictionary of column names and Athena/Glue types to be cast.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
Column names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
pandas_kwargs:
keyword arguments forwarded to pandas.DataFrame.to_csv()
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored file paths on S3.
'partitions_values': Dictionary of partitions added with keys as S3 path locations
and values as a list of partition values as str.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table', # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.csv'],
'partitions_values': {}
}
"""
if (database is None) ^ (table is None):
raise exceptions.InvalidArgumentCombination(
"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
)
if df.empty is True:
raise exceptions.EmptyDataFrame()
session: boto3.Session = _utils.ensure_session(session=boto3_session)
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
columns_comments = columns_comments if columns_comments else {}
partitions_values: Dict[str, List[str]] = {}
fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
if dataset is False:
if partition_cols:
raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
if mode is not None:
raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
if any(arg is not None for arg in (database, table, description, parameters)):
raise exceptions.InvalidArgumentCombination(
"Please pass dataset=True to be able to use any one of these "
"arguments: database, table, description, parameters, "
"columns_comments."
)
pandas_kwargs["sep"] = sep
pandas_kwargs["index"] = index
pandas_kwargs["columns"] = columns
_to_text(file_format="csv", df=df, path=path, fs=fs, **pandas_kwargs)
paths = [path]
else:
mode = "append" if mode is None else mode
exist: bool = False
if columns:
df = df[columns]
if (database is not None) and (table is not None): # Normalize table to respect Athena's standards
df = catalog.sanitize_dataframe_columns_names(df=df)
partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
if (exist is True) and (mode in ("append", "overwrite_partitions")):
for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
dtype[k] = v
df = catalog.drop_duplicated_columns(df=df)
paths, partitions_values = _to_csv_dataset(
df=df,
path=path,
index=index,
sep=sep,
fs=fs,
use_threads=use_threads,
partition_cols=partition_cols,
dtype=dtype,
mode=mode,
boto3_session=session,
)
if (database is not None) and (table is not None):
columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
df=df, index=index, partition_cols=partition_cols, dtype=dtype, index_left=True
)
if (exist is False) or (mode == "overwrite"):
catalog.create_csv_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode="overwrite",
sep=sep,
)
if partitions_values:
_logger.debug(f"partitions_values:\n{partitions_values}")
catalog.add_csv_partitions(
database=database, table=table, partitions_values=partitions_values, boto3_session=session, sep=sep
)
return {"paths": paths, "partitions_values": partitions_values}
def _to_csv_dataset(
df: pd.DataFrame,
path: str,
index: bool,
sep: str,
fs: s3fs.S3FileSystem,
use_threads: bool,
mode: str,
dtype: Dict[str, str],
partition_cols: Optional[List[str]] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
paths: List[str] = []
partitions_values: Dict[str, List[str]] = {}
path = path if path[-1] == "/" else f"{path}/"
if mode not in ["append", "overwrite", "overwrite_partitions"]:
raise exceptions.InvalidArgumentValue(
f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
)
if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
_logger.debug(f"dtypes: {df.dtypes}")
if not partition_cols:
file_path: str = f"{path}{uuid.uuid4().hex}.csv"
_to_text(
file_format="csv",
df=df,
path=file_path,
fs=fs,
quoting=csv.QUOTE_NONE,
escapechar="\\",
header=False,
date_format="%Y-%m-%d %H:%M:%S.%f",
index=index,
sep=sep,
)
paths.append(file_path)
else:
for keys, subgroup in df.groupby(by=partition_cols, observed=True):
subgroup = subgroup.drop(partition_cols, axis="columns")
keys = (keys,) if not isinstance(keys, tuple) else keys
subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
prefix: str = f"{path}{subdir}/"
if mode == "overwrite_partitions":
delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
file_path = f"{prefix}{uuid.uuid4().hex}.csv"
_to_text(
file_format="csv",
df=subgroup,
path=file_path,
fs=fs,
quoting=csv.QUOTE_NONE,
escapechar="\\",
header=False,
date_format="%Y-%m-%d %H:%M:%S.%f",
index=index,
sep=sep,
)
paths.append(file_path)
partitions_values[prefix] = [str(k) for k in keys]
return paths, partitions_values
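# Editorial sketch: when partition_cols is given, each group is written under a
# Hive-style "<column>=<value>/" prefix, exactly as built inside _to_csv_dataset above.
# Minimal standalone illustration (path, column and value are assumptions):
def _example_partition_prefix(path: str, partition_cols: List[str], keys: Tuple[Any, ...]) -> str:
    subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
    return f"{path}{subdir}/"
# _example_partition_prefix("s3://bucket/prefix/", ["col2"], ("A",))
# -> "s3://bucket/prefix/col2=A/"  (a UUID-named .csv file is then written under it)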
def to_json(
df: pd.DataFrame,
path: str,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> None:
"""Write JSON file on Amazon S3.
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
Amazon S3 path (e.g. s3://bucket/filename.json).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 Session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
pandas_kwargs:
keyword arguments forwarded to pandas.DataFrame.to_json()
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html
Returns
-------
None
None.
Examples
--------
Writing JSON file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_json(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/filename.json',
... )
Writing JSON file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_json(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/filename.json',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
"""
return _to_text(
file_format="json",
df=df,
path=path,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
**pandas_kwargs,
)
def _to_text(
file_format: str,
df: pd.DataFrame,
path: str,
fs: Optional[s3fs.S3FileSystem] = None,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> None:
if df.empty is True: # pragma: no cover
raise exceptions.EmptyDataFrame()
if fs is None:
fs = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
with fs.open(path, "w") as f:
if file_format == "csv":
df.to_csv(f, **pandas_kwargs)
elif file_format == "json":
df.to_json(f, **pandas_kwargs)
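# Editorial sketch: to_json()/_to_text() forward **pandas_kwargs straight to
# pandas.DataFrame.to_json, so standard pandas options control the output format.
# Assumed example writing newline-delimited JSON (JSON Lines):
#
#   >>> import awswrangler as wr
#   >>> import pandas as pd
#   >>> wr.s3.to_json(
#   ...     df=pd.DataFrame({'col': [1, 2, 3]}),
#   ...     path='s3://bucket/filename.jsonl',
#   ...     orient='records',
#   ...     lines=True,
#   ... )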
def to_parquet( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
index: bool = False,
compression: Optional[str] = "snappy",
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
mode: Optional[str] = None,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write Parquet file or dataset on Amazon S3.
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
The table name and all column names will be automatically sanitized using
`wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
S3 path (for file e.g. ``s3://bucket/prefix/filename.parquet``) (for dataset e.g. ``s3://bucket/prefix``).
index : bool
True to store the DataFrame index in file, otherwise False to ignore it.
compression: str, optional
Compression style (``None``, ``snappy``, ``gzip``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
dataset: bool
If True, store a Parquet dataset instead of a single file and enable all of the
following arguments: partition_cols, mode, database, table, description,
parameters, columns_comments.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype: Dict[str, str], optional
Dictionary of column names and Athena/Glue types to be cast.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
Column names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored file paths on S3.
'partitions_values': Dictionary of partitions added with keys as S3 path locations
and values as a list of partition values as str.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table', # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.parquet'],
'partitions_values': {}
}
"""
if (database is None) ^ (table is None):
raise exceptions.InvalidArgumentCombination(
"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
)
if df.empty is True:
raise exceptions.EmptyDataFrame()
session: boto3.Session = _utils.ensure_session(session=boto3_session)
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
columns_comments = columns_comments if columns_comments else {}
partitions_values: Dict[str, List[str]] = {}
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
compression_ext: Optional[str] = _COMPRESSION_2_EXT.get(compression, None)
if compression_ext is None:
raise exceptions.InvalidCompression(f"{compression} is invalid, please use None, snappy or gzip.")
if dataset is False:
if partition_cols:
raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
if mode is not None:
raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
if any(arg is not None for arg in (database, table, description, parameters)):
raise exceptions.InvalidArgumentCombination(
"Please pass dataset=True to be able to use any one of these "
"arguments: database, table, description, parameters, "
"columns_comments."
)
paths = [
_to_parquet_file(
df=df, path=path, schema=None, index=index, compression=compression, cpus=cpus, fs=fs, dtype={}
)
]
else:
mode = "append" if mode is None else mode
exist: bool = False
if (database is not None) and (table is not None): # Normalize table to respect Athena's standards
df = catalog.sanitize_dataframe_columns_names(df=df)
partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
if (exist is True) and (mode in ("append", "overwrite_partitions")):
for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
dtype[k] = v
df = catalog.drop_duplicated_columns(df=df)
paths, partitions_values = _to_parquet_dataset(
df=df,
path=path,
index=index,
compression=compression,
compression_ext=compression_ext,
cpus=cpus,
fs=fs,
use_threads=use_threads,
partition_cols=partition_cols,
dtype=dtype,
mode=mode,
boto3_session=session,
)
if (database is not None) and (table is not None):
columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
df=df, index=index, partition_cols=partition_cols, dtype=dtype
)
if (exist is False) or (mode == "overwrite"):
catalog.create_parquet_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
compression=compression,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode="overwrite",
)
if partitions_values:
_logger.debug(f"partitions_values:\n{partitions_values}")
catalog.add_parquet_partitions(
database=database,
table=table,
partitions_values=partitions_values,
compression=compression,
boto3_session=session,
)
return {"paths": paths, "partitions_values": partitions_values}
def _to_parquet_dataset(
df: pd.DataFrame,
path: str,
index: bool,
compression: Optional[str],
compression_ext: str,
cpus: int,
fs: s3fs.S3FileSystem,
use_threads: bool,
mode: str,
dtype: Dict[str, str],
partition_cols: Optional[List[str]] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
paths: List[str] = []
partitions_values: Dict[str, List[str]] = {}
path = path if path[-1] == "/" else f"{path}/"
if mode not in ["append", "overwrite", "overwrite_partitions"]:
raise exceptions.InvalidArgumentValue(
f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
)
if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(
df=df, index=index, ignore_cols=partition_cols, dtype=dtype
)
_logger.debug(f"schema: {schema}")
if not partition_cols:
file_path: str = f"{path}{uuid.uuid4().hex}{compression_ext}.parquet"
_to_parquet_file(
df=df, schema=schema, path=file_path, index=index, compression=compression, cpus=cpus, fs=fs, dtype=dtype
)
paths.append(file_path)
else:
for keys, subgroup in df.groupby(by=partition_cols, observed=True):
subgroup = subgroup.drop(partition_cols, axis="columns")
keys = (keys,) if not isinstance(keys, tuple) else keys
subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
prefix: str = f"{path}{subdir}/"
if mode == "overwrite_partitions":
delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
file_path = f"{prefix}{uuid.uuid4().hex}{compression_ext}.parquet"
_to_parquet_file(
df=subgroup,
schema=schema,
path=file_path,
index=index,
compression=compression,
cpus=cpus,
fs=fs,
dtype=dtype,
)
paths.append(file_path)
partitions_values[prefix] = [str(k) for k in keys]
return paths, partitions_values
def _to_parquet_file(
df: pd.DataFrame,
path: str,
schema: pa.Schema,
index: bool,
compression: Optional[str],
cpus: int,
fs: s3fs.S3FileSystem,
dtype: Dict[str, str],
) -> str:
table: pa.Table = pyarrow.Table.from_pandas(df=df, schema=schema, nthreads=cpus, preserve_index=index, safe=True)
for col_name, col_type in dtype.items():
if col_name in table.column_names:
col_index = table.column_names.index(col_name)
pyarrow_dtype = _data_types.athena2pyarrow(col_type)
field = pa.field(name=col_name, type=pyarrow_dtype)
table = table.set_column(col_index, field, table.column(col_name).cast(pyarrow_dtype))
_logger.debug(f"Casting column {col_name} ({col_index}) to {col_type} ({pyarrow_dtype})")
pyarrow.parquet.write_table(
table=table,
where=path,
write_statistics=True,
use_dictionary=True,
filesystem=fs,
coerce_timestamps="ms",
compression=compression,
flavor="spark",
)
return path
def read_csv(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read CSV file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_csv().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all CSV files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(path='s3://bucket/prefix/')
Reading all CSV files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all CSV files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
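Reading a single CSV file forwarding pandas arguments (illustrative sketch; the separator and column names are placeholders, and any keyword accepted by ``pandas.read_csv()`` can be forwarded the same way)
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(
... path='s3://bucket/prefix/my_file.csv',
... sep='|',
... usecols=['col0', 'col1']
... )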
"""
return _read_text(
parser_func=pd.read_csv,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def read_fwf(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read fixed-width formatted file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_fwf().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_fwf.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all fixed-width formatted (FWF) files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(path='s3://bucket/prefix/')
Reading all fixed-width formatted (FWF) files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all fixed-width formatted (FWF) files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
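Reading FWF files forwarding pandas arguments (illustrative sketch; the widths and column names are placeholders forwarded to ``pandas.read_fwf()``)
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(
... path='s3://bucket/prefix/my_file.txt',
... widths=[10, 5],
... names=['id', 'code']
... )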
"""
return _read_text(
parser_func=pd.read_fwf,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def read_json(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read JSON file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_json().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_json.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all JSON files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_json(path='s3://bucket/prefix/')
Reading all JSON files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_json(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all JSON files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
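Reading JSON Lines files (illustrative sketch; ``lines=True`` is simply forwarded to ``pandas.read_json()``)
>>> import awswrangler as wr
>>> df = wr.s3.read_json(path='s3://bucket/prefix/my_file.json', lines=True)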
"""
return _read_text(
parser_func=pd.read_json,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def _read_text(
parser_func: Callable,
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
if "iterator" in pandas_kwargs:
raise exceptions.InvalidArgument("Please, use chunksize instead of iterator.")
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if chunksize is not None:
dfs: Iterator[pd.DataFrame] = _read_text_chunksize(
parser_func=parser_func,
paths=paths,
boto3_session=boto3_session,
chunksize=chunksize,
pandas_args=pandas_kwargs,
s3_additional_kwargs=s3_additional_kwargs,
)
return dfs
if use_threads is False:
df: pd.DataFrame = pd.concat(
objs=[
_read_text_full(
parser_func=parser_func,
path=p,
boto3_session=boto3_session,
pandas_args=pandas_kwargs,
s3_additional_kwargs=s3_additional_kwargs,
)
for p in paths
],
ignore_index=True,
sort=False,
)
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
df = pd.concat(
objs=executor.map(
_read_text_full,
repeat(parser_func),
paths,
repeat(boto3_session),
repeat(pandas_kwargs),
repeat(s3_additional_kwargs),
),
ignore_index=True,
sort=False,
)
return df
def _read_text_chunksize(
parser_func: Callable,
paths: List[str],
boto3_session: boto3.Session,
chunksize: int,
pandas_args: Dict[str, Any],
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Iterator[pd.DataFrame]:
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
for path in paths:
_logger.debug(f"path: {path}")
if pandas_args.get("compression", "infer") == "infer":
pandas_args["compression"] = infer_compression(path, compression="infer")
with fs.open(path, "rb") as f:
reader: pandas.io.parsers.TextFileReader = parser_func(f, chunksize=chunksize, **pandas_args)
for df in reader:
yield df
def _read_text_full(
parser_func: Callable,
path: str,
boto3_session: boto3.Session,
pandas_args: Dict[str, Any],
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pd.DataFrame:
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
if pandas_args.get("compression", "infer") == "infer":
pandas_args["compression"] = infer_compression(path, compression="infer")
with fs.open(path, "rb") as f:
return parser_func(f, **pandas_args)
def | (
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
categories: List[str] = None,
validate_schema: bool = True,
dataset: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pyarrow.parquet.ParquetDataset:
"""Encapsulate all initialization before the use of the pyarrow.parquet.ParquetDataset."""
if dataset is False:
path_or_paths: Union[str, List[str]] = _path2list(path=path, boto3_session=boto3_session)
elif isinstance(path, str):
path_or_paths = path[:-1] if path.endswith("/") else path
else:
path_or_paths = path
_logger.debug(f"path_or_paths: {path_or_paths}")
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
data: pyarrow.parquet.ParquetDataset = pyarrow.parquet.ParquetDataset(
path_or_paths=path_or_paths,
filesystem=fs,
metadata_nthreads=cpus,
filters=filters,
read_dictionary=categories,
validate_schema=validate_schema,
)
return data
def read_parquet(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
columns: Optional[List[str]] = None,
validate_schema: bool = True,
chunked: bool = False,
dataset: bool = False,
categories: List[str] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read Apache Parquet file(s) from from a received S3 prefix or list of S3 objects paths.
The concept of Dataset goes beyond the simple idea of files and enable more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
columns : List[str], optional
Names of columns to read from the file(s).
validate_schema:
Check that individual file schemas are all the same / compatible. Schemas within a
folder prefix should all be the same. Disable if you have schemas that are different
and want to disable this check.
chunked : bool
If True will break the data into smaller DataFrames (non-deterministic number of lines).
Otherwise return a single DataFrame with the whole data.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
categories: List[str], optional
List of columns names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunked=True`.
Examples
--------
Reading all Parquet files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(path='s3://bucket/prefix/')
Reading all Parquet files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all Parquet files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(path=['s3://bucket/filename0.parquet', 's3://bucket/filename1.parquet'])
Reading in chunks
>>> import awswrangler as wr
>>> dfs = wr.s3.read_parquet(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunked=True)
>>> for df in dfs:
>>> print(df) # Smaller Pandas DataFrame
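Reading a partitioned dataset selecting columns and filtering partitions (illustrative sketch; the column names and filter values are placeholders)
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(
... path='s3://bucket/prefix/',
... dataset=True,
... columns=['col0'],
... filters=[('col2', '=', 'A')]
... )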
"""
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path,
filters=filters,
dataset=dataset,
categories=categories,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
validate_schema=validate_schema,
)
if chunked is False:
return _read_parquet(
data=data, columns=columns, categories=categories, use_threads=use_threads, validate_schema=validate_schema
)
return _read_parquet_chunked(data=data, columns=columns, categories=categories, use_threads=use_threads)
def _read_parquet(
data: pyarrow.parquet.ParquetDataset,
columns: Optional[List[str]] = None,
categories: List[str] = None,
use_threads: bool = True,
validate_schema: bool = True,
) -> pd.DataFrame:
tables: List[pa.Table] = []
for piece in data.pieces:
table: pa.Table = piece.read(
columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False
)
tables.append(table)
promote: bool = not validate_schema
table = pa.lib.concat_tables(tables, promote=promote)
return table.to_pandas(
use_threads=use_threads,
split_blocks=True,
self_destruct=True,
integer_object_nulls=False,
date_as_object=True,
ignore_metadata=True,
categories=categories,
types_mapper=_data_types.pyarrow2pandas_extension,
)
def _read_parquet_chunked(
data: pyarrow.parquet.ParquetDataset,
columns: Optional[List[str]] = None,
categories: List[str] = None,
use_threads: bool = True,
) -> Iterator[pd.DataFrame]:
for piece in data.pieces:
table: pa.Table = piece.read(
columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False
)
yield table.to_pandas(
use_threads=use_threads,
split_blocks=True,
self_destruct=True,
integer_object_nulls=False,
date_as_object=True,
ignore_metadata=True,
categories=categories,
types_mapper=_data_types.pyarrow2pandas_extension,
)
def read_parquet_metadata(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
dataset: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]]]:
"""Read Apache Parquet file(s) metadata from from a received S3 prefix or list of S3 objects paths.
The concept of Dataset goes beyond the simple idea of files and enable more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Tuple[Dict[str, str], Optional[Dict[str, str]]]
columns_types: Dictionary with keys as column names and values as
data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /
partitions_types: Dictionary with keys as partition names
and values as data types (e.g. {'col2': 'date'}).
Examples
--------
Reading all Parquet files (with partitions) metadata under a prefix
>>> import awswrangler as wr
>>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path='s3://bucket/prefix/', dataset=True)
Reading all Parquet files metadata from a list
>>> import awswrangler as wr
>>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path=[
... 's3://bucket/filename0.parquet',
... 's3://bucket/filename1.parquet'
... ])
"""
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=boto3_session
)
return _data_types.athena_types_from_pyarrow_schema(
schema=data.schema.to_arrow_schema(), partitions=data.partitions
)
def store_parquet_metadata(
path: str,
database: str,
table: str,
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
dataset: bool = False,
use_threads: bool = True,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
compression: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]:
"""Infer and store parquet metadata on AWS Glue Catalog.
Infer Apache Parquet file(s) metadata from a received S3 prefix or list of S3 objects paths
and then store it on the AWS Glue Catalog, including all inferred partitions
(no need to run 'MSCK REPAIR TABLE').
The concept of Dataset goes beyond the simple idea of files and enable more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
database : str
Glue/Athena catalog: Database name.
table : str
Glue/Athena catalog: Table name.
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
compression: str, optional
Compression style (``None``, ``snappy``, ``gzip``, etc).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]
The metadata used to create the Glue Table.
columns_types: Dictionary with keys as column names and values as
data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /
partitions_types: Dictionary with keys as partition names
and values as data types (e.g. {'col2': 'date'}). /
partitions_values: Dictionary with keys as S3 path locations and values as a
list of partitions values as str (e.g. {'s3://bucket/prefix/y=2020/m=10/': ['2020', '10']}).
Examples
--------
Reading all Parquet files metadata under a prefix
>>> import awswrangler as wr
>>> columns_types, partitions_types, partitions_values = wr.s3.store_parquet_metadata(
... path='s3://bucket/prefix/',
... database='...',
... table='...',
... dataset=True
... )
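Storing metadata with a table description and column comments (illustrative sketch; the database, table and comment values are placeholders)
>>> import awswrangler as wr
>>> columns_types, partitions_types, partitions_values = wr.s3.store_parquet_metadata(
... path='s3://bucket/prefix/',
... database='default',
... table='my_table',
... dataset=True,
... description='My table.',
... columns_comments={'col0': 'Column 0.'}
... )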
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=session
)
partitions: Optional[pyarrow.parquet.ParquetPartitions] = data.partitions
columns_types, partitions_types = _data_types.athena_types_from_pyarrow_schema(
schema=data.schema.to_arrow_schema(), partitions=partitions
)
catalog.create_parquet_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
)
partitions_values: Dict[str, List[str]] = _data_types.athena_partitions_from_pyarrow_partitions(
path=path, partitions=partitions
)
catalog.add_parquet_partitions(
database=database,
table=table,
partitions_values=partitions_values,
compression=compression,
boto3_session=session,
)
return columns_types, partitions_types, partitions_values
def wait_objects_exist(
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Wait Amazon S3 objects exist.
Polls S3.Client.head_object() every 5 seconds (default) until a successful
state is reached. An error is returned after 20 (default) failed checks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectExists
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
delay : Union[int,float], optional
The amount of time in seconds to wait between attempts. Default: 5
max_attempts : int, optional
The maximum number of attempts to be made. Default: 20
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.wait_objects_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects
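Waiting with a custom polling configuration (illustrative sketch; the delay and attempt counts are arbitrary)
>>> import awswrangler as wr
>>> wr.s3.wait_objects_exist(
... paths=['s3://bucket/key0', 's3://bucket/key1'],
... delay=1,
... max_attempts=3
... )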
"""
return _wait_objects(
waiter_name="object_exists",
paths=paths,
delay=delay,
max_attempts=max_attempts,
use_threads=use_threads,
boto3_session=boto3_session,
)
def wait_objects_not_exist(
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Wait Amazon S3 objects not exist.
Polls S3.Client.head_object() every 5 seconds (default) until a successful
state is reached. An error is returned after 20 (default) failed checks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectNotExists
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
delay : Union[int,float], optional
The amount of time in seconds to wait between attempts. Default: 5
max_attempts : int, optional
The maximum number of attempts to be made. Default: 20
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.wait_objects_not_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects not exist
"""
return _wait_objects(
waiter_name="object_not_exists",
paths=paths,
delay=delay,
max_attempts=max_attempts,
use_threads=use_threads,
boto3_session=boto3_session,
)
def _wait_objects(
waiter_name: str,
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
delay = 5 if delay is None else delay
max_attempts = 20 if max_attempts is None else max_attempts
_delay: int = int(delay) if isinstance(delay, float) else delay
if len(paths) < 1:
return None
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
waiter = client_s3.get_waiter(waiter_name)
_paths: List[Tuple[str, str]] = [_utils.parse_path(path=p) for p in paths]
if use_threads is False:
for bucket, key in _paths:
waiter.wait(Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts})
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
futures: List[concurrent.futures.Future] = []
for bucket, key in _paths:
future: concurrent.futures.Future = executor.submit(
fn=waiter.wait, Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts}
)
futures.append(future)
for future in futures:
future.result()
return None
def read_parquet_table(
table: str,
database: str,
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
columns: Optional[List[str]] = None,
categories: List[str] = None,
chunked: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read Apache Parquet table registered on AWS Glue Catalog.
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
table : str
AWS Glue Catalog table name.
database : str
AWS Glue Catalog database name.
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
columns : List[str], optional
Names of columns to read from the file(s).
categories: List[str], optional
List of columns names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
chunked : bool
If True will break the data into smaller DataFrames (non-deterministic number of lines).
Otherwise return a single DataFrame with the whole data.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunked=True`.
Examples
--------
Reading Parquet Table
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet_table(database='...', table='...')
Reading Parquet Table encrypted
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet_table(
... database='...',
... table='...',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading Parquet Table in chunks
>>> import awswrangler as wr
>>> dfs = wr.s3.read_parquet_table(database='...', table='...', chunked=True)
>>> for df in dfs:
>>> print(df) # Smaller Pandas DataFrame
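Reading only a subset of columns from a Parquet Table (illustrative sketch; the column names are placeholders)
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet_table(database='...', table='...', columns=['col0', 'col1'])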
"""
path: str = catalog.get_table_location(database=database, table=table, boto3_session=boto3_session)
return read_parquet(
path=path,
filters=filters,
columns=columns,
categories=categories,
chunked=chunked,
dataset=True,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
)
def merge_datasets(
source_path: str,
target_path: str,
mode: str = "append",
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
"""Merge a source dataset into a target dataset.
Note
----
If you are merging tables (S3 datasets + Glue Catalog metadata),
remember that you will also need to update your partitions metadata in some cases.
(e.g. wr.athena.repair_table(table='...', database='...'))
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
source_path : str,
S3 Path for the source directory.
target_path : str,
S3 Path for the target directory.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
List[str]
List of new objects paths.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.merge_datasets(
... source_path="s3://bucket0/dir0/",
... target_path="s3://bucket1/dir1/",
... mode="append"
... )
["s3://bucket1/dir1/key0", "s3://bucket1/dir1/key1"]
"""
source_path = source_path[:-1] if source_path[-1] == "/" else source_path
target_path = target_path[:-1] if target_path[-1] == "/" else target_path
session: boto3.Session = _utils.ensure_session(session=boto3_session)
paths: List[str] = list_objects(path=f"{source_path}/", boto3_session=session)
_logger.debug(f"len(paths): {len(paths)}")
if len(paths) < 1:
return []
if mode == "overwrite":
_logger.debug(f"Deleting to overwrite: {target_path}/")
delete_objects(path=f"{target_path}/", use_threads=use_threads, boto3_session=session)
elif mode == "overwrite_partitions":
paths_wo_prefix: List[str] = [x.replace(f"{source_path}/", "") for x in paths]
paths_wo_filename: List[str] = [f"{x.rpartition('/')[0]}/" for x in paths_wo_prefix]
partitions_paths: List[str] = list(set(paths_wo_filename))
target_partitions_paths = [f"{target_path}/{x}" for x in partitions_paths]
for path in target_partitions_paths:
_logger.debug(f"Deleting to overwrite_partitions: {path}")
delete_objects(path=path, use_threads=use_threads, boto3_session=session)
elif mode != "append":
raise exceptions.InvalidArgumentValue(f"{mode} is a invalid mode option.")
new_objects: List[str] = copy_objects(
paths=paths, source_path=source_path, target_path=target_path, use_threads=use_threads, boto3_session=session
)
_logger.debug(f"len(new_objects): {len(new_objects)}")
return new_objects
def copy_objects(
paths: List[str],
source_path: str,
target_path: str,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
"""Copy a list of S3 objects to another S3 directory.
Note
----
In case of `use_threads=True` the number of threads that will be spawned is obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/dir0/key0, s3://bucket/dir0/key1]).
source_path : str,
S3 Path for the source directory.
target_path : str,
S3 Path for the target directory.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
List[str]
List of new objects paths.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.copy_objects(
... paths=["s3://bucket0/dir0/key0", "s3://bucket0/dir0/key1"])
... source_path="s3://bucket0/dir0/",
... target_path="s3://bucket1/dir1/",
... )
["s3://bucket1/dir1/key0", "s3://bucket1/dir1/key1"]
"""
_logger.debug(f"len(paths): {len(paths)}")
if len(paths) < 1:
return []
source_path = source_path[:-1] if source_path[-1] == "/" else source_path
target_path = target_path[:-1] if target_path[-1] == "/" else target_path
session: boto3.Session = _utils.ensure_session(session=boto3_session)
batch: List[Tuple[str, str]] = []
new_objects: List[str] = []
for path in paths:
path_wo_prefix: str = path.replace(f"{source_path}/", "")
path_final: str = f"{target_path}/{path_wo_prefix}"
new_objects.append(path_final)
batch.append((path, path_final))
_logger.debug(f"len(new_objects): {len(new_objects)}")
_copy_objects(batch=batch, use_threads=use_threads, boto3_session=session)
return new_objects
def _copy_objects(batch: List[Tuple[str, str]], use_threads: bool, boto3_session: boto3.Session) -> None:
_logger.debug(f"len(batch): {len(batch)}")
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
resource_s3: boto3.resource = _utils.resource(service_name="s3", session=boto3_session)
for source, target in batch:
source_bucket, source_key = _utils.parse_path(path=source)
copy_source: Dict[str, str] = {"Bucket": source_bucket, "Key": source_key}
target_bucket, target_key = _utils.parse_path(path=target)
resource_s3.meta.client.copy(
CopySource=copy_source,
Bucket=target_bucket,
Key=target_key,
SourceClient=client_s3,
Config=TransferConfig(num_download_attempts=15, use_threads=use_threads),
)
| _read_parquet_init |
snark_range.rs | use accumulator::group::Rsa2048;
use algebra::bls12_381::{Bls12_381, G1Projective};
use cpsnarks_set::commitments::Commitment;
use cpsnarks_set::{
parameters::Parameters,
protocols::hash_to_prime::{
snark_range::Protocol,
transcript::{TranscriptProverChannel, TranscriptVerifierChannel},
HashToPrimeProtocol, Statement, Witness,
},
};
use criterion::{criterion_group, criterion_main, Criterion};
use merlin::Transcript;
use rand::thread_rng;
use rug::rand::RandState;
use rug::Integer;
use std::cell::RefCell;
pub fn | (c: &mut Criterion) {
let params = Parameters::from_security_level(128).unwrap();
let mut rng1 = RandState::new();
rng1.seed(&Integer::from(13));
let mut rng2 = thread_rng();
let crs = cpsnarks_set::protocols::membership::Protocol::<
Rsa2048,
G1Projective,
Protocol<Bls12_381>,
>::setup(¶ms, &mut rng1, &mut rng2)
.unwrap()
.crs
.crs_hash_to_prime;
let protocol = Protocol::<Bls12_381>::from_crs(&crs);
let value = Integer::from(Integer::u_pow_u(
2,
(crs.parameters.hash_to_prime_bits) as u32,
)) - &Integer::from(245);
let randomness = Integer::from(9);
let commitment = protocol
.crs
.pedersen_commitment_parameters
.commit(&value, &randomness)
.unwrap();
let proof_transcript = RefCell::new(Transcript::new(b"hash_to_prime"));
let statement = Statement { c_e_q: commitment };
let mut verifier_channel = TranscriptVerifierChannel::new(&crs, &proof_transcript);
protocol
.prove(
&mut verifier_channel,
&mut rng2,
&statement,
&Witness {
e: value.clone(),
r_q: randomness.clone(),
},
)
.unwrap();
let proof = verifier_channel.proof().unwrap();
let verification_transcript = RefCell::new(Transcript::new(b"hash_to_prime"));
let mut prover_channel = TranscriptProverChannel::new(&crs, &verification_transcript, &proof);
protocol.verify(&mut prover_channel, &statement).unwrap();
c.bench_function("snark_range protocol", move |b| {
b.iter(|| {
let proof_transcript = RefCell::new(Transcript::new(b"hash_to_prime"));
let statement = Statement { c_e_q: commitment };
let mut verifier_channel = TranscriptVerifierChannel::new(&crs, &proof_transcript);
protocol
.prove(
&mut verifier_channel,
&mut rng2,
&statement,
&Witness {
e: value.clone(),
r_q: randomness.clone(),
},
)
.unwrap();
})
});
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| criterion_benchmark |
test_game_map.py | import pytest
from array import array
from game_map import GameMap
from tests.conftest import get_relative_path
sample_map_data = tuple(
reversed(
(
array("I", (0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)),
array("I", (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0)),
array("I", (1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1)),
array("I", (1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1)),
array("I", (1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1)),
array("I", (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0)),
array("I", (0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)),
)
)
)
def test_game_map_from_file(sample_game_map, sample_tiles):
assert sample_game_map.map_data == sample_map_data
assert sample_game_map.width == 21
assert sample_game_map.height == 21
assert sample_game_map.tile_data == sample_tiles
# Assert the map is read bottom-up (y=0 is the bottom row)
assert sample_game_map.get(16, 2) == 0
assert sample_game_map.get(16, 18) == 1
def test_game_map_get_out_of_bounds(sample_game_map):
with pytest.raises(AssertionError):
sample_game_map.get(-1, 0)
sample_game_map.get(0, -1)
sample_game_map.get(-1, -1)
sample_game_map.get(21, 0)
sample_game_map.get(0, 21)
sample_game_map.get(21, 21)
def test_game_map_load_mapfile_nonrectangular():
|
def test_game_map_traversable(sample_game_map):
assert sample_game_map.traversable(2, 2)
assert not sample_game_map.traversable(1, 1)
assert sample_game_map.traversable(16, 2)
assert not sample_game_map.traversable(16, 18)
| with pytest.raises(AssertionError):
GameMap.load_mapfile(get_relative_path("fixtures/map_nonrectangular.csv")) |
completion.rs | // completions: classify whether it's a name completion or a field completion,
// then wait for parser/checker outputs to actually gather the informations
use std::cmp::Ordering;
use std::collections::HashSet;
use std::sync::Arc;
use kailua_env::{Pos, Span, Source};
use kailua_syntax::lex::{Tok, Punct, Keyword, NestedToken, NestingCategory};
use kailua_syntax::ast::Chunk;
use kailua_types::ty::Key;
use kailua_check::env::Output;
use protocol::*;
use super::{get_prefix_expr_slot, last_non_comment};
fn index_and_neighbor<T, F>(tokens: &[T], pos: Pos, as_span: F) -> (usize, bool, bool)
where F: Fn(&T) -> Span
{
// pos
// ________v________ idx end after
// BOF^ 0000000 0 false false
// BOF^ $EOF 0 false false
// BOF^$EOF 0 false false
// iiiiiiii jjjjjjj i true true
// iiiiiiiijjjjjjjjj i true true
// iiiiii jjjjjjj i false false
// iiiiii jjjjjjjjj j false false
// iiii jjjjjj kkk j false true
// iii jjjjj kkkkk j false true
// ii jjjjj kkkkkk j true true
// zzzzzzzz $EOF z true true
// zzzz $EOF z false false
match tokens.binary_search_by(|tok| as_span(tok).begin().cmp(&pos)) {
Ok(i) => { // tokens[i].begin == pos
if i > 0 && as_span(&tokens[i-1]).end() == pos {
(i-1, true, true)
} else {
(i, false, false)
}
},
Err(0) => { // pos < tokens[0].begin or inf
(0, false, false)
},
Err(i) => { // tokens[i-1].begin < pos < tokens[i].begin or inf
match pos.cmp(&as_span(&tokens[i-1]).end()) {
Ordering::Less => (i-1, false, true),
Ordering::Equal => (i-1, true, true),
Ordering::Greater => (i-1, false, false),
}
},
}
}
#[test]
fn test_index_and_neighbor() {
use kailua_env::SourceFile;
// we need a sizable span to construct a dummy list of "tokens" (solely represented by spans)
let mut source = Source::new();
let span = source.add(SourceFile::from_u8("dummy".to_string(), b"0123456789"[..].to_owned()));
let pos = |i| span.clone().nth(i).unwrap();
let tokens = [Span::new(pos(1), pos(2)), Span::new(pos(2), pos(4)),
Span::new(pos(5), pos(7)), Span::new(pos(8), pos(8))];
assert_eq!(index_and_neighbor(&tokens, pos(0), |&sp| sp), (0, false, false));
assert_eq!(index_and_neighbor(&tokens, pos(1), |&sp| sp), (0, false, false));
assert_eq!(index_and_neighbor(&tokens, pos(2), |&sp| sp), (0, true, true));
assert_eq!(index_and_neighbor(&tokens, pos(3), |&sp| sp), (1, false, true));
assert_eq!(index_and_neighbor(&tokens, pos(4), |&sp| sp), (1, true, true));
assert_eq!(index_and_neighbor(&tokens, pos(5), |&sp| sp), (2, false, false));
assert_eq!(index_and_neighbor(&tokens, pos(6), |&sp| sp), (2, false, true));
assert_eq!(index_and_neighbor(&tokens, pos(7), |&sp| sp), (2, true, true));
assert_eq!(index_and_neighbor(&tokens, pos(8), |&sp| sp), (3, false, false));
assert_eq!(index_and_neighbor(&tokens, pos(9), |&sp| sp), (3, false, false));
let tokens = [Span::new(pos(1), pos(2)), Span::new(pos(2), pos(4)),
Span::new(pos(5), pos(8)), Span::new(pos(8), pos(8))];
assert_eq!(index_and_neighbor(&tokens, pos(7), |&sp| sp), (2, false, true));
assert_eq!(index_and_neighbor(&tokens, pos(8), |&sp| sp), (2, true, true));
assert_eq!(index_and_neighbor(&tokens, pos(9), |&sp| sp), (3, false, false));
}
fn make_item(label: String, kind: CompletionItemKind, detail: Option<String>) -> CompletionItem {
CompletionItem {
label: label,
kind: Some(kind),
detail: detail,
documentation: None,
sortText: None,
filterText: None,
insertText: None,
textEdit: None,
additionalTextEdits: Vec::new(),
command: None,
data: None,
}
}
// for the interactivity, the lookbehind is limited to a reasonable number
const LOOKBEHIND_LIMIT: usize = 4096;
// check if the caret is located in regions where the autocompletion should be disabled:
//
// 1. `local NAME ... | ... [= ...]`
// 2. `for NAME ... | ... = ... do ... end`
// 3. `function NAME ... | ( ... )`
// 4. `function [NAME ...] ( ... | ... )`
//
// the check for 1 and 2 is handled by looking backward for the first token
// that is not a comment, a name or a comma and is in the same nesting as the caret.
// if the token exists and it's `local` or `for`, autocompletion is disabled.
//
// the check for 3 is handled by looking backward for the first token
// that is not a comment, a name, a dot or a colon and is in the same nesting as the caret.
// if the token exists and it's `function`, autocompletion is disabled.
//
// the check for 4 is handled similarly to the check for 1 and 2,
// but once seen a `(` token, it will switch to the check for 3 at the parent nesting.
fn is_name_completion_disabled(tokens: &[NestedToken], name_idx: usize) -> bool {
let name_tok = &tokens[name_idx];
let mut init_depth = name_tok.depth;
let init_serial = name_tok.serial;
let mut name_decl_possible = true; // case 1, 2 and 4a
let mut func_sig_possible = true; // case 3 and 4b
for (i, tok) in tokens[..name_idx].iter().enumerate().rev().take(LOOKBEHIND_LIMIT) {
if !(name_decl_possible || func_sig_possible) { break; }
if tok.depth <= init_depth && tok.serial != init_serial {
// escaped the current nesting, stop the search
return false;
} else if tok.depth > init_depth {
// ignore more nested tokens (but count them towards the threshold)
continue;
}
// name_decl_possible can continue to func_sig_possible in place, so this should be first
if func_sig_possible {
match tok.tok.base {
Tok::Comment |
Tok::Name(_) |
Tok::Punct(Punct::Dot) |
Tok::Punct(Punct::Colon) => {},
Tok::Keyword(Keyword::Function) => return true,
_ => func_sig_possible = false,
}
}
if name_decl_possible {
match tok.tok.base {
Tok::Comment |
Tok::Name(_) |
Tok::Punct(Punct::Comma) |
// Newline to account for meta comments (other tokens are nested)
Tok::Punct(Punct::Newline) => {},
Tok::Punct(Punct::LParen) => {
// `function ... ( ... | ... )` is possible
// update the initial nesting to point to a token before `(` and proceed
if i == 0 { return false; }
init_depth = tokens[i-1].depth;
name_decl_possible = false;
func_sig_possible = true;
}
Tok::Keyword(Keyword::Local) |
Tok::Keyword(Keyword::For) => return true,
_ => name_decl_possible = false,
}
}
}
false
}
const EXPR_KEYWORDS: &'static [&'static str] = &[
"and", "break", "do", "else", "elseif", "end", "false", "for", "function", "goto", "if",
"in", "local", "nil", "not", "or", "repeat", "return", "then", "true", "until", "while",
]; | "and", "break", "do", "else", "elseif", "end", "false", "for", "function", "goto", "if",
"in", "local", "nil", "not", "or", "repeat", "return", "then", "true", "until", "while",
"assume", "const", "global", "map", "module", "once", "open", "type", "var", "vector",
];
fn keywords_per_category(nesting_category: NestingCategory) -> &'static [&'static str] {
match nesting_category {
NestingCategory::Expr => EXPR_KEYWORDS,
NestingCategory::Meta => META_KEYWORDS,
}
}
fn detail_from_span(span: Span, source: &Source) -> Option<String> {
let begin = span.begin(); // span.end() won't be in the different line, probably
if let Some(file) = source.file(begin.unit()) {
// XXX the file names can collide
let path = file.path().split(|c| c == '\\' || c == '/').last().unwrap_or("");
if let Some((line, _)) = file.line_from_pos(begin) {
Some(format!("{}:{}", path, line + 1))
} else {
Some(format!("{}", path))
}
} else {
None
}
}
#[derive(Clone, Debug)]
pub enum Class {
// complete a top-level name or keyword.
// the caret is located inside tokens[i] or on the intersection of tokens[i-1] and tokens[i].
// in the former case tokens[i] should be a name; in the latter case tokens[i-1] may not exist.
Name(usize, NestingCategory),
// complete a field or method after `.` or `:`.
// the caret is located after tokens[i] (which should be `.` or `:`).
Field(usize),
}
pub fn classify(tokens: &[NestedToken], pos: Pos) -> Option<Class> {
let (idx, end, after) = index_and_neighbor(tokens, pos, |tok| tok.tok.span);
let ptok = if idx > 0 { tokens.get(idx - 1) } else { None };
let tok = tokens.get(idx);
match (end, after, ptok.map(|tok| &tok.tok.base), tok.map(|tok| &tok.tok.base)) {
// ... `.` | ...
// ... `:` | ...
(true, true, _, Some(&Tok::Punct(Punct::Dot))) |
(true, true, _, Some(&Tok::Punct(Punct::Colon))) => {
Some(Class::Field(idx))
},
// ... `.` NAM|E ...
// ... `:` NAM|E ...
// ... `.` NAME | ... (with no space between NAME and the caret)
// ... `:` NAME | ...
(_, true, Some(&Tok::Punct(Punct::Dot)), Some(&Tok::Name(_))) |
(_, true, Some(&Tok::Punct(Punct::Dot)), Some(&Tok::Keyword(_))) |
(_, true, Some(&Tok::Punct(Punct::Colon)), Some(&Tok::Name(_))) |
(_, true, Some(&Tok::Punct(Punct::Colon)), Some(&Tok::Keyword(_))) => {
Some(Class::Field(idx - 1)) // should point to `ptok`
},
// ... NAME | ... (ditto)
(_, true, _, Some(&Tok::Name(_))) |
(_, true, _, Some(&Tok::Keyword(_))) => {
Some(Class::Name(idx, tok.unwrap().category))
},
_ => None,
}
}
pub fn complete_name(tokens: &[NestedToken], name_idx: usize, nesting_category: NestingCategory,
pos: Pos, last_chunk: &Chunk, all_chunks: &[Arc<Chunk>],
source: &Source) -> Vec<CompletionItem> {
let mut items = Vec::new();
// check if the caret is at the name definition and autocompletion should be disabled
if nesting_category == NestingCategory::Expr && is_name_completion_disabled(tokens, name_idx) {
return items;
}
// if the current word being typed matches exactly a keyword, we temporarily pause
// the completion to avoid capturing the carriage return from the completion. (XXX suboptimal)
let name_token = &tokens[name_idx].tok;
if name_token.span.end() == pos {
if let Tok::Keyword(_) = name_token.base {
return items;
}
}
let mut seen = HashSet::new();
if let Some(scope) = last_chunk.map.scope_from_pos(pos) {
for (name, _scope, id) in last_chunk.map.names_and_scopes(scope) {
if seen.insert(name) { // ignore shadowed names (always appear later)
let name = String::from_utf8_lossy(name).into_owned();
let detail = last_chunk.local_names.get(&id).and_then(|def| {
detail_from_span(def.def_span, source)
});
items.push(make_item(name, CompletionItemKind::Variable, detail));
}
}
}
for chunk in all_chunks {
for (name, &span) in chunk.global_scope.iter() {
if seen.insert(name) {
let name = String::from_utf8_lossy(name).into_owned();
let detail = detail_from_span(span, source);
items.push(make_item(name, CompletionItemKind::Variable, detail));
}
}
}
for keyword in keywords_per_category(nesting_category) {
items.push(make_item(keyword[..].to_owned(), CompletionItemKind::Keyword, None));
}
items
}
pub fn complete_field(tokens: &[NestedToken], sep_idx: usize,
outputs: &[Arc<Output>]) -> Option<Vec<CompletionItem>> {
let end = if let Some((_idx, tok)) = last_non_comment(&tokens[..sep_idx]) {
tok.tok.span.end()
} else {
// there is no chance that this will yield completions
return Some(Vec::new());
};
// for multiple outputs, we combine all possible fields and deduplicate them
let mut items = Vec::new();
let mut seen = HashSet::new(); // we never return the same name twice
for output in outputs {
let slot = get_prefix_expr_slot(end, output);
debug!("complete_field: get_prefix_expr_slot({:#?}) returns {:?}", end, slot);
if let Some(slot) = slot {
// now we've got the closest slot for given position;
// check if it's actually a table or similar (if it's not, we will fail fast)
if let Some(fields) = output.get_available_fields(&slot.unlift()) {
for (k, _v) in fields {
if let Key::Str(ref s) = k {
let name = String::from_utf8_lossy(&s).into_owned();
if seen.insert(name.clone()) {
items.push(make_item(name, CompletionItemKind::Field, None));
}
}
}
}
} else {
// this is the field position but there is no associated slot.
// we should retry no matter other outputs return,
// as we have only one chance to return the completions.
return None;
}
}
Some(items)
} | const META_KEYWORDS: &'static [&'static str] = &[ |
auth.py | # Copyright ยฉ 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bring in the common JWT Manager."""
from functools import wraps
from http import HTTPStatus
from flask import g, request
from flask_jwt_oidc import JwtManager
from jose import jwt as josejwt
jwt = (
JwtManager()
) # pylint: disable=invalid-name; lower case name as used by convention in most Flask apps
class Auth:
"""Extending JwtManager to include additional functionalities."""
@classmethod
def require(cls, f):
"""Validate the Bearer Token."""
@jwt.requires_auth
@wraps(f)
def decorated(*args, **kwargs):
g.authorization_header = request.headers.get("Authorization", None)
g.token_info = g.jwt_oidc_token_info
return f(*args, **kwargs)
return decorated
@classmethod
def ismemberofgroups(cls, groups):
"""Check that at least one of the realm groups are in the token.
Args:
            groups [str,]: Comma separated list of valid groups
"""
def decorated(f):
            # Token verification is commented out here with the expectation that this decorator is used in conjunction with require.
#@Auth.require
@wraps(f)
def wrapper(*args, **kwargs):
_groups = groups.split(',')
token = jwt.get_token_auth_header()
unverified_claims = josejwt.get_unverified_claims(token)
usergroups = unverified_claims['groups']
usergroups = [usergroup.replace('/','',1) if usergroup.startswith('/') else usergroup for usergroup in usergroups]
                exists = any(group in usergroups for group in _groups)
                if exists:
                    return f(*args, **kwargs)
                return "Unauthorized", 401
return wrapper
return decorated
auth = (
Auth()
)
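# Illustrative usage sketch (the route and group names below are hypothetical,
# not part of this module): the two decorators are intended to be stacked so
# that `require` validates the bearer token before `ismemberofgroups` checks
# the caller's realm groups.
#
#   @app.route("/api/documents")
#   @auth.require
#   @auth.ismemberofgroups("foi-ministry-team,foi-iao-team")
#   def get_documents():
#       return {"user": AuthHelper.getusername()}, HTTPStatus.OK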
class AuthHelper:
@classmethod
def getuserid(cls):
t |
@classmethod
def getusername(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
return unverified_claims['name']
@classmethod
def isministrymember(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
usergroups = unverified_claims['groups']
usergroups = [usergroup.replace('/','',1) if usergroup.startswith('/') else usergroup for usergroup in usergroups]
for group in usergroups:
if group.endswith("Ministry Team"):
return True
return False
@classmethod
def getusergroups(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
usergroups = unverified_claims['groups']
usergroups = [usergroup.replace('/','',1) if usergroup.startswith('/') else usergroup for usergroup in usergroups]
return usergroups | oken = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
return unverified_claims['preferred_username']
|
product-page.ts | import { testProductUrl, verifyTabbingOrder } from '../tabbing-order';
import { TabElement } from '../tabbing-order.model';
const containerSelector = '.ProductDetailsPageTemplate .Summary';
export function productPageTabbingOrder(config: TabElement[]) {
cy.server();
cy.visit(testProductUrl);
cy.route(
`${Cypress.env('API_URL')}/rest/v2/electronics-spa/products/779841/reviews*`
).as('reviews');
cy.get('cx-breadcrumb').should('contain', 'Home');
cy.get('cx-breadcrumb').should('contain', 'Film cameras');
cy.get('cx-breadcrumb').should('contain', 'Kodak'); | cy.get('cx-item-counter button').contains('+').click();
cy.wait('@reviews');
verifyTabbingOrder(containerSelector, config);
} |
// add product and force "minus" button to be active |
rust_project_json_test.rs | #[cfg(test)]
mod tests {
use runfiles::Runfiles; | let r = Runfiles::create().unwrap();
let rust_project_path = r.rlocation(
"rules_rust/test/rust_analyzer/merging_crates_test_reversed/rust-project.json",
);
let content = std::fs::read_to_string(&rust_project_path)
.unwrap_or_else(|_| panic!("couldn't open {:?}", &rust_project_path));
assert!(
content.contains(r#""root_module":"test/rust_analyzer/merging_crates_test_reversed/mylib.rs","deps":[{"crate":0,"name":"lib_dep"},{"crate":1,"name":"extra_test_dep"}]"#),
"expected rust-project.json to contain both lib_dep and extra_test_dep in deps of mylib.rs.");
}
} |
#[test]
fn test_deps_of_crate_and_its_test_are_merged() { |
mip-taoge-scaydk-adviser.js | /**
 * @file mip-taoge-scaydk-adviser component
* @author Taoge <[email protected]>
*/
define(function (require) {
var $ = require('zepto');
var customElem = require('customElement').create();
var options;
var o;
var t;
customElem.prototype.build = function () {
var element = this.element;
options = {
'id': element.getAttribute('element') || 'carousel',
'delay': element.getAttribute('delay') || 3000,
'switch': element.getAttribute('switch') || 'on'
};
o = '#' + options.id;
        // how many advisers in total
var l = $(o + ' ul > li').length;
var x = '';
for (var i = 1; i <= l; i++) {
if (i === 1) {
x += '<li class="dot active">1</li>';
}
else {
x += '<li class="dot">1</li>';
}
}
        // initialize the dots
$(o + ' div.carousel > ol').empty().html(x);
        // screen width
var w = $(o).width();
        // ul width
var uw = w * l;
$(o + ' ul').css({'width': uw});
        // li width
var lw = w * 0.76;
$(o + ' ul > li').css({'width': lw, 'margin': '0 ' + (w * 0.03) + 'px'});
        // when the window is resized
$(window).resize(function () {
            // ul width
$(o + ' ul').css({'width': $(o).width() * $(o + ' ul > li').length});
            // li width
$(o + ' ul > li').css({'width': $(o).width() * 0.76, 'margin': '0 ' + ($(o).width() * 0.03) + 'px'});
});
        // dot click event
$(o + ' div.carousel > ol > li').click(function () {
            // stop automatic switching
if (options.switch === 'on') {
window.clearInterval(t);
}
var i = $(this).index();
checkMipImg(i);
var l = $(o + ' div.carousel > ol > li').length;
            // highlight this dot
$(this).addClass('active').siblings().removeClass('active');
            // number of advisers
var l2 = $(o + ' ul > li').length;
var l3 = (i + 1);
            // left offset of the ul after switching
var nul;
for (i; i < l2 && i < l3; i++) {
if (i > 0) {
nul = -(w * i - ((w * 0.09) * ((i * 2) + 1)));
}
else {
nul = '9%';
}
$(o + ' ul').css({'left': nul});
}
            // automatic switching
if (options.switch === 'on') {
t = window.setInterval(function () {
reincarnationloan(1);
}, Number(options.delay));
}
});
        // previous / next arrow click event
$(o + ' div.carousel_arrow > div').click(function () {
            // stop automatic switching
if (options.switch === 'on') {
window.clearInterval(t);
}
var c = $(this).attr('class');
if (c === 'arrow_r') {
reincarnationloan(1);
}
else if (c === 'arrow_l') {
reincarnationloan(-1);
}
            // automatic switching
if (options.switch === 'on') {
t = window.setInterval(function () {
reincarnationloan(1);
}, Number(options.delay));
}
});
        // automatic switching
if (options.switch === 'on') {
t = window.setInterval(function () {
reincarnationloan(1);
}, Number(options.delay));
}
};
function reincarnationloan(c) {
        // index of the currently active dot
var i = $(o + ' div.carousel ol li.active').index();
        // next index | // wrap around when reaching the first or the last dot
if (i === l) {
i = 0;
}
else if (i === -1) {
i = l - 1;
}
checkMipImg(i);
        // highlight the next dot
$(o + ' div.carousel > ol > li').eq(i).addClass('active').siblings().removeClass('active');
        // number of advisers
var l2 = $(o + ' ul >li').length;
var l3 = i + 1;
        // screen width
var w = $(o).width();
        // ul left margin
var ul = w * 0.09;
        // left offset of the ul after switching
var nul;
for (var i2 = l3 - 1; i2 < l2 && i2 < l3; i2++) {
if (i2 > 0) {
nul = -(w * i2 - (ul * ((i2 * 2) + 1)));
}
else {
nul = '9%';
}
$(o + ' ul').css({'left': nul});
}
}
function checkMipImg(i) {
var l = $(o + ' ul > li:nth-child(' + (i + 1) + ') > div:nth-child(1) > mip-img > img').length;
if (l === 0) {
var img = $(o + ' ul > li:nth-child(' + (i + 1) + ') > div:nth-child(1) > mip-img');
var html = '<img class="mip-fill-content mip-replaced-content" src="' + img.attr('src') + '">';
img.append(html);
}
}
    // element created callback
customElem.prototype.createdCallback = function () {
// console.log('created');
};
    // callback when the node is inserted into the document
customElem.prototype.attachedCallback = function () {
// console.log('attached');
};
    // callback when the node is removed from the document
customElem.prototype.detachedCallback = function () {
// console.log('detached');
};
    // callback the first time the element enters the viewport; runs only once (deferred loading, good for page speed)
customElem.prototype.firstInviewCallback = function () {
// console.log('first in viewport');
};
    // callback when entering or leaving the viewport; runs on every state change
customElem.prototype.viewportCallback = function (isInView) {
        // true: entered the viewport; false: left the viewport
// console.log(isInView);
};
    // controls whether viewportCallback and firstInviewCallback run ahead of time
    // carousel images and the like can use this method to render in advance
customElem.prototype.prerenderAllowed = function () {
        // custom condition; when it returns true, viewportCallback and firstInviewCallback run right after the element's build
return !!this.isCarouselImg;
};
return customElem;
}); | i = i + c;
    // how many dots in total
var l = $(o + ' div.carousel ol li').length; |
gsm_editor.go | package gsm
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/jenkins-x/jx-helpers/v3/pkg/files"
"github.com/jenkins-x/jx-logging/v3/pkg/log"
"k8s.io/apimachinery/pkg/util/json"
"github.com/jenkins-x/jx-helpers/v3/pkg/cmdrunner"
"github.com/jenkins-x/jx-secret/pkg/extsecrets"
"github.com/jenkins-x/jx-secret/pkg/extsecrets/editor"
"github.com/pkg/errors"
"k8s.io/client-go/kubernetes"
)
const (
gcloud = "gcloud"
)
type client struct {
commandRunner cmdrunner.CommandRunner
quietCommandRunner cmdrunner.CommandRunner
kubeClient kubernetes.Interface
env map[string]string
tmpDir string
}
func | (commandRunner cmdrunner.CommandRunner, quietCommandRunner cmdrunner.CommandRunner, kubeClient kubernetes.Interface) (editor.Interface, error) {
if commandRunner == nil {
commandRunner = cmdrunner.DefaultCommandRunner
}
if quietCommandRunner == nil {
quietCommandRunner = commandRunner
}
tmpDir := os.Getenv("JX_SECRET_TMP_DIR")
if tmpDir != "" {
err := os.MkdirAll(tmpDir, files.DefaultDirWritePermissions)
if err != nil {
return nil, errors.Wrapf(err, "failed to create jx secret temp dir %s", tmpDir)
}
}
log.Logger().Debugf("using secret temp dir %s", tmpDir)
c := &client{
commandRunner: commandRunner,
quietCommandRunner: quietCommandRunner,
kubeClient: kubeClient,
tmpDir: tmpDir,
}
err := c.initialise()
if err != nil {
return c, errors.Wrapf(err, "failed to setup gsm secret editor")
}
return c, nil
}
func (c *client) Write(properties *editor.KeyProperties) error {
key := extsecrets.SimplifyKey("gcpSecretsManager", properties.Key)
if len(properties.Properties) == 0 {
return fmt.Errorf("creating an inline secret in Google Secret Manager with no property is not yet supported, secret %s", key)
}
// check secret is created
err := c.ensureSecretExists(key, properties.GCPProject)
if err != nil {
return errors.Wrapf(err, "failed to ensure secret key %s exists in project %s", key, properties.GCPProject)
}
editor.SortPropertyValues(properties.Properties)
// create a temporary file used to upload secret values to Google Secrets Manager
file, err := ioutil.TempFile(c.tmpDir, "jx")
if err != nil {
return errors.Wrap(err, "failed to create temporary directory used to write secrets to then upload to google secrets manager")
}
defer os.Remove(file.Name())
	// write the properties as key/value pairs in a JSON file so we can upload them to Google Secrets Manager
err = c.writeTemporarySecretPropertiesJSON(properties, file)
if err != nil {
return errors.Wrapf(err, "failed to write secret key values pairs to filename %s", file.Name())
}
// create a new secret version
args := []string{"secrets", "versions", "add", key, "--project", properties.GCPProject, "--data-file", file.Name()}
cmd := &cmdrunner.Command{
Name: gcloud,
Args: args,
Env: c.env,
}
_, err = c.commandRunner(cmd)
if err != nil {
return errors.Wrapf(err, "failed to create a version of secret %s", key)
}
return nil
}
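// Illustrative sketch (hypothetical key, project, and file names; not part of the
// original source): for a KeyProperties whose simplified key is "pipeline-user"
// with properties {"token": "abc"}, Write stores {"token":"abc"} in a temporary
// JSON file and then runs roughly:
//
//	gcloud secrets versions add pipeline-user --project my-project --data-file /tmp/jx123456
//
// creating the secret first via ensureSecretExists when describing it reports NOT_FOUND.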
func (c *client) writeTemporarySecretPropertiesJSON(properties *editor.KeyProperties, file *os.File) error {
// if we only have one property and its got an empty property name lets just write the value
if len(properties.Properties) == 1 && properties.Properties[0].Property == "" {
_, err := file.Write([]byte(properties.Properties[0].Value))
if err != nil {
return errors.Wrap(err, "failed to write property value to temporary file")
}
return nil
}
	// write the properties as key/value pairs in a JSON file so we can upload them to Google Secrets Manager
values := map[string]string{}
for _, p := range properties.Properties {
values[p.Property] = p.Value
}
data, err := json.Marshal(values)
if err != nil {
return errors.Wrap(err, "failed to marshall secrets used to upload to google secrets manager")
}
_, err = file.Write(data)
if err != nil {
return errors.Wrap(err, "failed to write secrets to then upload to google secrets manager")
}
return nil
}
func (c *client) ensureSecretExists(key, projectID string) error {
args := []string{"secrets", "describe", key, "--project", projectID}
cmd := &cmdrunner.Command{
Name: gcloud,
Args: args,
Env: c.env,
}
_, err := c.quietCommandRunner(cmd)
if err != nil {
if strings.Contains(err.Error(), "NOT_FOUND") {
args := []string{"secrets", "create", key, "--project", projectID, "--replication-policy", "automatic"}
cmd := &cmdrunner.Command{
Name: gcloud,
Args: args,
Env: c.env,
}
_, err = c.commandRunner(cmd)
if err != nil {
return errors.Wrapf(err, "failed to create secret %s in project %s", key, projectID)
}
} else {
return errors.Wrapf(err, "failed to describe secret %s in project %s", key, projectID)
}
}
return nil
}
func (c *client) initialise() error {
log.Logger().Debugf("verifying we have gcloud installed")
// lets verify we can find the binary
cmd := &cmdrunner.Command{
Name: gcloud,
Args: []string{"secrets", "--help"},
Env: c.env,
}
_, err := c.quietCommandRunner(cmd)
if err != nil {
return errors.Wrapf(err, "failed to invoke the binary '%s'. Please make sure you installed '%s' and put it on your $PATH", gcloud, gcloud)
}
log.Logger().Debugf("verifying we can connect to gsm...")
// lets verify we can list the secrets
cmd = &cmdrunner.Command{
Name: gcloud,
Args: []string{"secrets", "list", "--help"},
Env: c.env,
}
_, err = c.quietCommandRunner(cmd)
if err != nil {
return errors.Wrapf(err, "failed to access gsm. command failed: %s", cmdrunner.CLI(cmd))
}
log.Logger().Debugf("gsm is setup correctly!\n\n")
return nil
}
| NewEditor |
me.test.ts | import { graphQLRequest, graphQLRequestAsUser, resetDB, disconnect } from '@/tests/helpers';
import { UserFactory } from '@/tests/factories/user';
beforeEach(async () => resetDB());
afterAll(async () => disconnect());
describe('me query', () => {
describe('not logged in', () => {
it('returns null ', async () => {
const query = `
query ME {
me {
id
}
}
`;
const response = await graphQLRequest({ query });
expect(response.body).toMatchInlineSnapshot(`
Object {
"data": Object {
"me": null,
},
}
`);
});
});
describe('logged in', () => {
it('returns user data', async () => {
const query = `
query ME {
me {
email
roles
}
}
`;
const user = await UserFactory.create({
email: '[email protected]',
});
const response = await graphQLRequestAsUser(user, { query });
expect(response.body).toMatchInlineSnapshot(`
Object {
"data": Object {
"me": Object {
"email": "[email protected]", | "roles": Array [
"USER",
],
},
},
}
`);
});
});
}); | |
mod.rs | use crate::{variable_length_crh::VariableLengthCRH, Error};
use ark_ec::{
group::Group, models::TEModelParameters, twisted_edwards_extended::GroupAffine as TEAffine,
};
use ark_ff::{PrimeField, ToConstraintField, Zero};
use ark_std::rand::{CryptoRng, Rng, SeedableRng};
use ark_std::{
fmt::{Debug, Formatter, Result as FmtResult},
marker::PhantomData,
};
use ark_std::{vec, vec::Vec};
pub mod constraints;
pub struct VariableLengthPedersenParameters {
pub seed: Vec<u8>,
}
impl VariableLengthPedersenParameters {
pub fn get_generators<RO: Rng + CryptoRng + SeedableRng, G: Group>(
&self,
pos: usize,
) -> Vec<G> {
let mut seed = RO::Seed::default();
let seed_as_mut = seed.as_mut();
seed_as_mut[..self.seed.len()].clone_from_slice(&self.seed[..]);
let mut rng = RO::from_seed(seed);
let mut res = Vec::<G>::new();
for _ in 0..pos {
res.push(G::rand(&mut rng));
}
res
}
}
pub struct VariableLengthPedersenCRH<RO: Rng + CryptoRng + SeedableRng, P: TEModelParameters> {
#[doc(hidden)]
pub ro_phantom: PhantomData<RO>,
#[doc(hidden)]
pub te_parameters_phantom: PhantomData<P>,
}
impl<RO: Rng + CryptoRng + SeedableRng, P: TEModelParameters> VariableLengthCRH<P::BaseField>
for VariableLengthPedersenCRH<RO, P>
where
P::BaseField: PrimeField,
{
type Output = TEAffine<P>;
type Parameters = VariableLengthPedersenParameters;
fn setup<R: Rng>(rng: &mut R) -> Result<Self::Parameters, Error> {
let mut seed = RO::Seed::default();
let seed_as_mut = seed.as_mut();
rng.fill_bytes(seed_as_mut);
Ok(Self::Parameters {
seed: seed_as_mut.to_vec(),
})
}
fn evaluate(parameters: &Self::Parameters, input: &[u8]) -> Result<Self::Output, Error> {
let mut padded_input = Vec::with_capacity(input.len() + 4);
let len = (input.len() as u32).to_le_bytes();
padded_input.extend_from_slice(&len);
padded_input.extend_from_slice(input);
assert!(input.len() < (1u64 << 32) as usize);
// Compute sum of h_i^{m_i} for all i.
let bits = bytes_to_bits(&padded_input);
let generators = parameters.get_generators::<RO, TEAffine<P>>(bits.len());
let result = bits
.iter()
.zip(generators.iter())
.map(|(bit, generator)| {
if *bit {
*generator
} else {
TEAffine::<P>::zero()
}
})
.sum::<TEAffine<P>>();
Ok(result)
}
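    // Illustrative note (not part of the original source): for a two-byte input
    // such as b"ab", `padded_input` is [0x02, 0x00, 0x00, 0x00, b'a', b'b'] (the
    // little-endian length prefix followed by the input), giving 48 bits, so 48
    // generators are derived from the seed and the digest is the sum of the
    // generators whose corresponding bit is set.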
fn convert_output_to_field_elements(output: Self::Output) -> Result<Vec<P::BaseField>, Error> {
Ok(vec![output.x, output.y])
}
}
pub fn bytes_to_bits(bytes: &[u8]) -> Vec<bool> |
impl Debug for VariableLengthPedersenParameters {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
writeln!(f, "Pedersen Hash Parameters {{")?;
writeln!(f, "\t Generator {:?}", self.seed)?;
writeln!(f, "}}")
}
}
impl<ConstraintF: PrimeField> ToConstraintField<ConstraintF> for VariableLengthPedersenParameters {
#[inline]
fn to_field_elements(&self) -> Option<Vec<ConstraintF>> {
self.seed.to_field_elements()
}
}
impl Clone for VariableLengthPedersenParameters {
fn clone(&self) -> Self {
Self {
seed: self.seed.clone(),
}
}
}
impl Default for VariableLengthPedersenParameters {
fn default() -> Self {
Self { seed: Vec::new() }
}
}
| {
let mut bits = Vec::with_capacity(bytes.len() * 8);
for byte in bytes {
for i in 0..8 {
let bit = (*byte >> i) & 1;
bits.push(bit == 1)
}
}
bits
} |
example_twitter_dag.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# --------------------------------------------------------------------------------
# Written By: Ekhtiar Syed
# Last Update: 8th April 2016
# Caveat: This Dag will not run because of missing scripts.
# The purpose of this is to give you a sample of a real world example DAG!
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Load The Dependencies
# --------------------------------------------------------------------------------
"""
This is an example dag for managing twitter data.
"""
from datetime import date, timedelta
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.hive_operator import HiveOperator
from airflow.operators.python_operator import PythonOperator
# --------------------------------------------------------------------------------
# Create a few placeholder scripts. In practice these would be different python
# script files, which are imported in this section with absolute or relative imports
# --------------------------------------------------------------------------------
def fetchtweets():
"""
This is a placeholder for fetchtweets.
"""
def | ():
"""
This is a placeholder for cleantweets.
"""
def analyzetweets():
"""
This is a placeholder for analyzetweets.
"""
def transfertodb():
"""
This is a placeholder for transfertodb.
"""
# --------------------------------------------------------------------------------
# set default arguments
# --------------------------------------------------------------------------------
default_args = {
'owner': 'Ekhtiar',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(5),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
with DAG(
dag_id='example_twitter_dag',
default_args=default_args,
schedule_interval="@daily"
) as dag:
# --------------------------------------------------------------------------------
    # This task should call the Twitter API and retrieve yesterday's tweets sent from and to
    # the four twitter users (Twitter_A,..,Twitter_D). There should be eight csv
    # output files generated by this task, and the naming convention
    # is direction(from or to)_twitterHandle_date.csv
# --------------------------------------------------------------------------------
fetch_tweets = PythonOperator(
task_id='fetch_tweets',
python_callable=fetchtweets
)
# --------------------------------------------------------------------------------
# Clean the eight files. In this step you can get rid of or cherry pick columns
# and different parts of the text
# --------------------------------------------------------------------------------
clean_tweets = PythonOperator(
task_id='clean_tweets',
python_callable=cleantweets
)
clean_tweets << fetch_tweets
# --------------------------------------------------------------------------------
# In this section you can use a script to analyze the twitter data. Could simply
# be a sentiment analysis through algorithms like bag of words or something more
# complicated. You can also take a look at Web Services to do such tasks
# --------------------------------------------------------------------------------
analyze_tweets = PythonOperator(
task_id='analyze_tweets',
python_callable=analyzetweets
)
analyze_tweets << clean_tweets
# --------------------------------------------------------------------------------
# Although this is the last task, we need to declare it before the next tasks as we
# will use set_downstream This task will extract summary from Hive data and store
# it to MySQL
# --------------------------------------------------------------------------------
hive_to_mysql = PythonOperator(
task_id='hive_to_mysql',
python_callable=transfertodb
)
# --------------------------------------------------------------------------------
# The following tasks are generated using for loop. The first task puts the eight
# csv files to HDFS. The second task loads these files from HDFS to respected Hive
# tables. These two for loops could be combined into one loop. However, in most cases,
    # you will be running different analyses on your incoming and outgoing tweets,
# and hence they are kept separated in this example.
# --------------------------------------------------------------------------------
from_channels = ['fromTwitter_A', 'fromTwitter_B', 'fromTwitter_C', 'fromTwitter_D']
to_channels = ['toTwitter_A', 'toTwitter_B', 'toTwitter_C', 'toTwitter_D']
yesterday = date.today() - timedelta(days=1)
dt = yesterday.strftime("%Y-%m-%d")
# define where you want to store the tweets csv file in your local directory
local_dir = "/tmp/"
# define the location where you want to store in HDFS
hdfs_dir = " /tmp/"
for channel in to_channels:
file_name = "to_" + channel + "_" + yesterday.strftime("%Y-%m-%d") + ".csv"
load_to_hdfs = BashOperator(
task_id="put_" + channel + "_to_hdfs",
bash_command="HADOOP_USER_NAME=hdfs hadoop fs -put -f " +
local_dir + file_name +
hdfs_dir + channel + "/"
)
load_to_hdfs << analyze_tweets
load_to_hive = HiveOperator(
task_id="load_" + channel + "_to_hive",
hql="LOAD DATA INPATH '" +
hdfs_dir + channel + "/" + file_name + "' "
"INTO TABLE " + channel + " "
"PARTITION(dt='" + dt + "')"
)
load_to_hive << load_to_hdfs
load_to_hive >> hive_to_mysql
for channel in from_channels:
file_name = "from_" + channel + "_" + yesterday.strftime("%Y-%m-%d") + ".csv"
load_to_hdfs = BashOperator(
task_id="put_" + channel + "_to_hdfs",
bash_command="HADOOP_USER_NAME=hdfs hadoop fs -put -f " +
local_dir + file_name +
hdfs_dir + channel + "/"
)
load_to_hdfs << analyze_tweets
load_to_hive = HiveOperator(
task_id="load_" + channel + "_to_hive",
hql="LOAD DATA INPATH '" +
hdfs_dir + channel + "/" + file_name + "' "
"INTO TABLE " + channel + " "
"PARTITION(dt='" + dt + "')"
)
load_to_hive << load_to_hdfs
load_to_hive >> hive_to_mysql
| cleantweets |
store.rs | use crate::frame_info::StoreFrameInfo;
use crate::sig_registry::SignatureRegistry;
use crate::trampoline::StoreInstanceHandle;
use crate::{Engine, Module, Trap};
use anyhow::{bail, Result};
use std::any::Any;
use std::cell::{Cell, RefCell};
use std::collections::HashSet;
use std::convert::TryFrom;
use std::fmt;
use std::future::Future;
use std::hash::{Hash, Hasher};
use std::pin::Pin;
use std::ptr;
use std::rc::{Rc, Weak};
use std::sync::Arc;
use std::task::{Context, Poll};
use wasmtime_environ::wasm;
use wasmtime_jit::{CompiledModule, ModuleCode, TypeTables};
use wasmtime_runtime::{
InstanceHandle, RuntimeMemoryCreator, SignalHandler, StackMapRegistry, TrapInfo, VMContext,
VMExternRef, VMExternRefActivationsTable, VMInterrupts, VMSharedSignatureIndex,
};
/// A `Store` is a collection of WebAssembly instances and host-defined items.
///
/// All WebAssembly instances and items will be attached to and refer to a
/// `Store`. For example instances, functions, globals, and tables are all
/// attached to a `Store`. Instances are created by instantiating a
/// [`Module`](crate::Module) within a `Store`.
///
/// `Store` is not thread-safe and cannot be sent to other threads. All items
/// which refer to a `Store` additionally are not threadsafe and can only be
/// used on the original thread that they were created on.
///
/// A `Store` is not intended to be a long-lived object in a program. No form of
/// GC is implemented at this time so once an instance is created within a
/// `Store` it will not be deallocated until all references to the `Store` have
/// gone away (this includes all references to items in the store). This makes
/// `Store` unsuitable for creating an unbounded number of instances in it
/// because `Store` will never release this memory. It's instead recommended to
/// have a long-lived [`Engine`] and create a `Store` for a more scoped
/// portion of your application.
///
/// # Stores and `Clone`
///
/// Using `clone` on a `Store` is a cheap operation. It will not create an
/// entirely new store, but rather just a new reference to the existing object.
/// In other words it's a shallow copy, not a deep copy.
///
/// # Stores and `Default`
///
/// You can create a store with default configuration settings using
/// `Store::default()`. This will create a brand new [`Engine`] with default
/// configuration (see [`Config`](crate::Config) for more information).
#[derive(Clone)]
pub struct Store {
inner: Rc<StoreInner>,
}
pub(crate) struct StoreInner {
is_async: bool,
engine: Engine,
interrupts: Arc<VMInterrupts>,
signatures: RefCell<SignatureRegistry>,
instances: RefCell<Vec<InstanceHandle>>,
signal_handler: RefCell<Option<Box<SignalHandler<'static>>>>,
externref_activations_table: VMExternRefActivationsTable,
stack_map_registry: StackMapRegistry,
/// Information about JIT code which allows us to test if a program counter
/// is in JIT code, lookup trap information, etc.
frame_info: RefCell<StoreFrameInfo>,
/// Set of all compiled modules that we're holding a strong reference to
/// the module's code for. This includes JIT functions, trampolines, etc.
modules: RefCell<HashSet<ArcModuleCode>>,
// Numbers of resources instantiated in this store.
instance_count: Cell<usize>,
memory_count: Cell<usize>,
table_count: Cell<usize>,
/// An adjustment to add to the fuel consumed value in `interrupts` above
/// to get the true amount of fuel consumed.
fuel_adj: Cell<i64>,
#[cfg(feature = "async")]
current_suspend: Cell<*const wasmtime_fiber::Suspend<Result<(), Trap>, (), Result<(), Trap>>>,
#[cfg(feature = "async")]
current_poll_cx: Cell<*mut Context<'static>>,
out_of_gas_behavior: Cell<OutOfGas>,
}
#[derive(Copy, Clone)]
enum OutOfGas {
Trap,
InjectFuel {
injection_count: u32,
fuel_to_inject: u64,
},
}
struct HostInfoKey(VMExternRef);
impl PartialEq for HostInfoKey {
fn eq(&self, rhs: &Self) -> bool {
VMExternRef::eq(&self.0, &rhs.0)
}
}
impl Eq for HostInfoKey {}
impl Hash for HostInfoKey {
fn hash<H>(&self, hasher: &mut H)
where
H: Hasher,
{
VMExternRef::hash(&self.0, hasher);
}
}
impl Store {
/// Creates a new store to be associated with the given [`Engine`].
///
/// Note that this `Store` cannot be used with asynchronous host calls nor
/// can it be used to call functions asynchronously. For that you'll want to
/// use [`Store::new_async`].
pub fn new(engine: &Engine) -> Store {
Store::_new(engine, false)
}
/// Creates a new async store to be associated with the given [`Engine`].
///
/// The returned store can optionally define host functions with `async`.
/// Instances created and functions called within the returned `Store`
/// *must* be called through their asynchronous APIs, however. For example
/// using [`Func::call`](crate::Func::call) will panic in the returned
/// store.
///
/// # Asynchronous Wasm
///
/// WebAssembly does not currently have a way to specify at the bytecode
/// level what is and isn't async. Host-defined functions, however, may be
/// defined as `async`. WebAssembly imports always appear synchronous, which
    /// gives rise to a bit of an impedance mismatch here. To solve this
/// Wasmtime supports "asynchronous stores" which enables calling these
/// asynchronous functions in a way that looks synchronous to the executing
/// WebAssembly code.
///
/// An asynchronous store must always invoke wasm code asynchronously,
/// meaning we'll always represent its computation as a
/// [`Future`](std::future::Future). The `poll` method of the futures
/// returned by Wasmtime will perform the actual work of calling the
/// WebAssembly. Wasmtime won't manage its own thread pools or similar,
/// that's left up to the embedder.
///
/// To implement futures in a way that WebAssembly sees asynchronous host
/// functions as synchronous, all async Wasmtime futures will execute on a
/// separately allocated native stack from the thread otherwise executing
/// Wasmtime. This separate native stack can then be switched to and from.
    /// Using this, whenever an `async` host function returns a future that
    /// resolves to `Pending`, we switch away from the temporary stack back to
    /// the main stack and propagate the `Pending` status.
///
/// In general it's encouraged that the integration with `async` and
/// wasmtime is designed early on in your embedding of Wasmtime to ensure
/// that it's planned that WebAssembly executes in the right context of your
/// application.
///
/// # Execution in `poll`
///
/// The [`Future::poll`](std::future::Future::poll) method is the main
/// driving force behind Rust's futures. That method's own documentation
/// states "an implementation of `poll` should strive to return quickly, and
/// should not block". This, however, can be at odds with executing
/// WebAssembly code as part of the `poll` method itself. If your
/// WebAssembly is untrusted then this could allow the `poll` method to take
/// arbitrarily long in the worst case, likely blocking all other
/// asynchronous tasks.
///
    /// To remedy this situation you have two possible ways to solve this:
///
/// * First you can spawn futures into a thread pool. Care must be taken for
/// this because Wasmtime futures are not `Send` or `Sync`. If you ensure
/// that the entire state of a `Store` is wrapped up in a single future,
/// though, you can send the whole future at once to a separate thread. By
/// doing this in a thread pool you are relaxing the requirement that
/// `Future::poll` must be fast because your future is executing on a
/// separate thread. This strategy, however, would likely still require
/// some form of cancellation via [`Store::interrupt_handle`] to ensure
/// wasm doesn't take *too* long to execute.
///
/// * Alternatively you can enable the
/// [`Config::consume_fuel`](crate::Config::consume_fuel) method as well
/// as [`Store::out_of_fuel_async_yield`] When doing so this will
/// configure Wasmtime futures to yield periodically while they're
/// executing WebAssembly code. After consuming the specified amount of
/// fuel wasm futures will return `Poll::Pending` from their `poll`
/// method, and will get automatically re-polled later. This enables the
/// `Future::poll` method to take roughly a fixed amount of time since
/// fuel is guaranteed to get consumed while wasm is executing. Note that
/// to prevent infinite execution of wasm you'll still need to use
/// [`Store::interrupt_handle`].
///
/// In either case special care needs to be taken when integrating
/// asynchronous wasm into your application. You should carefully plan where
/// WebAssembly will execute and what compute resources will be allotted to
/// it. If Wasmtime doesn't support exactly what you'd like just yet, please
/// feel free to open an issue!
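    ///
    /// # Example (illustrative sketch)
    ///
    /// A minimal sketch, assuming an async executor (e.g. `tokio` or
    /// `async-std`) drives the resulting futures; the executor itself is not
    /// part of Wasmtime:
    ///
    /// ```ignore
    /// let engine = Engine::new(Config::new().consume_fuel(true));
    /// let store = Store::new_async(&engine);
    /// // Yield back to the executor every 10_000 units of fuel.
    /// store.out_of_fuel_async_yield(u32::max_value(), 10_000);
    /// // Instances created in this store must be instantiated and called
    /// // through the asynchronous APIs and awaited by the embedder.
    /// ```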
#[cfg(feature = "async")]
#[cfg_attr(nightlydoc, doc(cfg(feature = "async")))]
pub fn new_async(engine: &Engine) -> Store {
Store::_new(engine, true)
}
fn _new(engine: &Engine, is_async: bool) -> Store {
// Ensure that wasmtime_runtime's signal handlers are configured. Note
// that at the `Store` level it means we should perform this
// once-per-thread. Platforms like Unix, however, only require this
// once-per-program. In any case this is safe to call many times and
// each one that's not relevant just won't do anything.
wasmtime_runtime::init_traps();
Store {
inner: Rc::new(StoreInner {
is_async,
engine: engine.clone(),
interrupts: Arc::new(Default::default()),
signatures: RefCell::new(Default::default()),
instances: RefCell::new(Vec::new()),
signal_handler: RefCell::new(None),
externref_activations_table: VMExternRefActivationsTable::new(),
stack_map_registry: StackMapRegistry::default(),
frame_info: Default::default(),
modules: Default::default(),
instance_count: Default::default(),
memory_count: Default::default(),
table_count: Default::default(),
fuel_adj: Cell::new(0),
#[cfg(feature = "async")]
current_suspend: Cell::new(ptr::null()),
#[cfg(feature = "async")]
current_poll_cx: Cell::new(ptr::null_mut()),
out_of_gas_behavior: Cell::new(OutOfGas::Trap),
}),
}
}
pub(crate) fn from_inner(inner: Rc<StoreInner>) -> Store {
Store { inner }
}
/// Returns the [`Engine`] that this store is associated with.
pub fn engine(&self) -> &Engine {
&self.inner.engine
}
    /// Returns an optional reference to a [`RuntimeMemoryCreator`]
pub(crate) fn memory_creator(&self) -> Option<&dyn RuntimeMemoryCreator> {
self.engine()
.config()
.memory_creator
.as_ref()
.map(|x| x as _)
}
pub(crate) fn signatures(&self) -> &RefCell<SignatureRegistry> {
&self.inner.signatures
}
pub(crate) fn lookup_shared_signature<'a>(
&'a self,
types: &'a TypeTables,
) -> impl Fn(wasm::SignatureIndex) -> VMSharedSignatureIndex + 'a {
move |index| {
self.signatures()
.borrow()
.lookup(&types.wasm_signatures[index])
.expect("signature not previously registered")
}
}
pub(crate) fn register_module(&self, module: &Module) {
// All modules register their JIT code in a store for two reasons
// currently:
//
// * First we only catch signals/traps if the program counter falls
// within the jit code of an instantiated wasm module. This ensures
// we don't catch accidental Rust/host segfaults.
//
// * Second when generating a backtrace we'll use this mapping to
// only generate wasm frames for instruction pointers that fall
// within jit code.
self.register_jit_code(module.compiled_module());
// We need to know about all the stack maps of all instantiated modules
// so when performing a GC we know about all wasm frames that we find
// on the stack.
self.register_stack_maps(module.compiled_module());
// Signatures are loaded into our `SignatureRegistry` here
// once-per-module (and once-per-signature). This allows us to create
// a `Func` wrapper for any function in the module, which requires that
// we know about the signature and trampoline for all instances.
self.register_signatures(module);
// And finally with a module being instantiated into this `Store` we
// need to preserve its jit-code. References to this module's code and
// trampolines are not owning-references so it's our responsibility to
// keep it all alive within the `Store`.
self.inner
.modules
.borrow_mut()
.insert(ArcModuleCode(module.compiled_module().code().clone()));
}
fn register_jit_code(&self, module: &CompiledModule) {
let functions = module.finished_functions();
let first_pc = match functions.values().next() {
Some(f) => unsafe { (**f).as_ptr() as usize },
None => return,
};
// Only register this module if it hasn't already been registered.
let mut info = self.inner.frame_info.borrow_mut();
if !info.contains_pc(first_pc) {
info.register(module);
}
}
fn register_stack_maps(&self, module: &CompiledModule) {
self.stack_map_registry()
.register_stack_maps(module.stack_maps().map(|(func, stack_maps)| unsafe {
let ptr = (*func).as_ptr();
let len = (*func).len();
let start = ptr as usize;
let end = ptr as usize + len;
let range = start..end;
(range, stack_maps)
}));
}
fn register_signatures(&self, module: &Module) {
let trampolines = module.compiled_module().trampolines();
let mut signatures = self.signatures().borrow_mut();
for (index, wasm) in module.types().wasm_signatures.iter() {
signatures.register(wasm, trampolines[index]);
}
}
pub(crate) fn bump_resource_counts(&self, module: &Module) -> Result<()> {
let config = self.engine().config();
fn bump(slot: &Cell<usize>, max: usize, amt: usize, desc: &str) -> Result<()> {
let new = slot.get().saturating_add(amt);
if new > max {
bail!(
"resource limit exceeded: {} count too high at {}",
desc,
new
);
}
slot.set(new);
Ok(())
}
let module = module.env_module();
let memories = module.memory_plans.len() - module.num_imported_memories;
let tables = module.table_plans.len() - module.num_imported_tables;
bump(
&self.inner.instance_count,
config.max_instances,
1,
"instance",
)?;
bump(
&self.inner.memory_count,
config.max_memories,
memories,
"memory",
)?;
bump(&self.inner.table_count, config.max_tables, tables, "table")?;
Ok(())
}
pub(crate) unsafe fn add_instance(&self, handle: InstanceHandle) -> StoreInstanceHandle {
self.inner.instances.borrow_mut().push(handle.clone());
StoreInstanceHandle {
store: self.clone(),
handle,
}
}
pub(crate) fn existing_instance_handle(&self, handle: InstanceHandle) -> StoreInstanceHandle {
debug_assert!(self
.inner
.instances
.borrow()
.iter()
.any(|i| i.vmctx_ptr() == handle.vmctx_ptr()));
StoreInstanceHandle {
store: self.clone(),
handle,
}
}
pub(crate) unsafe fn existing_vmctx(&self, cx: *mut VMContext) -> StoreInstanceHandle {
self.existing_instance_handle(InstanceHandle::from_vmctx(cx))
}
pub(crate) fn weak(&self) -> Weak<StoreInner> {
Rc::downgrade(&self.inner)
}
pub(crate) fn upgrade(weak: &Weak<StoreInner>) -> Option<Self> {
let inner = weak.upgrade()?;
Some(Self { inner })
}
pub(crate) fn set_signal_handler(&self, handler: Option<Box<SignalHandler<'static>>>) {
*self.inner.signal_handler.borrow_mut() = handler;
}
pub(crate) fn interrupts(&self) -> &VMInterrupts {
&self.inner.interrupts
}
/// Returns whether the stores `a` and `b` refer to the same underlying
/// `Store`.
///
    /// Because the `Store` type is reference counted, multiple clones may point
/// to the same underlying storage, and this method can be used to determine
/// whether two stores are indeed the same.
pub fn same(a: &Store, b: &Store) -> bool {
Rc::ptr_eq(&a.inner, &b.inner)
}
/// Creates an [`InterruptHandle`] which can be used to interrupt the
/// execution of instances within this `Store`.
///
/// An [`InterruptHandle`] handle is a mechanism of ensuring that guest code
/// doesn't execute for too long. For example it's used to prevent wasm
    /// programs from executing infinitely in infinite loops or recursive call
/// chains.
///
/// The [`InterruptHandle`] type is sendable to other threads so you can
/// interact with it even while the thread with this `Store` is executing
/// wasm code.
///
/// There's one method on an interrupt handle:
/// [`InterruptHandle::interrupt`]. This method is used to generate an
/// interrupt and cause wasm code to exit "soon".
///
/// ## When are interrupts delivered?
///
/// The term "interrupt" here refers to one of two different behaviors that
/// are interrupted in wasm:
///
/// * The head of every loop in wasm has a check to see if it's interrupted.
/// * The prologue of every function has a check to see if it's interrupted.
///
/// This interrupt mechanism makes no attempt to signal interrupts to
/// native code. For example if a host function is blocked, then sending
/// an interrupt will not interrupt that operation.
///
/// Interrupts are consumed as soon as possible when wasm itself starts
/// executing. This means that if you interrupt wasm code then it basically
/// guarantees that the next time wasm is executing on the target thread it
/// will return quickly (either normally if it were already in the process
/// of returning or with a trap from the interrupt). Once an interrupt
/// trap is generated then an interrupt is consumed, and further execution
/// will not be interrupted (unless another interrupt is set).
///
/// When implementing interrupts you'll want to ensure that the delivery of
/// interrupts into wasm code is also handled in your host imports and
/// functionality. Host functions need to either execute for bounded amounts
/// of time or you'll need to arrange for them to be interrupted as well.
///
/// ## Return Value
///
/// This function returns a `Result` since interrupts are not always
/// enabled. Interrupts are enabled via the
/// [`Config::interruptable`](crate::Config::interruptable) method, and if
/// this store's [`Config`](crate::Config) hasn't been configured to enable
/// interrupts then an error is returned.
///
/// ## Examples
///
/// ```
/// # use anyhow::Result;
/// # use wasmtime::*;
/// # fn main() -> Result<()> {
/// // Enable interruptable code via `Config` and then create an interrupt
/// // handle which we'll use later to interrupt running code.
/// let engine = Engine::new(Config::new().interruptable(true));
/// let store = Store::new(&engine);
/// let interrupt_handle = store.interrupt_handle()?;
///
/// // Compile and instantiate a small example with an infinite loop.
/// let module = Module::new(&engine, r#"
/// (func (export "run") (loop br 0))
/// "#)?;
/// let instance = Instance::new(&store, &module, &[])?;
/// let run = instance
/// .get_func("run")
/// .ok_or(anyhow::format_err!("failed to find `run` function export"))?
/// .get0::<()>()?;
///
/// // Spin up a thread to send us an interrupt in a second
/// std::thread::spawn(move || {
/// std::thread::sleep(std::time::Duration::from_secs(1));
/// interrupt_handle.interrupt();
/// }); | /// assert!(trap.to_string().contains("wasm trap: interrupt"));
/// # Ok(())
/// # }
/// ```
pub fn interrupt_handle(&self) -> Result<InterruptHandle> {
if self.engine().config().tunables.interruptable {
Ok(InterruptHandle {
interrupts: self.inner.interrupts.clone(),
})
} else {
bail!("interrupts aren't enabled for this `Store`")
}
}
pub(crate) fn externref_activations_table(&self) -> &VMExternRefActivationsTable {
&self.inner.externref_activations_table
}
pub(crate) fn stack_map_registry(&self) -> &StackMapRegistry {
&self.inner.stack_map_registry
}
pub(crate) fn frame_info(&self) -> &RefCell<StoreFrameInfo> {
&self.inner.frame_info
}
/// Perform garbage collection of `ExternRef`s.
pub fn gc(&self) {
// For this crate's API, we ensure that `set_stack_canary` invariants
// are upheld for all host-->Wasm calls, and we register every module
// used with this store in `self.inner.stack_map_registry`.
unsafe {
wasmtime_runtime::gc(
&self.inner.stack_map_registry,
&self.inner.externref_activations_table,
);
}
}
/// Returns the amount of fuel consumed by this store's execution so far.
///
/// If fuel consumption is not enabled via
/// [`Config::consume_fuel`](crate::Config::consume_fuel) then this
/// function will return `None`. Also note that fuel, if enabled, must be
/// originally configured via [`Store::add_fuel`].
pub fn fuel_consumed(&self) -> Option<u64> {
if !self.engine().config().tunables.consume_fuel {
return None;
}
let consumed = unsafe { *self.inner.interrupts.fuel_consumed.get() };
Some(u64::try_from(self.inner.fuel_adj.get() + consumed).unwrap())
}
/// Adds fuel to this [`Store`] for wasm to consume while executing.
///
/// For this method to work fuel consumption must be enabled via
/// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
/// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
/// immediately trap). This function must be called for the store to have
/// some fuel to allow WebAssembly to execute.
///
/// Note that at this time when fuel is entirely consumed it will cause
/// wasm to trap. More usages of fuel are planned for the future.
///
/// # Panics
///
/// This function will panic if the store's [`Config`](crate::Config) did
/// not have fuel consumption enabled.
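    ///
    /// # Example (illustrative sketch)
    ///
    /// ```ignore
    /// let engine = Engine::new(Config::new().consume_fuel(true));
    /// let store = Store::new(&engine);
    /// store.add_fuel(10_000).unwrap();
    /// // Nothing has executed yet, so no fuel has been consumed.
    /// assert_eq!(store.fuel_consumed(), Some(0));
    /// ```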
pub fn add_fuel(&self, fuel: u64) -> Result<()> {
anyhow::ensure!(
self.engine().config().tunables.consume_fuel,
"fuel is not configured in this store"
);
// Fuel is stored as an i64, so we need to cast it. If the provided fuel
// value overflows that just assume that i64::max will suffice. Wasm
// execution isn't fast enough to burn through i64::max fuel in any
// reasonable amount of time anyway.
let fuel = i64::try_from(fuel).unwrap_or(i64::max_value());
let adj = self.inner.fuel_adj.get();
let consumed_ptr = unsafe { &mut *self.inner.interrupts.fuel_consumed.get() };
match (consumed_ptr.checked_sub(fuel), adj.checked_add(fuel)) {
            // If we successfully did arithmetic without overflowing then we can
// just update our fields.
(Some(consumed), Some(adj)) => {
self.inner.fuel_adj.set(adj);
*consumed_ptr = consumed;
}
// Otherwise something overflowed. Make sure that we preserve the
// amount of fuel that's already consumed, but otherwise assume that
// we were given infinite fuel.
_ => {
self.inner.fuel_adj.set(i64::max_value());
*consumed_ptr = (*consumed_ptr + adj) - i64::max_value();
}
}
Ok(())
}
/// Configures a [`Store`] to generate a [`Trap`] whenever it runs out of
/// fuel.
///
/// When a [`Store`] is configured to consume fuel with
/// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
/// configure what happens when fuel runs out. Specifically a WebAssembly
/// trap will be raised and the current execution of WebAssembly will be
/// aborted.
///
/// This is the default behavior for running out of fuel.
pub fn out_of_fuel_trap(&self) {
self.inner.out_of_gas_behavior.set(OutOfGas::Trap);
}
/// Configures a [`Store`] to yield execution of async WebAssembly code
/// periodically.
///
/// When a [`Store`] is configured to consume fuel with
/// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
/// configure what happens when fuel runs out. Specifically executing
/// WebAssembly will be suspended and control will be yielded back to the
/// caller. This is only suitable with use of [async
/// stores](Store::new_async) because only then are futures used and yields
/// are possible.
///
/// The purpose of this behavior is to ensure that futures which represent
/// execution of WebAssembly do not execute too long inside their
/// `Future::poll` method. This allows for some form of cooperative
/// multitasking where WebAssembly will voluntarily yield control
/// periodically (based on fuel consumption) back to the running thread.
///
/// Note that futures returned by this crate will automatically flag
/// themselves to get re-polled if a yield happens. This means that
/// WebAssembly will continue to execute, just after giving the host an
/// opportunity to do something else.
///
/// The `fuel_to_inject` parameter indicates how much fuel should be
/// automatically re-injected after fuel runs out. This is how much fuel
/// will be consumed between yields of an async future.
///
/// The `injection_count` parameter indicates how many times this fuel will
/// be injected. Multiplying the two parameters is the total amount of fuel
/// this store is allowed before wasm traps.
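    ///
    /// For example (illustrative numbers only): with `injection_count = 10` and
    /// `fuel_to_inject = 1_000`, execution yields up to ten times, re-injecting
    /// 1,000 units of fuel after each yield, for a total budget of 10,000 units
    /// of fuel before a trap is raised.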
///
/// # Panics
///
/// This method will panic if it is not called on an [async
/// store](Store::new_async).
pub fn out_of_fuel_async_yield(&self, injection_count: u32, fuel_to_inject: u64) {
assert!(self.is_async());
self.inner.out_of_gas_behavior.set(OutOfGas::InjectFuel {
injection_count,
fuel_to_inject,
});
}
pub(crate) fn is_async(&self) -> bool {
self.inner.is_async
}
/// Blocks on the asynchronous computation represented by `future` and
/// produces the result here, in-line.
///
/// This function is designed to only work when it's currently executing on
/// a native fiber. This fiber provides the ability for us to handle the
/// future's `Pending` state as "jump back to whomever called the fiber in
/// an asynchronous fashion and propagate `Pending`". This tight coupling
/// with `on_fiber` below is what powers the asynchronicity of calling wasm.
/// Note that the asynchronous part only applies to host functions, wasm
/// itself never really does anything asynchronous at this time.
///
/// This function takes a `future` and will (appear to) synchronously wait
/// on the result. While this function is executing it will fiber switch
/// to-and-from the original frame calling `on_fiber` which should be a
/// guarantee due to how async stores are configured.
///
/// The return value here is either the output of the future `T`, or a trap
/// which represents that the asynchronous computation was cancelled. It is
/// not recommended to catch the trap and try to keep executing wasm, so
/// we've tried to liberally document this.
#[cfg(feature = "async")]
pub(crate) fn block_on<T>(
&self,
mut future: Pin<&mut dyn Future<Output = T>>,
) -> Result<T, Trap> {
debug_assert!(self.is_async());
// Take our current `Suspend` context which was configured as soon as
// our fiber started. Note that we must load it at the front here and
// save it on our stack frame. While we're polling the future other
// fibers may be started for recursive computations, and the current
// suspend context is only preserved at the edges of the fiber, not
// during the fiber itself.
//
// For a little bit of extra safety we also replace the current value
// with null to try to catch any accidental bugs on our part early.
// This is all pretty unsafe so we're trying to be careful...
//
// Note that there should be a segfaulting test in `async_functions.rs`
// if this `Reset` is removed.
let suspend = self.inner.current_suspend.replace(ptr::null());
let _reset = Reset(&self.inner.current_suspend, suspend);
assert!(!suspend.is_null());
loop {
let future_result = unsafe {
let current_poll_cx = self.inner.current_poll_cx.replace(ptr::null_mut());
let _reset = Reset(&self.inner.current_poll_cx, current_poll_cx);
assert!(!current_poll_cx.is_null());
future.as_mut().poll(&mut *current_poll_cx)
};
match future_result {
Poll::Ready(t) => break Ok(t),
Poll::Pending => {}
}
unsafe {
(*suspend).suspend(())?;
}
}
}
/// Executes a synchronous computation `func` asynchronously on a new fiber.
///
/// This function will convert the synchronous `func` into an asynchronous
/// future. This is done by running `func` in a fiber on a separate native
/// stack which can be suspended and resumed from.
///
/// Most of the nitty-gritty here is how we juggle the various contexts
/// necessary to suspend the fiber later on and poll sub-futures. It's hoped
/// that the various comments are illuminating as to what's going on here.
#[cfg(feature = "async")]
pub(crate) async fn on_fiber<R>(&self, func: impl FnOnce() -> R) -> Result<R, Trap> {
debug_assert!(self.is_async());
// TODO: allocation of a fiber should be much more abstract where we
// shouldn't be allocating huge stacks on every async wasm function call.
let mut slot = None;
let fiber = wasmtime_fiber::Fiber::new(10 * 1024 * 1024, |keep_going, suspend| {
// First check and see if we were interrupted/dropped, and only
// continue if we haven't been.
keep_going?;
// Configure our store's suspension context for the rest of the
// execution of this fiber. Note that a raw pointer is stored here
// which is only valid for the duration of this closure.
// Consequently we at least replace it with the previous value when
// we're done. This reset is also required for correctness because
// otherwise our value will overwrite another active fiber's value.
// There should be a test that segfaults in `async_functions.rs` if
// this `Replace` is removed.
let prev = self.inner.current_suspend.replace(suspend);
let _reset = Reset(&self.inner.current_suspend, prev);
slot = Some(func());
Ok(())
})
.map_err(|e| Trap::from(anyhow::Error::from(e)))?;
// Once we have the fiber representing our synchronous computation, we
// wrap that in a custom future implementation which does the
// translation from the future protocol to our fiber API.
FiberFuture { fiber, store: self }.await?;
return Ok(slot.unwrap());
struct FiberFuture<'a> {
fiber: wasmtime_fiber::Fiber<'a, Result<(), Trap>, (), Result<(), Trap>>,
store: &'a Store,
}
impl Future for FiberFuture<'_> {
type Output = Result<(), Trap>;
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
// We need to carry over this `cx` into our fiber's runtime
                // for when it tries to poll sub-futures that are created. Doing
// this must be done unsafely, however, since `cx` is only alive
// for this one singular function call. Here we do a `transmute`
// to extend the lifetime of `Context` so it can be stored in
// our `Store`, and then we replace the current polling context
// with this one.
//
// Note that the replace is done for weird situations where
// futures might be switching contexts and there's multiple
// wasmtime futures in a chain of futures.
//
// On exit from this function, though, we reset the polling
// context back to what it was to signify that `Store` no longer
// has access to this pointer.
let cx =
unsafe { std::mem::transmute::<&mut Context<'_>, *mut Context<'static>>(cx) };
let prev = self.store.inner.current_poll_cx.replace(cx);
                let _reset = Reset(&self.store.inner.current_poll_cx, prev);
// After that's set up we resume execution of the fiber, which
// may also start the fiber for the first time. This either
// returns `Ok` saying the fiber finished (yay!) or it returns
// `Err` with the payload passed to `suspend`, which in our case
// is `()`. If `Err` is returned that means the fiber polled a
// future but it said "Pending", so we propagate that here.
match self.fiber.resume(Ok(())) {
Ok(result) => Poll::Ready(result),
Err(()) => Poll::Pending,
}
}
}
// Dropping futures is pretty special in that it means the future has
// been requested to be cancelled. Here we run the risk of dropping an
// in-progress fiber, and if we were to do nothing then the fiber would
// leak all its owned stack resources.
//
// To handle this we implement `Drop` here and, if the fiber isn't done,
// resume execution of the fiber saying "hey please stop you're
// interrupted". Our `Trap` created here (which has the stack trace
// of whomever dropped us) will then get propagated in whatever called
// `block_on`, and the idea is that the trap propagates all the way back
// up to the original fiber start, finishing execution.
//
// We don't actually care about the fiber's return value here (no one's
// around to look at it), we just assert the fiber finished to
// completion.
impl Drop for FiberFuture<'_> {
fn drop(&mut self) {
if self.fiber.done() {
return;
}
let result = self.fiber.resume(Err(Trap::new("future dropped")));
// This resumption with an error should always complete the
// fiber. While it's technically possible for host code to catch
// the trap and re-resume, we'd ideally like to signal that to
// callers that they shouldn't be doing that.
debug_assert!(result.is_ok());
}
}
}
/// Immediately raise a trap on an out-of-gas condition.
fn out_of_gas_trap(&self) -> ! {
#[derive(Debug)]
struct OutOfGasError;
impl fmt::Display for OutOfGasError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("all fuel consumed by WebAssembly")
}
}
impl std::error::Error for OutOfGasError {}
unsafe {
wasmtime_runtime::raise_lib_trap(wasmtime_runtime::Trap::User(Box::new(OutOfGasError)))
}
}
/// Yields execution to the caller on out-of-gas
///
/// This only works on async futures and stores, and assumes that we're
/// executing on a fiber. This will yield execution back to the caller once
/// and when we come back we'll continue with `fuel_to_inject` more fuel.
#[cfg(feature = "async")]
fn out_of_gas_yield(&self, fuel_to_inject: u64) {
// Small future that yields once and then returns ()
#[derive(Default)]
struct Yield {
yielded: bool,
}
impl Future for Yield {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
if self.yielded {
Poll::Ready(())
} else {
// Flag ourselves as yielded to return next time, and also
// flag the waker that we're already ready to get
// re-enqueued for another poll.
self.yielded = true;
cx.waker().wake_by_ref();
Poll::Pending
}
}
}
let mut future = Yield::default();
match self.block_on(unsafe { Pin::new_unchecked(&mut future) }) {
// If this finished successfully then we were resumed normally via a
// `poll`, so inject some more fuel and keep going.
Ok(()) => self.add_fuel(fuel_to_inject).unwrap(),
// If the future was dropped while we were yielded, then we need to
// clean up this fiber. Do so by raising a trap which will abort all
// wasm and get caught on the other side to clean things up.
Err(trap) => unsafe { wasmtime_runtime::raise_user_trap(trap.into()) },
}
}
}
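// Illustrative sketch (not part of the original file): topping a store's fuel back up from the
// embedder, mirroring what `out_of_gas_yield` does internally after each yield. The `add_fuel`
// signature and visibility are assumed from its use above; fuel consumption must have been
// enabled in the engine's `Config` for the call to succeed.
fn refuel_example(store: &Store, units: u64) {
    store
        .add_fuel(units)
        .expect("fuel consumption must be enabled in the engine's Config");
}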
unsafe impl TrapInfo for Store {
fn as_any(&self) -> &dyn Any {
self
}
fn is_wasm_trap(&self, addr: usize) -> bool {
self.frame_info().borrow().lookup_trap_info(addr).is_some()
}
fn custom_signal_handler(&self, call: &dyn Fn(&SignalHandler) -> bool) -> bool {
if let Some(handler) = &*self.inner.signal_handler.borrow() {
return call(handler);
}
false
}
fn max_wasm_stack(&self) -> usize {
self.engine().config().max_wasm_stack
}
fn out_of_gas(&self) {
match self.inner.out_of_gas_behavior.get() {
OutOfGas::Trap => self.out_of_gas_trap(),
#[cfg(feature = "async")]
OutOfGas::InjectFuel {
injection_count,
fuel_to_inject,
} => {
if injection_count == 0 {
self.out_of_gas_trap();
}
self.inner.out_of_gas_behavior.set(OutOfGas::InjectFuel {
injection_count: injection_count - 1,
fuel_to_inject,
});
self.out_of_gas_yield(fuel_to_inject);
}
#[cfg(not(feature = "async"))]
OutOfGas::InjectFuel { .. } => unreachable!(),
}
}
}
impl Default for Store {
fn default() -> Store {
Store::new(&Engine::default())
}
}
impl fmt::Debug for Store {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let inner = &*self.inner as *const StoreInner;
f.debug_struct("Store").field("inner", &inner).finish()
}
}
impl Drop for StoreInner {
fn drop(&mut self) {
for instance in self.instances.get_mut().iter() {
unsafe {
instance.dealloc();
}
}
}
}
/// A threadsafe handle used to interrupt instances executing within a
/// particular `Store`.
///
/// This structure is created by the [`Store::interrupt_handle`] method.
pub struct InterruptHandle {
interrupts: Arc<VMInterrupts>,
}
// The `VMInterrupts` type is a pod-type with no destructor, and we only access
// `interrupts` from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` variable in
// `VMInterrupts`.
unsafe impl Send for InterruptHandle {}
unsafe impl Sync for InterruptHandle {}
impl InterruptHandle {
/// Flags that execution within this handle's original [`Store`] should be
/// interrupted.
///
/// This will not immediately interrupt execution of wasm modules, but
/// rather it will interrupt wasm execution at loop headers and at
/// function entries. For more information see
/// [`Store::interrupt_handle`].
pub fn interrupt(&self) {
self.interrupts.interrupt()
}
}
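// Illustrative sketch (not part of the original file): taking a handle from a store and
// interrupting long-running wasm from another thread. `Store::interrupt_handle` is the
// constructor mentioned in the docs above; it is assumed here to return a `Result`, failing
// when interruption support wasn't enabled in the engine's `Config`.
fn interrupt_after_example(store: &Store, delay: std::time::Duration) {
    let handle = store
        .interrupt_handle()
        .expect("interruption must be enabled in the engine's Config");
    std::thread::spawn(move || {
        std::thread::sleep(delay);
        // Flips the shared flag; running wasm observes it at loop headers and function
        // entries and traps out.
        handle.interrupt();
    });
}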
// Wrapper struct to implement hash/equality based on the pointer value of the
// `Arc` in question.
struct ArcModuleCode(Arc<ModuleCode>);
impl PartialEq for ArcModuleCode {
fn eq(&self, other: &ArcModuleCode) -> bool {
Arc::ptr_eq(&self.0, &other.0)
}
}
impl Eq for ArcModuleCode {}
impl Hash for ArcModuleCode {
fn hash<H: Hasher>(&self, hasher: &mut H) {
Arc::as_ptr(&self.0).hash(hasher)
}
}
struct Reset<'a, T: Copy>(&'a Cell<T>, T);
impl<T: Copy> Drop for Reset<'_, T> {
fn drop(&mut self) {
self.0.set(self.1);
}
} | ///
/// let trap = run().unwrap_err(); |
base.rs | pub use SyntaxExtension::*;
use crate::ast::{self, Attribute, Name, PatKind, MetaItem};
use crate::attr::HasAttrs;
use crate::source_map::{SourceMap, Spanned, respan};
use crate::edition::Edition;
use crate::errors::{DiagnosticBuilder, DiagnosticId};
use crate::ext::expand::{self, AstFragment, Invocation};
use crate::ext::hygiene::{self, Mark, SyntaxContext, Transparency};
use crate::mut_visit::{self, MutVisitor};
use crate::parse::{self, parser, DirectoryOwnership};
use crate::parse::token;
use crate::ptr::P;
use crate::symbol::{keywords, Ident, Symbol};
use crate::ThinVec;
use crate::tokenstream::{self, TokenStream};
use smallvec::{smallvec, SmallVec};
use syntax_pos::{Span, MultiSpan, DUMMY_SP};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::{self, Lrc};
use std::iter;
use std::path::PathBuf;
use std::rc::Rc;
use std::default::Default;
#[derive(Debug,Clone)]
pub enum Annotatable {
Item(P<ast::Item>),
TraitItem(P<ast::TraitItem>),
ImplItem(P<ast::ImplItem>),
ForeignItem(P<ast::ForeignItem>),
Stmt(P<ast::Stmt>),
Expr(P<ast::Expr>),
}
impl HasAttrs for Annotatable {
fn attrs(&self) -> &[Attribute] {
match *self {
Annotatable::Item(ref item) => &item.attrs,
Annotatable::TraitItem(ref trait_item) => &trait_item.attrs,
Annotatable::ImplItem(ref impl_item) => &impl_item.attrs,
Annotatable::ForeignItem(ref foreign_item) => &foreign_item.attrs,
Annotatable::Stmt(ref stmt) => stmt.attrs(),
Annotatable::Expr(ref expr) => &expr.attrs,
}
}
fn visit_attrs<F: FnOnce(&mut Vec<Attribute>)>(&mut self, f: F) {
match self {
Annotatable::Item(item) => item.visit_attrs(f),
Annotatable::TraitItem(trait_item) => trait_item.visit_attrs(f),
Annotatable::ImplItem(impl_item) => impl_item.visit_attrs(f),
Annotatable::ForeignItem(foreign_item) => foreign_item.visit_attrs(f),
Annotatable::Stmt(stmt) => stmt.visit_attrs(f),
Annotatable::Expr(expr) => expr.visit_attrs(f),
}
}
}
impl Annotatable {
pub fn span(&self) -> Span {
match *self {
Annotatable::Item(ref item) => item.span,
Annotatable::TraitItem(ref trait_item) => trait_item.span,
Annotatable::ImplItem(ref impl_item) => impl_item.span,
Annotatable::ForeignItem(ref foreign_item) => foreign_item.span,
Annotatable::Stmt(ref stmt) => stmt.span,
Annotatable::Expr(ref expr) => expr.span,
}
}
pub fn expect_item(self) -> P<ast::Item> {
match self {
Annotatable::Item(i) => i,
_ => panic!("expected Item")
}
}
pub fn map_item_or<F, G>(self, mut f: F, mut or: G) -> Annotatable
where F: FnMut(P<ast::Item>) -> P<ast::Item>,
G: FnMut(Annotatable) -> Annotatable
{
match self {
Annotatable::Item(i) => Annotatable::Item(f(i)),
_ => or(self)
}
}
pub fn expect_trait_item(self) -> ast::TraitItem {
match self {
Annotatable::TraitItem(i) => i.into_inner(),
_ => panic!("expected Item")
}
}
pub fn expect_impl_item(self) -> ast::ImplItem {
match self {
Annotatable::ImplItem(i) => i.into_inner(),
_ => panic!("expected Item")
}
}
pub fn expect_foreign_item(self) -> ast::ForeignItem {
match self {
Annotatable::ForeignItem(i) => i.into_inner(),
_ => panic!("expected foreign item")
}
}
pub fn expect_stmt(self) -> ast::Stmt {
match self {
Annotatable::Stmt(stmt) => stmt.into_inner(),
_ => panic!("expected statement"),
}
}
pub fn expect_expr(self) -> P<ast::Expr> {
match self {
Annotatable::Expr(expr) => expr,
_ => panic!("expected expression"),
}
}
pub fn derive_allowed(&self) -> bool {
match *self {
Annotatable::Item(ref item) => match item.node {
ast::ItemKind::Struct(..) |
ast::ItemKind::Enum(..) |
ast::ItemKind::Union(..) => true,
_ => false,
},
_ => false,
}
}
}
// A more flexible ItemDecorator.
pub trait MultiItemDecorator {
fn expand(&self,
ecx: &mut ExtCtxt<'_>,
sp: Span,
meta_item: &ast::MetaItem,
item: &Annotatable,
push: &mut dyn FnMut(Annotatable));
}
impl<F> MultiItemDecorator for F
where F : Fn(&mut ExtCtxt<'_>, Span, &ast::MetaItem, &Annotatable, &mut dyn FnMut(Annotatable))
{
fn expand(&self,
ecx: &mut ExtCtxt<'_>,
sp: Span,
meta_item: &ast::MetaItem,
item: &Annotatable,
push: &mut dyn FnMut(Annotatable)) {
(*self)(ecx, sp, meta_item, item, push)
}
}
// `meta_item` is the annotation, and `item` is the item being modified.
// FIXME Decorators should follow the same pattern too.
pub trait MultiItemModifier {
fn expand(&self,
ecx: &mut ExtCtxt<'_>,
span: Span,
meta_item: &ast::MetaItem,
item: Annotatable)
-> Vec<Annotatable>;
}
impl<F, T> MultiItemModifier for F
where F: Fn(&mut ExtCtxt<'_>, Span, &ast::MetaItem, Annotatable) -> T,
T: Into<Vec<Annotatable>>,
{
fn expand(&self,
ecx: &mut ExtCtxt<'_>,
span: Span,
meta_item: &ast::MetaItem,
item: Annotatable)
-> Vec<Annotatable> {
(*self)(ecx, span, meta_item, item).into()
}
}
impl Into<Vec<Annotatable>> for Annotatable {
fn into(self) -> Vec<Annotatable> {
vec![self]
}
}
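// Illustrative sketch (not from the original file): a plain function with the shape accepted by
// the blanket `MultiItemModifier` impl above. It receives the attribute's `MetaItem` plus the
// annotated item and returns the items to splice back in; the name is hypothetical.
fn noop_modifier_example(
    _ecx: &mut ExtCtxt<'_>,
    _span: Span,
    _meta_item: &ast::MetaItem,
    item: Annotatable,
) -> Vec<Annotatable> {
    // A real modifier would rewrite `item` or append generated companion items here.
    vec![item]
}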
pub trait ProcMacro {
fn expand<'cx>(&self,
ecx: &'cx mut ExtCtxt<'_>,
span: Span,
ts: TokenStream)
-> TokenStream;
}
impl<F> ProcMacro for F
where F: Fn(TokenStream) -> TokenStream
{
fn expand<'cx>(&self,
_ecx: &'cx mut ExtCtxt<'_>,
_span: Span,
ts: TokenStream)
-> TokenStream {
// FIXME setup implicit context in TLS before calling self.
(*self)(ts)
}
}
pub trait AttrProcMacro {
fn expand<'cx>(&self,
ecx: &'cx mut ExtCtxt<'_>,
span: Span,
annotation: TokenStream,
annotated: TokenStream)
-> TokenStream;
}
impl<F> AttrProcMacro for F
where F: Fn(TokenStream, TokenStream) -> TokenStream
{
fn expand<'cx>(&self,
_ecx: &'cx mut ExtCtxt<'_>,
_span: Span,
annotation: TokenStream,
annotated: TokenStream)
-> TokenStream {
// FIXME setup implicit context in TLS before calling self.
(*self)(annotation, annotated)
}
}
/// Represents a thing that maps token trees to Macro Results
pub trait TTMacroExpander {
fn expand<'cx>(
&self,
ecx: &'cx mut ExtCtxt<'_>,
span: Span,
input: TokenStream,
def_span: Option<Span>,
) -> Box<dyn MacResult+'cx>;
}
pub type MacroExpanderFn =
for<'cx> fn(&'cx mut ExtCtxt<'_>, Span, &[tokenstream::TokenTree])
-> Box<dyn MacResult+'cx>;
impl<F> TTMacroExpander for F
where F: for<'cx> Fn(&'cx mut ExtCtxt<'_>, Span, &[tokenstream::TokenTree])
-> Box<dyn MacResult+'cx>
{
fn expand<'cx>(
&self,
ecx: &'cx mut ExtCtxt<'_>,
span: Span,
input: TokenStream,
_def_span: Option<Span>,
) -> Box<dyn MacResult+'cx> {
struct AvoidInterpolatedIdents;
impl MutVisitor for AvoidInterpolatedIdents {
fn visit_tt(&mut self, tt: &mut tokenstream::TokenTree) {
if let tokenstream::TokenTree::Token(_, token::Interpolated(nt)) = tt {
if let token::NtIdent(ident, is_raw) = nt.0 {
*tt = tokenstream::TokenTree::Token(ident.span,
token::Ident(ident, is_raw));
}
}
mut_visit::noop_visit_tt(tt, self)
}
fn visit_mac(&mut self, mac: &mut ast::Mac) {
mut_visit::noop_visit_mac(mac, self)
}
}
let input: Vec<_> =
input.trees().map(|mut tt| { AvoidInterpolatedIdents.visit_tt(&mut tt); tt }).collect();
(*self)(ecx, span, &input)
}
}
pub trait IdentMacroExpander {
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt<'_>,
sp: Span,
ident: ast::Ident,
token_tree: Vec<tokenstream::TokenTree>)
-> Box<dyn MacResult+'cx>;
}
pub type IdentMacroExpanderFn =
for<'cx> fn(&'cx mut ExtCtxt<'_>, Span, ast::Ident, Vec<tokenstream::TokenTree>)
-> Box<dyn MacResult+'cx>;
impl<F> IdentMacroExpander for F
where F : for<'cx> Fn(&'cx mut ExtCtxt<'_>, Span, ast::Ident,
Vec<tokenstream::TokenTree>) -> Box<dyn MacResult+'cx>
{
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt<'_>,
sp: Span,
ident: ast::Ident,
token_tree: Vec<tokenstream::TokenTree>)
-> Box<dyn MacResult+'cx>
{
(*self)(cx, sp, ident, token_tree)
}
}
// Use a macro because forwarding to a simple function has type system issues
macro_rules! make_stmts_default {
($me:expr) => {
$me.make_expr().map(|e| smallvec![ast::Stmt {
id: ast::DUMMY_NODE_ID,
span: e.span,
node: ast::StmtKind::Expr(e),
}])
}
}
/// The result of a macro expansion. The return values of the various
/// methods are spliced into the AST at the callsite of the macro.
pub trait MacResult {
/// Create an expression.
fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> {
None
}
/// Create zero or more items.
fn make_items(self: Box<Self>) -> Option<SmallVec<[P<ast::Item>; 1]>> {
None
}
/// Create zero or more impl items.
fn make_impl_items(self: Box<Self>) -> Option<SmallVec<[ast::ImplItem; 1]>> {
None
}
/// Create zero or more trait items.
fn make_trait_items(self: Box<Self>) -> Option<SmallVec<[ast::TraitItem; 1]>> {
None
}
/// Create zero or more items in an `extern {}` block
fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[ast::ForeignItem; 1]>> { None }
/// Create a pattern.
fn make_pat(self: Box<Self>) -> Option<P<ast::Pat>> {
None
}
/// Create zero or more statements.
///
/// By default this attempts to create an expression statement,
/// returning None if that fails.
fn make_stmts(self: Box<Self>) -> Option<SmallVec<[ast::Stmt; 1]>> {
make_stmts_default!(self)
}
fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> {
None
}
}
macro_rules! make_MacEager {
( $( $fld:ident: $t:ty, )* ) => {
/// `MacResult` implementation for the common case where you've already
/// built each form of AST that you might return.
#[derive(Default)]
pub struct MacEager {
$(
pub $fld: Option<$t>,
)*
}
impl MacEager {
$(
pub fn $fld(v: $t) -> Box<dyn MacResult> {
Box::new(MacEager {
$fld: Some(v),
..Default::default()
})
}
)*
}
}
}
make_MacEager! {
expr: P<ast::Expr>,
pat: P<ast::Pat>,
items: SmallVec<[P<ast::Item>; 1]>,
impl_items: SmallVec<[ast::ImplItem; 1]>,
trait_items: SmallVec<[ast::TraitItem; 1]>,
foreign_items: SmallVec<[ast::ForeignItem; 1]>,
stmts: SmallVec<[ast::Stmt; 1]>,
ty: P<ast::Ty>,
}
impl MacResult for MacEager {
fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> {
self.expr
}
fn make_items(self: Box<Self>) -> Option<SmallVec<[P<ast::Item>; 1]>> {
self.items
}
fn make_impl_items(self: Box<Self>) -> Option<SmallVec<[ast::ImplItem; 1]>> {
self.impl_items
}
fn make_trait_items(self: Box<Self>) -> Option<SmallVec<[ast::TraitItem; 1]>> {
self.trait_items
}
fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[ast::ForeignItem; 1]>> {
self.foreign_items
}
fn make_stmts(self: Box<Self>) -> Option<SmallVec<[ast::Stmt; 1]>> {
match self.stmts.as_ref().map_or(0, |s| s.len()) {
0 => make_stmts_default!(self),
_ => self.stmts,
}
}
fn make_pat(self: Box<Self>) -> Option<P<ast::Pat>> {
if let Some(p) = self.pat {
return Some(p);
}
if let Some(e) = self.expr {
if let ast::ExprKind::Lit(_) = e.node {
return Some(P(ast::Pat {
id: ast::DUMMY_NODE_ID,
span: e.span,
node: PatKind::Lit(e),
}));
}
}
None
}
fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> {
self.ty
}
}
/// Fill-in macro expansion result, to allow compilation to continue
/// after hitting errors.
#[derive(Copy, Clone)]
pub struct DummyResult {
expr_only: bool,
is_error: bool,
span: Span,
}
impl DummyResult {
/// Create a default MacResult that can be anything.
///
/// Use this as a return value after hitting any errors and
/// calling `span_err`.
pub fn any(span: Span) -> Box<dyn MacResult+'static> {
Box::new(DummyResult { expr_only: false, is_error: true, span })
}
/// Same as `any`, but must be a valid fragment, not error.
pub fn any_valid(span: Span) -> Box<dyn MacResult+'static> {
Box::new(DummyResult { expr_only: false, is_error: false, span })
}
/// Create a default MacResult that can only be an expression.
///
/// Use this for macros that must expand to an expression, so that even
/// if an error is encountered internally, the user will still receive
/// an error if they also used the macro in the wrong place.
pub fn expr(span: Span) -> Box<dyn MacResult+'static> {
Box::new(DummyResult { expr_only: true, is_error: true, span })
}
/// A plain dummy expression.
pub fn raw_expr(sp: Span, is_error: bool) -> P<ast::Expr> {
P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: if is_error { ast::ExprKind::Err } else { ast::ExprKind::Tup(Vec::new()) },
span: sp,
attrs: ThinVec::new(),
})
}
/// A plain dummy pattern.
pub fn raw_pat(sp: Span) -> ast::Pat {
ast::Pat {
id: ast::DUMMY_NODE_ID,
node: PatKind::Wild,
span: sp,
}
}
/// A plain dummy type.
pub fn raw_ty(sp: Span, is_error: bool) -> P<ast::Ty> {
P(ast::Ty {
id: ast::DUMMY_NODE_ID,
node: if is_error { ast::TyKind::Err } else { ast::TyKind::Tup(Vec::new()) },
span: sp
})
}
}
impl MacResult for DummyResult {
fn make_expr(self: Box<DummyResult>) -> Option<P<ast::Expr>> {
Some(DummyResult::raw_expr(self.span, self.is_error))
}
fn make_pat(self: Box<DummyResult>) -> Option<P<ast::Pat>> {
Some(P(DummyResult::raw_pat(self.span)))
}
fn make_items(self: Box<DummyResult>) -> Option<SmallVec<[P<ast::Item>; 1]>> {
// this code needs a comment... why not always just return the Some() ?
if self.expr_only {
None
} else {
Some(SmallVec::new())
}
}
fn make_impl_items(self: Box<DummyResult>) -> Option<SmallVec<[ast::ImplItem; 1]>> {
if self.expr_only {
None
} else {
Some(SmallVec::new())
}
}
fn make_trait_items(self: Box<DummyResult>) -> Option<SmallVec<[ast::TraitItem; 1]>> {
if self.expr_only {
None
} else {
Some(SmallVec::new())
}
}
fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[ast::ForeignItem; 1]>> {
if self.expr_only {
None
} else {
Some(SmallVec::new())
}
}
fn make_stmts(self: Box<DummyResult>) -> Option<SmallVec<[ast::Stmt; 1]>> {
Some(smallvec![ast::Stmt {
id: ast::DUMMY_NODE_ID,
node: ast::StmtKind::Expr(DummyResult::raw_expr(self.span, self.is_error)),
span: self.span,
}])
}
fn make_ty(self: Box<DummyResult>) -> Option<P<ast::Ty>> {
Some(DummyResult::raw_ty(self.span, self.is_error))
}
}
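// Illustrative sketch (not from the original file): the usual error-recovery shape for a
// `MacroExpanderFn`: emit a non-fatal diagnostic and splice in a `DummyResult` so expansion can
// keep going, otherwise hand back a real fragment via `MacEager`. The macro name is hypothetical.
fn expand_example<'cx>(
    cx: &'cx mut ExtCtxt<'_>,
    sp: Span,
    tts: &[tokenstream::TokenTree],
) -> Box<dyn MacResult + 'cx> {
    if !tts.is_empty() {
        cx.span_err(sp, "example! takes no arguments");
        return DummyResult::any(sp);
    }
    // `raw_expr(sp, false)` is the `()` placeholder expression defined above.
    MacEager::expr(DummyResult::raw_expr(sp, false))
}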
pub type BuiltinDeriveFn =
for<'cx> fn(&'cx mut ExtCtxt<'_>, Span, &MetaItem, &Annotatable, &mut dyn FnMut(Annotatable));
/// Represents different kinds of macro invocations that can be resolved.
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub enum MacroKind {
/// A bang macro - foo!()
Bang,
/// An attribute macro - #[foo]
Attr,
/// A derive attribute macro - #[derive(Foo)]
Derive,
/// A view of a procedural macro from the same crate that defines it.
ProcMacroStub,
}
impl MacroKind {
pub fn descr(self) -> &'static str {
match self {
MacroKind::Bang => "macro",
MacroKind::Attr => "attribute macro",
MacroKind::Derive => "derive macro",
MacroKind::ProcMacroStub => "crate-local procedural macro",
}
}
pub fn article(self) -> &'static str {
match self {
MacroKind::Attr => "an",
_ => "a",
}
}
}
/// An enum representing the different kinds of syntax extensions.
pub enum SyntaxExtension {
/// A trivial "extension" that does nothing, only keeps the attribute and marks it as known.
NonMacroAttr { mark_used: bool },
/// A syntax extension that is attached to an item and creates new items
/// based upon it.
///
/// `#[derive(...)]` is a `MultiItemDecorator`.
///
/// Prefer ProcMacro or MultiModifier since they are more flexible.
MultiDecorator(Box<dyn MultiItemDecorator + sync::Sync + sync::Send>),
/// A syntax extension that is attached to an item and modifies it
/// in-place. Also allows decoration, i.e., creating new items.
MultiModifier(Box<dyn MultiItemModifier + sync::Sync + sync::Send>),
/// A function-like procedural macro. TokenStream -> TokenStream.
ProcMacro {
expander: Box<dyn ProcMacro + sync::Sync + sync::Send>,
/// Whitelist of unstable features that are treated as stable inside this macro
allow_internal_unstable: Option<Lrc<[Symbol]>>,
edition: Edition,
},
/// An attribute-like procedural macro. TokenStream, TokenStream -> TokenStream.
/// The first TokenStream is the attribute, the second is the annotated item.
/// Allows modification of the input items and adding new items, similar to
/// MultiModifier, but uses TokenStreams, rather than AST nodes.
AttrProcMacro(Box<dyn AttrProcMacro + sync::Sync + sync::Send>, Edition),
/// A normal, function-like syntax extension.
///
/// `bytes!` is a `NormalTT`.
NormalTT {
expander: Box<dyn TTMacroExpander + sync::Sync + sync::Send>,
def_info: Option<(ast::NodeId, Span)>,
/// Whether the contents of the macro can
/// directly use `#[unstable]` things.
///
/// Only allows things that require a feature gate in the given whitelist
allow_internal_unstable: Option<Lrc<[Symbol]>>,
/// Whether the contents of the macro can use `unsafe`
/// without triggering the `unsafe_code` lint.
allow_internal_unsafe: bool,
/// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`)
/// for a given macro.
local_inner_macros: bool,
/// The macro's feature name if it is unstable, and the stability feature
unstable_feature: Option<(Symbol, u32)>,
/// Edition of the crate in which the macro is defined
edition: Edition,
},
/// A function-like syntax extension that has an extra ident before
/// the block.
IdentTT {
expander: Box<dyn IdentMacroExpander + sync::Sync + sync::Send>,
span: Option<Span>,
allow_internal_unstable: Option<Lrc<[Symbol]>>,
},
/// An attribute-like procedural macro. TokenStream -> TokenStream.
/// The input is the annotated item.
/// Allows generating code to implement a Trait for a given struct
/// or enum item.
ProcMacroDerive(Box<dyn MultiItemModifier + sync::Sync + sync::Send>,
Vec<Symbol> /* inert attribute names */, Edition),
/// An attribute-like procedural macro that derives a builtin trait.
BuiltinDerive(BuiltinDeriveFn),
/// A declarative macro, e.g., `macro m() {}`.
DeclMacro {
expander: Box<dyn TTMacroExpander + sync::Sync + sync::Send>,
def_info: Option<(ast::NodeId, Span)>,
is_transparent: bool,
edition: Edition,
}
}
impl SyntaxExtension {
/// Return which kind of macro calls this syntax extension.
pub fn kind(&self) -> MacroKind {
match *self {
SyntaxExtension::DeclMacro { .. } |
SyntaxExtension::NormalTT { .. } |
SyntaxExtension::IdentTT { .. } |
SyntaxExtension::ProcMacro { .. } =>
MacroKind::Bang,
SyntaxExtension::NonMacroAttr { .. } |
SyntaxExtension::MultiDecorator(..) |
SyntaxExtension::MultiModifier(..) |
SyntaxExtension::AttrProcMacro(..) =>
MacroKind::Attr,
SyntaxExtension::ProcMacroDerive(..) |
SyntaxExtension::BuiltinDerive(..) =>
MacroKind::Derive,
}
}
pub fn default_transparency(&self) -> Transparency {
match *self {
SyntaxExtension::ProcMacro { .. } |
SyntaxExtension::AttrProcMacro(..) |
SyntaxExtension::ProcMacroDerive(..) |
SyntaxExtension::DeclMacro { is_transparent: false, .. } => Transparency::Opaque,
SyntaxExtension::DeclMacro { is_transparent: true, .. } => Transparency::Transparent,
_ => Transparency::SemiTransparent,
}
}
pub fn edition(&self) -> Edition {
match *self {
SyntaxExtension::NormalTT { edition, .. } |
SyntaxExtension::DeclMacro { edition, .. } |
SyntaxExtension::ProcMacro { edition, .. } |
SyntaxExtension::AttrProcMacro(.., edition) |
SyntaxExtension::ProcMacroDerive(.., edition) => edition,
// Unstable legacy stuff
SyntaxExtension::NonMacroAttr { .. } |
SyntaxExtension::IdentTT { .. } |
SyntaxExtension::MultiDecorator(..) |
SyntaxExtension::MultiModifier(..) |
SyntaxExtension::BuiltinDerive(..) => hygiene::default_edition(),
}
}
}
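// Illustrative sketch (not from the original file): wrapping a `MacroExpanderFn` (such as the
// hypothetical `expand_example` above) in a `NormalTT` extension, with every optional knob left
// at its most conservative setting.
fn example_extension(edition: Edition) -> SyntaxExtension {
    SyntaxExtension::NormalTT {
        expander: Box::new(expand_example as MacroExpanderFn),
        def_info: None,
        allow_internal_unstable: None,
        allow_internal_unsafe: false,
        local_inner_macros: false,
        unstable_feature: None,
        edition,
    }
}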
pub type NamedSyntaxExtension = (Name, SyntaxExtension);
pub trait Resolver {
fn next_node_id(&mut self) -> ast::NodeId;
fn get_module_scope(&mut self, id: ast::NodeId) -> Mark;
fn resolve_dollar_crates(&mut self, fragment: &AstFragment);
fn visit_ast_fragment_with_placeholders(&mut self, mark: Mark, fragment: &AstFragment,
derives: &[Mark]);
fn add_builtin(&mut self, ident: ast::Ident, ext: Lrc<SyntaxExtension>);
fn resolve_imports(&mut self);
fn resolve_macro_invocation(&mut self, invoc: &Invocation, invoc_id: Mark, force: bool)
-> Result<Option<Lrc<SyntaxExtension>>, Determinacy>;
fn resolve_macro_path(&mut self, path: &ast::Path, kind: MacroKind, invoc_id: Mark,
derives_in_scope: Vec<ast::Path>, force: bool)
-> Result<Lrc<SyntaxExtension>, Determinacy>;
fn check_unused_macros(&self);
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Determinacy {
Determined,
Undetermined,
}
impl Determinacy {
pub fn determined(determined: bool) -> Determinacy {
if determined { Determinacy::Determined } else { Determinacy::Undetermined }
}
}
pub struct DummyResolver;
impl Resolver for DummyResolver {
fn next_node_id(&mut self) -> ast::NodeId { ast::DUMMY_NODE_ID }
fn get_module_scope(&mut self, _id: ast::NodeId) -> Mark { Mark::root() }
fn resolve_dollar_crates(&mut self, _fragment: &AstFragment) {}
fn visit_ast_fragment_with_placeholders(&mut self, _invoc: Mark, _fragment: &AstFragment,
_derives: &[Mark]) {}
fn add_builtin(&mut self, _ident: ast::Ident, _ext: Lrc<SyntaxExtension>) {}
fn resolve_imports(&mut self) {}
fn resolve_macro_invocation(&mut self, _invoc: &Invocation, _invoc_id: Mark, _force: bool)
-> Result<Option<Lrc<SyntaxExtension>>, Determinacy> {
Err(Determinacy::Determined)
}
fn resolve_macro_path(&mut self, _path: &ast::Path, _kind: MacroKind, _invoc_id: Mark,
_derives_in_scope: Vec<ast::Path>, _force: bool)
-> Result<Lrc<SyntaxExtension>, Determinacy> {
Err(Determinacy::Determined)
}
fn check_unused_macros(&self) {}
}
#[derive(Clone)]
pub struct ModuleData {
pub mod_path: Vec<ast::Ident>,
pub directory: PathBuf,
}
#[derive(Clone)]
pub struct ExpansionData {
pub mark: Mark,
pub depth: usize,
pub module: Rc<ModuleData>,
pub directory_ownership: DirectoryOwnership,
pub crate_span: Option<Span>,
}
/// One of these is made during expansion and incrementally updated as we go;
/// when a macro expansion occurs, the resulting nodes have the `backtrace()
/// -> expn_info` of their expansion context stored into their span.
pub struct ExtCtxt<'a> {
pub parse_sess: &'a parse::ParseSess,
pub ecfg: expand::ExpansionConfig<'a>,
pub root_path: PathBuf,
pub resolver: &'a mut dyn Resolver,
pub current_expansion: ExpansionData,
pub expansions: FxHashMap<Span, Vec<String>>,
}
impl<'a> ExtCtxt<'a> {
pub fn new(parse_sess: &'a parse::ParseSess,
ecfg: expand::ExpansionConfig<'a>,
resolver: &'a mut dyn Resolver)
-> ExtCtxt<'a> {
ExtCtxt {
parse_sess,
ecfg,
root_path: PathBuf::new(),
resolver,
current_expansion: ExpansionData {
mark: Mark::root(),
depth: 0,
module: Rc::new(ModuleData { mod_path: Vec::new(), directory: PathBuf::new() }),
directory_ownership: DirectoryOwnership::Owned { relative: None },
crate_span: None,
},
expansions: FxHashMap::default(),
}
}
/// Returns a `Folder` for deeply expanding all macros in an AST node.
pub fn expander<'b>(&'b mut self) -> expand::MacroExpander<'b, 'a> {
expand::MacroExpander::new(self, false)
}
/// Returns a `Folder` that deeply expands all macros and assigns all node ids in an AST node.
/// Once node ids are assigned, the node may not be expanded, removed, or otherwise modified.
pub fn monotonic_expander<'b>(&'b mut self) -> expand::MacroExpander<'b, 'a> {
expand::MacroExpander::new(self, true)
}
pub fn new_parser_from_tts(&self, tts: &[tokenstream::TokenTree]) -> parser::Parser<'a> {
parse::stream_to_parser(self.parse_sess, tts.iter().cloned().collect())
}
pub fn source_map(&self) -> &'a SourceMap { self.parse_sess.source_map() }
pub fn parse_sess(&self) -> &'a parse::ParseSess { self.parse_sess }
pub fn cfg(&self) -> &ast::CrateConfig { &self.parse_sess.config }
pub fn call_site(&self) -> Span {
match self.current_expansion.mark.expn_info() {
Some(expn_info) => expn_info.call_site,
None => DUMMY_SP,
}
}
pub fn backtrace(&self) -> SyntaxContext {
SyntaxContext::empty().apply_mark(self.current_expansion.mark)
}
/// Returns span for the macro which originally caused the current expansion to happen.
///
/// Stops backtracing at include! boundary.
pub fn expansion_cause(&self) -> Option<Span> {
let mut ctxt = self.backtrace();
let mut last_macro = None;
loop {
if ctxt.outer().expn_info().map_or(None, |info| {
if info.format.name() == "include" {
// Stop going up the backtrace once include! is encountered
return None;
}
ctxt = info.call_site.ctxt();
last_macro = Some(info.call_site);
Some(())
}).is_none() {
break
}
}
last_macro
}
pub fn struct_span_warn<S: Into<MultiSpan>>(&self,
sp: S,
msg: &str)
-> DiagnosticBuilder<'a> {
self.parse_sess.span_diagnostic.struct_span_warn(sp, msg)
}
pub fn struct_span_err<S: Into<MultiSpan>>(&self,
sp: S,
msg: &str)
-> DiagnosticBuilder<'a> {
self.parse_sess.span_diagnostic.struct_span_err(sp, msg)
}
pub fn struct_span_fatal<S: Into<MultiSpan>>(&self,
sp: S,
msg: &str)
-> DiagnosticBuilder<'a> {
self.parse_sess.span_diagnostic.struct_span_fatal(sp, msg)
}
/// Emit `msg` attached to `sp`, and stop compilation immediately.
///
/// `span_err` should be strongly preferred where-ever possible:
/// this should *only* be used when:
///
/// - continuing has a high risk of flow-on errors (e.g., errors in
/// declaring a macro would cause all uses of that macro to
/// complain about "undefined macro"), or
/// - there is literally nothing else that can be done (however,
/// in most cases one can construct a dummy expression/item to
/// substitute; we never hit resolve/type-checking so the dummy
/// value doesn't have to match anything)
pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
self.parse_sess.span_diagnostic.span_fatal(sp, msg).raise();
}
/// Emit `msg` attached to `sp`, without immediately stopping
/// compilation.
///
/// Compilation will be stopped in the near future (at the end of
/// the macro expansion phase).
pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) |
pub fn span_err_with_code<S: Into<MultiSpan>>(&self, sp: S, msg: &str, code: DiagnosticId) {
self.parse_sess.span_diagnostic.span_err_with_code(sp, msg, code);
}
pub fn mut_span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str)
-> DiagnosticBuilder<'a> {
self.parse_sess.span_diagnostic.mut_span_err(sp, msg)
}
pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
self.parse_sess.span_diagnostic.span_warn(sp, msg);
}
pub fn span_unimpl<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
self.parse_sess.span_diagnostic.span_unimpl(sp, msg);
}
pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
self.parse_sess.span_diagnostic.span_bug(sp, msg);
}
pub fn trace_macros_diag(&mut self) {
for (sp, notes) in self.expansions.iter() {
let mut db = self.parse_sess.span_diagnostic.span_note_diag(*sp, "trace_macro");
for note in notes {
db.note(note);
}
db.emit();
}
// FIXME: does this result in errors?
self.expansions.clear();
}
pub fn bug(&self, msg: &str) -> ! {
self.parse_sess.span_diagnostic.bug(msg);
}
pub fn trace_macros(&self) -> bool {
self.ecfg.trace_mac
}
pub fn set_trace_macros(&mut self, x: bool) {
self.ecfg.trace_mac = x
}
pub fn ident_of(&self, st: &str) -> ast::Ident {
ast::Ident::from_str(st)
}
pub fn std_path(&self, components: &[&str]) -> Vec<ast::Ident> {
let def_site = DUMMY_SP.apply_mark(self.current_expansion.mark);
iter::once(Ident::new(keywords::DollarCrate.name(), def_site))
.chain(components.iter().map(|s| self.ident_of(s)))
.collect()
}
pub fn name_of(&self, st: &str) -> ast::Name {
Symbol::intern(st)
}
pub fn check_unused_macros(&self) {
self.resolver.check_unused_macros();
}
}
/// Extract a string literal from the macro expanded version of `expr`,
/// using `err_msg` for the diagnostic if `expr` is not a string literal. This does not stop
/// compilation on error; it returns the non-fatal diagnostic (if any) to the caller to emit.
pub fn expr_to_spanned_string<'a>(
cx: &'a mut ExtCtxt<'_>,
mut expr: P<ast::Expr>,
err_msg: &str,
) -> Result<Spanned<(Symbol, ast::StrStyle)>, Option<DiagnosticBuilder<'a>>> {
// Update `expr.span`'s ctxt now in case expr is an `include!` macro invocation.
expr.span = expr.span.apply_mark(cx.current_expansion.mark);
// we want to be able to handle e.g., `concat!("foo", "bar")`
cx.expander().visit_expr(&mut expr);
Err(match expr.node {
ast::ExprKind::Lit(ref l) => match l.node {
ast::LitKind::Str(s, style) => return Ok(respan(expr.span, (s, style))),
_ => Some(cx.struct_span_err(l.span, err_msg))
},
ast::ExprKind::Err => None,
_ => Some(cx.struct_span_err(expr.span, err_msg))
})
}
pub fn expr_to_string(cx: &mut ExtCtxt<'_>, expr: P<ast::Expr>, err_msg: &str)
-> Option<(Symbol, ast::StrStyle)> {
expr_to_spanned_string(cx, expr, err_msg)
.map_err(|err| err.map(|mut err| err.emit()))
.ok()
.map(|s| s.node)
}
/// Non-fatally assert that `tts` is empty. Note that this function
/// returns even when `tts` is non-empty; macros that *need* to stop
/// compilation should call
/// `cx.parse_sess.span_diagnostic.abort_if_errors()` (this should be
/// done as rarely as possible).
pub fn check_zero_tts(cx: &ExtCtxt<'_>,
sp: Span,
tts: &[tokenstream::TokenTree],
name: &str) {
if !tts.is_empty() {
cx.span_err(sp, &format!("{} takes no arguments", name));
}
}
/// Interpreting `tts` as a comma-separated sequence of expressions,
/// expect exactly one string literal, or emit an error and return None.
pub fn get_single_str_from_tts(cx: &mut ExtCtxt<'_>,
sp: Span,
tts: &[tokenstream::TokenTree],
name: &str)
-> Option<String> {
let mut p = cx.new_parser_from_tts(tts);
if p.token == token::Eof {
cx.span_err(sp, &format!("{} takes 1 argument", name));
return None
}
let ret = panictry!(p.parse_expr());
let _ = p.eat(&token::Comma);
if p.token != token::Eof {
cx.span_err(sp, &format!("{} takes 1 argument", name));
}
expr_to_string(cx, ret, "argument must be a string literal").map(|(s, _)| {
s.to_string()
})
}
/// Extract comma-separated expressions from `tts`. If there is a
/// parsing error, emit a non-fatal error and return None.
pub fn get_exprs_from_tts(cx: &mut ExtCtxt<'_>,
sp: Span,
tts: &[tokenstream::TokenTree]) -> Option<Vec<P<ast::Expr>>> {
let mut p = cx.new_parser_from_tts(tts);
let mut es = Vec::new();
while p.token != token::Eof {
let mut expr = panictry!(p.parse_expr());
cx.expander().visit_expr(&mut expr);
es.push(expr);
if p.eat(&token::Comma) {
continue;
}
if p.token != token::Eof {
cx.span_err(sp, "expected token: `,`");
return None;
}
}
Some(es)
}
| {
self.parse_sess.span_diagnostic.span_err(sp, msg);
} |
client_side_middleware.py | # Copyright 2020-present Kensho Technologies, LLC.
"""Implementing client-side grpc interceptors"""
import functools
import json
import backoff
import grpc
import prometheus_client
CLIENTSIDE_METRICS_HISTO = prometheus_client.Histogram(
"clientside_grpc_endpoint",
"Response time histogram for grpc endpoints from the client-side",
labelnames=("client_name", "server_name", "service", "endpoint"),
)
CLIENTSIDE_ERROR_COUNTER = prometheus_client.Counter(
"clientside_grpc_endpoint_error",
"Clientside exception counts for grpc methods",
labelnames=("client_name", "server_name", "service", "endpoint", "exception"),
)
GRPC_RENDEZVOUS_ERROR = "_Rendezvous"
def get_service_and_method_from_url(method_url):
"""Extract service and method names from the method url string.
Returns strings that are applicable as prometheus metrics and/or labels.
Args:
method_url: string
Returns:
tuple(service_name, method_name)
"""
name_parts = method_url.split("/")
if len(name_parts) != 3 or name_parts[0] != "" or name_parts[1] == "" or name_parts[2] == "":
raise AssertionError("Invalid method name: {}".format(method_url))
return (name_parts[1].replace(".", "_"), name_parts[2].replace(".", "_"))
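# Illustrative sketch (not part of the original module): the normalization performed above on a
# typical gRPC method URL of the form "/package.Service/Method". The service name used here is
# made up.
def _example_service_and_method():
    """Show how dots are rewritten so both parts are usable as prometheus label values."""
    service, method = get_service_and_method_from_url("/acme.search.SearchService/GetResults")
    assert service == "acme_search_SearchService"
    assert method == "GetResults"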
class GRPCClientGeneralInterceptor(
grpc.UnaryUnaryClientInterceptor,
grpc.StreamUnaryClientInterceptor,
grpc.UnaryStreamClientInterceptor,
grpc.StreamStreamClientInterceptor,
):
"""General GRPC client interceptor that intercepts all functions."""
def __init__(self, decorator_fn):
"""Initialize interceptor with a factory function producing decorators."""
super(GRPCClientGeneralInterceptor, self).__init__()
self._decorator_fn = decorator_fn
def _intercept_call(self, continuation, client_call_details, request_or_iterator):
"""Interceptor implementation."""
metadata = _get_metadata_map_from_client_details(client_call_details)
decorator = self._decorator_fn(client_call_details.method, metadata)
if not decorator:
handler = continuation
else:
handler = decorator(continuation)
return handler(client_call_details, request_or_iterator)
def intercept_unary_unary(self, continuation, client_call_details, request):
"""Intercept unary-unary."""
return self._intercept_call(continuation, client_call_details, request)
def intercept_stream_unary(self, continuation, client_call_details, request_iterator):
"""Intercept stream-unary."""
return self._intercept_call(continuation, client_call_details, request_iterator)
def intercept_unary_stream(self, continuation, client_call_details, request):
"""Intercept unary-stream."""
return self._intercept_call(continuation, client_call_details, request)
def intercept_stream_stream(self, continuation, client_call_details, request_iterator):
"""Intercept stream-stream."""
return self._intercept_call(continuation, client_call_details, request_iterator)
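# Illustrative sketch (not part of the original module): a decorator factory with the shape the
# interceptor above expects. Given the method URL and leading metadata it returns a decorator to
# apply to the continuation (returning None would leave the call untouched). It could be wired up
# as GRPCClientGeneralInterceptor(_example_logging_decorator_fn).
def _example_logging_decorator_fn(method_url, metadata):
    """Log every intercepted call; the print is only for demonstration."""
    def decorator(continuation):
        @functools.wraps(continuation)
        def wrapped(client_call_details, request_or_iterator):
            print("calling {} (metadata: {})".format(method_url, metadata))
            return continuation(client_call_details, request_or_iterator)
        return wrapped
    return decorator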
class GRPCClientUnaryOutputInterceptor(
grpc.UnaryUnaryClientInterceptor, grpc.StreamUnaryClientInterceptor
):
"""GRPC interceptor that makes intercepts only unary-output grpcs."""
def __init__(self, decorator_fn):
"""Initialize interceptor with a factory function producing decorators."""
super(GRPCClientUnaryOutputInterceptor, self).__init__()
self._decorator_fn = decorator_fn
def _intercept_call(self, continuation, client_call_details, request_or_iterator):
"""Interceptor implementation"""
metadata = _get_metadata_map_from_client_details(client_call_details)
decorator = self._decorator_fn(client_call_details.method, metadata)
if not decorator:
handler = continuation
else:
handler = decorator(continuation)
return handler(client_call_details, request_or_iterator)
def intercept_unary_unary(self, continuation, client_call_details, request):
"""Intercept unary-unary."""
return self._intercept_call(continuation, client_call_details, request)
def intercept_stream_unary(self, continuation, client_call_details, request_iterator):
"""Intercept stream-unary."""
return self._intercept_call(continuation, client_call_details, request_iterator)
class GRPCClientMiddleware(object):
"""Base class for GRPC client-side middleware. |
Which takes a string method name, and dict of rpc leading metadata and
returns a decorator that can be applied to the underlying rpc method.
Additionally:
__init__ is guaranteed to be called before the server is started.
get_interceptors(self) will be called to retrieve all GRPC interceptors
necessary for the middleware. Users may extend this method to include
additional interceptors.
"""
def __init__(self, client_label, server_label, interceptor_class):
"""Initialize"""
super(GRPCClientMiddleware, self).__init__()
self._server_label = server_label
self._client_label = client_label
self._interceptor_class = interceptor_class
@property
def server_label(self):
"""Get server label."""
return self._server_label
@property
def client_label(self):
"""Get client label."""
return self._client_label
def get_interceptors(self):
"""Get a list of interceptors needed by the middleware."""
return [self._interceptor_class(self.get_decorator)]
class ClientSideMetricsMiddleware(GRPCClientMiddleware):
"""GRPC middleware that captures prometheus metrics."""
def __init__(self, client_label, server_label):
"""Initialize"""
super(ClientSideMetricsMiddleware, self).__init__(
client_label, server_label, GRPCClientGeneralInterceptor
)
class Timer(object):
"""Decorator that wraps a function in a prometheus histogram."""
def __init__(self, histogram):
"""Initializes with the histogram object."""
self._histogram = histogram
def __call__(self, fn):
"""Wrap a method with a histogram."""
@functools.wraps(fn)
def wrap(request, context):
"""Inner wrapper."""
with self._histogram.time():
return fn(request, context)
return wrap
def get_decorator(self, method_name, _):
"""Normalize metric name and return decorator that captures metrics."""
service_label, endpoint_label = get_service_and_method_from_url(method_name)
return self.Timer(
CLIENTSIDE_METRICS_HISTO.labels(
client_name=self.client_label,
server_name=self.server_label,
service=service_label,
endpoint=endpoint_label,
)
)
class ClientSideExceptionCountMiddleware(GRPCClientMiddleware):
"""GRPC middleware that captures prometheus metrics for unary outputs."""
def __init__(self, client_label, server_label):
"""Initialize"""
super(ClientSideExceptionCountMiddleware, self).__init__(
client_label, server_label, GRPCClientUnaryOutputInterceptor
)
class Counter(object):
"""Decorator that wraps a function in a exception counter."""
def __init__(self, counter, client_name, server_name, service, endpoint):
"""Initializes with the counter object."""
self._counter = counter
self._client_name = client_name
self._server_name = server_name
self._service = service
self._endpoint = endpoint
def __call__(self, fn):
"""Wrap a method with an exception counter."""
@functools.wraps(fn)
def wrap(request, context):
"""Inner wrapper."""
r = fn(request, context)
if r.exception():
# If we get a Rendezvous error, we want some more information about the type
# of error we are getting. For example, a GRPC timeout error will be labelled as
# exception "_Rendezvous: <StatusCode.DEADLINE_EXCEEDED: 4>". All errors can be
# found at https://grpc.github.io/grpc/python/grpc.html#grpc-status-code
if type(r.exception()).__name__ == GRPC_RENDEZVOUS_ERROR:
exception = GRPC_RENDEZVOUS_ERROR + ": " + repr(r.exception().code())
# No guarantees of status code for other errors--only report error type.
else:
exception = type(r.exception()).__name__
self._counter.labels(
client_name=self._client_name,
server_name=self._server_name,
service=self._service,
endpoint=self._endpoint,
exception=exception,
).inc()
return r
return wrap
def get_decorator(self, method_name, _):
"""Normalize method name and return decorator that captures exceptions"""
service_label, endpoint_label = get_service_and_method_from_url(method_name)
return self.Counter(
CLIENTSIDE_ERROR_COUNTER,
self.client_label,
self.server_label,
service_label,
endpoint_label,
)
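# Illustrative sketch (not part of the original module): wiring the middleware above into a
# client channel. grpc.insecure_channel and grpc.intercept_channel are standard grpc-python APIs;
# the client/server labels and target are made up.
def _example_instrumented_channel(target):
    """Build a channel whose calls are timed and whose failures are counted."""
    middlewares = [
        ClientSideMetricsMiddleware("example_client", "example_server"),
        ClientSideExceptionCountMiddleware("example_client", "example_server"),
    ]
    interceptors = [
        interceptor
        for middleware in middlewares
        for interceptor in middleware.get_interceptors()
    ]
    return grpc.intercept_channel(grpc.insecure_channel(target), *interceptors)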
class ClientExceptionTranslationMiddlewareUnaryOutput(GRPCClientMiddleware):
"""Translate client exception"""
def __init__(self, client_label, server_label, code_to_exception_class_func):
"""Initialize"""
super(ClientExceptionTranslationMiddlewareUnaryOutput, self).__init__(
client_label, server_label, GRPCClientUnaryOutputInterceptor
)
self._code_to_exception_class_func = code_to_exception_class_func
class Translator(object):
"""Decorator that wraps a function in a exception translator"""
def __init__(self, code_to_exception_class_func):
"""Initializes with the counter object"""
self._code_to_exception_class_func = code_to_exception_class_func
def __call__(self, fn):
"""Wrap a method with an exception counter"""
@functools.wraps(fn)
def wrap(request, context):
"""Execute a function, if an exception is raised, change its type if necessary"""
try:
result = fn(request, context)
if result.code() is grpc.StatusCode.OK:
return result
else:
raise result
except grpc.RpcError as exc:
raise_exception_from_grpc_exception(self._code_to_exception_class_func, exc)
return wrap
def get_decorator(self, method_name, _):
"""Return exception translator decorator"""
return self.Translator(self._code_to_exception_class_func)
class ClientRetryingMiddlewareUnaryOutput(GRPCClientMiddleware):
"""Translate client exception"""
def __init__(self, client_label, server_label, exceptions_to_retry, max_retries):
"""Initialize"""
super(ClientRetryingMiddlewareUnaryOutput, self).__init__(
client_label, server_label, GRPCClientUnaryOutputInterceptor
)
self._exceptions_to_retry = exceptions_to_retry
self._max_retries = max_retries
class Retrier(object):
"""Decorator that wraps a function in a exception translator"""
def __init__(self, exceptions_to_retry, max_retries):
"""Initializes with the counter object"""
self._exceptions_to_retry = exceptions_to_retry
self._max_retries = max_retries
def __call__(self, fn):
"""Wrap a method with an exception counter"""
return backoff.on_exception(backoff.expo, self._exceptions_to_retry, self._max_retries)(
fn
)
def get_decorator(self, method_name, _):
"""Return exception translator decorator"""
return self.Retrier(self._exceptions_to_retry, self._max_retries)
def raise_exception_from_grpc_exception(code_to_exception_class_func, exc):
"""Raise exception from exc, translating with code_to_exception_class_func"""
code = None
details = "[]" # Details are expected to be jsondeserializable
if exc.code() == grpc.StatusCode.DEADLINE_EXCEEDED:
raise TimeoutError()
elif exc.code() == grpc.StatusCode.UNIMPLEMENTED:
raise NotImplementedError()
elif exc.code() == grpc.StatusCode.UNAVAILABLE:
raise ConnectionRefusedError()
for key, value in exc.trailing_metadata():
if key == "error_code":
try:
code = int(value)
except (TypeError, ValueError):
pass
elif key == "error_details":
details = value
if code_to_exception_class_func:
exception_class = code_to_exception_class_func(code)
if exception_class:
exception_args = json.loads(details)
raise exception_class(*exception_args)
raise exc
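# Illustrative sketch (not part of the original module): a code_to_exception_class_func as
# consumed above. Servers are assumed to send an integer "error_code" (plus JSON-encoded
# "error_details") in the trailing metadata; the mapping below is made up for demonstration.
def _example_code_to_exception_class(code):
    """Translate an application error code into the exception class to raise client-side."""
    return {1: ValueError, 2: KeyError}.get(code)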
def _get_metadata_map_from_client_details(client_call_details):
"""Get metadata key->value map from client_call_details"""
metadata = {metadatum[0]: metadatum[1] for metadatum in (client_call_details.metadata or [])}
return metadata |
GRPCMiddleware implementations must provide a get_decorator method:
# def get_decorator(self, method_name, metadata) |
Slogan.tsx | import { FC } from 'react'
export const Slogan: FC = () => {
return (
<svg width="250" height="16" viewBox="0 0 250 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3.06319 4.6258L7.59692 11.5008V4.55635C7.57129 4.20191 7.67334 3.85001 7.88462 3.56428C7.98193 3.4533 8.10278 3.36543 8.23836 3.30708C8.37395 3.24873 8.52085 3.22136 8.66835 3.22698C8.81913 3.21833 8.96989 3.24422 9.10915 3.30267C9.24841 3.36113 9.37249 3.4506 9.47192 3.56428C9.68495 3.84934 9.78878 4.2013 9.76457 4.55635V13.7081C9.76457 14.73 9.33799 15.2409 8.49473 15.2409C8.30089 15.2438 8.10797 15.2136 7.9243 15.1516C7.73962 15.0913 7.57034 14.9914 7.42827 14.8589C7.27204 14.7217 7.13222 14.5669 7.0116 14.3976C6.88263 14.219 6.75863 14.0405 6.62966 13.852L2.21001 7.07123V13.8917C2.23179 14.2496 2.12049 14.6029 1.89751 14.8837C1.79738 14.9939 1.67461 15.081 1.53762 15.1393C1.40062 15.1975 1.25265 15.2254 1.10386 15.221C0.953504 15.2283 0.803454 15.2013 0.66509 15.142C0.526726 15.0827 0.403677 14.9927 0.305252 14.8788C0.0886848 14.5951 -0.0187027 14.243 0.00267261 13.8867V4.90852C-0.0108184 4.60382 0.0329237 4.29929 0.131638 4.01071C0.231537 3.76843 0.405057 3.56368 0.62767 3.42539C0.819467 3.30444 1.03699 3.23021 1.26271 3.20867C1.48844 3.18713 1.71606 3.21889 1.92727 3.30139C2.07474 3.37011 2.20905 3.46412 2.3241 3.57916C2.45632 3.71801 2.57276 3.87106 2.67132 4.03552L3.06319 4.6258Z" fill="white"/>
<path d="M13.9858 10.2409V4.64563C13.9549 4.26067 14.07 3.87818 14.3082 3.5742C14.4141 3.4566 14.5445 3.36367 14.6902 3.30196C14.8359 3.24025 14.9934 3.21126 15.1515 3.21705C15.3143 3.20924 15.4768 3.23717 15.6276 3.29887C15.7784 3.36057 15.9139 3.45454 16.0245 3.5742C16.2588 3.87999 16.3719 4.26156 16.342 4.64563V10.3698C16.3279 10.9223 16.4033 11.4734 16.5652 12.0018C16.6965 12.4207 16.9702 12.7804 17.339 13.0186C17.8131 13.2851 18.3535 13.4108 18.8965 13.3808C19.2566 13.4177 19.6203 13.3701 19.9587 13.2418C20.2971 13.1135 20.6009 12.908 20.8459 12.6417C21.2757 11.9821 21.4762 11.1994 21.4164 10.4145V4.64563C21.3862 4.25921 21.5011 3.87543 21.7388 3.56925C21.845 3.45106 21.9763 3.3581 22.1231 3.29716C22.2698 3.23623 22.4283 3.20886 22.587 3.21705C22.747 3.21013 22.9066 3.23805 23.0548 3.29886C23.2029 3.35966 23.3361 3.45192 23.4451 3.56925C23.6927 3.87063 23.812 4.25714 23.7775 4.64563V10.2409C23.7951 11.0102 23.705 11.7783 23.5096 12.5226C23.3306 13.1369 22.9877 13.6908 22.5176 14.1248C22.0923 14.519 21.5837 14.8123 21.0295 14.9829C20.3858 15.1739 19.7166 15.2642 19.0453 15.2508C18.2655 15.2696 17.487 15.1761 16.7338 14.973C16.1412 14.8105 15.5988 14.5018 15.1565 14.0752C14.7233 13.6351 14.4121 13.0897 14.2537 12.4929C14.0576 11.7588 13.9674 11.0005 13.9858 10.2409V10.2409ZM19.214 0.379752H20.2854C20.5533 0.379752 20.6922 0.419445 20.6922 0.49881C20.5686 0.685272 20.4182 0.852427 20.2457 0.994842C19.9481 1.29742 19.6505 1.58512 19.3479 1.8629C19.1558 2.04977 18.9447 2.21603 18.718 2.35893C18.4188 2.51227 18.0869 2.59058 17.7507 2.5871C17.5225 2.5871 17.4084 2.52262 17.4084 2.38869C17.4407 2.2192 17.5121 2.05956 17.6168 1.92242L17.9739 1.25277C18.1016 0.971981 18.2856 0.720423 18.5146 0.51369C18.7299 0.402452 18.9728 0.355926 19.214 0.379752V0.379752Z" fill="white"/>
<path d="M37.8697 11.3768C37.859 11.7931 37.7646 12.2031 37.5919 12.5821C37.3851 13.0539 37.0943 13.4841 36.7338 13.852C36.3118 14.282 35.805 14.6199 35.2457 14.844C34.5693 15.1137 33.8458 15.2453 33.1177 15.2309C32.5558 15.2357 31.9951 15.1775 31.4461 15.0573C30.9624 14.9485 30.4987 14.7644 30.0721 14.5117C29.6441 14.2441 29.2622 13.9091 28.9412 13.5196C28.629 13.1461 28.3657 12.7344 28.1574 12.2944C27.9339 11.8336 27.7673 11.3472 27.6614 10.846C27.5514 10.3074 27.4982 9.75881 27.5027 9.20913C27.4883 8.35336 27.626 7.50184 27.9094 6.69425C28.1621 5.98589 28.5609 5.33862 29.08 4.79445C29.5813 4.26852 30.1905 3.85731 30.8658 3.58908C31.5596 3.31129 32.3009 3.17145 33.0483 3.17738C33.9207 3.16266 34.7842 3.35474 35.5681 3.7379C36.2359 4.05972 36.8161 4.53786 37.2596 5.13175C37.6126 5.58086 37.8172 6.12865 37.8449 6.69921C37.8469 6.96644 37.7456 7.2241 37.5622 7.41845C37.4749 7.51606 37.3678 7.59397 37.2481 7.647C37.1283 7.70002 36.9987 7.72696 36.8677 7.726C36.6236 7.74628 36.3813 7.66965 36.1931 7.51269C35.9908 7.29326 35.8235 7.04398 35.6971 6.77361C35.4463 6.25974 35.0733 5.81521 34.6108 5.47896C34.1566 5.18043 33.6213 5.02969 33.0781 5.04742C32.6317 5.03352 32.1885 5.12668 31.7855 5.31912C31.3825 5.51156 31.0314 5.79769 30.7616 6.15357C30.1316 7.07271 29.8277 8.17643 29.8985 9.2885C29.8752 10.0603 30.0051 10.829 30.2804 11.5504C30.4949 12.0985 30.8723 12.5677 31.3618 12.8946C31.8526 13.1991 32.4213 13.3542 32.9987 13.3411C33.6091 13.3634 34.2106 13.1896 34.715 12.845C35.2137 12.4703 35.5845 11.9505 35.7765 11.3569C35.8572 11.088 35.983 10.8348 36.1485 10.6079C36.2362 10.5067 36.3467 10.4278 36.4709 10.3778C36.5951 10.3278 36.7294 10.308 36.8628 10.3202C36.997 10.3175 37.1304 10.342 37.2549 10.3923C37.3794 10.4426 37.4924 10.5176 37.587 10.6129C37.6855 10.713 37.7616 10.8328 37.8104 10.9645C37.8591 11.0962 37.8793 11.2367 37.8697 11.3768Z" fill="white"/>
<path d="M43.9263 4.64563V13.1179H48.7031C49.0205 13.0948 49.3346 13.1942 49.581 13.3956C49.6774 13.4844 49.7543 13.5921 49.8069 13.7121C49.8595 13.832 49.8867 13.9616 49.8867 14.0926C49.8867 14.2235 49.8595 14.3531 49.8069 14.4731C49.7543 14.593 49.6774 14.7008 49.581 14.7895C49.3279 14.9803 49.0144 15.0737 48.6981 15.0524H43.0235C42.8252 15.092 42.6203 15.0822 42.4267 15.0238C42.2332 14.9654 42.057 14.8602 41.9138 14.7175C41.7706 14.5748 41.6648 14.3989 41.6057 14.2056C41.5466 14.0122 41.5361 13.8073 41.5751 13.6089V4.64563C41.5466 4.26093 41.6615 3.8793 41.8975 3.5742C42.0034 3.4566 42.1338 3.36367 42.2795 3.30196C42.4252 3.24025 42.5826 3.21126 42.7408 3.21706C42.9015 3.20974 43.062 3.23746 43.211 3.29827C43.36 3.35908 43.4941 3.45154 43.6039 3.56925C43.8433 3.87452 43.9584 4.25901 43.9263 4.64563Z" fill="white"/>
<path d="M60.7318 5.23095H55.4243V8.09306H60.3151C60.6047 8.0707 60.8924 8.15542 61.1237 8.33114C61.2103 8.41315 61.2785 8.51259 61.3238 8.62289C61.3691 8.73319 61.3905 8.85184 61.3866 8.97103C61.3916 9.09137 61.3712 9.21143 61.3268 9.32339C61.2823 9.43534 61.2148 9.53668 61.1286 9.62083C60.8984 9.803 60.6079 9.89157 60.3151 9.86884H55.4243V13.1823H60.9203C61.2214 13.1601 61.5201 13.2502 61.7586 13.4353C61.8523 13.5233 61.9261 13.6304 61.9749 13.7493C62.0237 13.8683 62.0463 13.9963 62.0413 14.1248C62.0458 14.2503 62.0227 14.3752 61.9739 14.4908C61.9251 14.6065 61.8516 14.7101 61.7586 14.7944C61.5219 14.9835 61.2223 15.0757 60.9203 15.0524H54.5066C54.3087 15.0911 54.1044 15.0806 53.9115 15.0217C53.7186 14.9629 53.5432 14.8575 53.4006 14.715C53.258 14.5724 53.1527 14.3969 53.0938 14.2041C53.035 14.0112 53.0245 13.8068 53.0632 13.6089V4.85892C53.053 4.57533 53.1037 4.29286 53.212 4.03056C53.3068 3.8216 53.4742 3.65415 53.6832 3.55932C53.9451 3.45611 54.2252 3.40718 54.5066 3.41548H60.7318C61.0322 3.39226 61.3307 3.48056 61.5701 3.6635C61.6568 3.75011 61.7256 3.85295 61.7725 3.96615C61.8194 4.07935 61.8436 4.20068 61.8436 4.32322C61.8436 4.44575 61.8194 4.56708 61.7725 4.68028C61.7256 4.79348 61.6568 4.89632 61.5701 4.98293C61.3307 5.16587 61.0322 5.25417 60.7318 5.23095V5.23095Z" fill="white"/>
<path d="M70.6525 3.21706C71.7356 3.18536 72.808 3.43851 73.7626 3.95119C74.6259 4.42286 75.3198 5.15316 75.7467 6.03948C76.2133 7.03313 76.4426 8.12162 76.4164 9.21905C76.4258 10.0502 76.3019 10.8775 76.0493 11.6694C75.8223 12.3775 75.4488 13.0299 74.9531 13.5841C74.4586 14.1309 73.8439 14.5554 73.1574 14.8242C72.3807 15.1222 71.5537 15.267 70.7219 15.2508C69.8856 15.2664 69.0545 15.1164 68.2765 14.8093C67.5892 14.5395 66.9731 14.1152 66.4759 13.5693C65.9803 13.0101 65.6084 12.3526 65.3846 11.6397C65.1313 10.8529 65.0057 10.0307 65.0126 9.20417C65.0016 8.3616 65.1324 7.52319 65.3995 6.72401C65.633 6.01745 66.0133 5.3683 66.5156 4.81925C67.0102 4.2912 67.617 3.88097 68.2914 3.61885C69.0462 3.33774 69.8472 3.20143 70.6525 3.21706V3.21706ZM74.0057 9.20417C74.0227 8.4446 73.8826 7.68971 73.594 6.9869C73.3528 6.40563 72.9412 5.91104 72.4134 5.56826C71.8857 5.23843 71.2747 5.06633 70.6525 5.07223C70.2036 5.06797 69.7589 5.15927 69.3479 5.34008C68.9506 5.51779 68.598 5.78221 68.3162 6.1139C68.0043 6.49683 67.7732 6.93889 67.6366 7.4135C67.4606 7.98871 67.3753 8.58781 67.3836 9.18929C67.3757 9.79561 67.461 10.3996 67.6366 10.98C67.7743 11.4681 68.0145 11.9212 68.341 12.3093C68.6264 12.6485 68.984 12.9197 69.3876 13.103C69.8639 13.3072 70.3817 13.396 70.8989 13.3624C71.416 13.3288 71.9179 13.1736 72.3638 12.9095C72.8961 12.5762 73.3161 12.0906 73.5691 11.5157C73.8839 10.787 74.033 9.99748 74.0057 9.20417V9.20417Z" fill="white"/>
<path d="M87.1703 3.41548H90.2407C90.9332 3.40412 91.6245 3.47741 92.2993 3.63374C92.8765 3.78582 93.41 4.07125 93.8568 4.46707C95.0671 5.50873 95.6739 7.08776 95.6772 9.20416C95.6812 9.84532 95.6197 10.4852 95.4937 11.1139C95.3824 11.6595 95.1919 12.1859 94.9282 12.6764C94.6688 13.142 94.3335 13.5611 93.9362 13.9165C93.6321 14.1923 93.2875 14.4198 92.9143 14.5911C92.5337 14.7642 92.1317 14.886 91.7189 14.9532C91.2409 15.0221 90.7584 15.0553 90.2755 15.0524H87.205C86.8631 15.0765 86.5208 15.0097 86.213 14.8589C86.1099 14.7995 86.0195 14.7202 85.9472 14.6257C85.8748 14.5312 85.8219 14.4234 85.7913 14.3083C85.7183 14.0098 85.6849 13.7029 85.6921 13.3956V4.85892C85.6561 4.45952 85.7791 4.06205 86.0344 3.75278C86.1905 3.62176 86.3719 3.52422 86.5673 3.4662C86.7627 3.40817 86.968 3.39091 87.1703 3.41548V3.41548ZM88.083 5.28055V13.1823H89.8737C90.18 13.1865 90.4862 13.1749 90.7913 13.1476C91.0285 13.1234 91.2617 13.0701 91.4858 12.9889C91.7116 12.9098 91.9203 12.7886 92.1009 12.6317C92.8879 11.9704 93.2831 10.8212 93.2864 9.18432C93.3428 8.29367 93.1632 7.40392 92.7655 6.60496C92.4892 6.09495 92.0331 5.70596 91.4858 5.51369C90.8852 5.34721 90.2637 5.26869 89.6406 5.28055H88.083Z" fill="white"/>
<path d="M107.066 5.23094H101.749V8.09305H106.645C106.932 8.07147 107.218 8.15617 107.448 8.33114C107.536 8.41209 107.606 8.51122 107.652 8.62169C107.698 8.73216 107.72 8.85134 107.716 8.97102C107.72 9.0917 107.699 9.21189 107.653 9.32382C107.608 9.43575 107.54 9.53693 107.453 9.62082C107.225 9.80312 106.936 9.89178 106.645 9.86884H101.749V13.1823H107.245C107.546 13.16 107.845 13.2502 108.083 13.4353C108.177 13.5233 108.251 13.6304 108.299 13.7493C108.348 13.8683 108.371 13.9963 108.366 14.1248C108.37 14.2503 108.347 14.3752 108.298 14.4908C108.25 14.6065 108.176 14.7101 108.083 14.7944C107.846 14.9835 107.547 15.0757 107.245 15.0524H100.836C100.437 15.0892 100.04 14.9681 99.7298 14.7151C99.4782 14.404 99.3573 14.0074 99.3925 13.6089V4.85892C99.3812 4.57477 99.4337 4.29169 99.5463 4.03055C99.6396 3.82053 99.8075 3.6526 100.018 3.55932C100.278 3.45555 100.556 3.40659 100.836 3.41548H107.066C107.365 3.39303 107.662 3.48131 107.899 3.6635C107.986 3.7501 108.055 3.85294 108.102 3.96614C108.149 4.07934 108.173 4.20067 108.173 4.32321C108.173 4.44575 108.149 4.56708 108.102 4.68028C108.055 4.79348 107.986 4.89632 107.899 4.98293C107.662 5.16511 107.365 5.25339 107.066 5.23094Z" fill="white"/>
<path d="M125.003 13.7379L124.447 12.2746H119.715L119.159 13.7627C119.022 14.177 118.836 14.5734 118.604 14.9432C118.512 15.0507 118.395 15.1343 118.264 15.187C118.133 15.2397 117.991 15.2599 117.85 15.2458C117.704 15.2476 117.56 15.22 117.425 15.1646C117.29 15.1092 117.168 15.0271 117.066 14.9234C116.96 14.8314 116.875 14.718 116.816 14.5906C116.757 14.4632 116.725 14.3247 116.724 14.1843C116.725 14.0159 116.752 13.8486 116.803 13.6883C116.853 13.5196 116.942 13.2865 117.066 12.9839L120.042 5.4244C120.127 5.21111 120.231 4.92838 120.345 4.64068C120.447 4.37727 120.575 4.12468 120.727 3.8867C120.861 3.68109 121.043 3.51094 121.258 3.39067C121.617 3.18836 122.038 3.1271 122.44 3.21881C122.842 3.31053 123.195 3.54865 123.43 3.8867C123.565 4.08503 123.679 4.29603 123.773 4.51667C123.862 4.74485 123.981 5.04245 124.125 5.41944L127.165 12.9294C127.356 13.3221 127.477 13.7451 127.523 14.1794C127.519 14.319 127.488 14.4565 127.43 14.5835C127.372 14.7105 127.289 14.8245 127.185 14.9185C127.08 15.0276 126.954 15.1139 126.814 15.172C126.674 15.23 126.523 15.2585 126.372 15.2557C126.217 15.2624 126.063 15.2326 125.921 15.1689C125.78 15.1051 125.656 15.0092 125.558 14.8887C125.442 14.7254 125.345 14.5489 125.271 14.3629C125.166 14.1248 125.077 13.9165 125.003 13.7379ZM120.335 10.5087H123.807L122.056 5.70714L120.335 10.5087Z" fill="white"/>
<path d="M133.187 4.64563V13.1179H137.969C138.286 13.0948 138.601 13.1942 138.847 13.3956C138.945 13.4823 139.023 13.5894 139.075 13.7094C139.127 13.8293 139.153 13.9593 139.15 14.0901C139.155 14.2218 139.13 14.3531 139.078 14.4741C139.025 14.5951 138.946 14.7029 138.847 14.7895C138.596 14.9808 138.284 15.0743 137.969 15.0524H132.275C132.077 15.0911 131.872 15.0806 131.679 15.0217C131.487 14.9629 131.311 14.8575 131.168 14.715C131.026 14.5724 130.921 14.3969 130.862 14.2041C130.803 14.0112 130.792 13.8068 130.831 13.6089V4.64563C130.803 4.26093 130.917 3.8793 131.153 3.5742C131.259 3.4566 131.39 3.36367 131.535 3.30196C131.681 3.24025 131.839 3.21126 131.997 3.21706C132.158 3.20974 132.318 3.23746 132.467 3.29827C132.616 3.35908 132.75 3.45154 132.86 3.56925C133.101 3.87381 133.218 4.25836 133.187 4.64563V4.64563Z" fill="white"/>
<path d="M149.998 5.23095H144.71V8.09306H149.601C149.889 8.07053 150.175 8.15533 150.404 8.33114C150.493 8.4121 150.562 8.51123 150.609 8.6217C150.655 8.73216 150.677 8.85135 150.672 8.97103C150.676 9.0917 150.655 9.2119 150.61 9.32383C150.565 9.43576 150.496 9.53693 150.409 9.62083C150.181 9.80313 149.892 9.89179 149.601 9.86884H144.71V13.1823H150.166C150.466 13.1599 150.763 13.2501 151 13.4353C151.093 13.5233 151.167 13.6304 151.216 13.7493C151.265 13.8683 151.287 13.9963 151.282 14.1248C151.287 14.2503 151.264 14.3752 151.215 14.4908C151.166 14.6065 151.093 14.7101 151 14.7944C150.765 14.9837 150.467 15.0759 150.166 15.0524H143.773C143.574 15.092 143.369 15.0822 143.176 15.0238C142.982 14.9654 142.806 14.8602 142.663 14.7175C142.52 14.5748 142.414 14.3989 142.355 14.2056C142.296 14.0122 142.285 13.8073 142.324 13.6089V4.85892C142.313 4.57477 142.365 4.29169 142.478 4.03056C142.571 3.82053 142.739 3.6526 142.949 3.55932C143.211 3.45515 143.491 3.40619 143.773 3.41548H149.998C150.297 3.3916 150.594 3.48003 150.831 3.6635C150.919 3.74943 150.989 3.85204 151.036 3.96533C151.084 4.07862 151.109 4.2003 151.109 4.32322C151.109 4.44613 151.084 4.56781 151.036 4.6811C150.989 4.79439 150.919 4.897 150.831 4.98293C150.594 5.1664 150.297 5.25483 149.998 5.23095Z" fill="white"/>
<path d="M154.894 13.8173V4.64563C154.864 4.26002 154.981 3.87747 155.221 3.5742C155.326 3.45752 155.456 3.36516 155.601 3.30349C155.746 3.24183 155.902 3.21233 156.059 3.21706C156.22 3.20974 156.38 3.23746 156.529 3.29827C156.679 3.35908 156.813 3.45154 156.922 3.56925C157.165 3.87309 157.284 4.25771 157.255 4.64563V13.8173C157.284 14.2051 157.165 14.5896 156.922 14.8936C156.813 15.012 156.679 15.1052 156.53 15.1668C156.381 15.2285 156.22 15.2571 156.059 15.2508C155.903 15.2551 155.747 15.2254 155.603 15.1638C155.459 15.1021 155.331 15.0099 155.226 14.8936C154.983 14.5898 154.864 14.2052 154.894 13.8173V13.8173Z" fill="white"/>
<path d="M169.07 5.35992H166.501V13.8173C166.532 14.2061 166.415 14.5925 166.173 14.8986C166.068 15.0147 165.938 15.1063 165.793 15.1671C165.648 15.2279 165.492 15.2565 165.335 15.2508C165.175 15.2567 165.016 15.2278 164.869 15.1661C164.722 15.1045 164.59 15.0115 164.482 14.8936C164.238 14.5905 164.119 14.2054 164.149 13.8173V5.35992H161.575C161.251 5.38566 160.93 5.2902 160.672 5.09206C160.576 5.00346 160.5 4.8951 160.449 4.77438C160.399 4.65366 160.375 4.52345 160.38 4.39267C160.374 4.25812 160.398 4.12396 160.452 4.00027C160.505 3.87657 160.585 3.76654 160.687 3.67838C160.941 3.48554 161.257 3.39206 161.575 3.41548H169.065C169.392 3.38915 169.717 3.48453 169.978 3.68333C170.076 3.77277 170.154 3.8826 170.205 4.00511C170.256 4.12763 170.28 4.2599 170.275 4.39267C170.279 4.52426 170.254 4.65503 170.202 4.77585C170.149 4.89666 170.071 5.00459 169.973 5.09206C169.715 5.28892 169.394 5.38424 169.07 5.35992V5.35992Z" fill="white"/>
<path d="M179.725 13.7379L179.169 12.2746H174.437L173.882 13.7627C173.744 14.177 173.558 14.5734 173.326 14.9433C173.234 15.05 173.117 15.1332 172.986 15.1858C172.855 15.2385 172.713 15.259 172.572 15.2458C172.427 15.2471 172.282 15.2192 172.148 15.1638C172.013 15.1084 171.891 15.0267 171.788 14.9234C171.682 14.8314 171.597 14.718 171.538 14.5906C171.479 14.4632 171.448 14.3247 171.446 14.1843C171.447 14.0159 171.474 13.8486 171.525 13.6883C171.575 13.5196 171.664 13.2865 171.788 12.9839L174.765 5.4244C174.849 5.21111 174.948 4.92838 175.067 4.64068C175.169 4.37727 175.297 4.12468 175.449 3.8867C175.584 3.67944 175.768 3.50896 175.985 3.39067C176.235 3.25942 176.516 3.19614 176.798 3.20714C177.082 3.1962 177.364 3.25945 177.617 3.39067C177.831 3.51316 178.014 3.6828 178.152 3.8867C178.287 4.08404 178.4 4.29524 178.49 4.51667C178.584 4.74485 178.703 5.04245 178.847 5.41944L181.912 12.9244C182.103 13.3172 182.224 13.7402 182.27 14.1744C182.266 14.314 182.235 14.4515 182.177 14.5785C182.119 14.7056 182.036 14.8195 181.932 14.9135C181.827 15.0235 181.699 15.1103 181.559 15.1684C181.418 15.2265 181.266 15.2546 181.114 15.2508C180.96 15.2566 180.806 15.2265 180.666 15.1628C180.526 15.0991 180.402 15.0035 180.305 14.8837C180.189 14.7205 180.092 14.544 180.018 14.3579C179.889 14.1248 179.799 13.9165 179.725 13.7379ZM175.057 10.5087H178.529L176.778 5.70715L175.057 10.5087Z" fill="white"/> | <path d="M243.916 3.21706C244.999 3.18652 246.071 3.4396 247.026 3.95119C247.888 4.42576 248.581 5.15524 249.011 6.03948C249.475 7.03354 249.703 8.12201 249.675 9.21905C249.687 10.0504 249.563 10.8781 249.308 11.6694C249.083 12.3772 248.711 13.0296 248.217 13.5841C247.721 14.1288 247.106 14.5529 246.421 14.8242C245.643 15.1228 244.814 15.2677 243.981 15.2508C243.146 15.2664 242.317 15.1164 241.54 14.8093C240.852 14.5384 240.235 14.1143 239.735 13.5693C239.241 13.0089 238.869 12.3517 238.644 11.6397C238.392 10.8527 238.268 10.0304 238.276 9.20417C238.266 8.36206 238.395 7.52406 238.658 6.72401C238.894 6.01831 239.274 5.36954 239.775 4.81925C240.271 4.29111 240.879 3.88092 241.555 3.61885C242.31 3.33774 243.111 3.20143 243.916 3.21706V3.21706ZM247.265 9.20417C247.284 8.4444 247.144 7.68901 246.853 6.9869C246.614 6.40598 246.204 5.91132 245.677 5.56826C245.149 5.2392 244.538 5.06715 243.916 5.07223C243.467 5.06751 243.023 5.15883 242.612 5.34008C242.213 5.51774 241.859 5.78211 241.575 6.1139C241.265 6.49801 241.034 6.9397 240.896 7.4135C240.721 7.98899 240.638 8.58808 240.647 9.18929C240.638 9.79536 240.722 10.3993 240.896 10.98C241.037 11.4664 241.277 11.9187 241.6 12.3093C241.887 12.6491 242.246 12.9203 242.651 13.103C243.128 13.3072 243.646 13.396 244.163 13.3624C244.68 13.3288 245.182 13.1736 245.628 12.9095C246.16 12.5762 246.58 12.0906 246.833 11.5157C247.145 10.7861 247.294 9.99724 247.269 9.20417H247.265Z" fill="white"/>
</svg>
)
} | <path d="M189.531 13.3014L187.666 5.88572V13.9165C187.689 14.2721 187.584 14.6241 187.369 14.9085C187.266 15.0131 187.144 15.0961 187.009 15.1528C186.874 15.2095 186.729 15.2387 186.582 15.2387C186.436 15.2387 186.291 15.2095 186.156 15.1528C186.021 15.0961 185.899 15.0131 185.796 14.9085C185.58 14.6248 185.472 14.2727 185.494 13.9165V4.71012C185.47 4.5188 185.494 4.32462 185.563 4.1447C185.633 3.96478 185.745 3.80465 185.891 3.67839C186.216 3.48875 186.59 3.39745 186.967 3.41549H187.696C188.02 3.39954 188.344 3.43981 188.653 3.53453C188.851 3.61316 189.01 3.76641 189.095 3.96111C189.229 4.28272 189.336 4.61446 189.417 4.95318L191.114 11.3321L192.8 4.95318C192.885 4.61483 192.994 4.28323 193.128 3.96111C193.169 3.86425 193.23 3.77665 193.305 3.70341C193.381 3.63017 193.471 3.57276 193.569 3.53453C193.879 3.43942 194.203 3.39913 194.526 3.41549H195.256C195.631 3.39727 196.003 3.48862 196.327 3.67839C196.473 3.80409 196.586 3.96414 196.656 4.14422C196.725 4.32429 196.748 4.51874 196.724 4.71012V13.9165C196.747 14.2721 196.641 14.6241 196.426 14.9085C196.327 15.0206 196.203 15.1088 196.065 15.1663C195.927 15.2239 195.777 15.2494 195.628 15.2409C195.483 15.2449 195.339 15.2172 195.206 15.1598C195.074 15.1024 194.955 15.0166 194.859 14.9085C194.639 14.6268 194.531 14.2732 194.556 13.9165V5.88572L192.691 13.3014C192.567 13.7974 192.473 14.1347 192.394 14.3579C192.308 14.5969 192.161 14.8091 191.967 14.973C191.729 15.1715 191.423 15.2709 191.114 15.2508C190.868 15.2593 190.626 15.1974 190.414 15.0722C190.228 14.962 190.074 14.8046 189.968 14.6159C189.857 14.4228 189.772 14.216 189.715 14.0008L189.531 13.3014Z" fill="white"/>
<path d="M208.44 5.23094H203.133V8.09305H208.023C208.311 8.07147 208.597 8.15617 208.827 8.33114C208.915 8.4121 208.985 8.51122 209.031 8.62169C209.077 8.73216 209.099 8.85134 209.095 8.97102C209.099 9.0917 209.078 9.21189 209.032 9.32382C208.987 9.43575 208.919 9.53693 208.832 9.62082C208.604 9.80312 208.315 9.89178 208.023 9.86884H203.133V13.1823H208.624C208.925 13.1601 209.223 13.2502 209.462 13.4353C209.556 13.5233 209.629 13.6304 209.678 13.7493C209.727 13.8683 209.75 13.9963 209.745 14.1248C209.749 14.2503 209.726 14.3752 209.677 14.4908C209.628 14.6065 209.555 14.7101 209.462 14.7944C209.225 14.9835 208.926 15.0757 208.624 15.0524H202.215C202.017 15.0911 201.813 15.0806 201.62 15.0217C201.427 14.9629 201.251 14.8575 201.109 14.715C200.966 14.5724 200.861 14.3969 200.802 14.2041C200.743 14.0112 200.733 13.8068 200.771 13.6089V4.85892C200.761 4.57533 200.812 4.29286 200.92 4.03055C201.022 3.81774 201.199 3.64993 201.416 3.55932C201.676 3.45555 201.955 3.40659 202.235 3.41548H208.46C208.76 3.3913 209.059 3.4797 209.298 3.66349C209.386 3.74942 209.456 3.85203 209.504 3.96533C209.551 4.07862 209.576 4.20029 209.576 4.32321C209.576 4.44613 209.551 4.5678 209.504 4.6811C209.456 4.79439 209.386 4.897 209.298 4.98293C209.054 5.17154 208.747 5.2602 208.44 5.23094V5.23094Z" fill="white"/>
<path d="M216.317 4.6258L220.851 11.5008V4.55635C220.825 4.20192 220.927 3.85001 221.139 3.56428C221.236 3.4533 221.357 3.36543 221.492 3.30708C221.628 3.24873 221.775 3.22136 221.922 3.22699C222.073 3.21833 222.224 3.24422 222.363 3.30267C222.502 3.36113 222.627 3.4506 222.726 3.56428C222.939 3.84934 223.043 4.2013 223.019 4.55635V13.7081C223.019 14.73 222.597 15.2409 221.749 15.2409C221.555 15.2438 221.362 15.2136 221.178 15.1516C220.994 15.09 220.825 14.9903 220.682 14.8589C220.526 14.7217 220.386 14.5669 220.266 14.3976C220.142 14.219 220.013 14.0405 219.889 13.852L215.464 7.07123V13.8917C215.486 14.2496 215.374 14.6029 215.151 14.8837C215.051 14.9939 214.929 15.081 214.792 15.1393C214.655 15.1975 214.507 15.2254 214.358 15.221C214.207 15.2283 214.057 15.2013 213.919 15.142C213.781 15.0827 213.658 14.9927 213.559 14.8788C213.343 14.5951 213.235 14.243 213.257 13.8867V4.90852C213.246 4.60395 213.289 4.29987 213.386 4.01071C213.487 3.76947 213.66 3.56518 213.882 3.4254C214.108 3.27873 214.371 3.20115 214.641 3.20217C214.826 3.19821 215.01 3.23197 215.181 3.30139C215.329 3.3689 215.464 3.46305 215.578 3.57916C215.711 3.71906 215.829 3.87196 215.93 4.03552L216.317 4.6258Z" fill="white"/>
<path d="M234.784 5.35992H232.21V13.8173C232.24 14.2052 232.126 14.5907 231.888 14.8986C231.781 15.0148 231.65 15.1064 231.505 15.1672C231.359 15.228 231.202 15.2565 231.044 15.2508C230.885 15.2564 230.726 15.2273 230.579 15.1657C230.431 15.104 230.299 15.0112 230.191 14.8936C229.95 14.5889 229.833 14.2045 229.864 13.8173V5.35992H227.289C226.967 5.38586 226.647 5.2903 226.392 5.09206C226.298 4.99975 226.223 4.88972 226.173 4.76837C226.122 4.64702 226.096 4.51678 226.096 4.38522C226.096 4.25366 226.122 4.12342 226.173 4.00207C226.223 3.88072 226.298 3.77069 226.392 3.67838C226.646 3.48554 226.961 3.39206 227.279 3.41549H234.774C235.1 3.38992 235.423 3.48529 235.682 3.68333C235.78 3.77278 235.858 3.8826 235.909 4.00511C235.961 4.12763 235.985 4.2599 235.98 4.39267C235.985 4.52444 235.96 4.65565 235.908 4.77668C235.856 4.89771 235.777 5.00548 235.677 5.09206C235.422 5.28701 235.105 5.38224 234.784 5.35992V5.35992Z" fill="white"/> |
dfn_futures.rs | //! This module contains all mechanisms required to enable asynchronous
//! programming in Rust, based on native async Rust capabilities:
//!
//! - the future returned by the asynchronous System API call, and
//! - the kickstarting/waker implementations to advance top level futures on
//! every inter-canister callback call.
use std::{
cell::{RefCell, RefMut},
future::Future,
pin::Pin,
rc::Rc,
task::{Context, Poll, Waker},
};
/// A reference counter wrapper we use with the CallFuture.
/// This is required because the future we return from the `call` method can
/// either have two owners (the callback closure and the canister runtime) if
/// the underlying system call succeeded, or just one (the canister runtime) if
/// the system call failed.
pub struct RefCounted<T>(Rc<RefCell<T>>);
impl<T> RefCounted<T> {
pub fn new(val: T) -> Self {
RefCounted(Rc::new(RefCell::new(val)))
}
pub fn into_raw(self) -> *const RefCell<T> {
Rc::into_raw(self.0)
}
pub unsafe fn from_raw(ptr: *const RefCell<T>) -> Self {
Self(Rc::from_raw(ptr))
}
pub fn borrow_mut(&self) -> RefMut<'_, T> {
self.0.borrow_mut()
}
}
impl<O, T: Future<Output = O>> Future for RefCounted<T> {
type Output = O;
fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
unsafe { Pin::new_unchecked(&mut *self.0.borrow_mut()) }.poll(ctx)
}
}
impl<T> Clone for RefCounted<T> {
fn clone(&self) -> Self {
RefCounted(Rc::clone(&self.0))
}
}
/// The result type of the CallFuture.
pub(super) type FutureResult = Result<Vec<u8>, (i32, String)>;
/// The Future trait implementation, returned by the asynchronous inter-canister
/// call.
#[derive(Default)]
pub(super) struct CallFuture {
/// result of the canister call
pub result: Option<FutureResult>,
/// waker (callback)
pub waker: Option<Waker>,
}
impl CallFuture {
pub fn new() -> Self {
CallFuture::default()
}
}
impl Future for CallFuture {
type Output = FutureResult;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if let Some(result) = self.result.take() {
return Poll::Ready(result);
}
self.waker = Some(cx.waker().clone());
Poll::Pending
}
}
/// Must be called on every top-level future corresponding to a method call of a
/// canister by the IC.
///
/// Saves the pointer to the future on the heap and kickstarts the future by
/// polling it once. During the polling we also need to provide the waker
/// callback, which is triggered after the future has made progress. The waker
/// would then poll the future one last time to advance it to the final state.
/// For that, we pass the future pointer to the waker, so that it can be
/// restored into a box from a raw pointer and then dropped if not needed
/// anymore.
///
/// Technically, we store 2 pointers on the heap: the pointer to the future
/// itself, and a pointer to that pointer. The reason for this is that the waker
/// API requires us to pass one thin pointer, while a pointer to a `dyn Trait`
/// can only be fat. So we create one additional thin pointer, pointing to the
/// fat pointer and pass it instead.
pub fn kickstart<F: 'static + Future<Output = ()>>(future: F) {
let future_ptr = Box::into_raw(Box::new(future));
let future_ptr_ptr: *mut *mut dyn Future<Output = ()> = Box::into_raw(Box::new(future_ptr));
let mut pinned_future = unsafe { Pin::new_unchecked(&mut *future_ptr) };
if let Poll::Ready(_) = pinned_future
.as_mut()
.poll(&mut Context::from_waker(&waker::waker(
future_ptr_ptr as *const (),
)))
{
unsafe {
let _ = Box::from_raw(future_ptr);
let _ = Box::from_raw(future_ptr_ptr);
}
}
}
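// Illustrative sketch (added for clarity, not part of the original runtime): the
// thin-pointer trick described above, in isolation. A `*mut dyn Future` is a fat
// pointer (data + vtable), so it cannot be passed where a single thin `*const ()`
// is expected; boxing the fat pointer once more yields a thin pointer to it. The
// module and test names below are hypothetical.
#[cfg(test)]
mod thin_pointer_sketch {
    use super::*;

    #[test]
    fn fat_pointer_roundtrip() {
        // A future behind a fat pointer.
        let fat: *mut dyn Future<Output = ()> = Box::into_raw(Box::new(async {}));
        // A thin pointer to the fat pointer; this is what fits the waker's `*const ()`.
        let thin: *mut *mut dyn Future<Output = ()> = Box::into_raw(Box::new(fat));
        let opaque = thin as *const ();
        unsafe {
            // Restore both allocations so nothing leaks.
            let fat_back = Box::from_raw(opaque as *mut *mut dyn Future<Output = ()>);
            let _future_back = Box::from_raw(*fat_back);
        }
    }
}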
// This module contains the implementation of a waker we're using for waking
// top-level futures (the ones returned by canister methods). The waker polls
// the future once and re-pins it on the heap, if it's pending. If the future is
// done, we do nothing. Hence, it will be deallocated once we exit the scope and
// we're not interested in the result, as it can only be a unit `()` if the
// waker was used as intended.
mod waker {
use super::*;
use std::task::{RawWaker, RawWakerVTable, Waker};
type FuturePtr = *mut dyn Future<Output = ()>;
static MY_VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop);
fn raw_waker(ptr: *const ()) -> RawWaker {
RawWaker::new(ptr, &MY_VTABLE)
}
fn | (ptr: *const ()) -> RawWaker {
raw_waker(ptr)
}
// Our waker will be called only if one of the response callbacks is triggered.
// Then, the waker will restore the future from the pointer we passed into the
// waker inside the `kickstart` method and poll the future again. If the future
// is pending, we leave it on the heap. If it's ready, we deallocate the
// pointer.
unsafe fn wake(ptr: *const ()) {
let boxed_future_ptr_ptr = Box::from_raw(ptr as *mut FuturePtr);
let future_ptr: FuturePtr = *boxed_future_ptr_ptr;
let boxed_future = Box::from_raw(future_ptr);
let mut pinned_future = Pin::new_unchecked(&mut *future_ptr);
if let Poll::Pending = pinned_future
.as_mut()
.poll(&mut Context::from_waker(&waker::waker(ptr)))
{
Box::into_raw(boxed_future_ptr_ptr);
Box::into_raw(boxed_future);
}
}
fn wake_by_ref(_: *const ()) {}
fn drop(_: *const ()) {}
pub fn waker(ptr: *const ()) -> Waker {
unsafe { Waker::from_raw(raw_waker(ptr)) }
}
}
| clone |
api.go | // Copyright © 2016 Abcum Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s3
import (
"bytes"
"io"
"strings"
"github.com/abcum/orbit"
"github.com/abcum/otto"
"github.com/abcum/cirrius/cpm/file"
"github.com/abcum/cirrius/cpm/stream"
"github.com/abcum/cirrius/util/args"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
type API struct {
orb *orbit.Orbit
api *s3.S3
opt map[string]interface{}
}
func NewAPI(orb *orbit.Orbit, opt map[string]interface{}) *API { |
func (this *API) init() *API {
cnf := &aws.Config{}
if val, ok := this.opt["region"].(string); ok {
cnf.Region = aws.String(val)
}
if val, ok := this.opt["insecure"].(bool); ok {
cnf.DisableSSL = aws.Bool(val)
}
ses := session.Must(session.NewSession())
this.api = s3.New(ses, cnf)
return this
}
func (this *API) Del(bucket, name string) {
ctx := this.orb.Context()
cnf := &s3.DeleteObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(name),
}
_, err := this.api.DeleteObjectWithContext(ctx, cnf)
if err != nil {
this.orb.Quit(err)
}
return
}
func (this *API) Get(bucket, name string) *stream.ReadCloser {
ctx := this.orb.Context()
cnf := &s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(name),
}
res, err := this.api.GetObjectWithContext(ctx, cnf)
if err != nil {
this.orb.Quit(err)
}
return stream.NewReadCloser(this.orb, res.Body)
}
func (this *API) Put(call otto.FunctionCall) otto.Value {
var rdr io.Reader
args.Size(this.orb, call, 3, 4)
buck := args.String(this.orb, call, 0)
name := args.String(this.orb, call, 1)
body := args.Any(this.orb, call, 2)
opts := args.Object(this.orb, call, 3)
switch val := body.(type) {
case io.ReadSeeker:
val.Seek(0, 0)
rdr = val
case io.Reader:
fil := file.NewTemp(this.orb)
fil.Consume(val)
fil.Seek(0, 0)
rdr = fil
case []byte:
rdr = bytes.NewReader(val)
case string:
rdr = strings.NewReader(val)
}
ctx := this.orb.Context()
cnf := &s3.PutObjectInput{
Bucket: aws.String(buck),
Key: aws.String(name),
Body: aws.ReadSeekCloser(rdr),
}
if val, ok := opts["ACL"].(string); ok {
cnf.ACL = aws.String(val)
}
if val, ok := opts["Tagging"].(string); ok {
cnf.Tagging = aws.String(val)
}
if val, ok := opts["StorageClass"].(string); ok {
cnf.StorageClass = aws.String(val)
}
if val, ok := opts["ServerSideEncryption"].(string); ok {
cnf.ServerSideEncryption = aws.String(val)
}
if val, ok := opts["CacheControl"].(string); ok {
cnf.CacheControl = aws.String(val)
}
if val, ok := opts["ContentType"].(string); ok {
cnf.ContentType = aws.String(val)
}
if val, ok := opts["ContentEncoding"].(string); ok {
cnf.ContentEncoding = aws.String(val)
}
if val, ok := opts["ContentDisposition"].(string); ok {
cnf.ContentDisposition = aws.String(val)
}
_, err := this.api.PutObjectWithContext(ctx, cnf)
if err != nil {
this.orb.Quit(err)
}
return otto.UndefinedValue()
}
func (this *API) File(call otto.FunctionCall) otto.Value {
args.Size(this.orb, call, 2, 3)
buck := args.String(this.orb, call, 0)
name := args.String(this.orb, call, 1)
opts := args.Object(this.orb, call, 2)
file := NewFile(this.orb, this.api, buck, name, opts)
return args.Value(this.orb, file)
}
|
return (&API{
orb: orb,
opt: opt,
}).init()
}
|
benchmark.rs | use std::time::Duration;
use criterion::{criterion_group, criterion_main, Criterion};
pub fn day01_benchmark(c: &mut Criterion) {
c.bench_function("day01", |b| b.iter(day01::solve));
}
pub fn day02_benchmark(c: &mut Criterion) {
c.bench_function("day02", |b| b.iter(day02::solve));
}
pub fn day03_benchmark(c: &mut Criterion) {
c.bench_function("day03", |b| b.iter(day03::solve));
}
pub fn day04_benchmark(c: &mut Criterion) {
c.bench_function("day04", |b| b.iter(day04::solve));
}
pub fn day05_benchmark(c: &mut Criterion) {
c.bench_function("day05", |b| b.iter(day05::solve));
}
pub fn day06_benchmark(c: &mut Criterion) {
c.bench_function("day06", |b| b.iter(day06::solve));
}
pub fn day07_benchmark(c: &mut Criterion) {
c.bench_function("day07", |b| b.iter(day07::solve));
}
pub fn day08_benchmark(c: &mut Criterion) {
c.bench_function("day08", |b| b.iter(day08::solve));
}
pub fn day09_benchmark(c: &mut Criterion) { | let mut group = c.benchmark_group("day10");
group.sample_size(10);
group.bench_function("solve", |b| b.iter(day10::solve));
group.finish()
}
pub fn day11_benchmark(c: &mut Criterion) {
c.bench_function("day11", |b| b.iter(day11::solve));
}
pub fn day12_benchmark(c: &mut Criterion) {
c.bench_function("day12", |b| b.iter(day12::solve));
}
pub fn day13_benchmark(c: &mut Criterion) {
c.bench_function("day13", |b| b.iter(day13::solve));
}
pub fn day14_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("day14");
group.sample_size(10);
group.sampling_mode(criterion::SamplingMode::Flat);
group.bench_function("parsing", |b| b.iter(day14::load_input));
let input = day14::load_input();
group.bench_function("part1", |b| b.iter(|| day14::solve_part1(&input)));
group.bench_function("part2", |b| {
b.iter_batched_ref(
|| input.clone(),
|input| day14::solve_part2(input),
criterion::BatchSize::SmallInput,
)
});
group.bench_function("solve", |b| b.iter(day19::solve));
group.finish();
}
pub fn day15_benchmark(c: &mut Criterion) {
c.bench_function("day15", |b| b.iter(day15::solve));
}
pub fn day16_benchmark(c: &mut Criterion) {
c.bench_function("day16", |b| b.iter(day16::solve));
}
pub fn day17_benchmark(c: &mut Criterion) {
c.bench_function("day17", |b| b.iter(day17::solve));
}
pub fn day18_benchmark(c: &mut Criterion) {
c.bench_function("day18", |b| b.iter(day18::solve));
}
pub fn day19_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("day19");
group.sample_size(10);
group.sampling_mode(criterion::SamplingMode::Flat);
group.bench_function("parsing", |b| b.iter(day19::load_input));
let input = day19::load_input();
group.bench_function("part1", |b| {
b.iter(|| day19::solve_part1(input.0, &input.1))
});
group.bench_function("part2", |b| {
b.iter(|| day19::solve_part2(input.0, &input.1))
});
group.bench_function("solve", |b| b.iter(day19::solve));
group.finish();
}
pub fn day20_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("day20");
group.sample_size(10);
group.sampling_mode(criterion::SamplingMode::Flat);
group.bench_function("parsing", |b| b.iter(day20::load_input));
let input = day20::load_input();
group.bench_function("part1", |b| b.iter(|| day20::solve_part1(input)));
group.bench_function("part2", |b| b.iter(|| day20::solve_part2(input)));
group.bench_function("solve", |b| b.iter(day20::solve));
group.finish();
}
pub fn day21_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("day21");
group.bench_function("parsing", |b| b.iter(day21::load_input));
let input = day21::load_input();
group.bench_function("part1", |b| b.iter(|| day21::solve_part1(input)));
group.bench_function("part2", |b| b.iter(|| day21::solve_part2(input)));
group.bench_function("solve", |b| b.iter(day21::solve));
group.finish();
}
pub fn day22_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("day22");
group.bench_function("parsing", |b| b.iter(day22::load_input));
let input = day22::load_input();
group.bench_function("part1", |b| b.iter(|| day22::solve_part1(input.0, input.1)));
group.bench_function("part2", |b| b.iter(|| day22::solve_part2(input.0, input.1)));
group.bench_function("solve", |b| b.iter(day22::solve));
group.finish();
}
pub fn day23_benchmark(c: &mut Criterion) {
c.bench_function("day23", |b| b.iter(day23::solve));
}
pub fn day24_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("day24");
group.sample_size(10);
group.sampling_mode(criterion::SamplingMode::Flat);
group.bench_function("parsing", |b| b.iter(day24::load_input));
let input = day24::load_input();
group.bench_function("part1", |b| b.iter(|| day24::solve_part1(&input)));
group.bench_function("part2", |b| b.iter(|| day24::solve_part2(&input)));
group.bench_function("solve", |b| b.iter(day24::solve));
group.finish();
}
pub fn day25_benchmark(c: &mut Criterion) {
c.bench_function("day25", |b| b.iter(day25::solve));
}
pub fn alldays_benchmark(c: &mut Criterion) {
c.bench_function("all", |b| {
b.iter(|| {
(
day01::solve(),
day02::solve(),
day03::solve(),
day04::solve(),
day05::solve(),
day06::solve(),
day07::solve(),
day08::solve(),
day09::solve(),
day10::solve(),
day11::solve(),
day12::solve(),
day13::solve(),
day14::solve(),
day15::solve(),
day16::solve(),
day17::solve(),
day18::solve(),
day19::solve(),
day20::solve(),
day21::solve(),
day22::solve(),
day23::solve(),
day24::solve(),
day25::solve(),
)
})
});
}
criterion_group! {
name = benches;
config = Criterion::default()
.significance_level(0.1)
.sample_size(350)
.measurement_time(Duration::from_secs(30 / 3))
.warm_up_time(Duration::from_secs(15 / 3))
.noise_threshold(0.05);
targets =
day01_benchmark,
day02_benchmark,
day03_benchmark,
day04_benchmark,
day05_benchmark,
day06_benchmark,
day07_benchmark,
day08_benchmark,
day09_benchmark,
day10_benchmark,
day11_benchmark,
day12_benchmark,
day13_benchmark,
day14_benchmark,
day15_benchmark,
day16_benchmark,
day17_benchmark,
day18_benchmark,
day19_benchmark,
day20_benchmark,
day21_benchmark,
day22_benchmark,
day23_benchmark,
day24_benchmark,
day25_benchmark,
alldays_benchmark,
}
criterion_main!(benches); | c.bench_function("day09", |b| b.iter(day09::solve));
}
pub fn day10_benchmark(c: &mut Criterion) { |
unquote.go | package shellquote
import (
"bytes"
"errors"
"strings"
"unicode/utf8"
)
var (
UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") | UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string")
UnterminatedEscapeError = errors.New("Unterminated backslash-escape")
)
const (
DefaultSplitChars = " \n\t"
DefaultSingleChar = '\''
DefaultDoubleChar = '"'
DefaultEscapeChar = '\\'
DefaultDoubleEscapeChars = "$`\"\n\\"
)
type SplitOptions struct {
SplitChars string
SingleChar rune
DoubleChar rune
EscapeChar rune
DoubleEscapeChars string
Limit int
}
func DefaultSplitOptions() *SplitOptions {
return &SplitOptions{
SplitChars: DefaultSplitChars,
SingleChar: DefaultSingleChar,
DoubleChar: DefaultDoubleChar,
EscapeChar: DefaultEscapeChar,
DoubleEscapeChars: DefaultDoubleEscapeChars,
Limit: -1,
}
}
func NoEscapeSplitOptions() *SplitOptions {
opts := DefaultSplitOptions()
opts.EscapeChar = 0
return opts
}
// SplitWithOptions splits a string according to /bin/sh's word-splitting rules and
// the options given.
// It supports backslash-escapes, single-quotes, and double-quotes. Notably it does
// not support the $'' style of quoting. It also doesn't attempt to perform any
// other sort of expansion, including brace expansion, shell expansion, or
// pathname expansion.
//
// If the given input has an unterminated quoted string or ends in a
// backslash-escape, one of UnterminatedSingleQuoteError,
// UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned.
func SplitWithOptions(input string, opts *SplitOptions) (words []string, err error) {
if opts == nil {
opts = DefaultSplitOptions()
}
splitChars := opts.SplitChars
if len(splitChars) == 0 {
splitChars = DefaultSplitChars
}
switch opts.Limit {
case 0:
words = []string{}
return
case 1:
words = []string{}
input = strings.TrimLeft(strings.TrimRight(input, splitChars), splitChars)
if len(input) > 0 {
words = append(words, input)
}
return
}
var buf bytes.Buffer
words = make([]string, 0)
for len(input) > 0 {
// skip any splitChars at the start
c, l := utf8.DecodeRuneInString(input)
if strings.ContainsRune(splitChars, c) {
input = input[l:]
continue
} else if c == opts.EscapeChar {
// Look ahead for escaped newline so we can skip over it
next := input[l:]
if len(next) == 0 {
err = UnterminatedEscapeError
return
}
c2, l2 := utf8.DecodeRuneInString(next)
if c2 == '\n' {
input = next[l2:]
continue
}
}
var word string
word, input, err = splitWord(input, &buf, opts)
if err != nil {
return
}
words = append(words, word)
if opts.Limit == len(words)+1 {
input = strings.TrimSpace(input)
if len(input) > 0 {
words = append(words, input)
}
return
}
}
return
}
func Split(input string) (words []string, err error) {
return SplitWithOptions(input, DefaultSplitOptions())
}
func SplitN(input string, n int) (words []string, err error) {
opts := DefaultSplitOptions()
opts.Limit = n
return SplitWithOptions(input, opts)
}
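// Illustrative usage (added for clarity; the inputs and expected outputs are
// examples, not part of the original package):
//
//	words, err := Split(`cp "my file.txt" /tmp/dest\ dir`)
//	// words == []string{"cp", "my file.txt", "/tmp/dest dir"}, err == nil
//
//	_, err = Split(`echo "unterminated`)
//	// err == UnterminatedDoubleQuoteError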
func splitWord(input string, buf *bytes.Buffer, opts *SplitOptions) (word string, remainder string, err error) {
buf.Reset()
raw:
{
cur := input
for len(cur) > 0 {
c, l := utf8.DecodeRuneInString(cur)
cur = cur[l:]
if c == opts.SingleChar {
buf.WriteString(input[0 : len(input)-len(cur)-l])
input = cur
goto single
} else if c == opts.DoubleChar {
buf.WriteString(input[0 : len(input)-len(cur)-l])
input = cur
goto double
} else if c == opts.EscapeChar {
buf.WriteString(input[0 : len(input)-len(cur)-l])
input = cur
goto escape
} else if strings.ContainsRune(opts.SplitChars, c) {
buf.WriteString(input[0 : len(input)-len(cur)-l])
return buf.String(), cur, nil
}
}
if len(input) > 0 {
buf.WriteString(input)
input = ""
}
goto done
}
escape:
{
if len(input) == 0 {
return "", "", UnterminatedEscapeError
}
c, l := utf8.DecodeRuneInString(input)
if c == '\n' {
// a backslash-escaped newline is elided from the output entirely
} else {
buf.WriteString(input[:l])
}
input = input[l:]
}
goto raw
single:
{
i := strings.IndexRune(input, opts.SingleChar)
if i == -1 {
return "", "", UnterminatedSingleQuoteError
}
buf.WriteString(input[0:i])
input = input[i+1:]
goto raw
}
double:
{
cur := input
for len(cur) > 0 {
c, l := utf8.DecodeRuneInString(cur)
cur = cur[l:]
if c == opts.DoubleChar {
buf.WriteString(input[0 : len(input)-len(cur)-l])
input = cur
goto raw
} else if c == opts.EscapeChar {
// bash only supports certain escapes in double-quoted strings
c2, l2 := utf8.DecodeRuneInString(cur)
cur = cur[l2:]
if strings.ContainsRune(opts.DoubleEscapeChars, c2) {
buf.WriteString(input[0 : len(input)-len(cur)-l-l2])
if c2 == '\n' {
// newline is special, skip the backslash entirely
} else {
buf.WriteRune(c2)
}
input = cur
}
}
}
return "", "", UnterminatedDoubleQuoteError
}
done:
return buf.String(), input, nil
} | |
app-routing.module.ts | import {NgModule} from '@angular/core';
import {RouterModule, Routes} from '@angular/router';
import {MainComponent} from './main/main.component';
import {TournamentsComponent} from './tournaments/tournaments.component';
import {TeamsComponent} from './teams/teams.component';
import {HelpComponent} from './help/help.component'; | import {AuthGuardService} from '../services/auth-guard.service';
import {LoginComponent} from './login/login.component';
import {SignupComponent} from './signup/signup.component';
import {ErrorPageComponent} from './error-page/error-page.component';
import {TeamComponent} from './teams/team/team.component';
import {UserSettingsComponent} from './user/user-settings/user-settings.component';
import {UserInvitesComponent} from './user/user-invites/user-invites.component';
import {MatchComponent} from './match/match.component';
import {UserSubscriptionsComponent} from './user/user-subscriptions/user-subscriptions.component';
import {RestoreComponent} from './restore/restore.component';
const routes: Routes = [
{path: '', redirectTo: 'main', pathMatch: 'full'},
{path: 'main', component: MainComponent},
{path: 'tournaments', component: TournamentsComponent},
{path: 'tournaments/:id', component: TournamentComponent},
{path: 'teams', component: TeamsComponent},
{path: 'teams/:id', component: TeamComponent},
{path: 'help', component: HelpComponent},
{
path: 'signup',
canActivate: [AuthGuardService],
data: {required: 'VISITOR', redirect: '/main'},
component: SignupComponent
},
{
path: 'login',
canActivate: [AuthGuardService],
data: {required: 'VISITOR', redirect: '/main'},
// CAREFUL: if the user ends up with neither USER nor VISITOR,
// everything breaks (circular redirect user <-> login)
component: LoginComponent
},
{
path: 'user/:username',
component: UserComponent
},
{
path: 'settings',
canActivate: [AuthGuardService],
data: {required: 'USER', redirect: '/login'},
component: UserSettingsComponent
},
{
path: 'subscriptions',
component: UserSubscriptionsComponent
},
{
path: 'invites',
component: UserInvitesComponent
},
{
path: 'match/:matchId',
component: MatchComponent
},
{
path: 'restore/:restoreKey',
canActivate: [AuthGuardService],
data: {required: 'VISITOR', redirect: '/main'},
component: RestoreComponent
},
{
path: '**', component: ErrorPageComponent
}
];
@NgModule({
imports: [RouterModule.forRoot(routes)],
exports: [RouterModule]
})
export class AppRoutingModule {
} | import {UserComponent} from './user/user.component';
import {TournamentComponent} from './tournaments/tournament/tournament.component'; |
utils.go | package jsonx
import (
"bytes"
"encoding/json"
"regexp"
"strings"
)
var re1 = regexp.MustCompile(`"(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})[^"]+"`)
func MapFrom(arg0 interface{}) map[string]interface{} {
var buf []byte
if _buf, ok := arg0.([]byte); ok && len(_buf) > 0 {
buf = _buf
} else if s1, ok := arg0.(string); ok && s1 != "" {
buf = []byte(s1)
}
var map1 map[string]interface{}
if len(buf) < 1 || json.Unmarshal(buf, &map1) != nil {
return map[string]interface{}{}
}
return map1
}
func ArrayFrom(arg0 interface{}) []interface{} {
var buf []byte
if _buf, ok := arg0.([]byte); ok && len(_buf) > 0 {
buf = _buf
} else if s1, ok := arg0.(string); ok && s1 != "" {
buf = []byte(s1)
}
var list []interface{}
if len(buf) < 1 || json.Unmarshal(buf, &list) != nil {
return []interface{}{}
}
return list
}
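// Illustrative usage (added for clarity; the literals are hypothetical). Both
// helpers return an empty container instead of an error when the input is
// missing or not valid JSON:
//
//	m := MapFrom(`{"name":"demo","n":3}`) // map[string]interface{}{"name": "demo", "n": float64(3)}
//	a := ArrayFrom([]byte(`[1,2,3]`))     // []interface{}{float64(1), float64(2), float64(3)}
//	e := MapFrom("not json")              // map[string]interface{}{}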
func ToJson(arg0 interface{}, opts ...*option) string {
buf := bytes.NewBuffer([]byte{})
encoder := json.NewEncoder(buf)
encoder.SetEscapeHTML(false)
if encoder.Encode(arg0) != nil {
return "{}"
}
contents := buf.String()
var opt *option
if len(opts) > 0 {
opt = opts[0]
}
if opt == nil {
return contents
}
if opt.handleTimeField {
matches := re1.FindAllStringSubmatch(contents, -1)
if len(matches) < 1 {
return contents
}
for _, groups := range matches {
if len(groups) < 3 {
continue
}
if groups[2] == "00:00:00" && opt.stripZeroTimePart {
contents = strings.Replace(contents, groups[0], groups[1], 1) | contents = strings.Replace(contents, groups[0], groups[1] + " " + groups[2], 1)
}
}
if len(opt.replacements) > 0 {
for _, parts := range opt.replacements {
if parts[0] == "" || parts[1] == "" {
continue
}
if !strings.HasPrefix(parts[0], "@regex:") {
contents = strings.ReplaceAll(contents, parts[0], parts[1])
continue
}
re2, err := regexp.Compile(strings.ReplaceAll(parts[0], "@regex:", ""))
if err != nil {
continue
}
contents = re2.ReplaceAllString(contents, parts[1])
}
}
return contents
} | continue
}
|
kick.js | const Discord = require('discord.js');
module.exports = {
name: 'kick',
aliases: ['추방', '킥'],
description: 'Kicks a member.',
usage: 'u!kick <user mention> [kick reason]',
permission: 'Manage Server',
run: async (client, message, args, ops) => {
if (!message.member.hasPermission('KICK_MEMBERS')) return message.channel.send(`You need the Kick Members permission.`);
if (!message.mentions.users.first()) return message.channel.send('Please mention the member to kick.');
const embed = new Discord.MessageEmbed()
.setTitle('Kick this member?')
.setColor('RANDOM')
.addField('Member to kick', message.mentions.users.first().toString())
.addField('Kick reason', args.slice(2).join(' ') || 'None')
.setFooter(message.author.tag, message.author.avatarURL())
.setTimestamp()
.setThumbnail(message.guild.iconURL());
let m = await message.channel.send(embed);
await m.react('✅️');
await m.react('❌️');
const filter = (r, u) => (r.emoji.name == '✅️' || r.emoji.name == '❌️') && u.id == message.author.id;
const collector = m.createReactionCollector(filter, {
max: 1
});
collector.on('end', collected => {
m.reactions.removeAll();
if (collected.first().emoji.name == '✅️') { | embed.setTitle('Kicked the member')
.setColor("RANDOM");
m.edit(embed);
});
} else {
embed.setTitle('Member kick was cancelled.')
.setColor('RANDOM');
m.edit(embed);
}
});
}
} | message.guild.member(message.mentions.users.first()).kick(args.slice(2).join(' ') || undefined).then(() => { |
multi.go | // Copyright © 2019 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package multi implements a pkg/log.Handler that applies every log message on multiple Handlers
package multi
import (
"go.thethings.network/lorawan-stack/pkg/log"
)
// Handler implements log.Handler.
type Handler struct {
handlers []log.Handler
}
// New returns a new handler that combines the underlying handlers.
func N | handlers ...log.Handler) *Handler {
return &Handler{
handlers: handlers,
}
}
// HandleLog implements log.Handler.
func (m *Handler) HandleLog(entry log.Entry) error {
var err error
for _, handler := range m.handlers {
e := handler.HandleLog(entry)
// save the last error but continue
if e != nil {
err = e
}
}
return err
}
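// Illustrative usage (added for clarity; the handler values are hypothetical):
//
//	combined := New(consoleHandler, fileHandler)
//	err := combined.HandleLog(entry)
//	// every handler receives the entry; if several fail, only the last error
//	// is returned.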
| ew( |
manga-form.component.spec.ts | import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { MangaFormComponent } from './manga-form.component';
describe('MangaFormComponent', () => {
let component: MangaFormComponent;
let fixture: ComponentFixture<MangaFormComponent>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ MangaFormComponent ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(MangaFormComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
}); | }); | |
parse.rs | use proc_macro::Span;
use crate::format_description::component::{Component, NakedComponent};
use crate::format_description::error::InvalidFormatDescription;
use crate::format_description::{helper, modifier, FormatItem};
use crate::Error;
struct ParsedItem<'a> {
item: FormatItem<'a>,
remaining: &'a [u8],
}
fn parse_component(mut s: &[u8], index: &mut usize) -> Result<Component, InvalidFormatDescription> { | let component_index = *index;
let whitespace_loc = s
.iter()
.position(u8::is_ascii_whitespace)
.unwrap_or(s.len());
*index += whitespace_loc;
let component_name = &s[..whitespace_loc];
s = &s[whitespace_loc..];
s = helper::consume_whitespace(s, index);
Ok(NakedComponent::parse(component_name, component_index)?
.attach_modifiers(modifier::Modifiers::parse(component_name, s, index)?))
}
fn parse_literal<'a>(s: &'a [u8], index: &mut usize) -> ParsedItem<'a> {
let loc = s.iter().position(|&c| c == b'[').unwrap_or(s.len());
*index += loc;
ParsedItem {
item: FormatItem::Literal(&s[..loc]),
remaining: &s[loc..],
}
}
fn parse_item<'a>(
s: &'a [u8],
index: &mut usize,
) -> Result<ParsedItem<'a>, InvalidFormatDescription> {
if let [b'[', b'[', remaining @ ..] = s {
*index += 2;
return Ok(ParsedItem {
item: FormatItem::Literal(&[b'[']),
remaining,
});
};
if s.starts_with(&[b'[']) {
if let Some(bracket_index) = s.iter().position(|&c| c == b']') {
*index += 1; // opening bracket
let ret_val = ParsedItem {
item: FormatItem::Component(parse_component(&s[1..bracket_index], index)?),
remaining: &s[bracket_index + 1..],
};
*index += 1; // closing bracket
Ok(ret_val)
} else {
Err(InvalidFormatDescription::UnclosedOpeningBracket { index: *index })
}
} else {
Ok(parse_literal(s, index))
}
}
pub(crate) fn parse(mut s: &[u8], span: Span) -> Result<Vec<FormatItem<'_>>, Error> {
let mut compound = Vec::new();
let mut loc = 0;
while !s.is_empty() {
let ParsedItem { item, remaining } =
parse_item(s, &mut loc).map_err(|error| Error::InvalidFormatDescription {
error,
span_start: Some(span),
span_end: Some(span),
})?;
s = remaining;
compound.push(item);
}
Ok(compound)
} | s = helper::consume_whitespace(s, index);
|
video-urls.ts | function without<T>(array: T[], value: T): T[] {
let presentParts: T[] = [];
for (let part of array) {
if (part !== value) {
presentParts.push(part);
}
}
return presentParts;
}
interface IUrl {
protocol: string;
host: string;
pathname: string;
hash: string;
searchParams: { [key: string]: string } | URLSearchParams;
}
function getSearchParam(
searchParams: { [key: string]: string } | URLSearchParams,
name: string
) {
if (searchParams instanceof URLSearchParams) {
return searchParams.get(name);
}
return searchParams[name];
}
function toURL(url: IUrl) {
let protocol = url.protocol.replace(":", "");
let result = new URL(`${protocol}://${url.host}`);
result.pathname = url.pathname;
result.hash = url.hash;
let keys: string[];
if (url.searchParams instanceof URLSearchParams) {
keys = [...url.searchParams.keys()];
} else {
keys = Object.keys(url.searchParams);
}
for (let param of keys) {
let value = getSearchParam(url.searchParams, param);
if (value) {
result.searchParams.set(param, value);
}
}
return result.href;
}
function isYouTubeURL(url: IUrl) {
return isYouTubeEmbedURL(url) || isYouTubeWatchURL(url);
}
// Youtube embed code
// - youtu.be/
// - youtube-nocookie.com/embed/
// - youtube.com/embed
function isYouTubeEmbedURL(url: IUrl) {
return (
url.host === "youtu.be" ||
(["www.youtube-nocookie.com", "www.youtube.com"].includes(url.host) &&
url.pathname.startsWith("/embed/"))
);
}
// Youtube watch URLs
// - www.youtube.com/watch?v=
// - m.youtube.com/watch?v=
// - youtube.com/watch?v=
function isYouTubeWatchURL(url: IUrl) {
return (
["www.youtube.com", "m.youtube.com", "youtube.com"].includes(url.host) &&
url.pathname.startsWith("/watch") &&
getSearchParam(url.searchParams, "v") !== null
);
}
function normalizeYouTubeURL(url: IUrl) {
let normalized =
url.host === "www.youtube-nocookie.com"
? new URL("https://www.youtube-nocookie.com")
: new URL("https://www.youtube.com");
let timestamp =
getSearchParam(url.searchParams, "t") ||
getSearchParam(url.searchParams, "start");
if (timestamp) {
normalized.searchParams.set("start", timestamp);
}
if (isYouTubeEmbedURL(url)) {
let parts = without<string>(url.pathname.split("/"), "");
let id = parts.pop();
normalized.pathname = `/embed/${id}`;
let controls = getSearchParam(url.searchParams, "controls"); | }
} else {
normalized.pathname = `/embed/${getSearchParam(url.searchParams, "v")}`;
}
return normalized.href;
}
// Dailymotion URLs
// - https://www.dailymotion.com/video/:id
function isDailymotionURL(url: IUrl) {
return (
url.host?.match(/^[^.]*\.dailymotion\.com/) &&
(url.pathname?.startsWith("/video") ||
url.pathname?.match(/^\/[^\\]*\/video\//))
);
}
function normalizeDailymotionURL(url: IUrl) {
let normalized = new URL("https://www.dailymotion.com");
for (let param in url.searchParams) {
let value = getSearchParam(url.searchParams, param);
if (value) {
normalized.searchParams.set(param, value);
}
}
let parts = without<string>(url.pathname.split("/"), "");
let part = parts.shift();
while (part !== "video") {
part = parts.shift();
}
let id = parts.shift();
normalized.pathname = `/embed/video/${id}`;
return normalized.href;
}
// Vimeo URLs
// - https://vimeo.com/:id
// - https://www.vimeo.com/m/#/:id
// - https://player.vimeo.com/embed/
function isVimeoURL(url: IUrl) {
return (
url.host === "vimeo.com" ||
url.host === "player.vimeo.com" ||
url.host === "www.vimeo.com"
);
}
function isVimeoEmbedURL(url: IUrl) {
return url.host === "player.vimeo.com";
}
function normalizeVimeoURL(url: IUrl) {
if (isVimeoEmbedURL(url)) {
// Enforce https ~
url.protocol = "https";
return toURL(url);
}
let normalized = new URL("https://player.vimeo.com");
let parts = without<string>(url.pathname.split("/"), "");
let id = parts.shift();
normalized.pathname = `/video/${id}`;
return normalized.href;
}
// Brightcove URLs
// - https://players.brightcove.com/
// - https://bcove.video
// - https://bcove.me
function isBrightcoveURL(url: IUrl) {
return (
url.host === "players.brightcove.net" ||
url.host === "bcove.video" ||
url.host === "bcove.me"
);
}
function isTwitchURL(url: IUrl) {
return (
isTwitchStreamURL(url) || isTwitchChannelURL(url) || isTwitchClipURL(url)
);
}
function isTwitchStreamURL(url: IUrl) {
return (
(url.host === "player.twitch.tv" &&
getSearchParam(url.searchParams, "video")) ||
((url.host === "www.twitch.tv" || url.host === "m.twitch.tv") &&
url.pathname.startsWith("/videos"))
);
}
function isTwitchClipURL(url: IUrl) {
return (
url.host === "clips.twitch.tv" ||
(url.host === "www.twitch.tv" && url.pathname.match(/\/clip\/.*/))
);
}
function isTwitchChannelURL(url: IUrl) {
return (
(url.host === "player.twitch.tv" &&
getSearchParam(url.searchParams, "channel")) ||
((url.host === "www.twitch.tv" || url.host === "m.twitch.tv") &&
!url.pathname.startsWith("/videos"))
);
}
function normalizeTwitchURL(url: IUrl) {
let parent = getSearchParam(url.searchParams, "parent") ?? "www.example.com";
if (isTwitchClipURL(url)) {
let clipID =
getSearchParam(url.searchParams, "clip") ??
without<string>(url.pathname.split("/"), "").pop();
return `https://clips.twitch.tv/embed?clip=${clipID}&parent=${parent}`;
} else if (isTwitchChannelURL(url)) {
let channelID =
getSearchParam(url.searchParams, "channel") ??
without<string>(url.pathname.split("/"), "").pop();
return `https://player.twitch.tv/?channel=${channelID}&parent=${parent}`;
} else {
let videoID =
getSearchParam(url.searchParams, "video") ??
without<string>(url.pathname.split("/"), "").pop();
return `https://player.twitch.tv/?video=${videoID}&parent=${parent}`;
}
}
export enum Provider {
YOUTUBE = "YOUTUBE",
VIMEO = "VIMEO",
BRIGHTCOVE = "BRIGHTCOVE",
DAILYMOTION = "DAILYMOTION",
TWITCH = "TWITCH",
OTHER = "OTHER",
}
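// Illustrative usage (added for clarity; the URLs are examples). A WHATWG URL
// instance has all the fields of IUrl, so it can be passed to identify() directly:
//
//   identify(new URL("https://youtu.be/dQw4w9WgXcQ?t=42"))
//   // => { provider: Provider.YOUTUBE,
//   //      url: "https://www.youtube.com/embed/dQw4w9WgXcQ?start=42" }
//
//   identify(new URL("https://example.com/video"))
//   // => null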
export function identify(url: IUrl) {
if (isYouTubeURL(url)) {
return {
provider: Provider.YOUTUBE,
url: normalizeYouTubeURL(url),
};
}
if (isVimeoURL(url)) {
return {
provider: Provider.VIMEO,
url: normalizeVimeoURL(url),
};
}
if (isDailymotionURL(url)) {
return {
provider: Provider.DAILYMOTION,
url: normalizeDailymotionURL(url),
};
}
if (isBrightcoveURL(url)) {
return {
provider: Provider.BRIGHTCOVE,
url: toURL(url),
};
}
if (isTwitchURL(url)) {
return {
provider: Provider.TWITCH,
url: normalizeTwitchURL(url),
};
}
return null;
} | if (controls) {
normalized.searchParams.set("controls", controls); |
permission.rs | use crate::error::CoreError;
use derive_more::Display;
use serde::Serialize;
use std::collections::{HashMap, HashSet};
#[derive(Serialize, Debug, Display, Eq, PartialEq, Clone, Copy, Hash)]
#[serde(transparent)]
#[display(fmt = "{}", name)]
pub struct Permission {
name: &'static str,
#[serde(skip)]
bit: u16,
}
impl Permission {
pub const fn new(name: &'static str, bit: u16) -> Permission {
Permission { name, bit }
}
pub fn name(&self) -> &str {
&self.name
}
pub fn bit(&self) -> u16 {
self.bit
}
}
impl Into<u16> for Permission {
fn into(self) -> u16 {
self.bit
}
}
#[derive(Clone)]
pub struct PermissionsManager {
permissions: HashSet<Permission>,
implication_map: HashMap<Permission, HashSet<Permission>>,
assignable_map: HashMap<Permission, HashSet<Permission>>,
}
impl PermissionsManager {
pub fn new(permissions: Vec<Permission>) -> Self {
let mut permission_set = HashSet::new();
for perm in permissions {
permission_set.insert(perm);
}
PermissionsManager {
permissions: permission_set,
implication_map: HashMap::new(),
assignable_map: HashMap::new(),
}
}
// We should probably verify that added permissions are all part of what was
// passed to the constructor, but that check is not implemented yet.
pub fn assigns(mut self, perm1: Permission, perm2: Permission) -> Self {
self.assignable_map.entry(perm1).or_insert(HashSet::new()).insert(perm2);
self
}
pub fn implies(mut self, perm1: Permission, perm2: Permission) -> Self {
self.implication_map.entry(perm1).or_insert(HashSet::new()).insert(perm2);
self
}
pub fn implied_by(&self, permission: Permission) -> HashSet<Permission> {
let mut implied = HashSet::new();
implied.insert(permission);
if let Some(set) = self.implication_map.get(&permission) {
for perm in set {
implied.extend(self.implied_by(*perm));
}
}
implied
}
pub fn assignable_by(&self, permission: Permission) -> HashSet<Permission> {
let mut assignable = HashSet::new();
for perm in self.implied_by(permission) {
if let Some(set) = self.assignable_map.get(&perm) {
for perm in set {
assignable.insert(*perm);
}
}
}
assignable
}
pub fn implied_by_bits(&self, permission_bits: u16) -> HashSet<Permission> {
let mut implied = HashSet::new();
for perm in self.bits_to_permissions(permission_bits) {
if perm.bit & permission_bits == perm.bit {
implied.extend(self.implied_by(perm));
}
}
implied
}
pub fn assignable_by_bits(&self, permission_bits: u16) -> HashSet<Permission> {
let mut assignable = HashSet::new();
for perm in self.bits_to_permissions(permission_bits) {
assignable.extend(self.assignable_by(perm));
}
assignable
}
pub fn bits_to_permissions(&self, bits: u16) -> HashSet<Permission> {
let mut perms = HashSet::new();
for perm in &self.permissions { | }
}
perms
}
pub fn require_permission(&self, permissions_we_have: u16, permission_required: Permission) -> Result<(), CoreError> {
if !self.implied_by_bits(permissions_we_have).contains(&permission_required) {
return Err(CoreError::MissingPermissions {
required: permission_required,
})
}
Ok(())
}
}
#[cfg(test)]
mod test {
// copied from https://riptutorial.com/rust/example/4149/create-a-hashset-macro because im lazy as fuck
macro_rules! set {
( $( $x:expr ),* $(,)? ) => { // Match zero or more comma delimited items
{
let mut temp_set = HashSet::new(); // Create a mutable HashSet
$(
temp_set.insert($x); // Insert each item matched into the HashSet
)*
temp_set // Return the populated HashSet
}
};
}
use crate::permission::{Permission, PermissionsManager};
use std::collections::HashSet;
const PERM1: Permission = Permission::new("1", 0x1);
const PERM2: Permission = Permission::new("2", 0x2);
const PERM3: Permission = Permission::new("3", 0x4);
const PERM4: Permission = Permission::new("4", 0x8);
const PERM5: Permission = Permission::new("5", 0x10);
const PERM6: Permission = Permission::new("6", 0x20);
fn permission_manager() -> PermissionsManager {
PermissionsManager::new(vec![PERM1, PERM2, PERM3, PERM4, PERM5])
.implies(PERM1, PERM2)
.implies(PERM2, PERM3)
.implies(PERM4, PERM5)
.assigns(PERM4, PERM2)
.assigns(PERM2, PERM3)
.assigns(PERM4, PERM5)
.assigns(PERM5, PERM6)
}
#[test]
fn test_implication() {
assert_eq!(permission_manager().implied_by(PERM1), set![PERM1, PERM2, PERM3]);
assert_eq!(permission_manager().implied_by(PERM4), set![PERM4, PERM5]);
assert_eq!(permission_manager().implied_by_bits(0x1 | 0x8), set![
PERM1, PERM2, PERM3, PERM4, PERM5,
]);
}
#[test]
fn test_assignment() {
assert_eq!(permission_manager().assignable_by(PERM4), set![PERM2, PERM5, PERM6]);
}
} | if perm.bit() & bits == perm.bit() {
perms.insert(*perm); |
test_vacs.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# @Author: José Sánchez-Gallego ([email protected])
# @Date: 2018-07-08
# @Filename: vacs.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: Brian Cherinka
# @Last modified time: 2018-07-09 17:27:59
import importlib
import astropy.io.fits
import pytest
from marvin.contrib.vacs import VACMixIn
from marvin.tools.maps import Maps
class Te | bject):
def test_subclasses(self):
assert len(VACMixIn.__subclasses__()) > 0
def test_mangahi(self):
my_map = Maps('7443-12701')
assert hasattr(my_map, 'vacs')
#assert my_map.vacs.mangahi is not None # figure out how to test based on release
def test_vac_container(self):
my_map = Maps('8485-1901')
assert my_map.vacs.__class__.__name__ == 'VACContainer'
assert list(my_map.vacs) is not None
def test_vacs_return(self, plateifu, release):
if release in ['MPL-4', 'MPL-5', 'MPL-6', 'MPL-8']:
pytest.skip()
vacs = VACMixIn.__subclasses__()
for vac in vacs:
for include_class in vac.include:
__ = importlib.import_module(str(include_class.__module__))
obj = include_class(plateifu, release=release)
assert hasattr(obj, 'vacs')
assert hasattr(obj.vacs, vac.name)
assert getattr(obj.vacs, vac.name) is not None
@pytest.mark.xfail(reason="will not work with tested releases it does not have")
class TestMangaHI(object):
def test_return_type(self, plateifu):
my_map = Maps(plateifu)
assert isinstance(my_map.vacs.HI, object)
| stVACs(o |
generic_scheduler.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import (
"context"
"fmt"
"math"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
utiltrace "k8s.io/apiserver/pkg/util/trace"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/core/equivalence"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
)
//tanle
// GetAllNamespaces gets all namespaces //tanle
func GetAllNamespaces(nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) []string {
names := sets.String{}
for _, node := range nodes {
pods := nodeNameToInfo[node.Name].Pods()
for _, pod := range pods {
if pod.Namespace != "kube-system" {
names.Insert(pod.Namespace)
}
}
}
return names.List()
}
// GetMaxResource retrieves the total allocatable resources of the cluster //tanle
func GetMaxResource(nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) *schedulercache.Resource {
result := &schedulercache.Resource{}
for _, node := range nodes {
nodeInfo := nodeNameToInfo[node.Name]
result.MilliCPU += nodeInfo.AllocatableResource().MilliCPU
result.Memory += nodeInfo.AllocatableResource().Memory
result.ScalarResources[schedulercache.NvidiaGPU] += nodeInfo.AllocatableResource().ScalarResources[schedulercache.NvidiaGPU]
result.EphemeralStorage += nodeInfo.AllocatableResource().EphemeralStorage
}
return result
}
// GetResourceRequest obtains the resource request of a pod
func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
result := &schedulercache.Resource{}
for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Requests)
}
// take max_resource(sum_pod, any_init_container)
for _, container := range pod.Spec.InitContainers {
for rName, rQuantity := range container.Resources.Requests {
switch rName {
case v1.ResourceMemory:
if mem := rQuantity.Value(); mem > result.Memory {
result.Memory = mem
}
case v1.ResourceEphemeralStorage:
if ephemeralStorage := rQuantity.Value(); ephemeralStorage > result.EphemeralStorage {
result.EphemeralStorage = ephemeralStorage
}
case v1.ResourceCPU:
if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU {
result.MilliCPU = cpu
}
case schedulercache.NvidiaGPU:
if gpu := rQuantity.Value(); gpu > result.ScalarResources[schedulercache.NvidiaGPU] {
result.ScalarResources[schedulercache.NvidiaGPU] = gpu
}
}
}
}
return result
}
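// Example (illustrative, hypothetical numbers): two app containers requesting 300m CPU
// each sum to 600m, while a single init container requesting 1000m runs alone before
// them, so the effective request is max(600m, 1000m) = 1000m. The helper below only
// mirrors that rule with plain integers; it is not used by the scheduler.
func exampleEffectiveCPURequest(appContainersMilli, initContainersMilli []int64) int64 {
var sum int64
for _, c := range appContainersMilli {
sum += c
}
effective := sum
for _, ic := range initContainersMilli {
if ic > effective {
effective = ic
}
}
// exampleEffectiveCPURequest([]int64{300, 300}, []int64{1000}) == 1000
return effective
}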
// AddIRA adds a ResourceList into a Resource for IRA accounting.
func AddIRA(result *schedulercache.Resource, rl v1.ResourceList) *schedulercache.Resource {
// result := &schedulercache.Resource{}
// if r == nil {
// return result
// }
if result.ScalarResources == nil {
result.ScalarResources = map[v1.ResourceName]int64{} // guard: writing to a nil map below would panic
}
milliCPU := int64(0)
nvidiaGPU := int64(0)
memory := int64(0)
for rName, rQuant := range rl {
switch rName {
case v1.ResourceCPU:
milliCPU += rQuant.MilliValue()
case v1.ResourceMemory:
memory += rQuant.Value()
case schedulercache.NvidiaGPU:
nvidiaGPU += rQuant.Value()
}
}
result.ScalarResources[schedulercache.NvidiaGPU] += nvidiaGPU
result.Memory += memory
if nvidiaGPU == 0 { // ignore cpu usage of GPU jobs
result.MilliCPU += milliCPU
}
return result
}
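// Sketch (hypothetical helper, not called by the scheduler): the accounting rule above
// means a GPU pod's CPU request is not charged against the user's CPU usage, while a
// CPU-only pod is charged in full.
func exampleChargedCPU(milliCPU, nvidiaGPU int64) int64 {
if nvidiaGPU == 0 {
return milliCPU // CPU-only pod: charge the full CPU request
}
return 0 // GPU pod: its CPU usage is ignored under this policy
}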
// GetResourceUsageByNamespace obtains the resource usage of a namespace (user) //tanle
func GetResourceUsageByNamespace(nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node, ns string) *schedulercache.Resource {
result := &schedulercache.Resource{}
for _, node := range nodes {
// count the scheduled pods.
pods := nodeNameToInfo[node.Name].Pods()
for _, pod := range pods {
if pod.Namespace == ns {
// glog.Infof("pod: %s status.phase %s", pod.Name, pod.Status.Phase)
for _, container := range pod.Spec.Containers {
result = AddIRA(result, container.Resources.Requests)
}
}
}
}
// glog.Infof("namespace: %s usage %s", ns, *result)
return result
}
type Demand struct {
cpu float64
mem float64
gpu float64
gpu_mem float64
beta float64
}
var namespaces = []string{"user1", "user2", "user3", "user4"}
func getDemand(namespace string) *Demand {
resDemand := &Demand{cpu: 0.0, mem: 0.0, gpu: 0.0, gpu_mem: 0.0}
// (com, mem, beta) = (80000.0,4000,2.5)
// (com, mem, beta) = (112000.0,4000,3.5)
// (com, mem, beta) = (192000.0,4000,6.0)
// (com, mem, beta) = (288000.0,4000,9.0)
switch namespace {
case "user1":
resDemand.cpu = 16000
resDemand.mem = 4 * GI
resDemand.gpu = 1
resDemand.gpu_mem = 2 * GI
resDemand.beta = 5 * resDemand.cpu / resDemand.gpu
case "user2":
resDemand.cpu = 16000
resDemand.mem = 8 * GI
resDemand.gpu = 1
resDemand.beta = 7 * resDemand.cpu / resDemand.gpu
case "user3":
resDemand.cpu = 16000
resDemand.mem = 6 * GI
resDemand.gpu = 1
resDemand.gpu_mem = 2 * GI
resDemand.beta = 12 * resDemand.cpu / resDemand.gpu
case "user4":
resDemand.cpu = 16000
resDemand.mem = 12 * GI
resDemand.gpu = 1
resDemand.gpu_mem = 2 * GI
resDemand.beta = 18 * resDemand.cpu / resDemand.gpu
}
return resDemand
}
const MILLI = 1000
const GI = 1024 * 1024 * 1024
func getTraditionalDemand_GPU(namespace string) *Demand {
resDemand := &Demand{cpu: 0.0, mem: 0.0, gpu: 0.0, beta: 0.0}
switch namespace {
case "user1":
resDemand.cpu = 1 * MILLI
resDemand.mem = 12 * GI
resDemand.gpu = 1
case "user2":
resDemand.cpu = 1 * MILLI
resDemand.mem = 12 * GI
resDemand.gpu = 1
case "user3":
resDemand.cpu = 1 * MILLI
resDemand.mem = 12 * GI
resDemand.gpu = 1
case "user4":
resDemand.cpu = 1 * MILLI
resDemand.mem = 12 * GI
resDemand.gpu = 1
}
return resDemand
}
func getTraditionalDemand(namespace string) *Demand {
resDemand := &Demand{cpu: 0.0, mem: 0.0, gpu: 0.0, beta: 0.0}
switch namespace {
case "user1":
resDemand.cpu = 16 * MILLI
resDemand.mem = 12 * GI
resDemand.gpu = 0
case "user2":
resDemand.cpu = 16 * MILLI
resDemand.mem = 12 * GI
resDemand.gpu = 0
case "user3":
resDemand.cpu = 16 * MILLI
resDemand.mem = 12 * GI
resDemand.gpu = 0
case "user4":
resDemand.cpu = 16 * MILLI
resDemand.mem = 12 * GI
resDemand.gpu = 0
default:
resDemand.cpu = 16 * MILLI
resDemand.mem = 12 * GI
resDemand.gpu = 0
}
return resDemand
}
func AllocateES(namespaces []string, capacity schedulercache.Resource) []*schedulercache.Resource {
n := len(namespaces)
shares := make([]*schedulercache.Resource, n)
for i := 0; i < n; i++ {
shares[i] = &schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{}} // initialize the map before the GPU share is written below
shares[i].MilliCPU = int64(float64(capacity.MilliCPU) / float64(n))
shares[i].Memory = int64(float64(capacity.Memory) / float64(n))
shares[i].ScalarResources[schedulercache.NvidiaGPU] = int64(float64(capacity.ScalarResources[schedulercache.NvidiaGPU]) / float64(n))
}
return shares
}
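// Usage sketch (illustrative, hypothetical capacity): splitting 8 CPUs, 16 GiB of memory
// and 4 GPUs equally between two users gives each share 4000 milli-CPU, 8 GiB and 2 GPUs.
// The helper name is illustrative only.
func exampleAllocateES() {
capacity := schedulercache.Resource{
MilliCPU: 8000,
Memory: 16 * GI,
ScalarResources: map[v1.ResourceName]int64{schedulercache.NvidiaGPU: 4},
}
for i, s := range AllocateES([]string{"user1", "user2"}, capacity) {
fmt.Printf("user%d share: cpu=%dm mem=%d gpu=%d\n", i+1, s.MilliCPU, s.Memory, s.ScalarResources[schedulercache.NvidiaGPU])
}
}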
func AllocateStatic(namespaces []string, capacity schedulercache.Resource) []*schedulercache.Resource {
n := len(namespaces)
shares := make([]*schedulercache.Resource, n)
// shares[0] = &schedulercache.Resource{}
// shares[0].MilliCPU = 2000
// shares[0].Memory = 24 * GI
// shares[0].NvidiaGPU = 2
// shares[1] = &schedulercache.Resource{}
// shares[1].MilliCPU = 86000
// shares[1].Memory = (240 - 24) * GI
// shares[1].NvidiaGPU = 2
shares[0] = &schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{}}
shares[0].MilliCPU = 32000
shares[0].Memory = 24 * GI
shares[0].ScalarResources[schedulercache.NvidiaGPU] = 0
shares[1] = &schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{}}
shares[1].MilliCPU = 0
shares[1].Memory = 12 * GI
shares[1].ScalarResources[schedulercache.NvidiaGPU] = 0
shares[2] = &schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{}}
shares[2].MilliCPU = 16000
shares[2].Memory = 12 * GI
shares[2].ScalarResources[schedulercache.NvidiaGPU] = 0
shares[3] = &schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{}}
shares[3].MilliCPU = 16000
shares[3].Memory = 12 * GI
shares[3].ScalarResources[schedulercache.NvidiaGPU] = 0
return shares
}
const (
// minFeasibleNodesToFind is the minimum number of nodes that would be scored
// in each scheduling cycle. This is a semi-arbitrary value to ensure that a
// certain minimum of nodes are checked for feasibility. This in turn helps
// ensure a minimum level of spreading.
minFeasibleNodesToFind = 100
)
// FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type.
type FailedPredicateMap map[string][]algorithm.PredicateFailureReason
// FitError describes a fit error of a pod.
type FitError struct {
Pod *v1.Pod
NumAllNodes int
FailedPredicates FailedPredicateMap
}
// ErrNoNodesAvailable is used to describe the error that no nodes available to schedule pods.
var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods")
const (
// NoNodeAvailableMsg is used to format message when no nodes available.
NoNodeAvailableMsg = "0/%v nodes are available"
)
// Error returns detailed information of why the pod failed to fit on each node
func (f *FitError) Error() string {
reasons := make(map[string]int)
for _, predicates := range f.FailedPredicates {
for _, pred := range predicates {
reasons[pred.GetReason()]++
}
}
sortReasonsHistogram := func() []string {
reasonStrings := []string{}
for k, v := range reasons {
reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
}
sort.Strings(reasonStrings)
return reasonStrings
}
reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", f.NumAllNodes, strings.Join(sortReasonsHistogram(), ", "))
return reasonMsg
}
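// Example (illustrative): a FitError aggregates the per-node predicate failures into a
// histogram-style message, e.g. "0/3 nodes are available: 1 node(s) had taints, 2 Insufficient cpu."
// The helper below is only a sketch and is not used by the scheduler.
func exampleFitErrorMessage() {
fe := &FitError{
Pod: &v1.Pod{},
NumAllNodes: 3,
FailedPredicates: FailedPredicateMap{
"node-a": {predicates.NewFailureReason("Insufficient cpu")},
"node-b": {predicates.NewFailureReason("Insufficient cpu")},
"node-c": {predicates.NewFailureReason("node(s) had taints")},
},
}
fmt.Println(fe.Error())
}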
type genericScheduler struct {
cache schedulercache.Cache
equivalenceCache *equivalence.Cache
schedulingQueue SchedulingQueue
predicates map[string]algorithm.FitPredicate
priorityMetaProducer algorithm.PriorityMetadataProducer
predicateMetaProducer algorithm.PredicateMetadataProducer
prioritizers []algorithm.PriorityConfig
extenders []algorithm.SchedulerExtender
lastNodeIndex uint64
alwaysCheckAllPredicates bool
cachedNodeInfoMap map[string]*schedulercache.NodeInfo
volumeBinder *volumebinder.VolumeBinder
pvcLister corelisters.PersistentVolumeClaimLister
disablePreemption bool
percentageOfNodesToScore int32
client clientset.Interface //tanle placing the pod on different devices.
}
// CreatePodOnOtherDevice builds a copy of the pod with its image, command and resource demands switched to the other device type (CPU <-> GPU). //[tanle]
func CreatePodOnOtherDevice(pod *v1.Pod) *v1.Pod {
toBeGPU := true
// check the device of the pod
for _, container := range pod.Spec.Containers {
if strings.Contains(container.Image, "gpu") {
toBeGPU = false
break
}
}
// delete the pod in kube system
replicatedPod := pod.DeepCopy()
for cName, container := range replicatedPod.Spec.Containers {
if toBeGPU {
container.Image = "lenhattan86/ira:gpu"
} else {
container.Image = "lenhattan86/ira:cpu"
}
// switch commands
mainCmd := container.Command[3]
container.Command[3] = container.Command[2]
container.Command[2] = mainCmd
// switch demands
mainDemand := container.Command[5]
container.Command[5] = container.Command[4]
container.Command[4] = mainDemand
cpuDemand, gpuDemand, memory := schedulercache.GetSecondaryDemand(pod)
for rName := range container.Resources.Requests {
quantity := container.Resources.Requests[rName]
switch rName {
case v1.ResourceCPU:
quantity.SetMilli(cpuDemand)
case schedulercache.NvidiaGPU:
quantity.Set(gpuDemand)
case v1.ResourceMemory:
quantity.Set(memory * GI)
}
container.Resources.Requests[rName] = quantity
container.Resources.Limits[rName] = quantity
}
replicatedPod.Spec.Containers[cName] = container
}
replicatedPod.ResourceVersion = ""
replicatedPod.Spec.NodeName = ""
replicatedPod.Annotations = nil
return replicatedPod
// pod.Spec.Containers = replicatedPod.Spec.Containers
// // p.Client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
// if err := g.client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}); err != nil {
// runtime.HandleError(fmt.Errorf("unable to DELETE pod in kubectl %T: %v", pod, err))
// }
// if _, err := g.client.CoreV1().Pods(replicatedPod.Namespace).Create(replicatedPod); err != nil {
// runtime.HandleError(fmt.Errorf("unable to CREATE pod in kubectl %T: %v", replicatedPod, err))
// }
}
func (g *genericScheduler) placeOnOtherDevice(pod *v1.Pod, replicatedPod *v1.Pod) {
pod.Spec.Containers = replicatedPod.Spec.Containers
// p.Client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
if err := g.client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}); err != nil {
runtime.HandleError(fmt.Errorf("unable to DELETE pod in kubectl %T: %v", pod, err))
}
if _, err := g.client.CoreV1().Pods(replicatedPod.Namespace).Create(replicatedPod); err != nil {
runtime.HandleError(fmt.Errorf("unable to CREATE pod in kubectl %T: %v", replicatedPod, err))
}
}
// SynClusterInfo refreshes the cluster information shared with schedulercache from the current node list. //tanle
func (g *genericScheduler) SynClusterInfo(nodeLister algorithm.NodeLister) {
nodes, err := nodeLister.List()
if err != nil {
return
}
schedulercache.SynClusterInfo(g.cachedNodeInfoMap, nodes)
}
// Schedule tries to schedule the given pod to one of the nodes in the node list.
// If it succeeds, it will return the name of the node.
// If it fails, it will return a FitError error with reasons.
func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister) (string, error) {
trace := utiltrace.New(fmt.Sprintf("Scheduling %s/%s", pod.Namespace, pod.Name))
defer trace.LogIfLong(100 * time.Millisecond)
if err := podPassesBasicChecks(pod, g.pvcLister); err != nil {
return "", err
}
nodes, err := nodeLister.List()
if err != nil {
return "", err
}
if len(nodes) == 0 {
return "", ErrNoNodesAvailable
}
// Used for all fit and priority funcs.
err = g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap)
if err != nil {
return "", err
}
schedulercache.SynClusterInfo(g.cachedNodeInfoMap, nodes) // tanle syn cluster info manually
trace.Step("Computing predicates")
startPredicateEvalTime := time.Now()
filteredNodes, failedPredicateMap, err := g.findNodesThatFit(pod, nodes)
// tanle
if schedulercache.NUM_RESERVE_GPU > 0 {
if schedulercache.DoNotUseReserveResource(schedulercache.IsGpuPod(pod)) {
filteredNodes = []*v1.Node{}
return "", fmt.Errorf("[tanle] not enough reserved resource for this user: %s's pod: %s ", pod.Namespace, pod.Name)
}
}
//[tanle] additional scheduling & device placement
if schedulercache.ENABLE_OFFLINE_SCHEDULER {
if len(filteredNodes) > 0 {
isAdmit, isSwitch := advancedSchedule(pod, g.cachedNodeInfoMap, nodes)
if !isAdmit {
filteredNodes = []*v1.Node{}
return "", fmt.Errorf("not enough resource for this user: %s's pod: %s ", pod.Namespace, pod.Name)
} else if isSwitch {
//switch jobs
glog.Infof("Place the pod %s on other device", pod.Name)
fakePod := CreatePodOnOtherDevice(pod)
availNodes, _, _ := g.findNodesThatFit(fakePod, nodes)
if len(availNodes) > 0 {
g.placeOnOtherDevice(pod, fakePod)
filteredNodes = availNodes
} else {
filteredNodes = []*v1.Node{}
return "", fmt.Errorf("not enough resource for this user: %s's pod: %s ", pod.Namespace, pod.Name)
}
}
}
}
if err != nil {
return "", err
}
if len(filteredNodes) == 0 {
return "", &FitError{
Pod: pod,
NumAllNodes: len(nodes),
FailedPredicates: failedPredicateMap,
}
}
metrics.SchedulingAlgorithmPredicateEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPredicateEvalTime))
metrics.SchedulingLatency.WithLabelValues(metrics.PredicateEvaluation).Observe(metrics.SinceInSeconds(startPredicateEvalTime))
trace.Step("Prioritizing")
startPriorityEvalTime := time.Now()
// When only one node after predicate, just use it.
if len(filteredNodes) == 1 {
metrics.SchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime))
return filteredNodes[0].Name, nil
}
metaPrioritiesInterface := g.priorityMetaProducer(pod, g.cachedNodeInfoMap)
priorityList, err := PrioritizeNodes(pod, g.cachedNodeInfoMap, metaPrioritiesInterface, g.prioritizers, filteredNodes, g.extenders)
if err != nil {
return "", err
}
metrics.SchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime))
metrics.SchedulingLatency.WithLabelValues(metrics.PriorityEvaluation).Observe(metrics.SinceInSeconds(startPriorityEvalTime))
trace.Step("Selecting host")
return g.selectHost(priorityList)
}
var CAPACITY = schedulercache.Resource{MilliCPU: 384 * 1000, Memory: 1152 * GI, ScalarResources: map[v1.ResourceName]int64{schedulercache.NvidiaGPU: 12}}
func advancedSchedule(pod *v1.Pod,
nodeNameToInfo map[string]*schedulercache.NodeInfo,
nodes []*v1.Node) (bool, bool) {
isAdmit := true
isSwitch := false
uID := -1
for u := 0; u < len(namespaces); u++ {
if namespaces[u] == pod.Namespace {
uID = u
}
}
if uID < 0 {
return true, false
}
//tanle: DRF or FDRF allocation
// glog.Infof("findNodesThatFit ")
// namespaces := GetAllNamespaces(nodeNameToInfo, nodes)
currentUsage := GetResourceUsageByNamespace(nodeNameToInfo, nodes, pod.Namespace)
podDemand := GetResourceRequest(pod)
// capacity := GetMaxResource(nodeNameToInfo, nodes)
n := len(namespaces)
shares := make([]*schedulercache.Resource, n)
switch schedulercache.SCHEDULER {
case schedulercache.ES:
shares = AllocateES(namespaces, CAPACITY)
case schedulercache.Static:
shares = AllocateStatic(namespaces, CAPACITY)
case schedulercache.NaiveDRF:
shares = AllocateNaiveDRF(namespaces, CAPACITY)
// case schedulercache.AlloX:
// shares = AllocateAlloX(namespaces, CAPACITY)
}
milliCPU, gpu, memInGi := schedulercache.GetSecondaryDemand(pod)
secDemand := &schedulercache.Resource{MilliCPU: milliCPU, ScalarResources: map[v1.ResourceName]int64{schedulercache.NvidiaGPU: gpu}, Memory: memInGi * GI}
isAdmit, isSwitch = Fit(podDemand, secDemand, currentUsage, shares[uID])
if !isAdmit {
// glog.Infof("shares %s", shares)
// glog.Infof("Rejected pod: %s %s primeDemand %s secDemand %s usage %s ", pod.Namespace, pod.Name, podDemand, secDemand, currentUsage)
return isAdmit, isSwitch
}
// glog.Infof("admit pod: %s %s primeDemand %s secDemand %s usage %s ", pod.Namespace, pod.Name, podDemand, secDemand, currentUsage)
return isAdmit, isSwitch
}
// Fit fits the demand to the share.
func Fit(priDemand *schedulercache.Resource, secDemand *schedulercache.Resource, currentUsage *schedulercache.Resource, share *schedulercache.Resource) (bool, bool) {
isFit := true
isSwitch := false
if priDemand.Memory+currentUsage.Memory > share.Memory {
// glog.Infof("demand.Memory: %d, currentUsage.Memory %d, share.Memory %d", demand.Memory, currentUsage.Memory, share.Memory)
return false, isSwitch
}
// for GPU jobs
if priDemand.ScalarResources[schedulercache.NvidiaGPU]+currentUsage.ScalarResources[schedulercache.NvidiaGPU] > share.ScalarResources[schedulercache.NvidiaGPU] {
if secDemand.MilliCPU+currentUsage.MilliCPU > share.MilliCPU {
isFit = false
} else {
glog.Infof("enough resource for this GPU job on CPU priDemand %s secDemand %s", priDemand, secDemand)
isSwitch = true
}
}
// for CPU jobs
if (priDemand.ScalarResources[schedulercache.NvidiaGPU] == 0) && (priDemand.MilliCPU+currentUsage.MilliCPU > share.MilliCPU) {
if secDemand.ScalarResources[schedulercache.NvidiaGPU]+currentUsage.ScalarResources[schedulercache.NvidiaGPU] > share.ScalarResources[schedulercache.NvidiaGPU] {
isFit = false
} else {
glog.Infof("enough resource this CPU job on GPU for priDemand %s secDemand %s", priDemand, secDemand)
isSwitch = true
}
}
return isFit, isSwitch
}
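// Example (illustrative, hypothetical numbers): a user whose 4-GPU share is fully used
// submits another GPU pod. The primary (GPU) demand does not fit, but the pod's CPU-only
// secondary demand still fits the CPU share, so Fit returns (true, true): admit the pod
// and switch it to the other device. The helper name is illustrative only.
func exampleFit() {
share := &schedulercache.Resource{MilliCPU: 32000, Memory: 64 * GI, ScalarResources: map[v1.ResourceName]int64{schedulercache.NvidiaGPU: 4}}
usage := &schedulercache.Resource{MilliCPU: 8000, Memory: 16 * GI, ScalarResources: map[v1.ResourceName]int64{schedulercache.NvidiaGPU: 4}}
priDemand := &schedulercache.Resource{MilliCPU: 1000, Memory: 2 * GI, ScalarResources: map[v1.ResourceName]int64{schedulercache.NvidiaGPU: 1}}
secDemand := &schedulercache.Resource{MilliCPU: 16000, Memory: 2 * GI}
isFit, isSwitch := Fit(priDemand, secDemand, usage, share)
fmt.Println(isFit, isSwitch) // true true
}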
// AllocateNaiveDRF implements Dominant Resource Fairness (DRF) over (cpu, mem, gpu) demands.
func AllocateNaiveDRF(namespaces []string, capacity schedulercache.Resource) []*schedulercache.Resource {
const N_RES_DIM = 3
n := len(namespaces)
// capacityArray := [3]float64{capacity.MilliCPU, capacity.Memory, capacity.NvidiaGPU}
maxDemand := make([]float64, n)
dorminantRates := make([]float64, N_RES_DIM)
demands := make([]Demand, n)
for i := 0; i < n; i++ {
demands[i] = *getTraditionalDemand(namespaces[i])
}
// glog.Infof("capacity: %s", capacity)
// glog.Infof("demands: %s", demands)
// convert demand based on beta
for i := 0; i < n; i++ {
normalizedDemand := [3]float64{0, 0, 0}
normalizedDemand[0] = float64(demands[i].cpu) / float64(capacity.MilliCPU)
normalizedDemand[1] = float64(demands[i].mem) / float64(capacity.Memory)
normalizedDemand[2] = float64(demands[i].gpu) / float64(capacity.ScalarResources[schedulercache.NvidiaGPU])
// get the dominant rate = max demand / capacity
maxDemand[i] = 0.0
for r := 0; r < N_RES_DIM; r++ {
if maxDemand[i] < normalizedDemand[r] {
maxDemand[i] = normalizedDemand[r]
}
}
for r := 0; r < N_RES_DIM; r++ {
dorminantRates[r] += normalizedDemand[r] / maxDemand[i]
}
// glog.Infof("normalizedDemand: ", normalizedDemand)
}
// glog.Infof("dorminantRates: ", dorminantRates)
// get the total dominant share over all users
dorminantShare := 0.0
for r := 0; r < N_RES_DIM; r++ {
if dorminantShare < dorminantRates[r] {
dorminantShare = dorminantRates[r]
}
}
// compute the share for each user = capacity/dominantRate * demand/dominantDemand
shares := make([]*schedulercache.Resource, n)
for i := 0; i < n; i++ {
ratio := dorminantShare * maxDemand[i]
shares[i] = &schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{}} // initialize the map before the GPU share is written below
shares[i].MilliCPU = int64(demands[i].cpu / ratio)
shares[i].Memory = int64(demands[i].mem / ratio)
shares[i].ScalarResources[schedulercache.NvidiaGPU] = int64(Round(demands[i].gpu/ratio, 0.5, 4))
}
return shares
}
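// Worked example (illustrative, hypothetical numbers): with a capacity of 64000 milli-CPU
// and 8 GPUs, a user demanding 16000 milli-CPU and 1 GPU has a dominant share of
// max(16000/64000, 1/8) = 0.25, while a user demanding 4000 milli-CPU and 4 GPUs has
// max(4000/64000, 4/8) = 0.5. DRF scales each user's allocation by its dominant share,
// which is what the normalization with maxDemand above computes. Helper name is illustrative.
func exampleDominantShare(cpuDemand, gpuDemand, cpuCapacity, gpuCapacity float64) float64 {
// exampleDominantShare(16000, 1, 64000, 8) == 0.25
return math.Max(cpuDemand/cpuCapacity, gpuDemand/gpuCapacity)
}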
// Round rounds val to the given number of decimal places; for positive values a fractional digit of at least roundOn rounds up. // tanle
func Round(val float64, roundOn float64, places int) float64 {
pow := math.Pow(10, float64(places))
digit := pow * val
_, div := math.Modf(digit)
var round float64
if val > 0 {
if div >= roundOn {
round = math.Ceil(digit)
} else {
round = math.Floor(digit)
}
} else {
if div >= roundOn {
round = math.Floor(digit)
} else {
round = math.Ceil(digit)
}
}
return round / pow
}
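// Usage sketch (illustrative): with roundOn = 0.5 this behaves like ordinary rounding
// for positive values; it is used above to round fractional GPU counts to whole GPUs.
func exampleRoundUsage() {
fmt.Println(Round(2.6, 0.5, 0)) // 3
fmt.Println(Round(2.4, 0.5, 0)) // 2
fmt.Println(Round(1.2345, 0.5, 2)) // 1.23
}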
type IndexSorter struct {
Target []float64
Indices []int
}
func NewSorter(t []float64) IndexSorter {
iv := make([]int, len(t))
for i := range iv {
iv[i] = i
}
return IndexSorter{Target: t, Indices: iv}
}
func (s IndexSorter) Len() int { return len(s.Target) }
func (s IndexSorter) Less(i, j int) bool { return s.Target[i] < s.Target[j] }
func (s IndexSorter) Swap(i, j int) {
s.Target[i], s.Target[j] = s.Target[j], s.Target[i]
s.Indices[i], s.Indices[j] = s.Indices[j], s.Indices[i]
}
// AllocateAlloX computes per-user shares using the AlloX pricing algorithm.
func AllocateAlloX(namespaces []string, capacity schedulercache.Resource) []*schedulercache.Resource {
nResource := 3
n := len(namespaces)
shares := make([]*schedulercache.Resource, n)
// step 1: sort users based on beta (ascending)
betas := make([]float64, n)
demands := make([]Demand, n)
for i := 0; i < n; i++ {
demands[i] = *getDemand(namespaces[i])
betas[i] = demands[i].beta
}
// sort.Float64s(betas)
s := NewSorter(betas)
sort.Sort(s)
betas = s.Target
sortedIds := s.Indices
sortedDemands := make([]Demand, n)
for i := 0; i < n; i++ {
sortedDemands[i] = demands[sortedIds[i]] // sort demands according to betas
}
// step 2: initialization
price := make([]float64, nResource)
price[0] = 1 // for cpu
price[1] = betas[n-1] // for GPU
useralloc := UserAlloc(betas, price)
prevUserAlloc := useralloc
currLoad := sumResourceNorm(useralloc) // normalized load
gpumin := n - 1
flag := true
if n == 0 {
return shares
} else if n == 1 {
useralloc[0].cpu = 1.0
useralloc[0].mem = 1.0
useralloc[0].gpu = 1.0
}
// step 3: pricing
for flag {
if currLoad.cpu <= currLoad.gpu {
prevLoad := sumResourceNorm(prevUserAlloc)
useralloc = prevUserAlloc
useralloc[gpumin].cpu = prevUserAlloc[gpumin].cpu + (prevLoad.gpu-prevLoad.cpu)*float64(CAPACITY.MilliCPU)/2
useralloc[gpumin].gpu = prevUserAlloc[gpumin].gpu - (prevLoad.gpu-prevLoad.cpu)*float64(CAPACITY.ScalarResources[schedulercache.NvidiaGPU])/2
break
}
gpumin = gpumin - 1
if gpumin < 0 {
print("###gpumin is negative####")
break
}
price[0] = 1
price[1] = betas[gpumin]
prevUserAlloc = useralloc
useralloc = UserAlloc(betas, price)
currLoad = sumResourceNorm(useralloc)
}
sumAlloc := sumResource(useralloc)
//
for i := 0; i < n; i++ {
demand := demands[i]
shares[sortedIds[i]] = &schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{}} // initialize the map before the GPU share is written below
shares[sortedIds[i]].MilliCPU = int64(useralloc[i].cpu * float64(capacity.MilliCPU) / sumAlloc.cpu)
gpu := Round(useralloc[i].gpu*float64(capacity.ScalarResources[schedulercache.NvidiaGPU])/sumAlloc.gpu, 0.5, 0)
shares[sortedIds[i]].ScalarResources[schedulercache.NvidiaGPU] = int64(gpu)
mem := (float64(shares[sortedIds[i]].MilliCPU) / demand.cpu * demand.mem) + (float64(shares[sortedIds[i]].ScalarResources[schedulercache.NvidiaGPU]) / demand.gpu * demand.gpu_mem)
roundGi := Round(mem/GI, 0.5, 0)
shares[sortedIds[i]].Memory = int64(roundGi * GI)
}
return shares
}
func sumResource(resources []*Demand) *Demand {
result := &Demand{0, 0, 0, 0, 0}
for _, res := range resources {
result.cpu = result.cpu + res.cpu
result.mem = result.mem + res.mem
result.gpu = result.gpu + res.gpu
}
return result
}
func sumResourceNorm(resources []*Demand) *Demand {
result := &Demand{0, 0, 0, 0, 0}
for _, res := range resources {
result.cpu = result.cpu + res.cpu
result.mem = result.mem + res.mem
result.gpu = result.gpu + res.gpu
}
result.cpu = result.cpu / (float64)(CAPACITY.MilliCPU)
result.mem = result.mem / (float64)(CAPACITY.Memory)
result.gpu = result.gpu / (float64)(CAPACITY.ScalarResources[schedulercache.NvidiaGPU])
return result
}
func UserAlloc(betas []float64, currentPrices []float64) []*Demand {
n := len(betas)
userAlloc := make([]*Demand, n)
for j := 0; j < n; j++ {
beta := betas[j]
alloc := &Demand{0, 0, 0, 0, 0}
if beta < currentPrices[1] {
alloc.cpu = 1
alloc.gpu = 0
} else { // beta >= price: put it on the GPU
alloc.cpu = 0
alloc.gpu = 1 / currentPrices[1]
}
userAlloc[j] = alloc
}
return userAlloc
}
// Prioritizers returns a slice containing all the scheduler's priority
// functions and their config. It is exposed for testing only.
func (g *genericScheduler) Prioritizers() []algorithm.PriorityConfig {
return g.prioritizers
}
// Predicates returns a map containing all the scheduler's predicate
// functions. It is exposed for testing only.
func (g *genericScheduler) Predicates() map[string]algorithm.FitPredicate {
return g.predicates
}
// findMaxScores returns the indexes of nodes in the "priorityList" that has the highest "Score".
func findMaxScores(priorityList schedulerapi.HostPriorityList) []int {
maxScoreIndexes := make([]int, 0, len(priorityList)/2)
maxScore := priorityList[0].Score
for i, hp := range priorityList {
if hp.Score > maxScore {
maxScore = hp.Score
maxScoreIndexes = maxScoreIndexes[:0]
maxScoreIndexes = append(maxScoreIndexes, i)
} else if hp.Score == maxScore {
maxScoreIndexes = append(maxScoreIndexes, i)
}
}
return maxScoreIndexes
}
// selectHost takes a prioritized list of nodes and then picks one
// in a round-robin manner from the nodes that had the highest score.
func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList) (string, error) {
if len(priorityList) == 0 {
return "", fmt.Errorf("empty priorityList")
}
maxScores := findMaxScores(priorityList)
ix := int(g.lastNodeIndex % uint64(len(maxScores)))
g.lastNodeIndex++
return priorityList[maxScores[ix]].Host, nil
}
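// Example (illustrative): when several nodes share the top score, selectHost cycles
// through them on successive calls, spreading placements across equally good nodes.
// The helper name is illustrative only.
func exampleSelectHostRoundRobin() {
g := &genericScheduler{}
list := schedulerapi.HostPriorityList{
{Host: "node-a", Score: 10},
{Host: "node-b", Score: 10},
{Host: "node-c", Score: 4},
}
for i := 0; i < 4; i++ {
host, _ := g.selectHost(list)
fmt.Println(host) // node-a, node-b, node-a, node-b
}
}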
// preempt finds nodes with pods that can be preempted to make room for "pod" to
// schedule. It chooses one of the nodes and preempts the pods on the node and
// returns 1) the node, 2) the list of preempted pods if such a node is found,
// 3) A list of pods whose nominated node name should be cleared, and 4) any
// possible error.
func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister, scheduleErr error) (*v1.Node, []*v1.Pod, []*v1.Pod, error) {
// Scheduler may return various types of errors. Consider preemption only if
// the error is of type FitError.
fitError, ok := scheduleErr.(*FitError)
if !ok || fitError == nil {
return nil, nil, nil, nil
}
err := g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap)
nodes, _ := nodeLister.List()
schedulercache.SynClusterInfo(g.cachedNodeInfoMap, nodes) // tanle syn cluster info manually
if err != nil {
return nil, nil, nil, err
}
if !podEligibleToPreemptOthers(pod, g.cachedNodeInfoMap) {
glog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name)
return nil, nil, nil, nil
}
allNodes, err := nodeLister.List()
if err != nil {
return nil, nil, nil, err
}
if len(allNodes) == 0 {
return nil, nil, nil, ErrNoNodesAvailable
}
potentialNodes := nodesWherePreemptionMightHelp(allNodes, fitError.FailedPredicates)
if len(potentialNodes) == 0 {
glog.V(3).Infof("Preemption will not help schedule pod %v/%v on any node.", pod.Namespace, pod.Name)
// In this case, we should clean-up any existing nominated node name of the pod.
return nil, nil, []*v1.Pod{pod}, nil
}
pdbs, err := g.cache.ListPDBs(labels.Everything())
if err != nil {
return nil, nil, nil, err
}
nodeToVictims, err := selectNodesForPreemption(pod, g.cachedNodeInfoMap, potentialNodes, g.predicates,
g.predicateMetaProducer, g.schedulingQueue, pdbs)
if err != nil {
return nil, nil, nil, err
}
// We will only check nodeToVictims with extenders that support preemption.
// Extenders which do not support preemption may later prevent preemptor from being scheduled on the nominated
// node. In that case, scheduler will find a different host for the preemptor in subsequent scheduling cycles.
nodeToVictims, err = g.processPreemptionWithExtenders(pod, nodeToVictims)
if err != nil {
return nil, nil, nil, err
}
candidateNode := pickOneNodeForPreemption(nodeToVictims)
if candidateNode == nil {
return nil, nil, nil, err
}
// Lower priority pods nominated to run on this node, may no longer fit on
// this node. So, we should remove their nomination. Removing their
// nomination updates these pods and moves them to the active queue. It
// lets scheduler find another place for them.
nominatedPods := g.getLowerPriorityNominatedPods(pod, candidateNode.Name)
if nodeInfo, ok := g.cachedNodeInfoMap[candidateNode.Name]; ok {
return nodeInfo.Node(), nodeToVictims[candidateNode].Pods, nominatedPods, err
}
return nil, nil, nil, fmt.Errorf(
"preemption failed: the target node %s has been deleted from scheduler cache",
candidateNode.Name)
}
// processPreemptionWithExtenders processes preemption with extenders
func (g *genericScheduler) processPreemptionWithExtenders(
pod *v1.Pod,
nodeToVictims map[*v1.Node]*schedulerapi.Victims,
) (map[*v1.Node]*schedulerapi.Victims, error) {
if len(nodeToVictims) > 0 {
for _, extender := range g.extenders {
if extender.SupportsPreemption() && extender.IsInterested(pod) {
newNodeToVictims, err := extender.ProcessPreemption(
pod,
nodeToVictims,
g.cachedNodeInfoMap,
)
if err != nil {
if extender.IsIgnorable() {
glog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
extender, err)
continue
}
return nil, err
}
// Replace nodeToVictims with new result after preemption. So the
// rest of extenders can continue use it as parameter.
nodeToVictims = newNodeToVictims
// If node list becomes empty, no preemption can happen regardless of other extenders.
if len(nodeToVictims) == 0 {
break
}
}
}
}
return nodeToVictims, nil
}
// getLowerPriorityNominatedPods returns pods whose priority is smaller than the
// priority of the given "pod" and are nominated to run on the given node.
// Note: We could possibly check if the nominated lower priority pods still fit
// and return those that no longer fit, but that would require lots of
// manipulation of NodeInfo and PredicateMeta per nominated pod. It may not be
// worth the complexity, especially because we generally expect to have a very
// small number of nominated pods per node.
func (g *genericScheduler) getLowerPriorityNominatedPods(pod *v1.Pod, nodeName string) []*v1.Pod {
pods := g.schedulingQueue.WaitingPodsForNode(nodeName)
if len(pods) == 0 {
return nil
}
var lowerPriorityPods []*v1.Pod
podPriority := util.GetPodPriority(pod)
for _, p := range pods {
if util.GetPodPriority(p) < podPriority {
lowerPriorityPods = append(lowerPriorityPods, p)
}
}
return lowerPriorityPods
}
// numFeasibleNodesToFind returns the number of feasible nodes that once found, the scheduler stops
// its search for more feasible nodes.
func (g *genericScheduler) numFeasibleNodesToFind(numAllNodes int32) int32 {
if numAllNodes < minFeasibleNodesToFind || g.percentageOfNodesToScore <= 0 ||
g.percentageOfNodesToScore >= 100 {
return numAllNodes
}
numNodes := numAllNodes * g.percentageOfNodesToScore / 100
if numNodes < minFeasibleNodesToFind {
return minFeasibleNodesToFind
}
return numNodes
}
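// Example (illustrative, hypothetical numbers): with percentageOfNodesToScore = 10,
// a 5000-node cluster only has 500 nodes checked per scheduling cycle; small results
// are clamped to minFeasibleNodesToFind, and clusters below that minimum are checked in full.
func exampleNumFeasibleNodes() {
g := &genericScheduler{percentageOfNodesToScore: 10}
fmt.Println(g.numFeasibleNodesToFind(5000)) // 500
fmt.Println(g.numFeasibleNodesToFind(120)) // 100 (clamped to minFeasibleNodesToFind)
fmt.Println(g.numFeasibleNodesToFind(80)) // 80 (fewer than the minimum, so all nodes)
}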
// Filters the nodes to find the ones that fit based on the given predicate functions
// Each node is passed through the predicate functions to determine if it is a fit
func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, FailedPredicateMap, error) {
var filtered []*v1.Node
failedPredicateMap := FailedPredicateMap{}
if len(g.predicates) == 0 {
filtered = nodes
} else {
allNodes := int32(g.cache.NodeTree().NumNodes)
numNodesToFind := g.numFeasibleNodesToFind(allNodes)
// Create filtered list with enough space to avoid growing it
// and allow assigning.
filtered = make([]*v1.Node, numNodesToFind)
errs := errors.MessageCountMap{}
var (
predicateResultLock sync.Mutex
filteredLen int32
equivClass *equivalence.Class
)
ctx, cancel := context.WithCancel(context.Background())
// We can use the same metadata producer for all nodes.
meta := g.predicateMetaProducer(pod, g.cachedNodeInfoMap)
if g.equivalenceCache != nil {
// getEquivalenceClassInfo will return immediately if no equivalence pod found
equivClass = equivalence.NewClass(pod)
}
checkNode := func(i int) {
var nodeCache *equivalence.NodeCache
nodeName := g.cache.NodeTree().Next()
if g.equivalenceCache != nil {
nodeCache, _ = g.equivalenceCache.GetNodeCache(nodeName)
}
fits, failedPredicates, err := podFitsOnNode(
pod,
meta,
g.cachedNodeInfoMap[nodeName],
g.predicates,
g.cache,
nodeCache,
g.schedulingQueue,
g.alwaysCheckAllPredicates,
equivClass,
)
if err != nil {
predicateResultLock.Lock()
errs[err.Error()]++
predicateResultLock.Unlock()
return
}
if fits {
length := atomic.AddInt32(&filteredLen, 1)
if length > numNodesToFind {
cancel()
atomic.AddInt32(&filteredLen, -1)
} else {
filtered[length-1] = g.cachedNodeInfoMap[nodeName].Node()
}
} else {
predicateResultLock.Lock()
failedPredicateMap[nodeName] = failedPredicates
predicateResultLock.Unlock()
}
}
// Stops searching for more nodes once the configured number of feasible nodes
// are found.
workqueue.ParallelizeUntil(ctx, 16, int(allNodes), checkNode)
filtered = filtered[:filteredLen]
if len(errs) > 0 {
return []*v1.Node{}, FailedPredicateMap{}, errors.CreateAggregateFromMessageCountMap(errs)
}
}
if len(filtered) > 0 && len(g.extenders) != 0 {
for _, extender := range g.extenders {
if !extender.IsInterested(pod) {
continue
}
filteredList, failedMap, err := extender.Filter(pod, filtered, g.cachedNodeInfoMap)
if err != nil {
if extender.IsIgnorable() {
glog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
extender, err)
continue
} else {
return []*v1.Node{}, FailedPredicateMap{}, err
}
}
for failedNodeName, failedMsg := range failedMap {
if _, found := failedPredicateMap[failedNodeName]; !found {
failedPredicateMap[failedNodeName] = []algorithm.PredicateFailureReason{}
}
failedPredicateMap[failedNodeName] = append(failedPredicateMap[failedNodeName], predicates.NewFailureReason(failedMsg))
}
filtered = filteredList
if len(filtered) == 0 {
break
}
}
}
return filtered, failedPredicateMap, nil
}
// addNominatedPods adds pods with equal or greater priority which are nominated
// to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether
// any pod was found, 2) augmented meta data, 3) augmented nodeInfo.
func addNominatedPods(podPriority int32, meta algorithm.PredicateMetadata,
nodeInfo *schedulercache.NodeInfo, queue SchedulingQueue) (bool, algorithm.PredicateMetadata,
*schedulercache.NodeInfo) {
if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen only in tests.
return false, meta, nodeInfo
}
nominatedPods := queue.WaitingPodsForNode(nodeInfo.Node().Name)
if nominatedPods == nil || len(nominatedPods) == 0 {
return false, meta, nodeInfo
}
var metaOut algorithm.PredicateMetadata
if meta != nil {
metaOut = meta.ShallowCopy()
}
nodeInfoOut := nodeInfo.Clone()
for _, p := range nominatedPods {
if util.GetPodPriority(p) >= podPriority {
nodeInfoOut.AddPod(p)
if metaOut != nil {
metaOut.AddPod(p, nodeInfoOut)
}
}
}
return true, metaOut, nodeInfoOut
}
// podFitsOnNode checks whether a node given by NodeInfo satisfies the given predicate functions.
// For given pod, podFitsOnNode will check if any equivalent pod exists and try to reuse its cached
// predicate results as possible.
// This function is called from two different places: Schedule and Preempt.
// When it is called from Schedule, we want to test whether the pod is schedulable
// on the node with all the existing pods on the node plus higher and equal priority
// pods nominated to run on the node.
// When it is called from Preempt, we should remove the victims of preemption and
// add the nominated pods. Removal of the victims is done by SelectVictimsOnNode().
// It removes victims from meta and NodeInfo before calling this function.
func podFitsOnNode(
pod *v1.Pod,
meta algorithm.PredicateMetadata,
info *schedulercache.NodeInfo,
predicateFuncs map[string]algorithm.FitPredicate,
cache schedulercache.Cache,
nodeCache *equivalence.NodeCache,
queue SchedulingQueue,
alwaysCheckAllPredicates bool,
equivClass *equivalence.Class,
) (bool, []algorithm.PredicateFailureReason, error) {
var (
eCacheAvailable bool
failedPredicates []algorithm.PredicateFailureReason
)
podsAdded := false
// We run predicates twice in some cases. If the node has greater or equal priority
// nominated pods, we run them when those pods are added to meta and nodeInfo.
// If all predicates succeed in this pass, we run them again when these
// nominated pods are not added. This second pass is necessary because some
// predicates such as inter-pod affinity may not pass without the nominated pods.
// If there are no nominated pods for the node or if the first run of the
// predicates fail, we don't run the second pass.
// We consider only equal or higher priority pods in the first pass, because
// the current "pod" must yield to those pods and must not take a space opened
// for running them. It is ok if the current "pod" takes resources freed up for
// lower priority pods.
// Requiring that the new pod is schedulable in both circumstances ensures that
// we are making a conservative decision: predicates like resources and inter-pod
// anti-affinity are more likely to fail when the nominated pods are treated
// as running, while predicates like pod affinity are more likely to fail when
// the nominated pods are treated as not running. We can't just assume the
// nominated pods are running because they are not running right now and in fact,
// they may end up getting scheduled to a different node.
for i := 0; i < 2; i++ {
metaToUse := meta
nodeInfoToUse := info
if i == 0 {
podsAdded, metaToUse, nodeInfoToUse = addNominatedPods(util.GetPodPriority(pod), meta, info, queue)
} else if !podsAdded || len(failedPredicates) != 0 {
break
}
// Bypass eCache if node has any nominated pods.
// TODO(bsalamat): consider using eCache and adding proper eCache invalidations
// when pods are nominated or their nominations change.
eCacheAvailable = equivClass != nil && nodeCache != nil && !podsAdded
for _, predicateKey := range predicates.Ordering() {
var (
fit bool
reasons []algorithm.PredicateFailureReason
err error
)
//TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric
if predicate, exist := predicateFuncs[predicateKey]; exist {
if eCacheAvailable {
fit, reasons, err = nodeCache.RunPredicate(predicate, predicateKey, pod, metaToUse, nodeInfoToUse, equivClass, cache)
} else {
fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse)
}
if err != nil {
return false, []algorithm.PredicateFailureReason{}, err
}
if !fit {
// the pod does not fit on this node according to this predicate; record the failure reasons
failedPredicates = append(failedPredicates, reasons...)
// if alwaysCheckAllPredicates is false, short circuit all predicates when one predicate fails.
if !alwaysCheckAllPredicates {
glog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate " +
"evaluation is short circuited and there are chances " +
"of other predicates failing as well.")
break
}
}
}
}
}
return len(failedPredicates) == 0, failedPredicates, nil
}
// PrioritizeNodes prioritizes the nodes by running the individual priority functions in parallel.
// Each priority function is expected to set a score of 0-10
// 0 is the lowest priority score (least preferred node) and 10 is the highest
// Each priority function can also have its own weight
// The node scores returned by the priority function are multiplied by the weights to get weighted scores
// All scores are finally combined (added) to get the total weighted scores of all nodes
func PrioritizeNodes(
pod *v1.Pod,
nodeNameToInfo map[string]*schedulercache.NodeInfo,
meta interface{},
priorityConfigs []algorithm.PriorityConfig,
nodes []*v1.Node,
extenders []algorithm.SchedulerExtender,
) (schedulerapi.HostPriorityList, error) {
// If no priority configs are provided, then the EqualPriority function is applied
// This is required to generate the priority list in the required format
if len(priorityConfigs) == 0 && len(extenders) == 0 {
result := make(schedulerapi.HostPriorityList, 0, len(nodes))
for i := range nodes {
hostPriority, err := EqualPriorityMap(pod, meta, nodeNameToInfo[nodes[i].Name])
if err != nil {
return nil, err
}
result = append(result, hostPriority)
}
return result, nil
}
var (
mu = sync.Mutex{}
wg = sync.WaitGroup{}
errs []error
)
appendError := func(err error) {
mu.Lock()
defer mu.Unlock()
errs = append(errs, err)
}
results := make([]schedulerapi.HostPriorityList, len(priorityConfigs), len(priorityConfigs))
for i, priorityConfig := range priorityConfigs {
if priorityConfig.Function != nil {
// DEPRECATED
wg.Add(1)
go func(index int, config algorithm.PriorityConfig) {
defer wg.Done()
var err error
results[index], err = config.Function(pod, nodeNameToInfo, nodes)
if err != nil {
appendError(err)
}
}(i, priorityConfig)
} else {
results[i] = make(schedulerapi.HostPriorityList, len(nodes))
}
}
processNode := func(index int) {
nodeInfo := nodeNameToInfo[nodes[index].Name]
var err error
for i := range priorityConfigs {
if priorityConfigs[i].Function != nil {
continue
}
results[i][index], err = priorityConfigs[i].Map(pod, meta, nodeInfo)
if err != nil {
appendError(err)
return
}
}
}
workqueue.Parallelize(16, len(nodes), processNode)
for i, priorityConfig := range priorityConfigs {
if priorityConfig.Reduce == nil {
continue
}
wg.Add(1)
go func(index int, config algorithm.PriorityConfig) {
defer wg.Done()
if err := config.Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
appendError(err)
}
if glog.V(10) {
for _, hostPriority := range results[index] {
glog.Infof("%v -> %v: %v, Score: (%d)", pod.Name, hostPriority.Host, config.Name, hostPriority.Score)
}
}
}(i, priorityConfig)
}
// Wait for all computations to be finished.
wg.Wait()
if len(errs) != 0 {
return schedulerapi.HostPriorityList{}, errors.NewAggregate(errs)
}
// Summarize all scores.
result := make(schedulerapi.HostPriorityList, 0, len(nodes))
for i := range nodes {
result = append(result, schedulerapi.HostPriority{Host: nodes[i].Name, Score: 0})
for j := range priorityConfigs {
result[i].Score += results[j][i].Score * priorityConfigs[j].Weight
}
}
if len(extenders) != 0 && nodes != nil {
combinedScores := make(map[string]int, len(nodeNameToInfo))
for _, extender := range extenders {
if !extender.IsInterested(pod) {
continue
}
wg.Add(1)
go func(ext algorithm.SchedulerExtender) {
defer wg.Done()
prioritizedList, weight, err := ext.Prioritize(pod, nodes)
if err != nil {
// Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities
return
}
mu.Lock()
for i := range *prioritizedList {
host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score
combinedScores[host] += score * weight
}
mu.Unlock()
}(extender)
}
// wait for all go routines to finish
wg.Wait()
for i := range result {
result[i].Score += combinedScores[result[i].Host]
}
}
if glog.V(10) {
for i := range result {
glog.V(10).Infof("Host %s => Score %d", result[i].Host, result[i].Score)
}
}
return result, nil
}
// EqualPriorityMap is a prioritizer function that gives an equal weight of one to all nodes
func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
}
return schedulerapi.HostPriority{
Host: node.Name,
Score: 1,
}, nil
}
// pickOneNodeForPreemption chooses one node among the given nodes. It assumes
// pods in each map entry are ordered by decreasing priority.
// It picks a node based on the following criteria:
// 1. A node with minimum number of PDB violations.
// 2. A node with minimum highest priority victim is picked.
// 3. Ties are broken by sum of priorities of all victims.
// 4. If there are still ties, node with the minimum number of victims is picked.
// 5. If there are still ties, the first such node is picked (sort of randomly).
// The 'minNodes1' and 'minNodes2' are being reused here to save the memory
// allocation and garbage collection time.
func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims) *v1.Node {
if len(nodesToVictims) == 0 {
return nil
}
minNumPDBViolatingPods := math.MaxInt32
var minNodes1 []*v1.Node
lenNodes1 := 0
for node, victims := range nodesToVictims {
if len(victims.Pods) == 0 {
// We found a node that doesn't need any preemption. Return it!
// This should happen rarely when one or more pods are terminated between
// the time that scheduler tries to schedule the pod and the time that
// preemption logic tries to find nodes for preemption.
return node
}
numPDBViolatingPods := victims.NumPDBViolations
if numPDBViolatingPods < minNumPDBViolatingPods {
minNumPDBViolatingPods = numPDBViolatingPods
minNodes1 = nil
lenNodes1 = 0
}
if numPDBViolatingPods == minNumPDBViolatingPods {
minNodes1 = append(minNodes1, node)
lenNodes1++
}
}
if lenNodes1 == 1 {
return minNodes1[0]
}
// There are more than one node with minimum number PDB violating pods. Find
// the one with minimum highest priority victim.
minHighestPriority := int32(math.MaxInt32)
var minNodes2 = make([]*v1.Node, lenNodes1)
lenNodes2 := 0
for i := 0; i < lenNodes1; i++ {
node := minNodes1[i]
victims := nodesToVictims[node]
// highestPodPriority is the highest priority among the victims on this node.
highestPodPriority := util.GetPodPriority(victims.Pods[0])
if highestPodPriority < minHighestPriority {
minHighestPriority = highestPodPriority
lenNodes2 = 0
}
if highestPodPriority == minHighestPriority {
minNodes2[lenNodes2] = node
lenNodes2++
}
}
if lenNodes2 == 1 {
return minNodes2[0]
}
// There are a few nodes with minimum highest priority victim. Find the
// smallest sum of priorities.
minSumPriorities := int64(math.MaxInt64)
lenNodes1 = 0
for i := 0; i < lenNodes2; i++ {
var sumPriorities int64
node := minNodes2[i]
for _, pod := range nodesToVictims[node].Pods {
// We add MaxInt32+1 to all priorities to make all of them >= 0. This is
// needed so that a node with a few pods with negative priority is not
// picked over a node with a smaller number of pods with the same negative
// priority (and similar scenarios).
sumPriorities += int64(util.GetPodPriority(pod)) + int64(math.MaxInt32+1)
}
if sumPriorities < minSumPriorities {
minSumPriorities = sumPriorities
lenNodes1 = 0
}
if sumPriorities == minSumPriorities {
minNodes1[lenNodes1] = node
lenNodes1++
}
}
if lenNodes1 == 1 {
return minNodes1[0]
}
// There are a few nodes with minimum highest priority victim and sum of priorities.
// Find one with the minimum number of pods.
minNumPods := math.MaxInt32
lenNodes2 = 0
for i := 0; i < lenNodes1; i++ {
node := minNodes1[i]
numPods := len(nodesToVictims[node].Pods)
if numPods < minNumPods {
minNumPods = numPods
lenNodes2 = 0
}
if numPods == minNumPods {
minNodes2[lenNodes2] = node
lenNodes2++
}
}
// At this point, even if there are more than one node with the same score,
// return the first one.
if lenNodes2 > 0 {
return minNodes2[0]
}
glog.Errorf("Error in logic of node scoring for preemption. We should never reach here!")
return nil
}
// selectNodesForPreemption finds all the nodes with possible victims for
// preemption in parallel.
func selectNodesForPreemption(pod *v1.Pod,
nodeNameToInfo map[string]*schedulercache.NodeInfo,
potentialNodes []*v1.Node,
predicates map[string]algorithm.FitPredicate,
metadataProducer algorithm.PredicateMetadataProducer,
queue SchedulingQueue,
pdbs []*policy.PodDisruptionBudget,
) (map[*v1.Node]*schedulerapi.Victims, error) {
nodeToVictims := map[*v1.Node]*schedulerapi.Victims{}
var resultLock sync.Mutex
// We can use the same metadata producer for all nodes.
meta := metadataProducer(pod, nodeNameToInfo)
checkNode := func(i int) {
nodeName := potentialNodes[i].Name
var metaCopy algorithm.PredicateMetadata
if meta != nil {
metaCopy = meta.ShallowCopy()
}
pods, numPDBViolations, fits := selectVictimsOnNode(pod, metaCopy, nodeNameToInfo[nodeName], predicates, queue, pdbs)
if fits {
resultLock.Lock()
victims := schedulerapi.Victims{
Pods: pods,
NumPDBViolations: numPDBViolations,
}
nodeToVictims[potentialNodes[i]] = &victims
resultLock.Unlock()
}
}
workqueue.Parallelize(16, len(potentialNodes), checkNode)
return nodeToVictims, nil
}
// filterPodsWithPDBViolation groups the given "pods" into two groups of "violatingPods"
// and "nonViolatingPods" based on whether their PDBs will be violated if they are
// preempted.
// This function is stable and does not change the order of received pods. So, if it
// receives a sorted list, grouping will preserve the order of the input list.
func filterPodsWithPDBViolation(pods []interface{}, pdbs []*policy.PodDisruptionBudget) (violatingPods, nonViolatingPods []*v1.Pod) {
for _, obj := range pods {
pod := obj.(*v1.Pod)
pdbForPodIsViolated := false
// A pod with no labels will not match any PDB. So, no need to check.
if len(pod.Labels) != 0 {
for _, pdb := range pdbs {
if pdb.Namespace != pod.Namespace {
continue
}
selector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
if err != nil {
continue
}
// A PDB with a nil or empty selector matches nothing.
if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
continue
}
// We have found a matching PDB.
if pdb.Status.PodDisruptionsAllowed <= 0 {
pdbForPodIsViolated = true
break
}
}
}
if pdbForPodIsViolated {
violatingPods = append(violatingPods, pod)
} else {
nonViolatingPods = append(nonViolatingPods, pod)
}
}
return violatingPods, nonViolatingPods
}
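// Example (illustrative, hypothetical objects): a PDB in namespace "user1" selecting
// app=web with no disruptions left makes the matching pod a "violating" victim, while
// a pod the PDB does not select stays in the non-violating group. The helper name is
// illustrative only.
func exampleFilterPodsWithPDBViolation() {
pdb := &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Namespace: "user1"},
Spec: policy.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}},
Status: policy.PodDisruptionBudgetStatus{PodDisruptionsAllowed: 0},
}
protected := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "user1", Labels: map[string]string{"app": "web"}}}
other := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "user1", Labels: map[string]string{"app": "batch"}}}
violating, nonViolating := filterPodsWithPDBViolation([]interface{}{protected, other}, []*policy.PodDisruptionBudget{pdb})
fmt.Printf("violating=%d nonViolating=%d\n", len(violating), len(nonViolating)) // violating=1 nonViolating=1
}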
// selectVictimsOnNode finds minimum set of pods on the given node that should
// be preempted in order to make enough room for "pod" to be scheduled. The
// minimum set selected is subject to the constraint that a higher-priority pod
// is never preempted when a lower-priority pod could be (higher/lower relative
// to one another, not relative to the preemptor "pod").
// The algorithm first checks if the pod can be scheduled on the node when all the
// lower priority pods are gone. If so, it sorts all the lower priority pods by
// their priority and then puts them into two groups of those whose PodDisruptionBudget
// will be violated if preempted and other non-violating pods. Both groups are
// sorted by priority. It first tries to reprieve as many PDB violating pods as
// possible and then does the same for non-PDB-violating pods while checking
// that the "pod" can still fit on the node.
// NOTE: This function assumes that it is never called if "pod" cannot be scheduled
// due to pod affinity, node affinity, or node anti-affinity reasons. None of
// these predicates can be satisfied by removing more pods from the node.
func selectVictimsOnNode(
pod *v1.Pod,
meta algorithm.PredicateMetadata,
nodeInfo *schedulercache.NodeInfo,
fitPredicates map[string]algorithm.FitPredicate,
queue SchedulingQueue,
pdbs []*policy.PodDisruptionBudget,
) ([]*v1.Pod, int, bool) {
potentialVictims := util.SortableList{CompFunc: util.HigherPriorityPod}
nodeInfoCopy := nodeInfo.Clone()
removePod := func(rp *v1.Pod) {
nodeInfoCopy.RemovePod(rp)
if meta != nil {
meta.RemovePod(rp)
}
}
addPod := func(ap *v1.Pod) {
nodeInfoCopy.AddPod(ap)
if meta != nil {
meta.AddPod(ap, nodeInfoCopy)
}
}
// As the first step, remove all the lower priority pods from the node and
// check if the given pod can be scheduled.
podPriority := util.GetPodPriority(pod)
for _, p := range nodeInfoCopy.Pods() {
if util.GetPodPriority(p) < podPriority {
potentialVictims.Items = append(potentialVictims.Items, p)
removePod(p)
}
}
potentialVictims.Sort()
// If the new pod does not fit after removing all the lower priority pods,
// we are almost done and this node is not suitable for preemption. The only condition
// that we should check is if the "pod" is failing to schedule due to pod affinity
// failure.
// TODO(bsalamat): Consider checking affinity to lower priority pods if feasible with reasonable performance.
if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, nil, queue, false, nil); !fits {
if err != nil {
glog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
}
return nil, 0, false
}
var victims []*v1.Pod
numViolatingVictim := 0
// Try to reprieve as many pods as possible. We first try to reprieve the PDB
// violating victims and then other non-violating ones. In both cases, we start
// from the highest priority victims.
violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims.Items, pdbs)
reprievePod := func(p *v1.Pod) bool {
addPod(p)
fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, nil, queue, false, nil)
if !fits {
removePod(p)
victims = append(victims, p)
glog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", p.Name, nodeInfo.Node().Name)
}
return fits
}
for _, p := range violatingVictims {
if !reprievePod(p) {
numViolatingVictim++
}
}
// Now we try to reprieve non-violating victims.
for _, p := range nonViolatingVictims {
reprievePod(p)
}
return victims, numViolatingVictim, true
}
// nodesWherePreemptionMightHelp returns a list of nodes with failed predicates
// that may be satisfied by removing pods from the node.
func nodesWherePreemptionMightHelp(nodes []*v1.Node, failedPredicatesMap FailedPredicateMap) []*v1.Node {
potentialNodes := []*v1.Node{}
for _, node := range nodes {
unresolvableReasonExist := false
failedPredicates, found := failedPredicatesMap[node.Name]
// If we assume that scheduler looks at all nodes and populates the failedPredicateMap
// (which is the case today), the !found case should never happen, but we'd prefer
// to rely less on such assumptions in the code when checking does not impose
// significant overhead.
// Also, we currently assume all failures returned by extender as resolvable.
for _, failedPredicate := range failedPredicates {
switch failedPredicate {
case
predicates.ErrNodeSelectorNotMatch,
predicates.ErrPodAffinityRulesNotMatch,
predicates.ErrPodNotMatchHostName,
predicates.ErrTaintsTolerationsNotMatch,
predicates.ErrNodeLabelPresenceViolated,
// Node conditions won't change when scheduler simulates removal of preemption victims.
// So, it is pointless to try nodes that have not been able to host the pod due to node
// conditions. These include ErrNodeNotReady, ErrNodeUnderPIDPressure, ErrNodeUnderMemoryPressure, ....
predicates.ErrNodeNotReady,
predicates.ErrNodeNetworkUnavailable,
predicates.ErrNodeUnderDiskPressure,
predicates.ErrNodeUnderPIDPressure,
predicates.ErrNodeUnderMemoryPressure,
predicates.ErrNodeOutOfDisk,
predicates.ErrNodeUnschedulable,
predicates.ErrNodeUnknownCondition,
predicates.ErrVolumeZoneConflict,
predicates.ErrVolumeNodeConflict,
predicates.ErrVolumeBindConflict:
unresolvableReasonExist = true
break
}
}
if !found || !unresolvableReasonExist {
glog.V(3).Infof("Node %v is a potential node for preemption.", node.Name)
potentialNodes = append(potentialNodes, node)
}
}
return potentialNodes
}
// podEligibleToPreemptOthers determines whether this pod should be considered
// for preempting other pods or not. If this pod has already preempted other
// pods and those are in their graceful termination period, it shouldn't be
// considered for preemption.
// We look at the node that is nominated for this pod and as long as there are
// terminating pods on the node, we don't consider this for preempting more pods.
func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) bool {
nomNodeName := pod.Status.NominatedNodeName
if len(nomNodeName) > 0 {
if nodeInfo, found := nodeNameToInfo[nomNodeName]; found {
for _, p := range nodeInfo.Pods() {
if p.DeletionTimestamp != nil && util.GetPodPriority(p) < util.GetPodPriority(pod) {
// There is a terminating pod on the nominated node.
return false
}
}
}
}
return true
}
// podPassesBasicChecks makes sanity checks on the pod if it can be scheduled.
func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeClaimLister) error {
// Check PVCs used by the pod
namespace := pod.Namespace
manifest := &(pod.Spec)
for i := range manifest.Volumes {
volume := &manifest.Volumes[i]
if volume.PersistentVolumeClaim == nil {
// Volume is not a PVC, ignore
continue
}
pvcName := volume.PersistentVolumeClaim.ClaimName
pvc, err := pvcLister.PersistentVolumeClaims(namespace).Get(pvcName)
if err != nil {
// The error has already enough context ("persistentvolumeclaim "myclaim" not found")
return err
}
if pvc.DeletionTimestamp != nil {
return fmt.Errorf("persistentvolumeclaim %q is being deleted", pvc.Name)
}
}
return nil
}
// NewGenericScheduler creates a genericScheduler object.
func NewGenericScheduler(
cache schedulercache.Cache,
eCache *equivalence.Cache,
podQueue SchedulingQueue,
predicates map[string]algorithm.FitPredicate,
predicateMetaProducer algorithm.PredicateMetadataProducer,
prioritizers []algorithm.PriorityConfig,
priorityMetaProducer algorithm.PriorityMetadataProducer,
extenders []algorithm.SchedulerExtender,
volumeBinder *volumebinder.VolumeBinder,
pvcLister corelisters.PersistentVolumeClaimLister,
alwaysCheckAllPredicates bool,
disablePreemption bool,
percentageOfNodesToScore int32,
client clientset.Interface) algorithm.ScheduleAlgorithm {
// numOfUsers := 3
glog.Infof("version 1.11: 2018.10.29 4:00 EUROSYS1.3")
// glog.Infof("Offline ENABLE_OFFLINE_SCHEDULER: %s", ENABLE_OFFLINE_SCHEDULER)
glog.Infof("ENABLE_PROFILING %v", schedulercache.ENABLE_PROFILING)
if schedulercache.ENABLE_ONLINE_SCHEDULER {
glog.Infof("============= IRA =============")
glog.Infof("IS_TEST: %v", schedulercache.IS_TEST)
glog.Infof("QUEUED_UP_JOBS: %v", schedulercache.QUEUED_UP_JOBS)
glog.Infof("NUM_USERS: %v", schedulercache.NUM_USERS)
glog.Infof("SCHEDULER %s", schedulercache.SCHEDULER)
glog.Infof("NUM_RESERVE_CPU_NODE %s", schedulercache.NUM_RESERVE_CPU_NODE)
glog.Infof("ENABLE_MOCKUP_GPU %s", schedulercache.ENABLE_MOCKUP_GPU)
if schedulercache.ENABLE_MOCKUP_GPU {
glog.Infof("NUM_MOCKUP_GPUS_PER_NODE: %d", schedulercache.NUM_MOCKUP_GPUS_PER_NODE)
}
schedulercache.SynClusterInfo(make(map[string]*schedulercache.NodeInfo), nil) // tanle: sync cluster info manually
schedulercache.InitParameters()
}
// glog.Infof("CAPACITY %s", CAPACITY)
// glog.Infof("namespaces %s", namespaces)
// glog.Infof("numOfUsers %s", numOfUsers)
// schedulercache.InitAllNameSpaces(numOfUsers)
// n := len(namespaces)
// shares := make([]*schedulercache.Resource, n)
// switch SCHEDULER {
// case ES:
// shares = AllocateES(namespaces, CAPACITY)
// case Static:
// shares = AllocateStatic(namespaces, CAPACITY)
// case AlloX:
// shares = AllocateAlloX(namespaces, CAPACITY)
// case NaiveDRF:
// shares = AllocateNaiveDRF(namespaces, CAPACITY)
// }
// for _, username := range namespaces {
// glog.Infof("%s's demand %s", username, getDemand(username))
// glog.Infof("%s's traditional demand %s", username, getTraditionalDemand(username))
// }
// glog.Infof("shares %s", shares)
return &genericScheduler{
cache: cache,
equivalenceCache: eCache,
schedulingQueue: podQueue,
predicates: predicates,
predicateMetaProducer: predicateMetaProducer,
prioritizers: prioritizers,
priorityMetaProducer: priorityMetaProducer,
extenders: extenders,
cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo),
volumeBinder: volumeBinder,
pvcLister: pvcLister,
alwaysCheckAllPredicates: alwaysCheckAllPredicates,
disablePreemption: disablePreemption,
percentageOfNodesToScore: percentageOfNodesToScore,
client: client,
}
}
| {
appendError(err)
return
} |
ingress.go | package fake
import (
"fmt"
"github.com/devfile/library/pkg/devfile/generator"
applabels "github.com/redhat-developer/odo/pkg/application/labels"
componentlabels "github.com/redhat-developer/odo/pkg/component/labels"
"github.com/redhat-developer/odo/pkg/kclient/unions"
"github.com/redhat-developer/odo/pkg/url/labels"
"github.com/redhat-developer/odo/pkg/util"
"github.com/redhat-developer/odo/pkg/version"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func GetKubernetesIngressListWithMultiple(componentName, appName string, networkingV1Supported, extensionV1Supported bool) *unions.KubernetesIngressList {
kubernetesIngressList := unions.NewEmptyKubernetesIngressList()
kubernetesIngress1 := unions.NewKubernetesIngressFromParams(generator.IngressParams{
ObjectMeta: metav1.ObjectMeta{
Name: "example-0",
Labels: map[string]string{
applabels.ApplicationLabel: appName,
componentlabels.ComponentLabel: componentName,
applabels.ManagedBy: "odo",
applabels.ManagerVersion: version.VERSION,
labels.URLLabel: "example-0",
applabels.App: appName,
},
},
IngressSpecParams: generator.IngressSpecParams{
IngressDomain: "example-0.com",
ServiceName: "example-0",
PortNumber: intstr.FromInt(8080),
},
})
if !networkingV1Supported {
kubernetesIngress1.NetworkingV1Ingress = nil
}
if !extensionV1Supported {
kubernetesIngress1.ExtensionV1Beta1Ingress = nil
}
kubernetesIngressList.Items = append(kubernetesIngressList.Items, kubernetesIngress1)
kubernetesIngress2 := unions.NewKubernetesIngressFromParams(generator.IngressParams{
ObjectMeta: metav1.ObjectMeta{
Name: "example-1",
Labels: map[string]string{
applabels.ApplicationLabel: "app",
componentlabels.ComponentLabel: componentName,
applabels.ManagedBy: "odo",
applabels.ManagerVersion: version.VERSION,
labels.URLLabel: "example-1",
applabels.App: "app",
},
},
IngressSpecParams: generator.IngressSpecParams{
IngressDomain: "example-1.com",
ServiceName: "example-1",
PortNumber: intstr.FromInt(9090),
},
})
if !networkingV1Supported {
kubernetesIngress2.NetworkingV1Ingress = nil
}
if !extensionV1Supported {
kubernetesIngress2.ExtensionV1Beta1Ingress = nil
}
kubernetesIngressList.Items = append(kubernetesIngressList.Items, kubernetesIngress2)
return kubernetesIngressList
}
func | (urlName, componentName, appName string, networkingv1Supported, extensionv1Supported bool) *unions.KubernetesIngress {
kubernetesIngress := unions.NewKubernetesIngressFromParams(generator.IngressParams{
ObjectMeta: metav1.ObjectMeta{
Name: urlName,
Labels: map[string]string{
applabels.ApplicationLabel: appName,
componentlabels.ComponentLabel: componentName,
applabels.ManagedBy: "odo",
applabels.ManagerVersion: version.VERSION,
labels.URLLabel: urlName,
applabels.App: appName,
},
},
IngressSpecParams: generator.IngressSpecParams{
IngressDomain: fmt.Sprintf("%s.com", urlName),
ServiceName: urlName,
PortNumber: intstr.FromInt(8080),
},
})
if !networkingv1Supported {
kubernetesIngress.NetworkingV1Ingress = nil
}
if !extensionv1Supported {
kubernetesIngress.ExtensionV1Beta1Ingress = nil
}
return kubernetesIngress
}
// GetSingleSecureKubernetesIngress gets a single secure ingress with the given secret name
// if no secret name is provided, the default one is used
func GetSingleSecureKubernetesIngress(urlName, componentName, appName, secretName string, networkingV1Supported, extensionV1Supported bool) *unions.KubernetesIngress {
if secretName == "" {
suffix := util.GetAdler32Value(urlName + appName + componentName)
secretName = urlName + "-" + suffix + "-tls"
}
kubernetesIngress := unions.NewKubernetesIngressFromParams(generator.IngressParams{
ObjectMeta: metav1.ObjectMeta{
Name: urlName,
Labels: map[string]string{
applabels.ApplicationLabel: appName,
componentlabels.ComponentLabel: componentName,
applabels.ManagedBy: "odo",
applabels.ManagerVersion: version.VERSION,
labels.URLLabel: urlName,
applabels.App: appName,
},
},
IngressSpecParams: generator.IngressSpecParams{
TLSSecretName: secretName,
IngressDomain: fmt.Sprintf("%s.com", urlName),
ServiceName: urlName,
PortNumber: intstr.FromInt(8080),
},
})
if !networkingV1Supported {
kubernetesIngress.NetworkingV1Ingress = nil
}
if !extensionV1Supported {
kubernetesIngress.ExtensionV1Beta1Ingress = nil
}
return kubernetesIngress
}
| GetSingleKubernetesIngress |
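// A minimal usage sketch of the fake ingress helpers defined above; the function name
// below and the expectations in its comments are illustrative assumptions, not part of
// the original package.
func exampleSingleSecureIngressUsage() *unions.KubernetesIngress {
	// Passing an empty secretName lets the helper derive "<urlName>-<adler32 suffix>-tls".
	ingress := GetSingleSecureKubernetesIngress("example", "nodejs", "app", "", true, false)
	// networkingV1Supported=true keeps NetworkingV1Ingress populated, while
	// extensionV1Supported=false clears ExtensionV1Beta1Ingress.
	return ingress
}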
example-module.d.ts | export interface LiveExample {
title: string;
component: any;
additionalFiles?: string[];
selectorName?: string;
}
import { AutocompleteDisplayExample } from './autocomplete-display/autocomplete-display-example';
import { AutocompleteFilterExample } from './autocomplete-filter/autocomplete-filter-example';
import { AutocompleteOverviewExample } from './autocomplete-overview/autocomplete-overview-example';
import { AutocompleteSimpleExample } from './autocomplete-simple/autocomplete-simple-example';
import { ButtonOverviewExample } from './button-overview/button-overview-example';
import { ButtonToggleExclusiveExample } from './button-toggle-exclusive/button-toggle-exclusive-example';
import { ButtonToggleOverviewExample } from './button-toggle-overview/button-toggle-overview-example';
import { ButtonTypesExample } from './button-types/button-types-example';
import { CardFancyExample } from './card-fancy/card-fancy-example';
import { CardOverviewExample } from './card-overview/card-overview-example';
import { CdkTableBasicExample } from './cdk-table-basic/cdk-table-basic-example';
import { CheckboxConfigurableExample } from './checkbox-configurable/checkbox-configurable-example';
import { CheckboxOverviewExample } from './checkbox-overview/checkbox-overview-example';
import { ChipsInputExample } from './chips-input/chips-input-example';
import { ChipsOverviewExample } from './chips-overview/chips-overview-example';
import { ChipsStackedExample } from './chips-stacked/chips-stacked-example';
import { DatepickerApiExample } from './datepicker-api/datepicker-api-example';
import { DatepickerDisabledExample } from './datepicker-disabled/datepicker-disabled-example';
import { DatepickerEventsExample } from './datepicker-events/datepicker-events-example';
import { DatepickerFilterExample } from './datepicker-filter/datepicker-filter-example';
import { DatepickerFormatsExample } from './datepicker-formats/datepicker-formats-example';
import { DatepickerLocaleExample } from './datepicker-locale/datepicker-locale-example';
import { DatepickerMinMaxExample } from './datepicker-min-max/datepicker-min-max-example';
import { DatepickerMomentExample } from './datepicker-moment/datepicker-moment-example';
import { DatepickerOverviewExample } from './datepicker-overview/datepicker-overview-example';
import { DatepickerStartViewExample } from './datepicker-start-view/datepicker-start-view-example';
import { DatepickerTouchExample } from './datepicker-touch/datepicker-touch-example';
import { DatepickerValueExample } from './datepicker-value/datepicker-value-example';
import { DialogContentExample } from './dialog-content/dialog-content-example';
import { DialogDataExample } from './dialog-data/dialog-data-example';
import { DialogElementsExample } from './dialog-elements/dialog-elements-example';
import { DialogOverviewExampleDialog, DialogOverviewExample } from './dialog-overview/dialog-overview-example';
import { ElevationOverviewExample } from './elevation-overview/elevation-overview-example';
import { ExpansionOverviewExample } from './expansion-overview/expansion-overview-example';
import { ExpansionStepsExample } from './expansion-steps/expansion-steps-example';
import { MyTelInput, FormFieldCustomControlExample } from './form-field-custom-control/form-field-custom-control-example';
import { FormFieldErrorExample } from './form-field-error/form-field-error-example';
import { FormFieldHintExample } from './form-field-hint/form-field-hint-example';
import { FormFieldLabelExample } from './form-field-label/form-field-label-example';
import { FormFieldOverviewExample } from './form-field-overview/form-field-overview-example';
import { FormFieldPrefixSuffixExample } from './form-field-prefix-suffix/form-field-prefix-suffix-example';
import { FormFieldThemingExample } from './form-field-theming/form-field-theming-example';
import { GridListDynamicExample } from './grid-list-dynamic/grid-list-dynamic-example';
import { GridListOverviewExample } from './grid-list-overview/grid-list-overview-example';
import { IconOverviewExample } from './icon-overview/icon-overview-example';
import { IconSvgExample } from './icon-svg-example/icon-svg-example';
import { InputAutosizeTextareaExample } from './input-autosize-textarea/input-autosize-textarea-example';
import { InputClearableExample } from './input-clearable/input-clearable-example';
import { InputErrorStateMatcherExample } from './input-error-state-matcher/input-error-state-matcher-example';
import { InputErrorsExample } from './input-errors/input-errors-example';
import { InputFormExample } from './input-form/input-form-example';
import { InputHintExample } from './input-hint/input-hint-example';
import { InputOverviewExample } from './input-overview/input-overview-example';
import { InputPrefixSuffixExample } from './input-prefix-suffix/input-prefix-suffix-example';
import { ListOverviewExample } from './list-overview/list-overview-example';
import { ListSectionsExample } from './list-sections/list-sections-example';
import { ListSelectionExample } from './list-selection/list-selection-example';
import { MenuIconsExample } from './menu-icons/menu-icons-example';
import { MenuOverviewExample } from './menu-overview/menu-overview-example';
import { NestedMenuExample } from './nested-menu/nested-menu-example';
import { PaginatorConfigurableExample } from './paginator-configurable/paginator-configurable-example';
import { PaginatorOverviewExample } from './paginator-overview/paginator-overview-example';
import { ProgressBarBufferExample } from './progress-bar-buffer/progress-bar-buffer-example';
import { ProgressBarConfigurableExample } from './progress-bar-configurable/progress-bar-configurable-example';
import { ProgressBarDeterminateExample } from './progress-bar-determinate/progress-bar-determinate-example';
import { ProgressBarIndeterminateExample } from './progress-bar-indeterminate/progress-bar-indeterminate-example';
import { ProgressBarQueryExample } from './progress-bar-query/progress-bar-query-example';
import { ProgressSpinnerConfigurableExample } from './progress-spinner-configurable/progress-spinner-configurable-example';
import { ProgressSpinnerOverviewExample } from './progress-spinner-overview/progress-spinner-overview-example';
import { RadioNgModelExample } from './radio-ng-model/radio-ng-model-example';
import { RadioOverviewExample } from './radio-overview/radio-overview-example';
import { SelectCustomTriggerExample } from './select-custom-trigger/select-custom-trigger-example';
import { SelectDisabledExample } from './select-disabled/select-disabled-example';
import { SelectErrorStateMatcherExample } from './select-error-state-matcher/select-error-state-matcher-example';
import { SelectFormExample } from './select-form/select-form-example';
import { SelectHintErrorExample } from './select-hint-error/select-hint-error-example';
import { SelectMultipleExample } from './select-multiple/select-multiple-example';
import { SelectNoRippleExample } from './select-no-ripple/select-no-ripple-example';
import { SelectOptgroupExample } from './select-optgroup/select-optgroup-example';
import { SelectOverviewExample } from './select-overview/select-overview-example';
import { SelectPanelClassExample } from './select-panel-class/select-panel-class-example';
import { SelectResetExample } from './select-reset/select-reset-example';
import { SelectValueBindingExample } from './select-value-binding/select-value-binding-example';
import { SidenavAutosizeExample } from './sidenav-autosize/sidenav-autosize-example';
import { SidenavDisableCloseExample } from './sidenav-disable-close/sidenav-disable-close-example';
import { SidenavDrawerOverviewExample } from './sidenav-drawer-overview/sidenav-drawer-overview-example';
import { SidenavFixedExample } from './sidenav-fixed/sidenav-fixed-example';
import { SidenavModeExample } from './sidenav-mode/sidenav-mode-example';
import { SidenavOpenCloseExample } from './sidenav-open-close/sidenav-open-close-example';
import { SidenavOverviewExample } from './sidenav-overview/sidenav-overview-example';
import { SidenavPositionExample } from './sidenav-position/sidenav-position-example';
import { SidenavResponsiveExample } from './sidenav-responsive/sidenav-responsive-example';
import { SlideToggleConfigurableExample } from './slide-toggle-configurable/slide-toggle-configurable-example';
import { SlideToggleFormsExample } from './slide-toggle-forms/slide-toggle-forms-example';
import { SlideToggleOverviewExample } from './slide-toggle-overview/slide-toggle-overview-example';
import { SliderConfigurableExample } from './slider-configurable/slider-configurable-example';
import { SliderOverviewExample } from './slider-overview/slider-overview-example';
import { SnackBarComponentExample } from './snack-bar-component/snack-bar-component-example';
import { SnackBarOverviewExample } from './snack-bar-overview/snack-bar-overview-example';
import { SortOverviewExample } from './sort-overview/sort-overview-example';
import { StepperOverviewExample } from './stepper-overview/stepper-overview-example';
import { TableBasicExample } from './table-basic/table-basic-example';
import { TableFilteringExample } from './table-filtering/table-filtering-example';
import { TableHttpExample } from './table-http/table-http-example';
import { TableOverviewExample } from './table-overview/table-overview-example';
import { TablePaginationExample } from './table-pagination/table-pagination-example';
import { TableSelectionExample } from './table-selection/table-selection-example';
import { TableSortingExample } from './table-sorting/table-sorting-example';
import { TabsOverviewExample } from './tabs-overview/tabs-overview-example';
import { TabsTemplateLabelExample } from './tabs-template-label/tabs-template-label-example';
import { ToolbarMultirowExample } from './toolbar-multirow/toolbar-multirow-example';
import { ToolbarOverviewExample } from './toolbar-overview/toolbar-overview-example';
import { TooltipOverviewExample } from './tooltip-overview/tooltip-overview-example';
import { TooltipPositionExample } from './tooltip-position/tooltip-position-example';
export declare const EXAMPLE_COMPONENTS: {
'autocomplete-display': {
title: string;
component: typeof AutocompleteDisplayExample;
additionalFiles: null;
selectorName: null;
};
'autocomplete-filter': {
title: string;
component: typeof AutocompleteFilterExample;
additionalFiles: null;
selectorName: null;
};
'autocomplete-overview': {
title: string;
component: typeof AutocompleteOverviewExample;
additionalFiles: null;
selectorName: null;
};
'autocomplete-simple': {
title: string;
component: typeof AutocompleteSimpleExample;
additionalFiles: null;
selectorName: null;
};
'button-overview': {
title: string;
component: typeof ButtonOverviewExample;
additionalFiles: null;
selectorName: null;
};
'button-toggle-exclusive': {
title: string;
component: typeof ButtonToggleExclusiveExample;
additionalFiles: null;
selectorName: null;
};
'button-toggle-overview': {
title: string;
component: typeof ButtonToggleOverviewExample;
additionalFiles: null;
selectorName: null;
};
'button-types': {
title: string;
component: typeof ButtonTypesExample;
additionalFiles: null;
selectorName: null;
};
'card-fancy': {
title: string;
component: typeof CardFancyExample;
additionalFiles: null;
selectorName: null;
};
'card-overview': {
title: string;
component: typeof CardOverviewExample;
additionalFiles: null;
selectorName: null;
};
'cdk-table-basic': {
title: string;
component: typeof CdkTableBasicExample;
additionalFiles: null;
selectorName: null;
};
'checkbox-configurable': {
title: string;
component: typeof CheckboxConfigurableExample;
additionalFiles: null;
selectorName: null;
};
'checkbox-overview': {
title: string;
component: typeof CheckboxOverviewExample;
additionalFiles: null;
selectorName: null;
};
'chips-input': {
title: string;
component: typeof ChipsInputExample;
additionalFiles: null;
selectorName: null;
};
'chips-overview': {
title: string;
component: typeof ChipsOverviewExample;
additionalFiles: null;
selectorName: null;
};
'chips-stacked': {
title: string;
component: typeof ChipsStackedExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-api': {
title: string;
component: typeof DatepickerApiExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-disabled': {
title: string;
component: typeof DatepickerDisabledExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-events': {
title: string;
component: typeof DatepickerEventsExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-filter': {
title: string;
component: typeof DatepickerFilterExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-formats': {
title: string;
component: typeof DatepickerFormatsExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-locale': {
title: string;
component: typeof DatepickerLocaleExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-min-max': {
title: string;
component: typeof DatepickerMinMaxExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-moment': {
title: string;
component: typeof DatepickerMomentExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-overview': {
title: string;
component: typeof DatepickerOverviewExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-start-view': {
title: string;
component: typeof DatepickerStartViewExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-touch': {
title: string;
component: typeof DatepickerTouchExample;
additionalFiles: null;
selectorName: null;
};
'datepicker-value': {
title: string;
component: typeof DatepickerValueExample;
additionalFiles: null;
selectorName: null;
};
'dialog-content': {
title: string;
component: typeof DialogContentExample;
additionalFiles: string[];
selectorName: string;
};
'dialog-data': {
title: string;
component: typeof DialogDataExample;
additionalFiles: string[];
selectorName: string;
};
'dialog-elements': {
title: string;
component: typeof DialogElementsExample;
additionalFiles: string[];
selectorName: string;
};
'dialog-overview': {
title: string;
component: typeof DialogOverviewExample;
additionalFiles: string[];
selectorName: string;
};
'elevation-overview': {
title: string;
component: typeof ElevationOverviewExample;
additionalFiles: null;
selectorName: null;
};
'expansion-overview': {
title: string;
component: typeof ExpansionOverviewExample;
additionalFiles: null;
selectorName: null;
};
'expansion-steps': {
title: string;
component: typeof ExpansionStepsExample;
additionalFiles: null;
selectorName: null;
};
'form-field-custom-control': {
title: string;
component: typeof FormFieldCustomControlExample;
additionalFiles: string[];
selectorName: string;
};
'form-field-error': {
title: string;
component: typeof FormFieldErrorExample;
additionalFiles: null;
selectorName: null;
};
'form-field-hint': {
title: string;
component: typeof FormFieldHintExample;
additionalFiles: null;
selectorName: null;
};
'form-field-label': {
title: string;
component: typeof FormFieldLabelExample;
additionalFiles: null;
selectorName: null;
};
'form-field-overview': {
title: string;
component: typeof FormFieldOverviewExample;
additionalFiles: null;
selectorName: null;
};
'form-field-prefix-suffix': {
title: string;
component: typeof FormFieldPrefixSuffixExample;
additionalFiles: null;
selectorName: null;
};
'form-field-theming': {
title: string;
component: typeof FormFieldThemingExample;
additionalFiles: null;
selectorName: null;
};
'grid-list-dynamic': {
title: string;
component: typeof GridListDynamicExample;
additionalFiles: null;
selectorName: null;
};
'grid-list-overview': {
title: string;
component: typeof GridListOverviewExample;
additionalFiles: null;
selectorName: null;
};
'icon-overview': {
title: string;
component: typeof IconOverviewExample;
additionalFiles: null;
selectorName: null;
};
'icon-svg': {
title: string;
component: typeof IconSvgExample;
additionalFiles: null;
selectorName: null;
};
'input-autosize-textarea': {
title: string;
component: typeof InputAutosizeTextareaExample;
additionalFiles: null;
selectorName: null;
};
'input-clearable': {
title: string;
component: typeof InputClearableExample;
additionalFiles: null;
selectorName: null;
};
'input-error-state-matcher': {
title: string;
component: typeof InputErrorStateMatcherExample;
additionalFiles: null;
selectorName: null;
};
'input-errors': {
title: string;
component: typeof InputErrorsExample;
additionalFiles: null;
selectorName: null;
};
'input-form': {
title: string;
component: typeof InputFormExample;
additionalFiles: null;
selectorName: null;
};
'input-hint': {
title: string;
component: typeof InputHintExample;
additionalFiles: null;
selectorName: null;
};
'input-overview': {
title: string;
component: typeof InputOverviewExample;
additionalFiles: null;
selectorName: null;
};
'input-prefix-suffix': {
title: string;
component: typeof InputPrefixSuffixExample;
additionalFiles: null;
selectorName: null;
};
'list-overview': {
title: string;
component: typeof ListOverviewExample;
additionalFiles: null;
selectorName: null;
};
'list-sections': {
title: string;
component: typeof ListSectionsExample;
additionalFiles: null;
selectorName: null;
};
'list-selection': {
title: string;
component: typeof ListSelectionExample;
additionalFiles: null;
selectorName: null;
};
'menu-icons': {
title: string;
component: typeof MenuIconsExample;
additionalFiles: null;
selectorName: null;
};
'menu-overview': {
title: string;
component: typeof MenuOverviewExample;
additionalFiles: null;
selectorName: null;
};
'nested-menu': {
title: string;
component: typeof NestedMenuExample;
additionalFiles: null;
selectorName: null;
};
'paginator-configurable': {
title: string;
component: typeof PaginatorConfigurableExample;
additionalFiles: null;
selectorName: null;
};
'paginator-overview': {
title: string;
component: typeof PaginatorOverviewExample;
additionalFiles: null;
selectorName: null;
};
'progress-bar-buffer': {
title: string;
component: typeof ProgressBarBufferExample;
additionalFiles: null;
selectorName: null;
};
'progress-bar-configurable': {
title: string;
component: typeof ProgressBarConfigurableExample;
additionalFiles: null;
selectorName: null;
};
'progress-bar-determinate': {
title: string;
component: typeof ProgressBarDeterminateExample;
additionalFiles: null;
selectorName: null;
};
'progress-bar-indeterminate': {
title: string;
component: typeof ProgressBarIndeterminateExample;
additionalFiles: null;
selectorName: null;
};
'progress-bar-query': {
title: string;
component: typeof ProgressBarQueryExample;
additionalFiles: null;
selectorName: null;
};
'progress-spinner-configurable': {
title: string;
component: typeof ProgressSpinnerConfigurableExample;
additionalFiles: null;
selectorName: null;
};
'progress-spinner-overview': {
title: string;
component: typeof ProgressSpinnerOverviewExample;
additionalFiles: null;
selectorName: null;
};
'radio-ng-model': {
title: string;
component: typeof RadioNgModelExample;
additionalFiles: null;
selectorName: null;
};
'radio-overview': {
title: string;
component: typeof RadioOverviewExample;
additionalFiles: null;
selectorName: null;
};
'select-custom-trigger': {
title: string;
component: typeof SelectCustomTriggerExample;
additionalFiles: null;
selectorName: null;
};
'select-disabled': {
title: string;
component: typeof SelectDisabledExample;
additionalFiles: null;
selectorName: null;
};
'select-error-state-matcher': {
title: string;
component: typeof SelectErrorStateMatcherExample;
additionalFiles: null;
selectorName: null;
};
'select-form': {
title: string;
component: typeof SelectFormExample;
additionalFiles: null;
selectorName: null;
};
'select-hint-error': {
title: string;
component: typeof SelectHintErrorExample;
additionalFiles: null;
selectorName: null;
};
'select-multiple': {
title: string;
component: typeof SelectMultipleExample;
additionalFiles: null;
selectorName: null;
};
'select-no-ripple': {
title: string;
component: typeof SelectNoRippleExample;
additionalFiles: null;
selectorName: null;
};
'select-optgroup': {
title: string;
component: typeof SelectOptgroupExample;
additionalFiles: null;
selectorName: null;
};
'select-overview': {
title: string;
component: typeof SelectOverviewExample;
additionalFiles: null;
selectorName: null;
};
'select-panel-class': {
title: string;
component: typeof SelectPanelClassExample;
additionalFiles: null;
selectorName: null;
};
'select-reset': {
title: string;
component: typeof SelectResetExample;
additionalFiles: null;
selectorName: null;
};
'select-value-binding': {
title: string;
component: typeof SelectValueBindingExample;
additionalFiles: null;
selectorName: null;
};
'sidenav-autosize': {
title: string;
component: typeof SidenavAutosizeExample;
additionalFiles: null;
selectorName: null;
};
'sidenav-disable-close': {
title: string;
component: typeof SidenavDisableCloseExample;
additionalFiles: null;
selectorName: null;
};
'sidenav-drawer-overview': {
title: string;
component: typeof SidenavDrawerOverviewExample;
additionalFiles: null;
selectorName: null;
};
'sidenav-fixed': {
title: string;
component: typeof SidenavFixedExample;
additionalFiles: null;
selectorName: null;
};
'sidenav-mode': {
title: string;
component: typeof SidenavModeExample;
additionalFiles: null;
selectorName: null;
};
'sidenav-open-close': {
title: string;
component: typeof SidenavOpenCloseExample;
additionalFiles: null;
selectorName: null;
};
'sidenav-overview': {
title: string;
component: typeof SidenavOverviewExample;
additionalFiles: null;
selectorName: null;
};
'sidenav-position': {
title: string;
component: typeof SidenavPositionExample;
additionalFiles: null;
selectorName: null;
};
'sidenav-responsive': {
title: string;
component: typeof SidenavResponsiveExample;
additionalFiles: null;
selectorName: null;
};
'slide-toggle-configurable': {
title: string;
component: typeof SlideToggleConfigurableExample;
additionalFiles: null;
selectorName: null;
};
'slide-toggle-forms': {
title: string;
component: typeof SlideToggleFormsExample;
additionalFiles: null;
selectorName: null;
};
'slide-toggle-overview': {
title: string;
component: typeof SlideToggleOverviewExample;
additionalFiles: null;
selectorName: null;
};
'slider-configurable': {
title: string;
component: typeof SliderConfigurableExample;
additionalFiles: null;
selectorName: null;
};
'slider-overview': {
title: string;
component: typeof SliderOverviewExample;
additionalFiles: null;
selectorName: null;
};
'snack-bar-component': {
title: string;
component: typeof SnackBarComponentExample;
additionalFiles: string[];
selectorName: string;
};
'snack-bar-overview': {
title: string;
component: typeof SnackBarOverviewExample;
additionalFiles: null;
selectorName: null;
};
'sort-overview': {
title: string;
component: typeof SortOverviewExample;
additionalFiles: null;
selectorName: null;
};
'stepper-overview': {
title: string;
component: typeof StepperOverviewExample;
additionalFiles: null;
selectorName: null;
};
'table-basic': {
title: string;
component: typeof TableBasicExample;
additionalFiles: null;
selectorName: null;
};
'table-filtering': {
title: string;
component: typeof TableFilteringExample;
additionalFiles: null;
selectorName: null;
};
'table-http': {
title: string;
component: typeof TableHttpExample;
additionalFiles: null;
selectorName: null;
};
'table-overview': {
title: string;
component: typeof TableOverviewExample;
additionalFiles: null;
selectorName: null;
};
'table-pagination': {
title: string;
component: typeof TablePaginationExample;
additionalFiles: null;
selectorName: null;
};
'table-selection': {
title: string;
component: typeof TableSelectionExample;
additionalFiles: null;
selectorName: null;
};
'table-sorting': {
title: string;
component: typeof TableSortingExample;
additionalFiles: null;
selectorName: null;
};
'tabs-overview': {
title: string;
component: typeof TabsOverviewExample;
additionalFiles: null;
selectorName: null;
};
'tabs-template-label': {
title: string;
component: typeof TabsTemplateLabelExample;
additionalFiles: null;
selectorName: null;
};
'toolbar-multirow': {
title: string;
component: typeof ToolbarMultirowExample;
additionalFiles: null;
selectorName: null;
};
'toolbar-overview': {
title: string;
component: typeof ToolbarOverviewExample;
additionalFiles: null;
selectorName: null;
};
'tooltip-overview': {
title: string;
component: typeof TooltipOverviewExample;
additionalFiles: null;
selectorName: null;
};
'tooltip-position': {
title: string;
component: typeof TooltipPositionExample;
additionalFiles: null;
selectorName: null;
};
};
export declare const EXAMPLE_LIST: (typeof DatepickerLocaleExample | typeof DialogContentExample | typeof DialogOverviewExampleDialog | typeof MyTelInput | typeof FormFieldLabelExample | typeof IconSvgExample | typeof SidenavResponsiveExample | typeof SlideToggleFormsExample | typeof SnackBarOverviewExample | typeof StepperOverviewExample | typeof TableHttpExample)[];
export declare class | {
}
| ExampleModule |
emulation_unchecked.rs | use core::marker::PhantomData;
use roundops::*;
use float_traits::IEEE754Float;
use utils::safeeft::{safetwosum_branch as safetwosum, safetwoproduct_branch};
#[cfg(any(feature = "use-fma", feature = "doc"))]
use utils::safeeft::safetwoproduct_fma;
#[cfg(any(feature = "use-fma", feature = "doc"))]
use utils::fma::Fma;
use utils::FloatSuccPred;
#[derive(Clone)]
pub struct EmulationRegularUnchecked<T: IEEE754Float + Clone>(PhantomData<fn(T)>);
#[cfg(any(feature = "use-fma", feature = "doc"))]
#[derive(Clone)]
pub struct EmulationFmaUnchecked<T: IEEE754Float + Fma + Clone>(PhantomData<fn(T)>);
impl<T: IEEE754Float + Clone> RoundingMethod for EmulationRegularUnchecked<T> {
type HostMethod = rmode::DefaultRounding;
type Num = T;
}
#[cfg(any(feature = "use-fma", feature = "doc"))]
impl<T: IEEE754Float + Fma + Clone> RoundingMethod for EmulationFmaUnchecked<T> {
type HostMethod = rmode::DefaultRounding;
type Num = T;
}
macro_rules! impl_rops {
($bound:ident $( + $bound1:ident)+, $method:ident, $twoproduct:ident) => (
impl<T: $($bound1+)+$bound> RoundAdd for $method<T> {
fn add_up(a: T, b: T) -> T {
let (x, y) = safetwosum(a, b);
if y > T::zero() { x.succ() } else { x }
}
fn add_down(a: T, b: T) -> T {
let (x, y) = safetwosum(a, b);
if y < T::zero() { x.pred() } else { x }
}
}
impl<T: $($bound1+)+$bound> RoundSub for $method<T> {
#[inline]
fn sub_up(a: T, b: T) -> T {
Self::add_up(a, -b)
}
#[inline]
fn sub_down(a: T, b: T) -> T {
Self::add_down(a, -b)
}
}
impl<T: $($bound1+)+$bound> RoundMul for $method<T> {
fn mul_up(a: T, b: T) -> T {
let (x, y) = $twoproduct(a, b);
if y > T::zero() { x.succ() } else { x }
}
fn mul_down(a: T, b: T) -> T {
let (x, y) = $twoproduct(a, b);
if y < T::zero() { x.pred() } else { x }
}
}
impl<T: $($bound1+)+$bound> RoundDiv for $method<T> {
fn div_up(a: T, b: T) -> T {
let (a, b) = if b < T::zero() { (-a, -b) } else { (a, b) };
let d = a.clone() / b.clone();
let (x, y) = $twoproduct(d.clone(), b);
if x < a || (x == a && y < T::zero()) {
d.succ()
} else {
d
}
}
fn div_down(a: T, b: T) -> T {
let (a, b) = if b < T::zero() { (-a, -b) } else { (a, b) };
let d = a.clone() / b.clone();
let (x, y) = $twoproduct(d.clone(), b);
if x > a || (x == a && y > T::zero()) {
d.pred()
} else {
d
}
}
}
impl<T: $($bound1+)+$bound> RoundSqrt for $method<T> {
fn sqrt_up(a: T) -> T {
let r = a.clone().sqrt();
let (x, y) = $twoproduct(r.clone(), r.clone());
if x < a || (x == a && y < T::zero()) {
r.succ()
} else {
r
}
}
fn sqrt_down(a: T) -> T {
let r = a.clone().sqrt();
let (x, y) = $twoproduct(r.clone(), r.clone());
if x > a || (x == a && y > T::zero()) {
r.pred()
} else {
r
}
}
}
)
}
impl_rops!(IEEE754Float + Clone,
EmulationRegularUnchecked,
safetwoproduct_branch);
#[cfg(any(feature = "use-fma", feature = "doc"))]
impl_rops!(IEEE754Float + Fma + Clone,
EmulationFmaUnchecked,
safetwoproduct_fma);
impl<T: IEEE754Float + Clone> RoundedSession for EmulationRegularUnchecked<T> {
type Num = T;
}
#[cfg(any(feature = "use-fma", feature = "doc"))]
impl<T: IEEE754Float + Fma + Clone> RoundedSession for EmulationFmaUnchecked<T> {
type Num = T;
}
#[cfg(test)]
mod tests {
use rand::{Rng, thread_rng};
use roundops::*;
use utils::FloatSuccPred;
use super::EmulationRegularUnchecked;
type Emuf64 = EmulationRegularUnchecked<f64>;
#[test]
fn addition() {
let mut rng = thread_rng();
for _ in 0..10000000 {
let (a, b) = (rng.gen(), rng.gen());
let (x, y) = (Emuf64::add_up(a, b), Emuf64::add_down(a, b));
if !((a + b).is_infinite() || a != a || b != b || a + b != a + b) {
assert!(y <= a + b && a + b <= x);
assert!(x == y.succ() || x == y);
}
}
}
#[test]
fn subtraction() {
let mut rng = thread_rng();
for _ in 0..10000000 {
let (a, b) = (rng.gen(), rng.gen());
let (x, y) = (Emuf64::sub_up(a, b), Emuf64::sub_down(a, b));
if !((a - b).is_infinite() || a != a || b != b || a - b != a - b) {
assert!(y <= a - b && a - b <= x);
assert!(x == y.succ() || x == y);
}
}
}
#[test]
fn multiplication() {
let mut rng = thread_rng();
for _ in 0..10000000 {
let (a, b) = (rng.gen(), rng.gen());
let (x, y) = (Emuf64::mul_up(a, b), Emuf64::mul_down(a, b));
if !((a * b).is_infinite() || a != a || b != b || a * b != a * b) {
assert!(y <= a * b && a * b <= x);
assert!(x == y.succ() || x == y);
}
}
}
#[test]
fn division() {
let mut rng = thread_rng();
for _ in 0..10000000 {
let (a, b) = (rng.gen(), rng.gen());
let (x, y) = (Emuf64::div_up(a, b), Emuf64::div_down(a, b));
if !((a / b).is_infinite() || a != a || b != b || a / b != a / b) {
assert!(y <= a / b && a / b <= x);
assert!(x == y.succ() || x == y);
}
}
}
#[test]
fn sqrt() |
}
| {
let mut rng = thread_rng();
for _ in 0..10000000 {
let a = rng.gen();
let (x, y) = (Emuf64::sqrt_up(a), Emuf64::sqrt_down(a));
if !(a.is_infinite() || a != a || a.sqrt() != a.sqrt()) {
assert!(y <= a.sqrt() && a.sqrt() <= x);
assert!(x == y.succ() || x == y);
}
}
} |
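// A minimal usage sketch of the emulated directed rounding defined above, assuming it is
// called from the same crate as EmulationRegularUnchecked (the operand values are illustrative):
fn _directed_rounding_sketch() {
    type Emuf64 = EmulationRegularUnchecked<f64>;
    let (a, b) = (0.1_f64, 0.2_f64);
    // The directed results bracket the round-to-nearest value and differ by at most one ULP,
    // which is exactly the property the randomized tests above assert.
    assert!(Emuf64::add_down(a, b) <= a + b && a + b <= Emuf64::add_up(a, b));
    assert!(Emuf64::mul_down(a, b) <= a * b && a * b <= Emuf64::mul_up(a, b));
}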
test_python.py | import sqlite3
import tempfile
import hgdb
import os
import pytest
def get_conn_cursor(db_name):
conn = sqlite3.connect(db_name)
c = conn.cursor()
return conn, c
def test_store_instance():
with tempfile.TemporaryDirectory() as temp:
db_name = os.path.join(temp, "debug.db")
db = hgdb.DebugSymbolTable(db_name)
db.store_instance(42, "test")
conn, c = get_conn_cursor(db_name)
c.execute("SELECT COUNT(*) FROM instance WHERE id=?", (42,))
r = c.fetchone()[0]
assert r == 1
conn.close()
def test_store_breakpoint():
with tempfile.TemporaryDirectory() as temp:
db_name = os.path.join(temp, "debug.db")
db = hgdb.DebugSymbolTable(db_name)
# no matching instance yet
with pytest.raises(hgdb.db.DebugSymbolTableException) as ex:
db.store_breakpoint(1, 42, "/tmp/test.py", 1)
assert ex.value.args[0]
db.store_instance(42, "test")
db.store_breakpoint(1, 42, "/tmp/test.py", 1)
conn, c = get_conn_cursor(db_name)
c.execute("SELECT COUNT(*) FROM breakpoint WHERE filename=? AND line_num=?", ("/tmp/test.py", 1))
r = c.fetchone()[0]
assert r == 1
conn.close()
def | ():
with tempfile.TemporaryDirectory() as temp:
db_name = os.path.join(temp, "debug.db")
db = hgdb.DebugSymbolTable(db_name)
# no matching variable yet
with pytest.raises(hgdb.db.DebugSymbolTableException) as ex:
db.store_context_variable("a", 1, 43)
assert ex.value.args[0]
db.store_instance(42, "test")
db.store_breakpoint(1, 42, "/tmp/test.py", 1)
db.store_variable(43, "value")
db.store_context_variable("a", 1, 43)
conn, c = get_conn_cursor(db_name)
c.execute("SELECT COUNT(*) FROM context_variable WHERE breakpoint_id=?", (1, ))
r = c.fetchone()[0]
assert r == 1
conn.close()
def test_store_generator_variable():
with tempfile.TemporaryDirectory() as temp:
db_name = os.path.join(temp, "debug.db")
db = hgdb.DebugSymbolTable(db_name)
# no matching instance yet
with pytest.raises(hgdb.db.DebugSymbolTableException) as ex:
db.store_generator_variable("a", 42, 43)
assert ex.value.args[0]
db.store_instance(42, "test")
db.store_breakpoint(1, 42, "/tmp/test.py", 1)
db.store_variable(43, "value")
db.store_generator_variable("a", 42, 43)
conn, c = get_conn_cursor(db_name)
c.execute("SELECT COUNT(*) FROM generator_variable WHERE instance_id=?", (42, ))
r = c.fetchone()[0]
assert r == 1
conn.close()
def test_store_scope():
with tempfile.TemporaryDirectory() as temp:
db_name = os.path.join(temp, "debug.db")
db = hgdb.DebugSymbolTable(db_name)
db.store_instance(42, "test")
for i in range(4):
db.store_breakpoint(i, 42, "/tmp/test.py", i + 1)
db.store_scope(0, *[0, 1, 2, 3])
conn, c = get_conn_cursor(db_name)
c.execute("SELECT breakpoints FROM scope WHERE scope=?", (0, ))
r = c.fetchone()[0]
assert r == " ".join([str(i) for i in range(4)])
conn.close()
if __name__ == "__main__":
test_store_scope()
| test_store_context_variable |
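# A minimal usage sketch of the hgdb symbol table exercised in the tests above; the
# database path and ids are illustrative.
def _symbol_table_sketch():
    db = hgdb.DebugSymbolTable("debug.db")
    db.store_instance(1, "top")                   # instances must be stored first
    db.store_breakpoint(0, 1, "/tmp/top.py", 10)  # breakpoint 0 inside instance 1
    db.store_variable(2, "value")
    db.store_context_variable("a", 0, 2)          # expose variable 2 as "a" at breakpoint 0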
dr.rs | #[doc = "Register `DR` reader"]
pub struct R(crate::R<DR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<DR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<DR_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<DR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `DR` writer"]
pub struct W(crate::W<DR_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<DR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<DR_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<DR_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `DR` reader - Data register"]
pub struct DR_R(crate::FieldReader<u16, u16>);
impl DR_R {
#[inline(always)]
pub(crate) fn new(bits: u16) -> Self {
DR_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for DR_R {
type Target = crate::FieldReader<u16, u16>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `DR` writer - Data register"]
pub struct DR_W<'a> {
w: &'a mut W,
}
impl<'a> DR_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff) | (value as u32 & 0xffff);
self.w
} | #[doc = "Bits 0:15 - Data register"]
#[inline(always)]
pub fn dr(&self) -> DR_R {
DR_R::new((self.bits & 0xffff) as u16)
}
}
impl W {
#[doc = "Bits 0:15 - Data register"]
#[inline(always)]
pub fn dr(&mut self) -> DR_W {
DR_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "data register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [dr](index.html) module"]
pub struct DR_SPEC;
impl crate::RegisterSpec for DR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [dr::R](R) reader structure"]
impl crate::Readable for DR_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [dr::W](W) writer structure"]
impl crate::Writable for DR_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets DR to value 0"]
impl crate::Resettable for DR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
} | }
impl R { |
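// A minimal sketch of how this generated register API is typically driven; the peripheral
// handle `spi` is an illustrative assumption, not part of this file:
//
//     let data = spi.dr.read().dr().bits();                  // read the 16-bit data field
//     spi.dr.write(|w| w.dr().bits(0x1234));                 // write a new 16-bit value
//     spi.dr.modify(|r, w| w.dr().bits(r.dr().bits() + 1));  // read-modify-write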
collection_view_compositional_layout.go | package appkit
// #include "collection_view_compositional_layout.h"
import "C"
import (
"unsafe"
"github.com/hsiafan/cocoa/coregraphics"
"github.com/hsiafan/cocoa/foundation"
"github.com/hsiafan/cocoa/objc"
)
type CollectionViewCompositionalLayout interface {
CollectionViewLayout
Configuration() NSCollectionViewCompositionalLayoutConfiguration
SetConfiguration(value CollectionViewCompositionalLayoutConfiguration)
}
type NSCollectionViewCompositionalLayout struct {
NSCollectionViewLayout
}
func MakeCollectionViewCompositionalLayout(ptr unsafe.Pointer) NSCollectionViewCompositionalLayout {
return NSCollectionViewCompositionalLayout{
NSCollectionViewLayout: MakeCollectionViewLayout(ptr),
}
}
func (n NSCollectionViewCompositionalLayout) InitWithSection(section CollectionLayoutSection) NSCollectionViewCompositionalLayout {
result_ := C.C_NSCollectionViewCompositionalLayout_InitWithSection(n.Ptr(), objc.ExtractPtr(section))
return MakeCollectionViewCompositionalLayout(result_)
}
func (n NSCollectionViewCompositionalLayout) InitWithSection_Configuration(section CollectionLayoutSection, configuration CollectionViewCompositionalLayoutConfiguration) NSCollectionViewCompositionalLayout {
result_ := C.C_NSCollectionViewCompositionalLayout_InitWithSection_Configuration(n.Ptr(), objc.ExtractPtr(section), objc.ExtractPtr(configuration))
return MakeCollectionViewCompositionalLayout(result_)
}
func AllocCollectionViewCompositionalLayout() NSCollectionViewCompositionalLayout {
result_ := C.C_NSCollectionViewCompositionalLayout_AllocCollectionViewCompositionalLayout()
return MakeCollectionViewCompositionalLayout(result_)
}
func (n NSCollectionViewCompositionalLayout) Autorelease() NSCollectionViewCompositionalLayout {
result_ := C.C_NSCollectionViewCompositionalLayout_Autorelease(n.Ptr())
return MakeCollectionViewCompositionalLayout(result_)
}
func (n NSCollectionViewCompositionalLayout) Retain() NSCollectionViewCompositionalLayout {
result_ := C.C_NSCollectionViewCompositionalLayout_Retain(n.Ptr())
return MakeCollectionViewCompositionalLayout(result_)
}
func (n NSCollectionViewCompositionalLayout) Configuration() NSCollectionViewCompositionalLayoutConfiguration {
result_ := C.C_NSCollectionViewCompositionalLayout_Configuration(n.Ptr())
return MakeCollectionViewCompositionalLayoutConfiguration(result_)
}
func (n NSCollectionViewCompositionalLayout) SetConfiguration(value CollectionViewCompositionalLayoutConfiguration) {
C.C_NSCollectionViewCompositionalLayout_SetConfiguration(n.Ptr(), objc.ExtractPtr(value))
}
type CollectionViewCompositionalLayoutConfiguration interface {
objc.Object
ScrollDirection() CollectionViewScrollDirection
SetScrollDirection(value CollectionViewScrollDirection)
InterSectionSpacing() coregraphics.Float
SetInterSectionSpacing(value coregraphics.Float)
BoundarySupplementaryItems() []CollectionLayoutBoundarySupplementaryItem
SetBoundarySupplementaryItems(value []CollectionLayoutBoundarySupplementaryItem)
}
type NSCollectionViewCompositionalLayoutConfiguration struct {
objc.NSObject
}
func MakeCollectionViewCompositionalLayoutConfiguration(ptr unsafe.Pointer) NSCollectionViewCompositionalLayoutConfiguration {
return NSCollectionViewCompositionalLayoutConfiguration{
NSObject: objc.MakeObject(ptr),
}
}
func AllocCollectionViewCompositionalLayoutConfiguration() NSCollectionViewCompositionalLayoutConfiguration {
result_ := C.C_NSCollectionViewCompositionalLayoutConfiguration_AllocCollectionViewCompositionalLayoutConfiguration()
return MakeCollectionViewCompositionalLayoutConfiguration(result_)
}
func (n NSCollectionViewCompositionalLayoutConfiguration) Init() NSCollectionViewCompositionalLayoutConfiguration {
result_ := C.C_NSCollectionViewCompositionalLayoutConfiguration_Init(n.Ptr())
return MakeCollectionViewCompositionalLayoutConfiguration(result_)
}
func NewCollectionViewCompositionalLayoutConfiguration() NSCollectionViewCompositionalLayoutConfiguration {
result_ := C.C_NSCollectionViewCompositionalLayoutConfiguration_NewCollectionViewCompositionalLayoutConfiguration()
return MakeCollectionViewCompositionalLayoutConfiguration(result_)
}
func (n NSCollectionViewCompositionalLayoutConfiguration) Autorelease() NSCollectionViewCompositionalLayoutConfiguration {
result_ := C.C_NSCollectionViewCompositionalLayoutConfiguration_Autorelease(n.Ptr())
return MakeCollectionViewCompositionalLayoutConfiguration(result_)
}
func (n NSCollectionViewCompositionalLayoutConfiguration) Retain() NSCollectionViewCompositionalLayoutConfiguration {
result_ := C.C_NSCollectionViewCompositionalLayoutConfiguration_Retain(n.Ptr())
return MakeCollectionViewCompositionalLayoutConfiguration(result_)
}
func (n NSCollectionViewCompositionalLayoutConfiguration) ScrollDirection() CollectionViewScrollDirection {
result_ := C.C_NSCollectionViewCompositionalLayoutConfiguration_ScrollDirection(n.Ptr())
return CollectionViewScrollDirection(int(result_))
}
func (n NSCollectionViewCompositionalLayoutConfiguration) SetScrollDirection(value CollectionViewScrollDirection) {
C.C_NSCollectionViewCompositionalLayoutConfiguration_SetScrollDirection(n.Ptr(), C.int(int(value)))
}
func (n NSCollectionViewCompositionalLayoutConfiguration) InterSectionSpacing() coregraphics.Float {
result_ := C.C_NSCollectionViewCompositionalLayoutConfiguration_InterSectionSpacing(n.Ptr())
return coregraphics.Float(float64(result_))
}
func (n NSCollectionViewCompositionalLayoutConfiguration) SetInterSectionSpacing(value coregraphics.Float) {
C.C_NSCollectionViewCompositionalLayoutConfiguration_SetInterSectionSpacing(n.Ptr(), C.double(float64(value)))
}
func (n NSCollectionViewCompositionalLayoutConfiguration) BoundarySupplementaryItems() []CollectionLayoutBoundarySupplementaryItem {
result_ := C.C_NSCollectionViewCompositionalLayoutConfiguration_BoundarySupplementaryItems(n.Ptr())
if result_.len > 0 {
defer C.free(result_.data)
}
result_Slice := unsafe.Slice((*unsafe.Pointer)(result_.data), int(result_.len))
var goResult_ = make([]CollectionLayoutBoundarySupplementaryItem, len(result_Slice))
for idx, r := range result_Slice {
goResult_[idx] = MakeCollectionLayoutBoundarySupplementaryItem(r)
}
return goResult_
}
func (n NSCollectionViewCompositionalLayoutConfiguration) SetBoundarySupplementaryItems(value []CollectionLayoutBoundarySupplementaryItem) {
var cValue C.Array
if len(value) > 0 {
cValueData := make([]unsafe.Pointer, len(value))
for idx, v := range value {
cValueData[idx] = objc.ExtractPtr(v)
}
cValue.data = unsafe.Pointer(&cValueData[0])
cValue.len = C.int(len(value))
}
C.C_NSCollectionViewCompositionalLayoutConfiguration_SetBoundarySupplementaryItems(n.Ptr(), cValue)
}
type CollectionLayoutItem interface {
objc.Object
LayoutSize() NSCollectionLayoutSize
SupplementaryItems() []CollectionLayoutSupplementaryItem
EdgeSpacing() NSCollectionLayoutEdgeSpacing
SetEdgeSpacing(value CollectionLayoutEdgeSpacing)
ContentInsets() DirectionalEdgeInsets
SetContentInsets(value DirectionalEdgeInsets)
}
type NSCollectionLayoutItem struct {
objc.NSObject
}
func MakeCollectionLayoutItem(ptr unsafe.Pointer) NSCollectionLayoutItem {
return NSCollectionLayoutItem{
NSObject: objc.MakeObject(ptr),
}
}
func CollectionLayoutItem_ItemWithLayoutSize(layoutSize CollectionLayoutSize) NSCollectionLayoutItem {
result_ := C.C_NSCollectionLayoutItem_CollectionLayoutItem_ItemWithLayoutSize(objc.ExtractPtr(layoutSize))
return MakeCollectionLayoutItem(result_)
}
func CollectionLayoutItem_ItemWithLayoutSize_SupplementaryItems(layoutSize CollectionLayoutSize, supplementaryItems []CollectionLayoutSupplementaryItem) NSCollectionLayoutItem {
var cSupplementaryItems C.Array
if len(supplementaryItems) > 0 {
cSupplementaryItemsData := make([]unsafe.Pointer, len(supplementaryItems))
for idx, v := range supplementaryItems {
cSupplementaryItemsData[idx] = objc.ExtractPtr(v)
}
cSupplementaryItems.data = unsafe.Pointer(&cSupplementaryItemsData[0])
cSupplementaryItems.len = C.int(len(supplementaryItems))
}
result_ := C.C_NSCollectionLayoutItem_CollectionLayoutItem_ItemWithLayoutSize_SupplementaryItems(objc.ExtractPtr(layoutSize), cSupplementaryItems)
return MakeCollectionLayoutItem(result_)
}
func AllocCollectionLayoutItem() NSCollectionLayoutItem {
result_ := C.C_NSCollectionLayoutItem_AllocCollectionLayoutItem()
return MakeCollectionLayoutItem(result_)
}
func (n NSCollectionLayoutItem) Autorelease() NSCollectionLayoutItem {
result_ := C.C_NSCollectionLayoutItem_Autorelease(n.Ptr())
return MakeCollectionLayoutItem(result_)
}
func (n NSCollectionLayoutItem) Retain() NSCollectionLayoutItem {
result_ := C.C_NSCollectionLayoutItem_Retain(n.Ptr())
return MakeCollectionLayoutItem(result_)
}
func (n NSCollectionLayoutItem) LayoutSize() NSCollectionLayoutSize {
result_ := C.C_NSCollectionLayoutItem_LayoutSize(n.Ptr())
return MakeCollectionLayoutSize(result_)
}
func (n NSCollectionLayoutItem) SupplementaryItems() []CollectionLayoutSupplementaryItem {
result_ := C.C_NSCollectionLayoutItem_SupplementaryItems(n.Ptr())
if result_.len > 0 {
defer C.free(result_.data)
}
result_Slice := unsafe.Slice((*unsafe.Pointer)(result_.data), int(result_.len))
var goResult_ = make([]CollectionLayoutSupplementaryItem, len(result_Slice))
for idx, r := range result_Slice {
goResult_[idx] = MakeCollectionLayoutSupplementaryItem(r)
}
return goResult_
}
func (n NSCollectionLayoutItem) EdgeSpacing() NSCollectionLayoutEdgeSpacing {
result_ := C.C_NSCollectionLayoutItem_EdgeSpacing(n.Ptr())
return MakeCollectionLayoutEdgeSpacing(result_)
}
func (n NSCollectionLayoutItem) SetEdgeSpacing(value CollectionLayoutEdgeSpacing) {
C.C_NSCollectionLayoutItem_SetEdgeSpacing(n.Ptr(), objc.ExtractPtr(value))
}
func (n NSCollectionLayoutItem) ContentInsets() DirectionalEdgeInsets {
result_ := C.C_NSCollectionLayoutItem_ContentInsets(n.Ptr())
return *((*DirectionalEdgeInsets)(unsafe.Pointer(&result_)))
}
func (n NSCollectionLayoutItem) SetContentInsets(value DirectionalEdgeInsets) {
C.C_NSCollectionLayoutItem_SetContentInsets(n.Ptr(), *(*C.NSDirectionalEdgeInsets)(unsafe.Pointer(&value)))
}
type CollectionLayoutBoundarySupplementaryItem interface {
CollectionLayoutSupplementaryItem
PinToVisibleBounds() bool
SetPinToVisibleBounds(value bool)
Offset() foundation.Point
Alignment() RectAlignment
ExtendsBoundary() bool
SetExtendsBoundary(value bool)
}
type NSCollectionLayoutBoundarySupplementaryItem struct {
NSCollectionLayoutSupplementaryItem
}
func MakeCollectionLayoutBoundarySupplementaryItem(ptr unsafe.Pointer) NSCollectionLayoutBoundarySupplementaryItem {
return NSCollectionLayoutBoundarySupplementaryItem{
NSCollectionLayoutSupplementaryItem: MakeCollectionLayoutSupplementaryItem(ptr),
}
}
func CollectionLayoutBoundarySupplementaryItem_BoundarySupplementaryItemWithLayoutSize_ElementKind_Alignment(layoutSize CollectionLayoutSize, elementKind string, alignment RectAlignment) NSCollectionLayoutBoundarySupplementaryItem {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_CollectionLayoutBoundarySupplementaryItem_BoundarySupplementaryItemWithLayoutSize_ElementKind_Alignment(objc.ExtractPtr(layoutSize), foundation.NewString(elementKind).Ptr(), C.int(int(alignment)))
return MakeCollectionLayoutBoundarySupplementaryItem(result_)
}
func CollectionLayoutBoundarySupplementaryItem_BoundarySupplementaryItemWithLayoutSize_ElementKind_Alignment_AbsoluteOffset(layoutSize CollectionLayoutSize, elementKind string, alignment RectAlignment, absoluteOffset foundation.Point) NSCollectionLayoutBoundarySupplementaryItem {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_CollectionLayoutBoundarySupplementaryItem_BoundarySupplementaryItemWithLayoutSize_ElementKind_Alignment_AbsoluteOffset(objc.ExtractPtr(layoutSize), foundation.NewString(elementKind).Ptr(), C.int(int(alignment)), *(*C.CGPoint)(unsafe.Pointer(&absoluteOffset)))
return MakeCollectionLayoutBoundarySupplementaryItem(result_)
}
func CollectionLayoutBoundarySupplementaryItem_SupplementaryItemWithLayoutSize_ElementKind_ContainerAnchor(layoutSize CollectionLayoutSize, elementKind string, containerAnchor CollectionLayoutAnchor) NSCollectionLayoutBoundarySupplementaryItem {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_CollectionLayoutBoundarySupplementaryItem_SupplementaryItemWithLayoutSize_ElementKind_ContainerAnchor(objc.ExtractPtr(layoutSize), foundation.NewString(elementKind).Ptr(), objc.ExtractPtr(containerAnchor))
return MakeCollectionLayoutBoundarySupplementaryItem(result_)
}
func CollectionLayoutBoundarySupplementaryItem_SupplementaryItemWithLayoutSize_ElementKind_ContainerAnchor_ItemAnchor(layoutSize CollectionLayoutSize, elementKind string, containerAnchor CollectionLayoutAnchor, itemAnchor CollectionLayoutAnchor) NSCollectionLayoutBoundarySupplementaryItem {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_CollectionLayoutBoundarySupplementaryItem_SupplementaryItemWithLayoutSize_ElementKind_ContainerAnchor_ItemAnchor(objc.ExtractPtr(layoutSize), foundation.NewString(elementKind).Ptr(), objc.ExtractPtr(containerAnchor), objc.ExtractPtr(itemAnchor))
return MakeCollectionLayoutBoundarySupplementaryItem(result_)
}
func CollectionLayoutBoundarySupplementaryItem_ItemWithLayoutSize(layoutSize CollectionLayoutSize) NSCollectionLayoutBoundarySupplementaryItem {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_CollectionLayoutBoundarySupplementaryItem_ItemWithLayoutSize(objc.ExtractPtr(layoutSize))
return MakeCollectionLayoutBoundarySupplementaryItem(result_)
}
func CollectionLayoutBoundarySupplementaryItem_ItemWithLayoutSize_SupplementaryItems(layoutSize CollectionLayoutSize, supplementaryItems []CollectionLayoutSupplementaryItem) NSCollectionLayoutBoundarySupplementaryItem {
var cSupplementaryItems C.Array
if len(supplementaryItems) > 0 {
cSupplementaryItemsData := make([]unsafe.Pointer, len(supplementaryItems))
for idx, v := range supplementaryItems {
cSupplementaryItemsData[idx] = objc.ExtractPtr(v)
}
cSupplementaryItems.data = unsafe.Pointer(&cSupplementaryItemsData[0])
cSupplementaryItems.len = C.int(len(supplementaryItems))
}
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_CollectionLayoutBoundarySupplementaryItem_ItemWithLayoutSize_SupplementaryItems(objc.ExtractPtr(layoutSize), cSupplementaryItems)
return MakeCollectionLayoutBoundarySupplementaryItem(result_)
}
func AllocCollectionLayoutBoundarySupplementaryItem() NSCollectionLayoutBoundarySupplementaryItem {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_AllocCollectionLayoutBoundarySupplementaryItem()
return MakeCollectionLayoutBoundarySupplementaryItem(result_)
}
func (n NSCollectionLayoutBoundarySupplementaryItem) Autorelease() NSCollectionLayoutBoundarySupplementaryItem {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_Autorelease(n.Ptr())
return MakeCollectionLayoutBoundarySupplementaryItem(result_)
}
func (n NSCollectionLayoutBoundarySupplementaryItem) Retain() NSCollectionLayoutBoundarySupplementaryItem {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_Retain(n.Ptr())
return MakeCollectionLayoutBoundarySupplementaryItem(result_)
}
func (n NSCollectionLayoutBoundarySupplementaryItem) PinToVisibleBounds() bool {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_PinToVisibleBounds(n.Ptr())
return bool(result_)
}
func (n NSCollectionLayoutBoundarySupplementaryItem) SetPinToVisibleBounds(value bool) {
C.C_NSCollectionLayoutBoundarySupplementaryItem_SetPinToVisibleBounds(n.Ptr(), C.bool(value))
}
func (n NSCollectionLayoutBoundarySupplementaryItem) Offset() foundation.Point {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_Offset(n.Ptr())
return *((*coregraphics.Point)(unsafe.Pointer(&result_)))
}
func (n NSCollectionLayoutBoundarySupplementaryItem) Alignment() RectAlignment {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_Alignment(n.Ptr())
return RectAlignment(int(result_))
}
func (n NSCollectionLayoutBoundarySupplementaryItem) ExtendsBoundary() bool {
result_ := C.C_NSCollectionLayoutBoundarySupplementaryItem_ExtendsBoundary(n.Ptr())
return bool(result_)
}
func (n NSCollectionLayoutBoundarySupplementaryItem) SetExtendsBoundary(value bool) {
C.C_NSCollectionLayoutBoundarySupplementaryItem_SetExtendsBoundary(n.Ptr(), C.bool(value))
}
type CollectionLayoutSpacing interface {
objc.Object
Spacing() coregraphics.Float
IsFixedSpacing() bool
IsFlexibleSpacing() bool
}
type NSCollectionLayoutSpacing struct {
objc.NSObject
}
func MakeCollectionLayoutSpacing(ptr unsafe.Pointer) NSCollectionLayoutSpacing {
return NSCollectionLayoutSpacing{
NSObject: objc.MakeObject(ptr),
}
}
func CollectionLayoutSpacing_FixedSpacing(fixedSpacing coregraphics.Float) NSCollectionLayoutSpacing {
result_ := C.C_NSCollectionLayoutSpacing_CollectionLayoutSpacing_FixedSpacing(C.double(float64(fixedSpacing)))
return MakeCollectionLayoutSpacing(result_)
}
func CollectionLayoutSpacing_FlexibleSpacing(flexibleSpacing coregraphics.Float) NSCollectionLayoutSpacing {
result_ := C.C_NSCollectionLayoutSpacing_CollectionLayoutSpacing_FlexibleSpacing(C.double(float64(flexibleSpacing)))
return MakeCollectionLayoutSpacing(result_)
}
func AllocCollectionLayoutSpacing() NSCollectionLayoutSpacing {
result_ := C.C_NSCollectionLayoutSpacing_AllocCollectionLayoutSpacing()
return MakeCollectionLayoutSpacing(result_)
}
func (n NSCollectionLayoutSpacing) Autorelease() NSCollectionLayoutSpacing {
result_ := C.C_NSCollectionLayoutSpacing_Autorelease(n.Ptr())
return MakeCollectionLayoutSpacing(result_)
}
func (n NSCollectionLayoutSpacing) Retain() NSCollectionLayoutSpacing {
result_ := C.C_NSCollectionLayoutSpacing_Retain(n.Ptr())
return MakeCollectionLayoutSpacing(result_)
}
func (n NSCollectionLayoutSpacing) Spacing() coregraphics.Float {
result_ := C.C_NSCollectionLayoutSpacing_Spacing(n.Ptr())
return coregraphics.Float(float64(result_))
}
func (n NSCollectionLayoutSpacing) IsFixedSpacing() bool {
result_ := C.C_NSCollectionLayoutSpacing_IsFixedSpacing(n.Ptr())
return bool(result_)
}
func (n NSCollectionLayoutSpacing) IsFlexibleSpacing() bool {
result_ := C.C_NSCollectionLayoutSpacing_IsFlexibleSpacing(n.Ptr())
return bool(result_)
}
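// Illustrative sketch (not part of the generated bindings): the two spacing
// constructors above together with their predicate accessors. The point
// values are arbitrary examples.
func exampleSpacingKinds() (NSCollectionLayoutSpacing, NSCollectionLayoutSpacing) {
	fixed := CollectionLayoutSpacing_FixedSpacing(8)       // always exactly 8 points between items
	flexible := CollectionLayoutSpacing_FlexibleSpacing(4) // at least 4 points, may grow to fill space
	// IsFixedSpacing/IsFlexibleSpacing report which constructor produced the value.
	return fixed, flexible
}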
type CollectionLayoutSection interface {
objc.Object
OrthogonalScrollingBehavior() CollectionLayoutSectionOrthogonalScrollingBehavior
SetOrthogonalScrollingBehavior(value CollectionLayoutSectionOrthogonalScrollingBehavior)
InterGroupSpacing() coregraphics.Float
SetInterGroupSpacing(value coregraphics.Float)
ContentInsets() DirectionalEdgeInsets
SetContentInsets(value DirectionalEdgeInsets)
SupplementariesFollowContentInsets() bool
SetSupplementariesFollowContentInsets(value bool)
BoundarySupplementaryItems() []CollectionLayoutBoundarySupplementaryItem
SetBoundarySupplementaryItems(value []CollectionLayoutBoundarySupplementaryItem)
DecorationItems() []CollectionLayoutDecorationItem
SetDecorationItems(value []CollectionLayoutDecorationItem)
}
type NSCollectionLayoutSection struct {
objc.NSObject
}
func MakeCollectionLayoutSection(ptr unsafe.Pointer) NSCollectionLayoutSection {
return NSCollectionLayoutSection{
NSObject: objc.MakeObject(ptr),
}
}
func CollectionLayoutSection_SectionWithGroup(group CollectionLayoutGroup) NSCollectionLayoutSection {
result_ := C.C_NSCollectionLayoutSection_CollectionLayoutSection_SectionWithGroup(objc.ExtractPtr(group))
return MakeCollectionLayoutSection(result_)
}
func AllocCollectionLayoutSection() NSCollectionLayoutSection {
result_ := C.C_NSCollectionLayoutSection_AllocCollectionLayoutSection()
return MakeCollectionLayoutSection(result_)
}
func (n NSCollectionLayoutSection) Autorelease() NSCollectionLayoutSection {
result_ := C.C_NSCollectionLayoutSection_Autorelease(n.Ptr())
return MakeCollectionLayoutSection(result_)
}
func (n NSCollectionLayoutSection) Retain() NSCollectionLayoutSection {
result_ := C.C_NSCollectionLayoutSection_Retain(n.Ptr())
return MakeCollectionLayoutSection(result_)
}
func (n NSCollectionLayoutSection) OrthogonalScrollingBehavior() CollectionLayoutSectionOrthogonalScrollingBehavior {
result_ := C.C_NSCollectionLayoutSection_OrthogonalScrollingBehavior(n.Ptr())
return CollectionLayoutSectionOrthogonalScrollingBehavior(int(result_))
}
func (n NSCollectionLayoutSection) SetOrthogonalScrollingBehavior(value CollectionLayoutSectionOrthogonalScrollingBehavior) {
C.C_NSCollectionLayoutSection_SetOrthogonalScrollingBehavior(n.Ptr(), C.int(int(value)))
}
func (n NSCollectionLayoutSection) InterGroupSpacing() coregraphics.Float {
result_ := C.C_NSCollectionLayoutSection_InterGroupSpacing(n.Ptr())
return coregraphics.Float(float64(result_))
}
func (n NSCollectionLayoutSection) SetInterGroupSpacing(value coregraphics.Float) {
C.C_NSCollectionLayoutSection_SetInterGroupSpacing(n.Ptr(), C.double(float64(value)))
}
func (n NSCollectionLayoutSection) ContentInsets() DirectionalEdgeInsets {
result_ := C.C_NSCollectionLayoutSection_ContentInsets(n.Ptr())
return *((*DirectionalEdgeInsets)(unsafe.Pointer(&result_)))
}
func (n NSCollectionLayoutSection) SetContentInsets(value DirectionalEdgeInsets) {
C.C_NSCollectionLayoutSection_SetContentInsets(n.Ptr(), *(*C.NSDirectionalEdgeInsets)(unsafe.Pointer(&value)))
}
func (n NSCollectionLayoutSection) SupplementariesFollowContentInsets() bool {
result_ := C.C_NSCollectionLayoutSection_SupplementariesFollowContentInsets(n.Ptr())
return bool(result_)
}
func (n NSCollectionLayoutSection) SetSupplementariesFollowContentInsets(value bool) {
C.C_NSCollectionLayoutSection_SetSupplementariesFollowContentInsets(n.Ptr(), C.bool(value))
}
func (n NSCollectionLayoutSection) BoundarySupplementaryItems() []CollectionLayoutBoundarySupplementaryItem {
result_ := C.C_NSCollectionLayoutSection_BoundarySupplementaryItems(n.Ptr())
if result_.len > 0 {
defer C.free(result_.data)
}
result_Slice := unsafe.Slice((*unsafe.Pointer)(result_.data), int(result_.len))
var goResult_ = make([]CollectionLayoutBoundarySupplementaryItem, len(result_Slice))
for idx, r := range result_Slice {
goResult_[idx] = MakeCollectionLayoutBoundarySupplementaryItem(r)
}
return goResult_
}
func (n NSCollectionLayoutSection) SetBoundarySupplementaryItems(value []CollectionLayoutBoundarySupplementaryItem) {
var cValue C.Array
if len(value) > 0 {
cValueData := make([]unsafe.Pointer, len(value))
for idx, v := range value {
cValueData[idx] = objc.ExtractPtr(v)
}
cValue.data = unsafe.Pointer(&cValueData[0])
cValue.len = C.int(len(value))
}
C.C_NSCollectionLayoutSection_SetBoundarySupplementaryItems(n.Ptr(), cValue)
}
func (n NSCollectionLayoutSection) DecorationItems() []CollectionLayoutDecorationItem {
result_ := C.C_NSCollectionLayoutSection_DecorationItems(n.Ptr())
if result_.len > 0 {
defer C.free(result_.data)
}
result_Slice := unsafe.Slice((*unsafe.Pointer)(result_.data), int(result_.len))
var goResult_ = make([]CollectionLayoutDecorationItem, len(result_Slice))
for idx, r := range result_Slice {
goResult_[idx] = MakeCollectionLayoutDecorationItem(r)
}
return goResult_
}
func (n NSCollectionLayoutSection) SetDecorationItems(value []CollectionLayoutDecorationItem) {
var cValue C.Array
if len(value) > 0 {
cValueData := make([]unsafe.Pointer, len(value))
for idx, v := range value {
cValueData[idx] = objc.ExtractPtr(v)
}
cValue.data = unsafe.Pointer(&cValueData[0])
cValue.len = C.int(len(value))
}
C.C_NSCollectionLayoutSection_SetDecorationItems(n.Ptr(), cValue)
}
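// Illustrative sketch (not part of the generated bindings): composing the
// wrappers in this file into a basic list-style section. It assumes the base
// item constructor CollectionLayoutItem_ItemWithLayoutSize is defined earlier
// in this file; sizes, counts and spacing values are arbitrary examples.
func exampleBasicSection() NSCollectionLayoutSection {
	itemSize := CollectionLayoutSize_SizeWithWidthDimension_HeightDimension(
		CollectionLayoutDimension_FractionalWidthDimension(1.0), // full container width
		CollectionLayoutDimension_AbsoluteDimension(44))         // fixed 44-point rows
	item := CollectionLayoutItem_ItemWithLayoutSize(itemSize)
	group := CollectionLayoutGroup_VerticalGroupWithLayoutSize_Subitem_Count(itemSize, item, 3)
	section := CollectionLayoutSection_SectionWithGroup(group)
	section.SetInterGroupSpacing(8)
	return section
}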
type CollectionLayoutGroupCustomItem interface {
objc.Object
Frame() foundation.Rect
ZIndex() int
}
type NSCollectionLayoutGroupCustomItem struct {
objc.NSObject
}
func MakeCollectionLayoutGroupCustomItem(ptr unsafe.Pointer) NSCollectionLayoutGroupCustomItem {
return NSCollectionLayoutGroupCustomItem{
NSObject: objc.MakeObject(ptr),
}
}
func CollectionLayoutGroupCustomItem_CustomItemWithFrame(frame foundation.Rect) NSCollectionLayoutGroupCustomItem {
result_ := C.C_NSCollectionLayoutGroupCustomItem_CollectionLayoutGroupCustomItem_CustomItemWithFrame(*(*C.CGRect)(unsafe.Pointer(&frame)))
return MakeCollectionLayoutGroupCustomItem(result_)
}
func CollectionLayoutGroupCustomItem_CustomItemWithFrame_ZIndex(frame foundation.Rect, zIndex int) NSCollectionLayoutGroupCustomItem {
result_ := C.C_NSCollectionLayoutGroupCustomItem_CollectionLayoutGroupCustomItem_CustomItemWithFrame_ZIndex(*(*C.CGRect)(unsafe.Pointer(&frame)), C.int(zIndex))
return MakeCollectionLayoutGroupCustomItem(result_)
}
func AllocCollectionLayoutGroupCustomItem() NSCollectionLayoutGroupCustomItem {
result_ := C.C_NSCollectionLayoutGroupCustomItem_AllocCollectionLayoutGroupCustomItem()
return MakeCollectionLayoutGroupCustomItem(result_)
}
func (n NSCollectionLayoutGroupCustomItem) Autorelease() NSCollectionLayoutGroupCustomItem {
result_ := C.C_NSCollectionLayoutGroupCustomItem_Autorelease(n.Ptr())
return MakeCollectionLayoutGroupCustomItem(result_)
}
func (n NSCollectionLayoutGroupCustomItem) Retain() NSCollectionLayoutGroupCustomItem {
result_ := C.C_NSCollectionLayoutGroupCustomItem_Retain(n.Ptr())
return MakeCollectionLayoutGroupCustomItem(result_)
}
func (n NSCollectionLayoutGroupCustomItem) Frame() foundation.Rect {
result_ := C.C_NSCollectionLayoutGroupCustomItem_Frame(n.Ptr())
return *((*coregraphics.Rect)(unsafe.Pointer(&result_)))
}
func (n NSCollectionLayoutGroupCustomItem) ZIndex() int {
result_ := C.C_NSCollectionLayoutGroupCustomItem_ZIndex(n.Ptr())
return int(result_)
}
type CollectionLayoutSupplementaryItem interface {
CollectionLayoutItem
ItemAnchor() NSCollectionLayoutAnchor
ContainerAnchor() NSCollectionLayoutAnchor
ElementKind() string
ZIndex() int
SetZIndex(value int)
}
type NSCollectionLayoutSupplementaryItem struct {
NSCollectionLayoutItem
}
func MakeCollectionLayoutSupplementaryItem(ptr unsafe.Pointer) NSCollectionLayoutSupplementaryItem {
return NSCollectionLayoutSupplementaryItem{
NSCollectionLayoutItem: MakeCollectionLayoutItem(ptr),
}
}
func CollectionLayoutSupplementaryItem_SupplementaryItemWithLayoutSize_ElementKind_ContainerAnchor(layoutSize CollectionLayoutSize, elementKind string, containerAnchor CollectionLayoutAnchor) NSCollectionLayoutSupplementaryItem {
result_ := C.C_NSCollectionLayoutSupplementaryItem_CollectionLayoutSupplementaryItem_SupplementaryItemWithLayoutSize_ElementKind_ContainerAnchor(objc.ExtractPtr(layoutSize), foundation.NewString(elementKind).Ptr(), objc.ExtractPtr(containerAnchor))
return MakeCollectionLayoutSupplementaryItem(result_)
}
func CollectionLayoutSupplementaryItem_SupplementaryItemWithLayoutSize_ElementKind_ContainerAnchor_ItemAnchor(layoutSize CollectionLayoutSize, elementKind string, containerAnchor CollectionLayoutAnchor, itemAnchor CollectionLayoutAnchor) NSCollectionLayoutSupplementaryItem {
result_ := C.C_NSCollectionLayoutSupplementaryItem_CollectionLayoutSupplementaryItem_SupplementaryItemWithLayoutSize_ElementKind_ContainerAnchor_ItemAnchor(objc.ExtractPtr(layoutSize), foundation.NewString(elementKind).Ptr(), objc.ExtractPtr(containerAnchor), objc.ExtractPtr(itemAnchor))
return MakeCollectionLayoutSupplementaryItem(result_)
}
func CollectionLayoutSupplementaryItem_ItemWithLayoutSize(layoutSize CollectionLayoutSize) NSCollectionLayoutSupplementaryItem {
result_ := C.C_NSCollectionLayoutSupplementaryItem_CollectionLayoutSupplementaryItem_ItemWithLayoutSize(objc.ExtractPtr(layoutSize))
return MakeCollectionLayoutSupplementaryItem(result_)
}
func CollectionLayoutSupplementaryItem_ItemWithLayoutSize_SupplementaryItems(layoutSize CollectionLayoutSize, supplementaryItems []CollectionLayoutSupplementaryItem) NSCollectionLayoutSupplementaryItem {
var cSupplementaryItems C.Array
if len(supplementaryItems) > 0 {
cSupplementaryItemsData := make([]unsafe.Pointer, len(supplementaryItems))
for idx, v := range supplementaryItems {
cSupplementaryItemsData[idx] = objc.ExtractPtr(v)
}
cSupplementaryItems.data = unsafe.Pointer(&cSupplementaryItemsData[0])
cSupplementaryItems.len = C.int(len(supplementaryItems))
}
result_ := C.C_NSCollectionLayoutSupplementaryItem_CollectionLayoutSupplementaryItem_ItemWithLayoutSize_SupplementaryItems(objc.ExtractPtr(layoutSize), cSupplementaryItems)
return MakeCollectionLayoutSupplementaryItem(result_)
}
func AllocCollectionLayoutSupplementaryItem() NSCollectionLayoutSupplementaryItem {
result_ := C.C_NSCollectionLayoutSupplementaryItem_AllocCollectionLayoutSupplementaryItem()
return MakeCollectionLayoutSupplementaryItem(result_)
}
func (n NSCollectionLayoutSupplementaryItem) Autorelease() NSCollectionLayoutSupplementaryItem {
result_ := C.C_NSCollectionLayoutSupplementaryItem_Autorelease(n.Ptr())
return MakeCollectionLayoutSupplementaryItem(result_)
}
func (n NSCollectionLayoutSupplementaryItem) Retain() NSCollectionLayoutSupplementaryItem {
result_ := C.C_NSCollectionLayoutSupplementaryItem_Retain(n.Ptr())
return MakeCollectionLayoutSupplementaryItem(result_)
}
func (n NSCollectionLayoutSupplementaryItem) ItemAnchor() NSCollectionLayoutAnchor {
result_ := C.C_NSCollectionLayoutSupplementaryItem_ItemAnchor(n.Ptr())
return MakeCollectionLayoutAnchor(result_)
}
func (n NSCollectionLayoutSupplementaryItem) ContainerAnchor() NSCollectionLayoutAnchor {
result_ := C.C_NSCollectionLayoutSupplementaryItem_ContainerAnchor(n.Ptr())
return MakeCollectionLayoutAnchor(result_)
}
func (n NSCollectionLayoutSupplementaryItem) ElementKind() string {
result_ := C.C_NSCollectionLayoutSupplementaryItem_ElementKind(n.Ptr())
return foundation.MakeString(result_).String()
}
func (n NSCollectionLayoutSupplementaryItem) ZIndex() int {
result_ := C.C_NSCollectionLayoutSupplementaryItem_ZIndex(n.Ptr())
return int(result_)
}
func (n NSCollectionLayoutSupplementaryItem) SetZIndex(value int) {
C.C_NSCollectionLayoutSupplementaryItem_SetZIndex(n.Ptr(), C.int(value))
}
type CollectionLayoutSize interface {
objc.Object
WidthDimension() NSCollectionLayoutDimension
HeightDimension() NSCollectionLayoutDimension
}
type NSCollectionLayoutSize struct {
objc.NSObject
}
func MakeCollectionLayoutSize(ptr unsafe.Pointer) NSCollectionLayoutSize {
return NSCollectionLayoutSize{
NSObject: objc.MakeObject(ptr),
}
}
func CollectionLayoutSize_SizeWithWidthDimension_HeightDimension(width CollectionLayoutDimension, height CollectionLayoutDimension) NSCollectionLayoutSize {
result_ := C.C_NSCollectionLayoutSize_CollectionLayoutSize_SizeWithWidthDimension_HeightDimension(objc.ExtractPtr(width), objc.ExtractPtr(height))
return MakeCollectionLayoutSize(result_)
}
func AllocCollectionLayoutSize() NSCollectionLayoutSize {
result_ := C.C_NSCollectionLayoutSize_AllocCollectionLayoutSize()
return MakeCollectionLayoutSize(result_)
}
func (n NSCollectionLayoutSize) Autorelease() NSCollectionLayoutSize {
result_ := C.C_NSCollectionLayoutSize_Autorelease(n.Ptr())
return MakeCollectionLayoutSize(result_)
}
func (n NSCollectionLayoutSize) Retain() NSCollectionLayoutSize {
result_ := C.C_NSCollectionLayoutSize_Retain(n.Ptr())
return MakeCollectionLayoutSize(result_)
}
func (n NSCollectionLayoutSize) WidthDimension() NSCollectionLayoutDimension {
result_ := C.C_NSCollectionLayoutSize_WidthDimension(n.Ptr())
return MakeCollectionLayoutDimension(result_)
}
func (n NSCollectionLayoutSize) HeightDimension() NSCollectionLayoutDimension {
result_ := C.C_NSCollectionLayoutSize_HeightDimension(n.Ptr())
return MakeCollectionLayoutDimension(result_)
}
type CollectionLayoutEdgeSpacing interface {
objc.Object
Leading() NSCollectionLayoutSpacing
Top() NSCollectionLayoutSpacing
Trailing() NSCollectionLayoutSpacing
Bottom() NSCollectionLayoutSpacing
}
type NSCollectionLayoutEdgeSpacing struct {
objc.NSObject
}
func MakeCollectionLayoutEdgeSpacing(ptr unsafe.Pointer) NSCollectionLayoutEdgeSpacing {
return NSCollectionLayoutEdgeSpacing{
NSObject: objc.MakeObject(ptr),
}
}
func CollectionLayoutEdgeSpacing_SpacingForLeading_Top_Trailing_Bottom(leading CollectionLayoutSpacing, top CollectionLayoutSpacing, trailing CollectionLayoutSpacing, bottom CollectionLayoutSpacing) NSCollectionLayoutEdgeSpacing {
result_ := C.C_NSCollectionLayoutEdgeSpacing_CollectionLayoutEdgeSpacing_SpacingForLeading_Top_Trailing_Bottom(objc.ExtractPtr(leading), objc.ExtractPtr(top), objc.ExtractPtr(trailing), objc.ExtractPtr(bottom))
return MakeCollectionLayoutEdgeSpacing(result_)
}
func AllocCollectionLayoutEdgeSpacing() NSCollectionLayoutEdgeSpacing {
result_ := C.C_NSCollectionLayoutEdgeSpacing_AllocCollectionLayoutEdgeSpacing()
return MakeCollectionLayoutEdgeSpacing(result_)
}
func (n NSCollectionLayoutEdgeSpacing) Autorelease() NSCollectionLayoutEdgeSpacing {
result_ := C.C_NSCollectionLayoutEdgeSpacing_Autorelease(n.Ptr())
return MakeCollectionLayoutEdgeSpacing(result_)
}
func (n NSCollectionLayoutEdgeSpacing) Retain() NSCollectionLayoutEdgeSpacing {
result_ := C.C_NSCollectionLayoutEdgeSpacing_Retain(n.Ptr())
return MakeCollectionLayoutEdgeSpacing(result_)
}
func (n NSCollectionLayoutEdgeSpacing) Leading() NSCollectionLayoutSpacing {
result_ := C.C_NSCollectionLayoutEdgeSpacing_Leading(n.Ptr())
return MakeCollectionLayoutSpacing(result_)
}
func (n NSCollectionLayoutEdgeSpacing) Top() NSCollectionLayoutSpacing {
result_ := C.C_NSCollectionLayoutEdgeSpacing_Top(n.Ptr())
return MakeCollectionLayoutSpacing(result_)
}
func (n NSCollectionLayoutEdgeSpacing) Trailing() NSCollectionLayoutSpacing {
result_ := C.C_NSCollectionLayoutEdgeSpacing_Trailing(n.Ptr())
return MakeCollectionLayoutSpacing(result_)
}
func (n NSCollectionLayoutEdgeSpacing) Bottom() NSCollectionLayoutSpacing {
result_ := C.C_NSCollectionLayoutEdgeSpacing_Bottom(n.Ptr())
return MakeCollectionLayoutSpacing(result_)
}
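// Illustrative sketch (not part of the generated bindings): per-edge spacing
// assembled from the spacing constructors defined above. Values are arbitrary.
func exampleEdgeSpacing() NSCollectionLayoutEdgeSpacing {
	none := CollectionLayoutSpacing_FixedSpacing(0)
	gap := CollectionLayoutSpacing_FixedSpacing(4)
	// No extra space on the leading/trailing edges, a 4-point gap above and below.
	return CollectionLayoutEdgeSpacing_SpacingForLeading_Top_Trailing_Bottom(none, gap, none, gap)
}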
type CollectionLayoutAnchor interface {
objc.Object
Edges() DirectionalRectEdge
Offset() foundation.Point
IsAbsoluteOffset() bool
IsFractionalOffset() bool
}
type NSCollectionLayoutAnchor struct {
objc.NSObject
}
func MakeCollectionLayoutAnchor(ptr unsafe.Pointer) NSCollectionLayoutAnchor {
return NSCollectionLayoutAnchor{
NSObject: objc.MakeObject(ptr),
}
}
func CollectionLayoutAnchor_LayoutAnchorWithEdges(edges DirectionalRectEdge) NSCollectionLayoutAnchor {
result_ := C.C_NSCollectionLayoutAnchor_CollectionLayoutAnchor_LayoutAnchorWithEdges(C.uint(uint(edges)))
return MakeCollectionLayoutAnchor(result_)
}
func CollectionLayoutAnchor_LayoutAnchorWithEdges_AbsoluteOffset(edges DirectionalRectEdge, absoluteOffset foundation.Point) NSCollectionLayoutAnchor {
result_ := C.C_NSCollectionLayoutAnchor_CollectionLayoutAnchor_LayoutAnchorWithEdges_AbsoluteOffset(C.uint(uint(edges)), *(*C.CGPoint)(unsafe.Pointer(&absoluteOffset)))
return MakeCollectionLayoutAnchor(result_)
}
func CollectionLayoutAnchor_LayoutAnchorWithEdges_FractionalOffset(edges DirectionalRectEdge, fractionalOffset foundation.Point) NSCollectionLayoutAnchor {
result_ := C.C_NSCollectionLayoutAnchor_CollectionLayoutAnchor_LayoutAnchorWithEdges_FractionalOffset(C.uint(uint(edges)), *(*C.CGPoint)(unsafe.Pointer(&fractionalOffset)))
return MakeCollectionLayoutAnchor(result_)
}
func AllocCollectionLayoutAnchor() NSCollectionLayoutAnchor {
result_ := C.C_NSCollectionLayoutAnchor_AllocCollectionLayoutAnchor()
return MakeCollectionLayoutAnchor(result_)
}
func (n NSCollectionLayoutAnchor) Autorelease() NSCollectionLayoutAnchor {
result_ := C.C_NSCollectionLayoutAnchor_Autorelease(n.Ptr())
return MakeCollectionLayoutAnchor(result_)
}
func (n NSCollectionLayoutAnchor) Retain() NSCollectionLayoutAnchor {
result_ := C.C_NSCollectionLayoutAnchor_Retain(n.Ptr())
return MakeCollectionLayoutAnchor(result_)
}
func (n NSCollectionLayoutAnchor) Edges() DirectionalRectEdge {
result_ := C.C_NSCollectionLayoutAnchor_Edges(n.Ptr())
return DirectionalRectEdge(uint(result_))
}
func (n NSCollectionLayoutAnchor) Offset() foundation.Point {
result_ := C.C_NSCollectionLayoutAnchor_Offset(n.Ptr())
return *((*coregraphics.Point)(unsafe.Pointer(&result_)))
}
func (n NSCollectionLayoutAnchor) IsAbsoluteOffset() bool {
result_ := C.C_NSCollectionLayoutAnchor_IsAbsoluteOffset(n.Ptr())
return bool(result_)
}
func (n NSCollectionLayoutAnchor) IsFractionalOffset() bool {
result_ := C.C_NSCollectionLayoutAnchor_IsFractionalOffset(n.Ptr())
return bool(result_)
}
type CollectionLayoutDimension interface {
objc.Object
Dimension() coregraphics.Float
IsAbsolute() bool
IsEstimated() bool
IsFractionalHeight() bool
IsFractionalWidth() bool
}
type NSCollectionLayoutDimension struct {
objc.NSObject
}
func MakeCollectionLayoutDimension(ptr unsafe.Pointer) NSCollectionLayoutDimension {
return NSCollectionLayoutDimension{
NSObject: objc.MakeObject(ptr),
}
}
func CollectionLayoutDimension_AbsoluteDimension(absoluteDimension coregraphics.Float) NSCollectionLayoutDimension {
result_ := C.C_NSCollectionLayoutDimension_CollectionLayoutDimension_AbsoluteDimension(C.double(float64(absoluteDimension)))
return MakeCollectionLayoutDimension(result_)
}
func CollectionLayoutDimension_EstimatedDimension(estimatedDimension coregraphics.Float) NSCollectionLayoutDimension {
result_ := C.C_NSCollectionLayoutDimension_CollectionLayoutDimension_EstimatedDimension(C.double(float64(estimatedDimension)))
return MakeCollectionLayoutDimension(result_)
}
func CollectionLayoutDimension_FractionalHeightDimension(fractionalHeight coregraphics.Float) NSCollectionLayoutDimension {
result_ := C.C_NSCollectionLayoutDimension_CollectionLayoutDimension_FractionalHeightDimension(C.double(float64(fractionalHeight)))
return MakeCollectionLayoutDimension(result_)
}
func CollectionLayoutDimension_FractionalWidthDimension(fractionalWidth coregraphics.Float) NSCollectionLayoutDimension {
result_ := C.C_NSCollectionLayoutDimension_CollectionLayoutDimension_FractionalWidthDimension(C.double(float64(fractionalWidth)))
return MakeCollectionLayoutDimension(result_)
}
func AllocCollectionLayoutDimension() NSCollectionLayoutDimension {
result_ := C.C_NSCollectionLayoutDimension_AllocCollectionLayoutDimension()
return MakeCollectionLayoutDimension(result_)
}
func (n NSCollectionLayoutDimension) Autorelease() NSCollectionLayoutDimension {
result_ := C.C_NSCollectionLayoutDimension_Autorelease(n.Ptr())
return MakeCollectionLayoutDimension(result_)
}
func (n NSCollectionLayoutDimension) Retain() NSCollectionLayoutDimension {
result_ := C.C_NSCollectionLayoutDimension_Retain(n.Ptr())
return MakeCollectionLayoutDimension(result_)
}
func (n NSCollectionLayoutDimension) Dimension() coregraphics.Float {
result_ := C.C_NSCollectionLayoutDimension_Dimension(n.Ptr())
return coregraphics.Float(float64(result_))
}
func (n NSCollectionLayoutDimension) IsAbsolute() bool {
result_ := C.C_NSCollectionLayoutDimension_IsAbsolute(n.Ptr())
return bool(result_)
}
func (n NSCollectionLayoutDimension) IsEstimated() bool {
result_ := C.C_NSCollectionLayoutDimension_IsEstimated(n.Ptr())
return bool(result_)
}
func (n NSCollectionLayoutDimension) IsFractionalHeight() bool {
result_ := C.C_NSCollectionLayoutDimension_IsFractionalHeight(n.Ptr())
return bool(result_)
}
func (n NSCollectionLayoutDimension) IsFractionalWidth() bool {
result_ := C.C_NSCollectionLayoutDimension_IsFractionalWidth(n.Ptr())
return bool(result_)
}
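// Illustrative sketch (not part of the generated bindings): a layout size
// built from two of the dimension constructors above. Fractional dimensions
// are relative to the container; estimated dimensions are resolved to the
// actual content size at layout time. Values are arbitrary examples.
func exampleEstimatedRowSize() NSCollectionLayoutSize {
	width := CollectionLayoutDimension_FractionalWidthDimension(1.0)
	height := CollectionLayoutDimension_EstimatedDimension(60)
	return CollectionLayoutSize_SizeWithWidthDimension_HeightDimension(width, height)
}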
type CollectionLayoutGroup interface {
CollectionLayoutItem
VisualDescription() string
Subitems() []CollectionLayoutItem
SetSupplementaryItems(value []CollectionLayoutSupplementaryItem)
InterItemSpacing() NSCollectionLayoutSpacing
SetInterItemSpacing(value CollectionLayoutSpacing)
}
type NSCollectionLayoutGroup struct {
NSCollectionLayoutItem
}
func MakeCollectionLayoutGroup(ptr unsafe.Pointer) NSCollectionLayoutGroup {
return NSCollectionLayoutGroup{
NSCollectionLayoutItem: MakeCollectionLayoutItem(ptr),
}
}
func CollectionLayoutGroup_HorizontalGroupWithLayoutSize_Subitems(layoutSize CollectionLayoutSize, subitems []CollectionLayoutItem) NSCollectionLayoutGroup {
var cSubitems C.Array
if len(subitems) > 0 {
cSubitemsData := make([]unsafe.Pointer, len(subitems))
for idx, v := range subitems {
cSubitemsData[idx] = objc.ExtractPtr(v)
}
cSubitems.data = unsafe.Pointer(&cSubitemsData[0])
cSubitems.len = C.int(len(subitems))
}
result_ := C.C_NSCollectionLayoutGroup_CollectionLayoutGroup_HorizontalGroupWithLayoutSize_Subitems(objc.ExtractPtr(layoutSize), cSubitems)
return MakeCollectionLayoutGroup(result_)
}
func CollectionLayoutGroup_HorizontalGroupWithLayoutSize_Subitem_Count(layoutSize CollectionLayoutSize, subitem CollectionLayoutItem, count int) NSCollectionLayoutGroup {
result_ := C.C_NSCollectionLayoutGroup_CollectionLayoutGroup_HorizontalGroupWithLayoutSize_Subitem_Count(objc.ExtractPtr(layoutSize), objc.ExtractPtr(subitem), C.int(count))
return MakeCollectionLayoutGroup(result_)
}
func CollectionLayoutGroup_VerticalGroupWithLayoutSize_Subitems(layoutSize CollectionLayoutSize, subitems []CollectionLayoutItem) NSCollectionLayoutGroup {
var cSubitems C.Array
if len(subitems) > 0 {
cSubitemsData := make([]unsafe.Pointer, len(subitems))
for idx, v := range subitems {
cSubitemsData[idx] = objc.ExtractPtr(v)
}
cSubitems.data = unsafe.Pointer(&cSubitemsData[0])
cSubitems.len = C.int(len(subitems))
}
result_ := C.C_NSCollectionLayoutGroup_CollectionLayoutGroup_VerticalGroupWithLayoutSize_Subitems(objc.ExtractPtr(layoutSize), cSubitems)
return MakeCollectionLayoutGroup(result_)
}
func CollectionLayoutGroup_VerticalGroupWithLayoutSize_Subitem_Count(layoutSize CollectionLayoutSize, subitem CollectionLayoutItem, count int) NSCollectionLayoutGroup {
result_ := C.C_NSCollectionLayoutGroup_CollectionLayoutGroup_VerticalGroupWithLayoutSize_Subitem_Count(objc.ExtractPtr(layoutSize), objc.ExtractPtr(subitem), C.int(count))
return MakeCollectionLayoutGroup(result_)
}
func CollectionLayoutGroup_ItemWithLayoutSize(layoutSize CollectionLayoutSize) NSCollectionLayoutGroup {
result_ := C.C_NSCollectionLayoutGroup_CollectionLayoutGroup_ItemWithLayoutSize(objc.ExtractPtr(layoutSize))
return MakeCollectionLayoutGroup(result_)
}
func CollectionLayoutGroup_ItemWithLayoutSize_SupplementaryItems(layoutSize CollectionLayoutSize, supplementaryItems []CollectionLayoutSupplementaryItem) NSCollectionLayoutGroup {
var cSupplementaryItems C.Array
if len(supplementaryItems) > 0 {
cSupplementaryItemsData := make([]unsafe.Pointer, len(supplementaryItems))
for idx, v := range supplementaryItems {
cSupplementaryItemsData[idx] = objc.ExtractPtr(v)
}
cSupplementaryItems.data = unsafe.Pointer(&cSupplementaryItemsData[0])
cSupplementaryItems.len = C.int(len(supplementaryItems))
}
result_ := C.C_NSCollectionLayoutGroup_CollectionLayoutGroup_ItemWithLayoutSize_SupplementaryItems(objc.ExtractPtr(layoutSize), cSupplementaryItems)
return MakeCollectionLayoutGroup(result_)
}
func AllocCollectionLayoutGroup() NSCollectionLayoutGroup {
result_ := C.C_NSCollectionLayoutGroup_AllocCollectionLayoutGroup()
return MakeCollectionLayoutGroup(result_)
}
func (n NSCollectionLayoutGroup) Autorelease() NSCollectionLayoutGroup {
result_ := C.C_NSCollectionLayoutGroup_Autorelease(n.Ptr())
return MakeCollectionLayoutGroup(result_)
}
func (n NSCollectionLayoutGroup) Retain() NSCollectionLayoutGroup {
result_ := C.C_NSCollectionLayoutGroup_Retain(n.Ptr())
return MakeCollectionLayoutGroup(result_)
}
func (n NSCollectionLayoutGroup) VisualDescription() string {
result_ := C.C_NSCollectionLayoutGroup_VisualDescription(n.Ptr())
return foundation.MakeString(result_).String()
}
func (n NSCollectionLayoutGroup) Subitems() []CollectionLayoutItem {
result_ := C.C_NSCollectionLayoutGroup_Subitems(n.Ptr())
if result_.len > 0 {
defer C.free(result_.data)
}
result_Slice := unsafe.Slice((*unsafe.Pointer)(result_.data), int(result_.len))
var goResult_ = make([]CollectionLayoutItem, len(result_Slice))
for idx, r := range result_Slice {
goResult_[idx] = MakeCollectionLayoutItem(r)
}
return goResult_
}
func (n NSCollectionLayoutGroup) SetSupplementaryItems(value []CollectionLayoutSupplementaryItem) {
var cValue C.Array
if len(value) > 0 {
cValueData := make([]unsafe.Pointer, len(value))
for idx, v := range value {
cValueData[idx] = objc.ExtractPtr(v)
}
cValue.data = unsafe.Pointer(&cValueData[0])
cValue.len = C.int(len(value))
}
C.C_NSCollectionLayoutGroup_SetSupplementaryItems(n.Ptr(), cValue)
}
func (n NSCollectionLayoutGroup) InterItemSpacing() NSCollectionLayoutSpacing {
result_ := C.C_NSCollectionLayoutGroup_InterItemSpacing(n.Ptr())
return MakeCollectionLayoutSpacing(result_)
}
func (n NSCollectionLayoutGroup) SetInterItemSpacing(value CollectionLayoutSpacing) {
C.C_NSCollectionLayoutGroup_SetInterItemSpacing(n.Ptr(), objc.ExtractPtr(value))
}
type CollectionLayoutDecorationItem interface {
CollectionLayoutItem
ElementKind() string
ZIndex() int
SetZIndex(value int)
}
type NSCollectionLayoutDecorationItem struct {
NSCollectionLayoutItem
}
func MakeCollectionLayoutDecorationItem(ptr unsafe.Pointer) NSCollectionLayoutDecorationItem {
return NSCollectionLayoutDecorationItem{
NSCollectionLayoutItem: MakeCollectionLayoutItem(ptr),
}
}
func CollectionLayoutDecorationItem_BackgroundDecorationItemWithElementKind(elementKind string) NSCollectionLayoutDecorationItem {
result_ := C.C_NSCollectionLayoutDecorationItem_CollectionLayoutDecorationItem_BackgroundDecorationItemWithElementKind(foundation.NewString(elementKind).Ptr())
return MakeCollectionLayoutDecorationItem(result_)
}
func CollectionLayoutDecorationItem_ItemWithLayoutSize(layoutSize CollectionLayoutSize) NSCollectionLayoutDecorationItem {
result_ := C.C_NSCollectionLayoutDecorationItem_CollectionLayoutDecorationItem_ItemWithLayoutSize(objc.ExtractPtr(layoutSize))
return MakeCollectionLayoutDecorationItem(result_)
}
func CollectionLayoutDecorationItem_ItemWithLayoutSize_SupplementaryItems(layoutSize CollectionLayoutSize, supplementaryItems []CollectionLayoutSupplementaryItem) NSCollectionLayoutDecorationItem {
var cSupplementaryItems C.Array
if len(supplementaryItems) > 0 {
cSupplementaryItemsData := make([]unsafe.Pointer, len(supplementaryItems))
for idx, v := range supplementaryItems {
cSupplementaryItemsData[idx] = objc.ExtractPtr(v)
}
cSupplementaryItems.data = unsafe.Pointer(&cSupplementaryItemsData[0])
cSupplementaryItems.len = C.int(len(supplementaryItems))
}
result_ := C.C_NSCollectionLayoutDecorationItem_CollectionLayoutDecorationItem_ItemWithLayoutSize_SupplementaryItems(objc.ExtractPtr(layoutSize), cSupplementaryItems)
return MakeCollectionLayoutDecorationItem(result_)
}
func AllocCollectionLayoutDecorationItem() NSCollectionLayoutDecorationItem {
result_ := C.C_NSCollectionLayoutDecorationItem_AllocCollectionLayoutDecorationItem()
return MakeCollectionLayoutDecorationItem(result_)
}
func (n NSCollectionLayoutDecorationItem) Autorelease() NSCollectionLayoutDecorationItem {
result_ := C.C_NSCollectionLayoutDecorationItem_Autorelease(n.Ptr())
return MakeCollectionLayoutDecorationItem(result_)
}
func (n NSCollectionLayoutDecorationItem) Retain() NSCollectionLayoutDecorationItem {
result_ := C.C_NSCollectionLayoutDecorationItem_Retain(n.Ptr())
return MakeCollectionLayoutDecorationItem(result_)
}
func (n NSCollectionLayoutDecorationItem) ElementKind() string {
result_ := C.C_NSCollectionLayoutDecorationItem_ElementKind(n.Ptr())
return foundation.MakeString(result_).String()
}
func (n NSCollectionLayoutDecorationItem) ZIndex() int {
result_ := C.C_NSCollectionLayoutDecorationItem_ZIndex(n.Ptr())
return int(result_)
}
func (n NSCollectionLayoutDecorationItem) SetZIndex(value int) {
C.C_NSCollectionLayoutDecorationItem_SetZIndex(n.Ptr(), C.int(value))
}
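// Illustrative sketch (not part of the generated bindings): a background
// decoration placed behind a section's content. The element kind string is an
// arbitrary example and must match the kind registered with the layout.
func exampleSectionBackground() NSCollectionLayoutDecorationItem {
	background := CollectionLayoutDecorationItem_BackgroundDecorationItemWithElementKind("section-background")
	background.SetZIndex(-1) // keep the decoration behind the section's items
	return background
}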
1-d1827376c8c2ea0887d9.js | (window.webpackJsonp=window.webpackJsonp||[]).push([[1],{155:function(e,t,n){t.__esModule=!0,t.Helmet=void 0;var r=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},o=function(){function e(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}return function(t,n,r){return n&&e(t.prototype,n),r&&e(t,r),t}}(),i=s(n(0)),a=s(n(4)),u=s(n(168)),c=s(n(171)),T=n(174),l=n(163);function s(e){return e&&e.__esModule?e:{default:e}}function f(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}var p,E,d,A=(0,u.default)(T.reducePropsToState,T.handleClientStateChange,T.mapStateOnServer)(function(){return null}),y=(p=A,d=E=function(e){function t(){return function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t),function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.apply(this,arguments))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.shouldComponentUpdate=function(e){return!(0,c.default)(this.props,e)},t.prototype.mapNestedChildrenToProps=function(e,t){if(!t)return null;switch(e.type){case l.TAG_NAMES.SCRIPT:case l.TAG_NAMES.NOSCRIPT:return{innerHTML:t};case l.TAG_NAMES.STYLE:return{cssText:t}}throw new Error("<"+e.type+" /> elements are self-closing and can not contain children. 
Refer to our API for more information.")},t.prototype.flattenArrayTypeChildren=function(e){var t,n=e.child,o=e.arrayTypeChildren,i=e.newChildProps,a=e.nestedChildren;return r({},o,((t={})[n.type]=[].concat(o[n.type]||[],[r({},i,this.mapNestedChildrenToProps(n,a))]),t))},t.prototype.mapObjectTypeChildren=function(e){var t,n,o=e.child,i=e.newProps,a=e.newChildProps,u=e.nestedChildren;switch(o.type){case l.TAG_NAMES.TITLE:return r({},i,((t={})[o.type]=u,t.titleAttributes=r({},a),t));case l.TAG_NAMES.BODY:return r({},i,{bodyAttributes:r({},a)});case l.TAG_NAMES.HTML:return r({},i,{htmlAttributes:r({},a)})}return r({},i,((n={})[o.type]=r({},a),n))},t.prototype.mapArrayTypeChildrenToProps=function(e,t){var n=r({},t);return Object.keys(e).forEach(function(t){var o;n=r({},n,((o={})[t]=e[t],o))}),n},t.prototype.warnOnInvalidChildren=function(e,t){return!0},t.prototype.mapChildrenToProps=function(e,t){var n=this,r={};return i.default.Children.forEach(e,function(e){if(e&&e.props){var o=e.props,i=o.children,a=f(o,["children"]),u=(0,T.convertReactPropstoHtmlAttributes)(a);switch(n.warnOnInvalidChildren(e,i),e.type){case l.TAG_NAMES.LINK:case l.TAG_NAMES.META:case l.TAG_NAMES.NOSCRIPT:case l.TAG_NAMES.SCRIPT:case l.TAG_NAMES.STYLE:r=n.flattenArrayTypeChildren({child:e,arrayTypeChildren:r,newChildProps:u,nestedChildren:i});break;default:t=n.mapObjectTypeChildren({child:e,newProps:t,newChildProps:u,nestedChildren:i})}}}),t=this.mapArrayTypeChildrenToProps(r,t)},t.prototype.render=function(){var e=this.props,t=e.children,n=f(e,["children"]),o=r({},n);return t&&(o=this.mapChildrenToProps(t,o)),i.default.createElement(p,o)},o(t,null,[{key:"canUseDOM",set:function(e){p.canUseDOM=e}}]),t}(i.default.Component),E.propTypes={base:a.default.object,bodyAttributes:a.default.object,children:a.default.oneOfType([a.default.arrayOf(a.default.node),a.default.node]),defaultTitle:a.default.string,defer:a.default.bool,encodeSpecialCharacters:a.default.bool,htmlAttributes:a.default.object,link:a.default.arrayOf(a.default.object),meta:a.default.arrayOf(a.default.object),noscript:a.default.arrayOf(a.default.object),onChangeClientState:a.default.func,script:a.default.arrayOf(a.default.object),style:a.default.arrayOf(a.default.object),title:a.default.string,titleAttributes:a.default.object,titleTemplate:a.default.string},E.defaultProps={defer:!0,encodeSpecialCharacters:!0},E.peek=p.peek,E.rewind=function(){var e=p.rewind();return e||(e=(0,T.mapStateOnServer)({baseTag:[],bodyAttributes:{},encodeSpecialCharacters:!0,htmlAttributes:{},linkTags:[],metaTags:[],noscriptTags:[],scriptTags:[],styleTags:[],title:"",titleAttributes:{}})),e},d);y.renderStatic=y.rewind,t.Helmet=y,t.default=y},163:function(e,t){t.__esModule=!0;t.ATTRIBUTE_NAMES={BODY:"bodyAttributes",HTML:"htmlAttributes",TITLE:"titleAttributes"};var n=t.TAG_NAMES={BASE:"base",BODY:"body",HEAD:"head",HTML:"html",LINK:"link",META:"meta",NOSCRIPT:"noscript",SCRIPT:"script",STYLE:"style",TITLE:"title"},r=(t.VALID_TAG_NAMES=Object.keys(n).map(function(e){return 
n[e]}),t.TAG_PROPERTIES={CHARSET:"charset",CSS_TEXT:"cssText",HREF:"href",HTTPEQUIV:"http-equiv",INNER_HTML:"innerHTML",ITEM_PROP:"itemprop",NAME:"name",PROPERTY:"property",REL:"rel",SRC:"src"},t.REACT_TAG_MAP={accesskey:"accessKey",charset:"charSet",class:"className",contenteditable:"contentEditable",contextmenu:"contextMenu","http-equiv":"httpEquiv",itemprop:"itemProp",tabindex:"tabIndex"});t.HELMET_PROPS={DEFAULT_TITLE:"defaultTitle",DEFER:"defer",ENCODE_SPECIAL_CHARACTERS:"encodeSpecialCharacters",ON_CHANGE_CLIENT_STATE:"onChangeClientState",TITLE_TEMPLATE:"titleTemplate"},t.HTML_TAG_MAP=Object.keys(r).reduce(function(e,t){return e[r[t]]=t,e},{}),t.SELF_CLOSING_TAGS=[n.NOSCRIPT,n.SCRIPT,n.STYLE],t.HELMET_ATTRIBUTE="data-react-helmet"},168:function(e,t,n){"use strict";function r(e){return e&&"object"==typeof e&&"default"in e?e.default:e}var o=n(0),i=r(o),a=r(n(169)),u=r(n(170));e.exports=function(e,t,n){if("function"!=typeof e)throw new Error("Expected reducePropsToState to be a function.");if("function"!=typeof t)throw new Error("Expected handleStateChangeOnClient to be a function.");if(void 0!==n&&"function"!=typeof n)throw new Error("Expected mapStateOnServer to either be undefined or a function.");return function(r){if("function"!=typeof r)throw new Error("Expected WrappedComponent to be a React component.");var c=[],T=void 0;function l(){T=e(c.map(function(e){return e.props})),s.canUseDOM?t(T):n&&(T=n(T))}var s=function(e){function t(){return function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t),function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.apply(this,arguments))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.peek=function(){return T},t.rewind=function(){if(t.canUseDOM)throw new Error("You may only call rewind() on the server. 
Call peek() to read the current state.");var e=T;return T=void 0,c=[],e},t.prototype.shouldComponentUpdate=function(e){return!u(e,this.props)},t.prototype.componentWillMount=function(){c.push(this),l()},t.prototype.componentDidUpdate=function(){l()},t.prototype.componentWillUnmount=function(){var e=c.indexOf(this);c.splice(e,1),l()},t.prototype.render=function(){return i.createElement(r,this.props)},t}(o.Component);return s.displayName="SideEffect("+function(e){return e.displayName||e.name||"Component"}(r)+")",s.canUseDOM=a.canUseDOM,s}}},169:function(e,t,n){var r;!function(){"use strict";var o=!("undefined"==typeof window||!window.document||!window.document.createElement),i={canUseDOM:o,canUseWorkers:"undefined"!=typeof Worker,canUseEventListeners:o&&!(!window.addEventListener&&!window.attachEvent),canUseViewport:o&&!!window.screen};void 0===(r=function(){return i}.call(t,n,t,e))||(e.exports=r)}()},170:function(e,t){e.exports=function(e,t,n,r){var o=n?n.call(r,e,t):void 0;if(void 0!==o)return!!o;if(e===t)return!0;if("object"!=typeof e||!e||"object"!=typeof t||!t)return!1;var i=Object.keys(e),a=Object.keys(t);if(i.length!==a.length)return!1;for(var u=Object.prototype.hasOwnProperty.bind(t),c=0;c<i.length;c++){var T=i[c];if(!u(T))return!1;var l=e[T],s=t[T];if(!1===(o=n?n.call(r,l,s,T):void 0)||void 0===o&&l!==s)return!1}return!0}},171:function(e,t,n){var r=Array.prototype.slice,o=n(172),i=n(173),a=e.exports=function(e,t,n){return n||(n={}),e===t||(e instanceof Date&&t instanceof Date?e.getTime()===t.getTime():!e||!t||"object"!=typeof e&&"object"!=typeof t?n.strict?e===t:e==t:function(e,t,n){var T,l;if(u(e)||u(t))return!1;if(e.prototype!==t.prototype)return!1;if(i(e))return!!i(t)&&(e=r.call(e),t=r.call(t),a(e,t,n));if(c(e)){if(!c(t))return!1;if(e.length!==t.length)return!1;for(T=0;T<e.length;T++)if(e[T]!==t[T])return!1;return!0}try{var s=o(e),f=o(t)}catch(p){return!1}if(s.length!=f.length)return!1;for(s.sort(),f.sort(),T=s.length-1;T>=0;T--)if(s[T]!=f[T])return!1;for(T=s.length-1;T>=0;T--)if(l=s[T],!a(e[l],t[l],n))return!1;return typeof e==typeof t}(e,t,n))};function u(e){return null==e}function c(e){return!(!e||"object"!=typeof e||"number"!=typeof e.length)&&("function"==typeof e.copy&&"function"==typeof e.slice&&!(e.length>0&&"number"!=typeof e[0]))}},172:function(e,t){function n(e){var t=[];for(var n in e)t.push(n);return t}(e.exports="function"==typeof Object.keys?Object.keys:n).shim=n},173:function(e,t){var n="[object Arguments]"==function(){return Object.prototype.toString.call(arguments)}();function r(e){return"[object Arguments]"==Object.prototype.toString.call(e)}function o(e){return e&&"object"==typeof e&&"number"==typeof e.length&&Object.prototype.hasOwnProperty.call(e,"callee")&&!Object.prototype.propertyIsEnumerable.call(e,"callee")||!1}(t=e.exports=n?r:o).supported=r,t.unsupported=o},174:function(e,t,n){(function(e){t.__esModule=!0,t.warn=t.requestAnimationFrame=t.reducePropsToState=t.mapStateOnServer=t.handleClientStateChange=t.convertReactPropstoHtmlAttributes=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},i=c(n(0)),a=c(n(56)),u=n(163);function c(e){return e&&e.__esModule?e:{default:e}}var T,l=function(e){return!1===(!(arguments.length>1&&void 
0!==arguments[1])||arguments[1])?String(e):String(e).replace(/&/g,"&").replace(/</g,"<").replace(/>/g,">").replace(/"/g,""").replace(/'/g,"'")},s=function(e){var t=A(e,u.TAG_NAMES.TITLE),n=A(e,u.HELMET_PROPS.TITLE_TEMPLATE);if(n&&t)return n.replace(/%s/g,function(){return t});var r=A(e,u.HELMET_PROPS.DEFAULT_TITLE);return t||r||void 0},f=function(e){return A(e,u.HELMET_PROPS.ON_CHANGE_CLIENT_STATE)||function(){}},p=function(e,t){return t.filter(function(t){return void 0!==t[e]}).map(function(t){return t[e]}).reduce(function(e,t){return o({},e,t)},{})},E=function(e,t){return t.filter(function(e){return void 0!==e[u.TAG_NAMES.BASE]}).map(function(e){return e[u.TAG_NAMES.BASE]}).reverse().reduce(function(t,n){if(!t.length)for(var r=Object.keys(n),o=0;o<r.length;o++){var i=r[o].toLowerCase();if(-1!==e.indexOf(i)&&n[i])return t.concat(n)}return t},[])},d=function(e,t,n){var o={};return n.filter(function(t){return!!Array.isArray(t[e])||(void 0!==t[e]&&_("Helmet: "+e+' should be of type "Array". Instead found type "'+r(t[e])+'"'),!1)}).map(function(t){return t[e]}).reverse().reduce(function(e,n){var r={};n.filter(function(e){for(var n=void 0,i=Object.keys(e),a=0;a<i.length;a++){var c=i[a],T=c.toLowerCase();-1===t.indexOf(T)||n===u.TAG_PROPERTIES.REL&&"canonical"===e[n].toLowerCase()||T===u.TAG_PROPERTIES.REL&&"stylesheet"===e[T].toLowerCase()||(n=T),-1===t.indexOf(c)||c!==u.TAG_PROPERTIES.INNER_HTML&&c!==u.TAG_PROPERTIES.CSS_TEXT&&c!==u.TAG_PROPERTIES.ITEM_PROP||(n=c)}if(!n||!e[n])return!1;var l=e[n].toLowerCase();return o[n]||(o[n]={}),r[n]||(r[n]={}),!o[n][l]&&(r[n][l]=!0,!0)}).reverse().forEach(function(t){return e.push(t)});for(var i=Object.keys(r),c=0;c<i.length;c++){var T=i[c],l=(0,a.default)({},o[T],r[T]);o[T]=l}return e},[]).reverse()},A=function(e,t){for(var n=e.length-1;n>=0;n--){var r=e[n];if(r.hasOwnProperty(t))return r[t]}return null},y=(T=Date.now(),function(e){var t=Date.now();t-T>16?(T=t,e(t)):setTimeout(function(){y(e)},0)}),S=function(e){return clearTimeout(e)},h="undefined"!=typeof window?window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||y:e.requestAnimationFrame||y,b="undefined"!=typeof window?window.cancelAnimationFrame||window.webkitCancelAnimationFrame||window.mozCancelAnimationFrame||S:e.cancelAnimationFrame||S,_=function(e){return console&&"function"==typeof console.warn&&console.warn(e)},m=null,O=function(e,t){var n=e.baseTag,r=e.bodyAttributes,o=e.htmlAttributes,i=e.linkTags,a=e.metaTags,c=e.noscriptTags,T=e.onChangeClientState,l=e.scriptTags,s=e.styleTags,f=e.title,p=e.titleAttributes;P(u.TAG_NAMES.BODY,r),P(u.TAG_NAMES.HTML,o),v(f,p);var E={baseTag:M(u.TAG_NAMES.BASE,n),linkTags:M(u.TAG_NAMES.LINK,i),metaTags:M(u.TAG_NAMES.META,a),noscriptTags:M(u.TAG_NAMES.NOSCRIPT,c),scriptTags:M(u.TAG_NAMES.SCRIPT,l),styleTags:M(u.TAG_NAMES.STYLE,s)},d={},A={};Object.keys(E).forEach(function(e){var t=E[e],n=t.newTags,r=t.oldTags;n.length&&(d[e]=n),r.length&&(A[e]=E[e].oldTags)}),t&&t(),T(e,d,A)},R=function(e){return Array.isArray(e)?e.join(""):e},v=function(e,t){void 0!==e&&document.title!==e&&(document.title=R(e)),P(u.TAG_NAMES.TITLE,t)},P=function(e,t){var n=document.getElementsByTagName(e)[0];if(n){for(var r=n.getAttribute(u.HELMET_ATTRIBUTE),o=r?r.split(","):[],i=[].concat(o),a=Object.keys(t),c=0;c<a.length;c++){var T=a[c],l=t[T]||"";n.getAttribute(T)!==l&&n.setAttribute(T,l),-1===o.indexOf(T)&&o.push(T);var s=i.indexOf(T);-1!==s&&i.splice(s,1)}for(var 
f=i.length-1;f>=0;f--)n.removeAttribute(i[f]);o.length===i.length?n.removeAttribute(u.HELMET_ATTRIBUTE):n.getAttribute(u.HELMET_ATTRIBUTE)!==a.join(",")&&n.setAttribute(u.HELMET_ATTRIBUTE,a.join(","))}},M=function(e,t){var n=document.head||document.querySelector(u.TAG_NAMES.HEAD),r=n.querySelectorAll(e+"["+u.HELMET_ATTRIBUTE+"]"),o=Array.prototype.slice.call(r),i=[],a=void 0;return t&&t.length&&t.forEach(function(t){var n=document.createElement(e);for(var r in t)if(t.hasOwnProperty(r))if(r===u.TAG_PROPERTIES.INNER_HTML)n.innerHTML=t.innerHTML;else if(r===u.TAG_PROPERTIES.CSS_TEXT)n.styleSheet?n.styleSheet.cssText=t.cssText:n.appendChild(document.createTextNode(t.cssText));else{var c=void 0===t[r]?"":t[r];n.setAttribute(r,c)}n.setAttribute(u.HELMET_ATTRIBUTE,"true"),o.some(function(e,t){return a=t,n.isEqualNode(e)})?o.splice(a,1):i.push(n)}),o.forEach(function(e){return e.parentNode.removeChild(e)}),i.forEach(function(e){return n.appendChild(e)}),{oldTags:o,newTags:i}},g=function(e){return Object.keys(e).reduce(function(t,n){var r=void 0!==e[n]?n+'="'+e[n]+'"':""+n;return t?t+" "+r:r},"")},C=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return Object.keys(e).reduce(function(t,n){return t[u.REACT_TAG_MAP[n]||n]=e[n],t},t)},I=function(e,t,n){switch(e){case u.TAG_NAMES.TITLE:return{toComponent:function(){return e=t.title,n=t.titleAttributes,(r={key:e})[u.HELMET_ATTRIBUTE]=!0,o=C(n,r),[i.default.createElement(u.TAG_NAMES.TITLE,o,e)];var e,n,r,o},toString:function(){return function(e,t,n,r){var o=g(n),i=R(t);return o?"<"+e+" "+u.HELMET_ATTRIBUTE+'="true" '+o+">"+l(i,r)+"</"+e+">":"<"+e+" "+u.HELMET_ATTRIBUTE+'="true">'+l(i,r)+"</"+e+">"}(e,t.title,t.titleAttributes,n)}};case u.ATTRIBUTE_NAMES.BODY:case u.ATTRIBUTE_NAMES.HTML:return{toComponent:function(){return C(t)},toString:function(){return g(t)}};default:return{toComponent:function(){return function(e,t){return t.map(function(t,n){var r,o=((r={key:n})[u.HELMET_ATTRIBUTE]=!0,r);return Object.keys(t).forEach(function(e){var n=u.REACT_TAG_MAP[e]||e;if(n===u.TAG_PROPERTIES.INNER_HTML||n===u.TAG_PROPERTIES.CSS_TEXT){var r=t.innerHTML||t.cssText;o.dangerouslySetInnerHTML={__html:r}}else o[n]=t[e]}),i.default.createElement(e,o)})}(e,t)},toString:function(){return function(e,t,n){return t.reduce(function(t,r){var o=Object.keys(r).filter(function(e){return!(e===u.TAG_PROPERTIES.INNER_HTML||e===u.TAG_PROPERTIES.CSS_TEXT)}).reduce(function(e,t){var o=void 0===r[t]?t:t+'="'+l(r[t],n)+'"';return e?e+" "+o:o},""),i=r.innerHTML||r.cssText||"",a=-1===u.SELF_CLOSING_TAGS.indexOf(e);return t+"<"+e+" "+u.HELMET_ATTRIBUTE+'="true" '+o+(a?"/>":">"+i+"</"+e+">")},"")}(e,t,n)}}}};t.convertReactPropstoHtmlAttributes=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return Object.keys(e).reduce(function(t,n){return t[u.HTML_TAG_MAP[n]||n]=e[n],t},t)},t.handleClientStateChange=function(e){m&&b(m),e.defer?m=h(function(){O(e,function(){m=null})}):(O(e),m=null)},t.mapStateOnServer=function(e){var t=e.baseTag,n=e.bodyAttributes,r=e.encode,o=e.htmlAttributes,i=e.linkTags,a=e.metaTags,c=e.noscriptTags,T=e.scriptTags,l=e.styleTags,s=e.title,f=void 
0===s?"":s,p=e.titleAttributes;return{base:I(u.TAG_NAMES.BASE,t,r),bodyAttributes:I(u.ATTRIBUTE_NAMES.BODY,n,r),htmlAttributes:I(u.ATTRIBUTE_NAMES.HTML,o,r),link:I(u.TAG_NAMES.LINK,i,r),meta:I(u.TAG_NAMES.META,a,r),noscript:I(u.TAG_NAMES.NOSCRIPT,c,r),script:I(u.TAG_NAMES.SCRIPT,T,r),style:I(u.TAG_NAMES.STYLE,l,r),title:I(u.TAG_NAMES.TITLE,{title:f,titleAttributes:p},r)}},t.reducePropsToState=function(e){return{baseTag:E([u.TAG_PROPERTIES.HREF],e),bodyAttributes:p(u.ATTRIBUTE_NAMES.BODY,e),defer:A(e,u.HELMET_PROPS.DEFER),encode:A(e,u.HELMET_PROPS.ENCODE_SPECIAL_CHARACTERS),htmlAttributes:p(u.ATTRIBUTE_NAMES.HTML,e),linkTags:d(u.TAG_NAMES.LINK,[u.TAG_PROPERTIES.REL,u.TAG_PROPERTIES.HREF],e),metaTags:d(u.TAG_NAMES.META,[u.TAG_PROPERTIES.NAME,u.TAG_PROPERTIES.CHARSET,u.TAG_PROPERTIES.HTTPEQUIV,u.TAG_PROPERTIES.PROPERTY,u.TAG_PROPERTIES.ITEM_PROP],e),noscriptTags:d(u.TAG_NAMES.NOSCRIPT,[u.TAG_PROPERTIES.INNER_HTML],e),onChangeClientState:f(e),scriptTags:d(u.TAG_NAMES.SCRIPT,[u.TAG_PROPERTIES.SRC,u.TAG_PROPERTIES.INNER_HTML],e),styleTags:d(u.TAG_NAMES.STYLE,[u.TAG_PROPERTIES.CSS_TEXT],e),title:s(e),titleAttributes:p(u.ATTRIBUTE_NAMES.TITLE,e)}},t.requestAnimationFrame=h,t.warn=_}).call(this,n(74))}}]);
//# sourceMappingURL=1-d1827376c8c2ea0887d9.js.map |
||
align_seq_with_structure_multithread.py | #!/usr/bin/python
# coding=UTF-8
# -*- coding: UTF-8 -*-
# This file is part of the StructureMapper algorithm.
# Please cite the authors if you find this software useful
#
# https://academic.oup.com/bioinformatics/advance-article/doi/10.1093/bioinformatics/bty086/4857361
# MIT License
#
# Copyright 2018 Anssi Nurminen and Vesa P. Hytönen
# Faculty of Medicine and Life Sciences and BioMediTech, University of Tampere, Arvo Ylpön katu 34, 33520 Tampere, Finland
# Fimlab Laboratories, Biokatu 4, 33520 Tampere, Finland
# Tampere, Finland
# 16-02-2018
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import os
import traceback
import re
import time
import random
import signal
import glob
import textwrap
import getopt
import string
import gc
import itertools
from subprocess import call
from datetime import datetime, timedelta
#from dateutil.relativedelta import relativedelta
from multiprocessing import Process, Pool, Array, Lock, Value, cpu_count, Manager
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio.Blast.Applications import NcbiblastxCommandline
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.PDBList import PDBList
#from needle_align import *
from biopyt_align import * #AlignCutSeqWindowWithPairwise
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append( "%s/../utils" % curdir)
from struct_methods import *
from fileops import *
#valid_filename_chars = "-_.()[]{}= %s%s" % (string.ascii_letters, string.digits)
#Shared Global variables
align_printlock = False
n_blast_entries_processed = Value('i', 0)
n_alignments_done = Value('i', 0)
n_alignments_skipped = Value('i', 0)
n_alignments_error = Value('i', 0)
fatal_errors = Value('i', 0)
obsoletes_dict = {}
#Init func when running only Alignments with Pool
def PoolInitializer( *args):
global n_alignments_done, n_alignments_skipped, n_alignments_error, align_printlock, obsoletes_dict, n_blast_entries_processed
n_alignments_done, n_alignments_skipped, n_alignments_error, align_printlock, obsoletes_dict, n_blast_entries_processed = args
#SetSyncPrintBuffered( False)
signal.signal( signal.SIGINT, signal.SIG_IGN)
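#Illustrative sketch (not part of the original pipeline): how PoolInitializer is
#meant to be wired up so that every worker process shares the counters, print lock
#and obsoletes dictionary defined above. The worker count is an arbitrary example;
#the real pool setup lives further down in this script.
def _example_create_alignment_pool( n_workers=4):
    lock = Lock()
    return Pool( processes=n_workers,
                 initializer=PoolInitializer,
                 initargs=(n_alignments_done, n_alignments_skipped, n_alignments_error,
                           lock, obsoletes_dict, n_blast_entries_processed))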
#FUNCTIONS
def ReportProcessStart( file):
sys.stdout.write("Processing file '%s'.\n" % (file))
def ReportDone( thread_tuple):
global align_printlock, fatal_errors
SyncPrint( "Thread %i: Done. (%i)\n" % thread_tuple, aLock=align_printlock)
if thread_tuple[ 1] == -99: #User interrupted process
fatal_errors.value = -99
SyncPrint( "Shutting down processes...\n", aLock=align_printlock)
elif thread_tuple[ 1] < 0:
fatal_errors.value = 1
SyncPrint( "Shutting down processes...\n", aLock=align_printlock)
#global printlock, number
#SyncPrint( "File processed.", printlock )
#SyncPrint( "Files done: %i %s" % ( str(file)), printlock)
def GetFastaFilesInFolder( folder):
folder = InsertFolderSeparator( folder)
glob_path1 = folder + "*.fasta"
file_list = []
for globfile in glob.glob( glob_path1):
file_list.append( globfile)
return file_list
def ScoreAlignment( seq_id, PDB_ID, PDB_LEN, filename, scorefilename):
global align_printlock
#Read ClustalW output file
#outfile = open( "clustal_aligned.fasta", 'r')
#alignment = SeqIO.parse( filename, "fastq")
alignment_records = list( SeqIO.parse( filename, "fasta"))
if len( alignment_records) != 2:
SyncPrint( "ERROR: Bad alignment file '%s': contains %i records" % (filename, len( alignment_records)), align_printlock )
return #RETURN
seq_pdb = alignment_records[ 0].seq
seq_proteome = alignment_records[ 1].seq
#print "Seq PDB: % s " % seq_pdb
#print "Seq Proteome: % s" % seq_proteome
align_first = -1
align_last = -1
unaligned = 0
identity = 0
gaps = 0
gap_opens = 0
maybe_gaps = 0
scorefile = open( scorefilename, "w")
#If PDB sequence is longer, set unaligned to excess amount
if len( seq_pdb) > len( seq_proteome):
unaligned = len( seq_pdb) - len( seq_proteome)
for i in range( 0, min( len( seq_pdb), len( seq_proteome))):
#Gap in Uniprot seq
if seq_pdb[ i] != '-' and seq_proteome[ i] == '-':
unaligned += 1
#Same residue in both
elif seq_pdb[ i] != '-' and seq_proteome[ i] == seq_pdb[ i]:
identity += 1
#Gap in PDB sequence
elif align_first >= 0 and seq_pdb[ i] == '-' and seq_proteome[ i] != '-':
maybe_gaps += 1
#Aligned residues
if seq_pdb[ i] != '-' and seq_proteome[ i] != '-':
align_last = i
gaps += maybe_gaps
if maybe_gaps > 0:
gap_opens += 1
maybe_gaps = 0
if align_first < 0:
align_first = i
alignment_length = 0
if align_first >= 0:
alignment_length = align_last - align_first + 1
#hyphen_count = seq_pdb.count('-')
#Write score
# SEQ_ID SEQ_LEN, PDB_ID, PDB_LEN, Alignment length,
scorefile.write( "%s\t%i\t%s\t%i\t%i\t%i\t%i\t%i\t%i\t%i\t%i\n" % ( seq_id, len( seq_proteome), PDB_ID, PDB_LEN, alignment_length, align_first, align_last, identity, gaps, unaligned, gap_opens))
#Debug
#alignment_length_file.close()
#sys.exit("debug stop")
scorefile.write("\n")
scorefile.close()
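#Illustrative helper (not called by the pipeline): a simplified version of the
#per-column bookkeeping ScoreAlignment performs on the two aligned sequences.
#For example _example_column_stats( "MK-TA", "MKQT-") returns (3, 1, 1):
#three identical columns, one gap in the PDB-derived sequence and one PDB residue
#with no proteome counterpart.
def _example_column_stats( seq_pdb, seq_proteome):
    identity = pdb_gaps = unaligned = 0
    for pdb_res, prot_res in zip( seq_pdb, seq_proteome):
        if pdb_res != '-' and prot_res == '-':
            unaligned += 1  #residue present only in the PDB chain
        elif pdb_res != '-' and pdb_res == prot_res:
            identity += 1   #identical residue in both sequences
        elif pdb_res == '-' and prot_res != '-':
            pdb_gaps += 1   #gap in the PDB-derived sequence
    return identity, pdb_gaps, unaligned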
def CreateAlignFilename( aProjectPrefix, aRecordId, aPDB_code, aPDB_chain):
filename = "%s_[%s=%s(%s)].align" % (aProjectPrefix[:100], aRecordId.upper(), aPDB_code.upper(), aPDB_chain.upper())
return ValidFilename( filename)
def RecordSelector( aCurIndex):
#global align_printlock
if (aCurIndex / RecordSelector.chunksize) % RecordSelector.tc != RecordSelector.tid: return False
#SyncErrPrint( "YES for %i! tid: %i tc: %i" % (aCurIndex, RecordSelector.tid, RecordSelector.tc), align_printlock)
return True
def InitRecordSelector( aThreadCount, aThreadId, aChunkSize=10):
RecordSelector.tc = aThreadCount
RecordSelector.tid = aThreadId
RecordSelector.chunksize = aChunkSize
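#Illustrative sketch (not called by the pipeline): RecordSelector hands out record
#indices to threads in round-robin chunks. With 2 threads and the default chunk
#size of 10, thread 0 selects indices 0-9, 20-29, ... and thread 1 selects 10-19,
#30-39, ... (this relies on the script's Python 2 integer division).
def _example_partition_indices( n_records, thread_count, thread_id):
    InitRecordSelector( thread_count, thread_id)
    return [i for i in range( n_records) if RecordSelector( i)]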
def FillAlignmentQueue( blast_record_reader, blast_entries, seq_index, fasta_records, aQueueSize = 10 ):
global align_printlock
appended = 0
for blast_header, blast_record in blast_record_reader:
record_id = GetRecordACC( blast_header)
try:
fasta_records.append( seq_index[ record_id])
blast_entries.append( (blast_header, blast_record))
except Exception as ex:
SyncErrPrint( "THREAD ERROR: No matching sequence entry found for blast entry '%s':\n" % (record_id), align_printlock)
SyncErrPrint( "SeqIndex size: %i" % len( seq_index), align_printlock)
message = "An exception of type {0} occured. Arguments: {1!r}".format( type(ex).__name__, ex.args)
SyncErrPrint( message, align_printlock)
return -2 #RETURN ERROR
appended += 1
if len( blast_entries) >= aQueueSize: break
return appended
def ProcessBlastResultsAsync( aBlastResultFile, aSequenceFile, aOutputFolder, aProcessID, aPDB_Folder, aObsoletes, aScoreIt=False, aSkipExisting=True, aLock=False):
args_str =( "%s\t%s\t%s\t%i\t%i\t%s\t%s\t%i\t%i" % (aBlastResultFile, aSequenceFile, aOutputFolder, 1, aProcessID, aBlastResultFile + ".log", aPDB_Folder, aScoreIt, aSkipExisting))
#p = Process( target=ProcessBlastMultiThreadInit, args=(args_str, aObsoletes, aLock, g_SyncPrintBuffer, g_SyncPrintBuffered, g_SyncPrintBuffer_overflow))
p = Process( target=ProcessBlastMultiThreadInit, args=(args_str, aObsoletes, aLock, GetSyncPrintBufferForThread()))
p.start()
return p
#For parallel processing when blasting
#threaded
#def ProcessBlastMultiThreadInit( arg_str, aObsoletes, aLock, aSyncPrintBuffer, aSyncPrintBuffered, aSyncPrintOverFlow):
def ProcessBlastMultiThreadInit( arg_str, aObsoletes, aLock, aSyncPrintBuffer):
global align_printlock, obsoletes_dict
align_printlock = aLock
obsoletes_dict = aObsoletes
thread_id = arg_str.split( "\t")[ 4]
blast_file = arg_str.split( "\t")[ 0]
#SetSyncPrintBufferInThread( aSyncPrintBuffered, aSyncPrintBuffer, aSyncPrintOverFlow)
SetSyncPrintBufferInThread( aSyncPrintBuffer)
SetAlignPrintLock( aLock)
#DEBUG
#print "\n\n\nIsSyncPrintBuffered in thread %i: %i" % (int(thread_id), IsSyncPrintBuffered()), "LOCK:", aLock
#print "\n\n\n"
SyncPrint( "Align Progress: Aligning blast results '%s'. [%s]" % (blast_file, thread_id), aLock)
sig = signal.signal(signal.SIGINT, signal.SIG_IGN)
ProcessBlastMultiThread( arg_str)
signal.signal(signal.SIGINT, sig)
SyncPrint( "Align Progress: File '%s' Done. [%s]" % (blast_file, thread_id), aLock)
SetAlignPrintLock( False)
#Threaded
#def ProcessBlastMultiThread( arg_str, aLock ):
def ProcessBlastMultiThread( arg_str ):
global align_printlock
retval = -1
thread_id = -1
try:
thread_id = int( arg_str.split( "\t")[ 4])
retval = DoProcessBlastMultiThread( arg_str)
except Exception as ex:
err_str = "ERROR: Thread %i:\n" % thread_id
message = " An exception of type {0} occured. Arguments: {1!r}".format( type(ex).__name__, ex.args)
sys.stderr.write( "\n" + err_str + message + "\n")
SyncPrint( "\n" + err_str + message + "\n", align_printlock)
traceback.print_exc(file=sys.stderr)
return (thread_id, retval)
#Threaded
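#'arg_str' is a tab-separated argument bundle:
#blast_file, sequence_file, output_folder, thread_count, thread_id, log_file, PDB_folder, score_it, skip_existing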
def DoProcessBlastMultiThread( arg_str ):
global n_alignments_done, n_alignments_skipped, n_alignments_error, align_printlock, obsoletes_dict, n_blast_entries_processed#, seq_index
#read arguments
cols = str(arg_str).split( "\t")
blast_file = cols[ 0]
sequence_file = cols[ 1]
output_folder = cols[ 2]
thread_count = int( cols[ 3])
thread_id = int( cols[ 4])
log_file = cols[ 5]
PDB_folder = cols[ 6]
score_it = True if int( cols[ 7]) else False
skip_existing = True if int( cols[ 8]) else False
#sys.stderr.write( "Align Progress: Debug Thread B '%i'.\n" % thread_id)
#raise ValueError('A very specific bad thing happened')
#SyncPrint( "Obsoletes %i: %i (%i)" % (thread_id, len(obsoletes_dict), obsoletes_dict['125D'] ))
#SyncPrint( "Score it: %i " % (score_it), align_printlock)
#SyncPrint( "skip_existing: %i " % (skip_existing), align_printlock)
path, filename = os.path.split( sequence_file)
name, suffix = SplitFileName( filename)
seq_index = SeqIO.index( sequence_file, "fasta", key_function=GetRecordACC)
n_records = len( seq_index)
n_total_blast_results = 0
n_errors = 0
output_folder = InsertFolderSeparator( output_folder)
log_filename = output_folder + "align_%i_log.txt" % thread_id
align_scores_filename = output_folder + "align_%i_scores.txt" % thread_id
re_cutnum = re.compile('POI:\s*(\d+)-(\d+)', re.IGNORECASE)
no_struct_found = 0
align_ok = 0
align_skipped = 0
align_error = 0
#sys.stderr.write( "Align Progress: Debug Thread '%i'." % thread_id)
#SyncPrint( "Align Progress: Thread %i starting..." % thread_id, align_printlock )
with open( log_filename, "w") as log_file:
LogIt( "INFO: Processing BLAST file '%s'." % blast_file, log_file, -1, lock=align_printlock )
blast_record_reader = None
if thread_count <= 1:
blast_record_reader = FastaIter( blast_file, False, None)
else:
#Process only every Nth record in the blast file in this thread
InitRecordSelector( thread_count, thread_id, 10)
#.blast file is not a fasta file but FastaIter method still parses the records nicely, one by one
blast_record_reader = FastaIter( blast_file, False, RecordSelector) #generator
q_blast_entries = []
q_fasta_records = []
queue_retval = -1
while True:
#Fill queue, read 10 sequences at a time
queue_retval = FillAlignmentQueue( blast_record_reader, q_blast_entries, seq_index, q_fasta_records, 10 )
if queue_retval < 1: break #Exit loop condition
assert( len( q_blast_entries) == len( q_fasta_records))
#Process queue
while len( q_blast_entries):
#FIFO queue
blast_header, blast_record = q_blast_entries.pop( 0)
fasta_rec = q_fasta_records.pop( 0)
record_id = GetRecordACC( blast_header)
record_desc = fasta_rec.description
cutsite = -1
res = re_cutnum.search( record_desc)
#print records[ r].description
if res:
try:
cut_1 = int( res.group( 1))
cut_2 = int( res.group( 2))
cutsite = cut_1
#print record_desc #+ " homologs: %i" % len( PDB_homologs[ r])
except ValueError:
pass
else:
LogIt( "INFO: No POI-site specified for: %s" % (record_desc), log_file, 1, lock=align_printlock)
n_total_blast_results += 1
blast_rank = 0 #1st rank is 1, not 0
n_ok_blast_results = 0
used_structures = set()
#For each structure in blast results
for blast_result in blast_record.split( "\n"):
blast_result = blast_result.strip()
if len( blast_result) == 0: continue #Skip empty rows
blast_row = {}
blast_rank += 1
#print "Blast result: '''%s'''" % blast_result
cols = blast_result.split( "\t")
if len( cols) != 12:
LogIt( "ALIGN ERROR: Bad Blast result '%s', rank: %i" % (blast_result, blast_rank), log_file, 2, lock=align_printlock )
n_errors += 1
if n_errors >= 5:
SyncErrPrint( "ALIGN ERROR: %i errors encountered in blast results. Exitting..\n" % n_errors, align_printlock)
return -2 #RETURN
continue
blast_row[ 'entry_id'] = record_id #cols[ 0].split( '|')[ 1]
#Two possible formats in BLAST results:
if cols[ 1].find( "|") >= 0:
#gn|P23919|DTYMK gi|359545626|pdb|2XX3|A 100.00 212 0 0 1 212 21 232 7e-156 436
blast_row[ 'pdb_code'] = cols[ 1].split( '|')[ 3].upper() #3IKM
blast_row[ 'chain_id'] = cols[ 1].split( '|')[ 4] #A
else:
#gn|P30613_Pos2|PKLR 4IP7_A 99.815 541 1 0 34 574 3 543 0.0 1096
blast_row[ 'pdb_code'] = cols[ 1].split( '_')[ 0].upper() #3IKM
blast_row[ 'chain_id'] = cols[ 1].split( '_')[ 1] #A
blast_row[ 'struct_id']= ( blast_row[ 'pdb_code'] + "|" + blast_row[ 'chain_id']) #3IKM|A
blast_row[ 'pdb_file'] = InsertFolderSeparator( PDB_folder) + "pdb" + blast_row[ 'pdb_code'].lower() + ".ent"
if blast_row[ 'pdb_code'] in obsoletes_dict:
#Skip obsolete structures
LogIt( "INFO: Entry for '%s' contains obsolete structure %s. Skipping." % (blast_row[ 'entry_id'], blast_row[ 'pdb_code']), log_file, 1, lock=align_printlock)
continue
n_ok_blast_results += 1
#Blast results can contain the same structure+chain multiple times
if blast_row['struct_id'] not in used_structures:
identifier = "%s_%s_%i" % (name, record_id, blast_rank)
if cutsite < 0:
LogIt( "No POI-site found for seq '%s'. Exitting.\n" % record_id, log_file, 2, lock=align_printlock)
return -2 #RETURN
#AlignSeqWithPDBFile( identifier, records[ r], blast_entry[PDB_CODE], blast_entry[FILE], blast_entry[CHAIN], output_folder, False, score_it, skip_existing )
else:
#Needle alignment
#align_retval = AlignCutSeqWindowWithNeedle( thread_id=thread_id, cutsite=cutsite, cutseqlen=10, record_identifier=identifier, \
# fasta_record=fasta_rec, PDB_code=blast_row[ 'pdb_code'], PDB_filename=blast_row[ 'pdb_file'], \
# PDB_chain=blast_row[ 'chain_id'], output_folder=output_folder, skip_existing=skip_existing, log_file=log_file )
#Biopython Pairwise2
align_retval = AlignCutSeqWindowWithPairwise( thread_id=thread_id, cutsite=cutsite, cutseqlen=10, record_identifier=identifier, \
fasta_record=fasta_rec, PDB_code=blast_row['pdb_code'], PDB_filename=blast_row['pdb_file'], \
PDB_chain=blast_row['chain_id'], output_folder=output_folder, skip_existing=skip_existing, log_file=log_file )
#Align with ClustalW2
#align_retval = AlignCutSeqWithPDBFile( thread_id, cutsite, 10, identifier, fasta_rec, blast_row[ 'pdb_code'], blast_row[ 'pdb_file'], \
# blast_row[ 'chain_id'], output_folder, False, score_it, skip_existing, log_file=log_file )
#debug
#print "NEEDLE: %i" % align_retval
#sys.exit( -69)
if align_retval == 1: align_ok += 1
elif align_retval == 0: align_skipped += 1
elif align_retval == -98: align_error += 1 #No sequence in PDB file found
else:
align_error += 1
return align_retval
#Mark as processed
used_structures.add( blast_row[ 'struct_id'])
#Report progress
align_printlock.acquire()
n_blast_entries_processed.value += 1
n_alignments_done.value += align_ok
n_alignments_skipped.value += align_skipped
n_alignments_error.value += align_error
align_printlock.release()
align_error = 0
align_skipped = 0
align_ok = 0
#After all processed (or interrupted by error)
if queue_retval < 0:
SyncErrPrint( "THREAD ERROR: No matching sequence entry found for blast entry %i, '%s':\n" % (cur_blast_entry_index, record_id))
SyncErrPrint( "SeqIndex size: %i" % len( seq_index))
#Sanity check failed
error_str = "Thread %s error:\n" % thread_id
template = "An exception of type {0} occured. Arguments: {1!r}"
message = template.format( type(ex).__name__, ex.args)
SyncErrPrint( error_str + message, align_printlock)
return -2 #RETURN
if n_ok_blast_results == 0:
no_struct_found += 1
return thread_id
def SetAlignPrintLock( aLock):
global align_printlock
align_printlock = aLock
#SetNeedlePrintLock( aLock)
SetPairwisePrintLock( aLock)
#Aligns sequences (or only cut site sequences) with sequences exported from PDB files.
def ProcessBlastResults( sequence_filename, blast_filename, output_folder, PDB_folder, obsoletes=None, skip_existing=False, score_it=False, threads=0 ):
global n_alignments_done, n_alignments_skipped, n_alignments_error, align_printlock, obsoletes_dict, fatal_errors #, seq_index
if not os.path.isdir( PDB_folder):
sys.stderr.write("ERROR: '%s' in not a valid folder.\n" % PDB_folder)
return -1 #RETURN
if not obsoletes:
obsoletes = FetchObsoletes( True)
#Create Shared dictionary
manager = Manager()
obsoletes_dict = manager.dict()
for o in obsoletes: obsoletes_dict[ o] = True
#Open Blast file
try:
blast_file = open( blast_filename, 'r')
except:
sys.stderr.write("ERROR: Error opening file: '%s'.\n" % blast_filename)
return -1 #RETURN
#Read sequences
#try:
# print "Building index of seq records from file '%s'... " % sequence_filename,
# seq_index = SeqIO.index( sequence_filename, "fasta", key_function=GetRecordACC)
# print "Done."
#except Exception as ex:
# sys.stderr.write("Error getting sequences from file: '%s'.\n" % sequence_filename)
# template = "An exception of type {0} occured. Arguments: {1!r}"
# message = template.format( type(ex).__name__, ex.args)
# sys.stderr.write( message + "\n")
# return -1 #RETURN
n_records = CountRecords( sequence_filename) #len( seq_index)
print "Align Progress: Found %i blast entries to align in file '%s'." % (n_records, blast_filename)
#Figure out how many cpus to use
n_cpus = cpu_count()
used_cpus = n_cpus
if threads < 0:
used_cpus = n_cpus + threads
elif threads > 0:
used_cpus = threads
#enforce cpu limits
used_cpus = min( n_cpus, used_cpus) #cap at detected system cores
if used_cpus > ( n_records / 10): used_cpus = (n_records / 10) #No need for additional cores beyond 10 alignments each
used_cpus = max( 1, used_cpus) #minimum of 1 core
print "Align Progress: Aligning %i records with %i cores." % (n_records, used_cpus)
output_folder = InsertFolderSeparator( output_folder)
log_filename = output_folder + "align_log.txt"
SetAlignPrintLock( Lock())
SetSyncPrintBuffered( False)
#manager = Manager()
#manager.Value('i', 0)
print "INFO: Skipping existing files: %s" % ("Yes" if skip_existing else "No")
print "INFO: Creating alignment score files: %s" % ("Yes" if score_it else "No")
print "Align Progress: Starting %i thread%s and creating sequence record indices..." % (used_cpus, "s" if used_cpus > 1 else "")
#print "Reading blast file: '%s'" % blast_filename
args = []
for c in range( used_cpus):
args.append( "%s\t%s\t%s\t%i\t%i\t%s\t%s\t%i\t%i" % (blast_filename, sequence_filename, output_folder, used_cpus, c, log_filename, PDB_folder, score_it, skip_existing))
#Create worker pool
pool = Pool( processes=used_cpus, initializer=PoolInitializer, initargs=(n_alignments_done, n_alignments_skipped, n_alignments_error, align_printlock, obsoletes_dict, n_blast_entries_processed ))
for arg in args:
pool.apply_async( ProcessBlastMultiThread, (arg, ), callback=ReportDone)
pool.close() # no more tasks
total_processed = 0
start_time = time.time()
prev_progress = 0
processing_speed_per_minute = []
report_interval = 20
progress_counter = max( 5, report_interval - 5) #first report after 5 secs
try:
#Trick to keep Ctrl+C interrupt working during processing
while total_processed < n_records:
time.sleep( 1)
progress_counter += 1
if fatal_errors.value > 0:
raise Exception('Alignment Thread error.')
elif fatal_errors.value == -99:
raise KeyboardInterrupt('Ctrl+C detected.')
if progress_counter >= report_interval:
progress_counter = 0
total_processed = n_blast_entries_processed.value
#print "ISBUFFERED:", IsSyncPrintBuffered()
#SyncPrint( "Prev: %i, cur: %i" % (prev_progress, total_processed), False)#align_printlock)
n_processed = total_processed - prev_progress
if n_processed == 0: continue
speed_msg = ""
if prev_progress != 0 and n_alignments_done.value > 100:
per_minute = round(n_processed * (60.0 / report_interval))
#Calc average
if len( processing_speed_per_minute) > 30: processing_speed_per_minute.pop( 0)
processing_speed_per_minute.append( per_minute)
avg_speed = int( sum( processing_speed_per_minute) / len( processing_speed_per_minute))
n_remaining = n_records - total_processed
if avg_speed == 0: avg_speed = 1
n_minutes_remaining = n_remaining / avg_speed
completion = datetime.now() + timedelta(minutes = n_minutes_remaining)
if n_minutes_remaining <= 60:
# ALIGN PROGRESS:
speed_msg = "\n Speed: %i/min. Estimated time of completion: %s (%i minutes remaining)" % (avg_speed, completion.strftime('%H:%M'), n_minutes_remaining)
elif n_minutes_remaining < 60*24: #in 24h
h = n_minutes_remaining / 60
m = n_minutes_remaining - (h*60)
speed_msg = "\n Speed: %i/min. Estimated time of completion: %s (in %ih %imin)" % (avg_speed, completion.strftime('%H:%M'), h, m)
else:
h = n_minutes_remaining / 60
speed_msg = "\n Speed: %i/min. Estimated time of completion: %s (in >%i hours)" % (avg_speed, completion.strftime('%a %b %d %Y %H:%M'), h)
SyncPrint( "\n\nALIGN PROGRESS: Entries processed: %i/%i (%2.1f%%), Alignments: %i, Skipped: %i, Errors: %i%s\n\n" % ( total_processed, n_records, (float(total_processed) / n_records * 100.0), n_alignments_done.value, n_alignments_skipped.value, n_alignments_error.value, speed_msg), align_printlock )
prev_progress = total_processed
elif progress_counter % 5 == 0: #Update every 5 secs
total_processed = n_blast_entries_processed.value
print "\nAlign Progress: Work has finished."
except KeyboardInterrupt:
pool.terminate()
pool.join()
sys.stderr.write( "\n\nProcess interrupted by user.\n\n")
align_printlock = False
return -99
except Exception as ex:
pool.terminate()
pool.join()
sys.stderr.write( "\n\nERROR: Fatal error occurred during alignment.\n\n")
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
sys.stderr.write( message +"\n")
align_printlock = False
return -1
else:
pool.join() # wrap up current tasks
print "Align Progress: Alignment took %i seconds" % (int(time.time() - start_time))
SetAlignPrintLock( False)
gc.collect()
#n_found = n_total_blast_results - no_struct_found
#LogIt( "Homologous PDB structures found for %i/%i sequences (%.1f%%)." % ( n_found, n_total_blast_results, (float(n_found)/ n_total_blast_results * 100.0)), log_file, 1)
#LogIt( "\n\nINFO: Alignment results (%i total):\nALIGNED=%i\nSKIPPED=%i\nERROR =%i\n" % (align_ok+align_skipped+align_error, align_ok, align_skipped, align_error), log_file, 1 )
#Collect Thread logs into one log
ConcatenateFiles( GetDirFiles( output_folder + "align_*_log.txt"), log_filename, aRemoveAfterWritten=True)
return 0 #DONE
def PrintHelp():
pr | def main():
seq_file = ""
blast_file = ""
output_folder = ""
pdb_folder = "I:/anSsi/PDB/"
try:
opts, args = getopt.getopt(sys.argv[1:], 'p:hv', ['pdb_folder=', 'help', 'version'])
except getopt.GetoptError as err:
sys.stderr.write( err.msg)
sys.stderr.write( "\n")
sys.stderr.write("See -h for usage.\n")
sys.exit( 2)
#Input & Output files
for arg in args:
if len( seq_file) == 0:
seq_file = arg
elif len( blast_file) == 0:
blast_file = arg
elif len( output_folder) == 0:
output_folder = arg
else:
sys.stderr.write("Too many arguments.\n")
sys.stderr.write("See -h for usage.\n")
sys.exit( 2)
#Flags
for opt, arg in opts:
if opt in ('-h', '--help'):
PrintHelp()
sys.exit( 0)
elif opt in ('-v', '--version'):
PrintHelp()
sys.exit( 0)
elif opt in ('-p', '--pdb_folder'):
pdb_folder = arg
if len( sys.argv) < 3:
sys.stderr.write("Too few arguments.\n")
sys.exit( -2) #Exit
seq_file_list = []
blast_file_list = []
if os.path.isdir( seq_file) and os.path.isdir( blast_file):
#Both dirs
seq_file_list = GetFastaFilesInFolder( seq_file)
for f in seq_file_list:
blast_file_list.append( f + ".blast")
if len(seq_file_list) < len( blast_file_list):
sys.stderr.write("Warning: Blast files not found for all sequence (fasta) files.\n")
elif os.path.isfile( seq_file) and os.path.isfile( blast_file):
#Both files
seq_file_list.append( seq_file)
blast_file_list.append( blast_file)
elif os.path.isfile( seq_file) and os.path.isdir( blast_file):
#File and dir
seq_file_list.append( seq_file)
blast_file_list.append( InsertFolderSeparator( blast_file) + os.path.basename( seq_file) + ".blast")
else:
sys.stderr.write("Bad arguments given. Specify either files or folders.\n")
if len( seq_file_list) > 1:
print "Found %i files to create alignments for."
obsoletes = FetchObsoletes( True)
for s in range( len( seq_file_list)):
path, filename = os.path.split( seq_file_list[ s])
name, suffix = SplitFileName( filename)
output_folder = InsertFolderSeparator( output_folder)
print "Processing files: '%s' and '%s'." % (seq_file_list[ s], blast_file_list[ s])
ProcessBlastResults( seq_file_list[ s], blast_file_list[ s], output_folder, pdb_folder, obsoletes )
print ""
print "All done."
if __name__ == "__main__":
main()
| int """
This script runs alignments for a pair of blast results and fasta sequences
Usage: script.py fasta_sequence_file blast_results_file output_folder
or: fasta_sequence_folder blast_results_folder output_folder
-p --pdb_folder Folder where structure files are stored
-h --help Print this message.
-v --version 1.0
"""
|
id_redisenterprise.go | package privateendpointconnections
import (
"fmt"
"strings"
"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids"
)
var _ resourceids.ResourceId = RedisEnterpriseId{}
// RedisEnterpriseId is a struct representing the Resource ID for a Redis Enterprise
type RedisEnterpriseId struct {
SubscriptionId string
ResourceGroupName string
ClusterName string
}
// NewRedisEnterpriseID returns a new RedisEnterpriseId struct
func NewRedisEnterpriseID(subscriptionId string, resourceGroupName string, clusterName string) RedisEnterpriseId {
return RedisEnterpriseId{
SubscriptionId: subscriptionId,
ResourceGroupName: resourceGroupName,
ClusterName: clusterName,
}
}
// ParseRedisEnterpriseID parses 'input' into a RedisEnterpriseId
func ParseRedisEnterpriseID(input string) (*RedisEnterpriseId, error) |
// ParseRedisEnterpriseIDInsensitively parses 'input' case-insensitively into a RedisEnterpriseId
// note: this method should only be used for API response data and not user input
func ParseRedisEnterpriseIDInsensitively(input string) (*RedisEnterpriseId, error) {
parser := resourceids.NewParserFromResourceIdType(RedisEnterpriseId{})
parsed, err := parser.Parse(input, true)
if err != nil {
return nil, fmt.Errorf("parsing %q: %+v", input, err)
}
var ok bool
id := RedisEnterpriseId{}
if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok {
return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input)
}
if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok {
return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input)
}
if id.ClusterName, ok = parsed.Parsed["clusterName"]; !ok {
return nil, fmt.Errorf("the segment 'clusterName' was not found in the resource id %q", input)
}
return &id, nil
}
// ValidateRedisEnterpriseID checks that 'input' can be parsed as a Redis Enterprise ID
func ValidateRedisEnterpriseID(input interface{}, key string) (warnings []string, errors []error) {
v, ok := input.(string)
if !ok {
errors = append(errors, fmt.Errorf("expected %q to be a string", key))
return
}
if _, err := ParseRedisEnterpriseID(v); err != nil {
errors = append(errors, err)
}
return
}
// ID returns the formatted Redis Enterprise ID
func (id RedisEnterpriseId) ID() string {
fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Cache/redisEnterprise/%s"
return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ClusterName)
}
// Segments returns a slice of Resource ID Segments which comprise this Redis Enterprise ID
func (id RedisEnterpriseId) Segments() []resourceids.Segment {
return []resourceids.Segment{
resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"),
resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"),
resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"),
resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"),
resourceids.StaticSegment("staticProviders", "providers", "providers"),
resourceids.ResourceProviderSegment("staticMicrosoftCache", "Microsoft.Cache", "Microsoft.Cache"),
resourceids.StaticSegment("staticRedisEnterprise", "redisEnterprise", "redisEnterprise"),
resourceids.UserSpecifiedSegment("clusterName", "clusterValue"),
}
}
// String returns a human-readable description of this Redis Enterprise ID
func (id RedisEnterpriseId) String() string {
components := []string{
fmt.Sprintf("Subscription: %q", id.SubscriptionId),
fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName),
fmt.Sprintf("Cluster Name: %q", id.ClusterName),
}
return fmt.Sprintf("Redis Enterprise (%s)", strings.Join(components, "\n"))
}
| {
parser := resourceids.NewParserFromResourceIdType(RedisEnterpriseId{})
parsed, err := parser.Parse(input, false)
if err != nil {
return nil, fmt.Errorf("parsing %q: %+v", input, err)
}
var ok bool
id := RedisEnterpriseId{}
if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok {
return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input)
}
if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok {
return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input)
}
if id.ClusterName, ok = parsed.Parsed["clusterName"]; !ok {
return nil, fmt.Errorf("the segment 'clusterName' was not found in the resource id %q", input)
}
return &id, nil
} |
mod.rs | use super::utils;
#[test]
fn deserialization_works() |
#[test]
fn generator_does_not_panic() {
println!("{}", utils::generate(include_str!("input.xsd")))
}
#[test]
fn generator_output_has_correct_ast() {
utils::ast_test(include_str!("input.xsd"), include_str!("expected.rs"));
}
| {
mod expected {
use crate::generator::validator::Validate;
use std::io::{Read, Write};
use yaserde::{YaDeserialize, YaSerialize};
include!("expected.rs");
}
let ser = include_str!("example.xml");
let de: expected::FooType = yaserde::de::from_str(&ser).unwrap();
assert_eq!(
de,
expected::FooType {
text: "abcd".to_string()
}
);
} |
tcpinfo_test.go | // +build linux
package tcpinfo
import (
"net"
"testing"
)
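// The test below starts a loopback TCP listener in a goroutine, dials it, and then
// reads TCP_INFO for the client connection via GetsockoptTCPInfo, expecting a positive RTT.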
func | (t *testing.T) {
var listAddr net.Addr
ch := make(chan error)
go func() {
addr, err := net.ResolveTCPAddr("tcp4", "127.0.0.1:0")
if err != nil {
ch <- err
return
}
l, err := net.ListenTCP("tcp4", addr)
if err != nil {
ch <- err
return
}
listAddr = l.Addr()
ch <- nil
_, _ = l.Accept()
}()
err := <-ch
if err != nil {
t.Error(err)
}
conn, err := net.DialTCP("tcp4", nil, listAddr.(*net.TCPAddr))
if err != nil {
t.Error(err)
}
tcpInfo, err := GetsockoptTCPInfo(conn)
if err != nil {
t.Error(err)
}
if tcpInfo.Rtt <= 0 {
t.Errorf("get tcpinfo failed. tcpInfo=%v", tcpInfo)
}
}
| TestGetsockoptTCPInfo |
msg_mhf_get_ca_unique_id.go | package mhfpacket
import (
"errors"
"github.com/Solenataris/Erupe/network/clientctx"
"github.com/Solenataris/Erupe/network"
"github.com/Andoryuuta/byteframe"
)
// MsgMhfGetCaUniqueID represents the MSG_MHF_GET_CA_UNIQUE_ID
type MsgMhfGetCaUniqueID struct{}
// Opcode returns the ID associated with this packet type.
func (m *MsgMhfGetCaUniqueID) Opcode() network.PacketID {
return network.MSG_MHF_GET_CA_UNIQUE_ID
} | func (m *MsgMhfGetCaUniqueID) Parse(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
return errors.New("NOT IMPLEMENTED")
}
// Build builds a binary packet from the current data.
func (m *MsgMhfGetCaUniqueID) Build(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
return errors.New("NOT IMPLEMENTED")
} |
// Parse parses the packet from binary |
placement.rs | //!
//! # Layout21 Placement Module
//!
// Local imports
use crate::{
array::ArrayInstance,
cell::Cell,
coords::{HasUnits, Int, PrimPitches, UnitSpeced, Xy},
group::GroupInstance,
instance::Instance,
raw::{Dir, LayoutError, LayoutResult},
utils::Ptr,
};
/// # Placement Enumeration
///
/// Includes absolute and relative placements.
///
/// Absolute placements are in `Self::AbsType` units.
/// Relative placements use the [RelativePlace] struct,
/// which can be specified relative to any other [Placeable] object.
///
#[derive(Debug, Clone)]
pub enum Place<AbsType> {
/// Absolute
Abs(AbsType),
/// Relative
Rel(RelativePlace),
}
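// A minimal usage sketch (types assumed from this module): a two-tuple of integers
// converts into an absolute place, e.g. `let p: Place<Xy<PrimPitches>> = (3, 4).into();`,
// while a `RelativePlace` value converts into `Place::Rel` via the `From` impl below.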
impl<T> Place<T> {
/// Assert that our place is absolute, and retrieve a shared reference to the inner [Xy] value.
pub fn abs(&self) -> LayoutResult<&T> {
match self {
Place::Abs(ref xy) => Ok(xy),
Place::Rel(_) => {
LayoutError::fail("Asserted absolute-placement on a relative-placement")
}
}
}
/// Assert that our place is absolute, and retrieve a mutable reference to the inner [Xy] value.
pub fn abs_mut(&mut self) -> LayoutResult<&mut T> {
match self {
Place::Abs(ref mut xy) => Ok(xy),
Place::Rel(_) => {
LayoutError::fail("Asserted absolute-placement on a relative-placement")
}
}
}
}
impl<T: HasUnits> From<Xy<T>> for Place<Xy<T>> {
/// Convert [Xy] values into [Place::Abs] absolute places
fn from(xy: Xy<T>) -> Self {
Self::Abs(xy)
}
}
impl<T: HasUnits> From<(T, T)> for Place<Xy<T>> {
/// Two-tuples of unit-specified numerics are converted to an [Xy] value.
fn from((x, y): (T, T)) -> Self {
Self::Abs(Xy::new(x, y))
}
}
impl From<(Int, Int)> for Place<Xy<PrimPitches>> {
/// Two-tuples of integers are converted to an [Xy] value.
fn from(tup: (Int, Int)) -> Self {
Self::Abs(Xy::from(tup))
}
}
impl<T> From<RelativePlace> for Place<T> {
fn from(rel: RelativePlace) -> Self {
Self::Rel(rel)
}
}
/// # Relatively-Placed Assignment
/// FIXME: merge back in with absolutely-placed [Assign]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RelAssign {
pub net: String, | }
/// # Relative Placement
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RelativePlace {
/// Placement is relative `to` this
pub to: Placeable,
/// Placed on this `side` of `to`
pub side: Side,
/// Aligned to this aspect of `to`
pub align: Align,
/// Separation between the placement and the `to`
pub sep: Separation,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Side {
Top,
Bottom,
Left,
Right,
}
impl Side {
/// Get the side rotated 90 degrees clockwise
pub fn cw_90(&self) -> Self {
match self {
Self::Top => Self::Right,
Self::Right => Self::Bottom,
Self::Bottom => Self::Left,
Self::Left => Self::Top,
}
}
/// Get the side rotated 90 degrees counter-clockwise
pub fn ccw_90(&self) -> Self {
match self {
Self::Top => Self::Left,
Self::Left => Self::Bottom,
Self::Bottom => Self::Right,
Self::Right => Self::Top,
}
}
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Align {
/// Side-to-side alignment
Side(Side),
/// Center-aligned
Center,
/// Port-to-port alignment
Ports(String, String),
}
/// Enumerated means of specifying relative-placement separation
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum SepBy {
/// Separated by [UnitSpeced]-distance in x and y, and by layers in z
UnitSpeced(UnitSpeced),
/// Separated by the size of another Cell
SizeOf(Ptr<Cell>),
}
/// Three-dimensional separation units
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct Separation {
pub x: Option<SepBy>,
pub y: Option<SepBy>,
pub z: Option<isize>,
}
impl Separation {
pub fn new(x: Option<SepBy>, y: Option<SepBy>, z: Option<isize>) -> Self {
Self { x, y, z }
}
pub fn x(x: SepBy) -> Self {
Self {
x: Some(x),
..Default::default()
}
}
pub fn y(y: SepBy) -> Self {
Self {
y: Some(y),
..Default::default()
}
}
pub fn z(z: isize) -> Self {
Self {
z: Some(z),
..Default::default()
}
}
/// Get the separation in direction `dir`
pub fn dir(&self, dir: Dir) -> &Option<SepBy> {
match dir {
Dir::Horiz => &self.x,
Dir::Vert => &self.y,
}
}
}
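// Example using the helpers above: `Separation::z(1)` separates by one layer in z only,
// while `Separation::new(x, y, z)` sets all three axes explicitly.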
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Placeable {
/// Instance of another cell
Instance(Ptr<Instance>),
/// Uniform array of placeable elements
Array(Ptr<ArrayInstance>),
/// Group of other placeable elements
Group(Ptr<GroupInstance>),
/// Instance port location
Port { inst: Ptr<Instance>, port: String },
/// Assignment
Assign(Ptr<RelAssign>),
}
impl Placeable {
/// Get the location of the placeable
pub fn loc(&self) -> LayoutResult<Place<Xy<PrimPitches>>> {
let loc = match self {
Placeable::Instance(ref p) => {
let p = p.read()?;
p.loc.clone()
}
Placeable::Array(ref p) => {
let p = p.read()?;
p.loc.clone()
}
Placeable::Group(ref p) => {
let p = p.read()?;
p.loc.clone()
}
Placeable::Port { .. } => unimplemented!(),
Placeable::Assign(_) => unimplemented!(),
};
Ok(loc)
}
} | pub loc: RelativePlace, |
test.go | package main
import (
"fmt"
"os"
s "strings"
gofish "github.com/stmcginnis/gofish/school"
)
func main() {
queryObject := ""
subQuery := ""
if len(os.Args) > 1 |
if len(os.Args) > 2 {
subQuery = s.ToLower(os.Args[2])
}
c, _ := gofish.APIClient("http://localhost:5000", nil)
service, _ := gofish.ServiceRoot(c)
switch queryObject {
case "chassis":
objs, _ := service.Chassis()
for _, obj := range objs {
fmt.Printf("Chassis: %#v\n\n", obj)
}
case "managers":
objs, _ := service.Managers()
for _, obj := range objs {
fmt.Printf("Manager: %#v\n\n", obj)
}
case "tasks":
objs, _ := service.Tasks()
for _, obj := range objs {
fmt.Printf("Task: %#v\n\n", obj)
}
case "sessions":
objs, _ := service.Sessions()
for _, obj := range objs {
fmt.Printf("Session: %#v\n\n", obj)
}
case "storageservices":
objs, _ := service.StorageServices()
for _, obj := range objs {
fmt.Printf("Storage service: %#v\n\n", obj)
switch subQuery {
case "endpoints":
endpoints, _ := obj.Endpoints()
for _, endpoint := range endpoints {
fmt.Printf("\tEndpoint: %#v\n\n", endpoint)
}
case "endpointgroups":
endpoints, _ := obj.EndpointGroups()
for _, endpoint := range endpoints {
fmt.Printf("\tEndpoint: %#v\n\n", endpoint)
}
}
}
case "storagesystems":
objs, _ := service.StorageSystems()
for _, obj := range objs {
fmt.Printf("Storage system: %#v\n\n", obj)
}
case "accounts":
obj, _ := service.AccountService()
fmt.Printf("Account service: %#v\n\n", obj)
case "events":
obj, _ := service.EventService()
fmt.Printf("Event service: %#v\n\n", obj)
case "systems":
objs, _ := service.Systems()
for _, obj := range objs {
fmt.Printf("System: %#v\n\n", obj)
}
default:
fmt.Printf("ServiceRoot: %#v\n\n", service)
}
}
| {
queryObject = s.ToLower(os.Args[1])
} |
window_change.rs | use super::Window;
use super::WindowHandle;
use super::WindowState;
use super::WindowType;
use crate::models::XYHWChange;
type MaybeWindowHandle = Option<WindowHandle>;
type MaybeName = Option<String>;
#[derive(Debug, Clone)]
pub struct | {
pub handle: WindowHandle,
pub transient: Option<MaybeWindowHandle>,
pub never_focus: Option<bool>,
pub name: Option<MaybeName>,
pub type_: Option<WindowType>,
pub floating: Option<XYHWChange>,
pub strut: Option<XYHWChange>,
pub states: Option<Vec<WindowState>>,
}
impl WindowChange {
pub fn new(h: WindowHandle) -> WindowChange {
WindowChange {
handle: h,
transient: None,
never_focus: None,
name: None,
type_: None,
floating: None,
strut: None,
states: None,
}
}
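/// Applies every populated change field to `window`, returning `true` if any
/// value actually changed.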
pub fn update(self, window: &mut Window) -> bool {
let mut changed = false;
if let Some(trans) = &self.transient {
let changed_trans = window.transient.is_none() || &window.transient != trans;
//if changed_trans {
// warn!("CHANGED: trans");
//}
changed = changed || changed_trans;
window.transient = trans.clone();
}
if let Some(name) = &self.name {
let changed_name = window.name.is_none() || &window.name != name;
//if changed_name {
// warn!("CHANGED: name");
//}
changed = changed || changed_name;
window.name = name.clone();
}
if let Some(nf) = self.never_focus {
let changed_nf = window.never_focus != nf;
//if changed_nf {
// warn!("CHANGED: nf");
//}
changed = changed || changed_nf;
window.never_focus = nf;
}
if let Some(floating_change) = self.floating {
let changed_floating = floating_change.update_window_floating(window);
//if changed_floating {
// warn!("CHANGED: floating");
//}
changed = changed || changed_floating;
}
if let Some(strut) = self.strut {
let changed_strut = strut.update_window_strut(window);
//////if changed_strut {
////// warn!("CHANGED: strut");
//////}
changed = changed || changed_strut;
}
if let Some(type_) = &self.type_ {
let changed_type = &window.type_ != type_;
//if changed_type {
// warn!("CHANGED: type");
//}
changed = changed || changed_type;
window.type_ = type_.clone();
if window.type_ == WindowType::Dock {
window.border = 0;
window.margin = 0;
}
}
if let Some(states) = self.states {
//warn!("CHANGED: state");
changed = true;
window.set_states(states);
}
changed
}
}
| WindowChange |
test_plantuml_markdown.py | import os
import sys
import pytest
from pytest import fixture
if sys.version_info < (3, 6):
raise pytest.skip("plantuml_markdown plugin requires Python >= 3.6", allow_module_level=True)
from tests import V8_PLUGIN_PATH
from tests.conftest import CompileResult
from v8.plantuml_markdown.plantuml_markdown import PlantUmlMarkdownProcessor, first_line_for_listing_block
def test_svg(do_fence_test):
with do_fence_test('plantuml') as compiled:
assert set(compiled.document.xpath('//svg//text/text()')) == {'Alice', 'Bob', 'hello1', 'hello2'}
assert '<?xml' not in compiled.raw_html
def test_listing(do_fence_test):
with do_fence_test('{ .plantuml listing }') as compiled:
assert compiled.document.xpath('//pre/text()') == [(
'Alice -> Bob : hello1\n'
'Bob -> Alice : hello2\n'
)]
def test_id(do_fence_test):
with do_fence_test('{ .plantuml svg+listing #foo }') as compiled:
assert compiled.document.xpath('/html/body/div/@id') == ['foo']
assert compiled.document.xpath('//pre/a/@name') == ['foo-1', 'foo-2']
assert compiled.raw_html.count('foo') == 5 # ensure the id is not anywhere unexpected
def test_line_numbering(do_fence_test):
with do_fence_test('{ .plantuml listing #foo linenos=y }') as compiled:
assert compiled.document.xpath('//table/tr//code/@data-line-number') == ['1', '2']
assert compiled.document.xpath('//table/tr//a/@href') == ['#foo-1', '#foo-2']
def test_line_highlighting(do_fence_test):
with do_fence_test('{ .plantuml listing hl_lines="1 2" }') as compiled:
assert len(compiled.document.xpath('//pre/span[@class="hll"]')) == 2
def test_svg_and_listing(do_fence_test):
with do_fence_test('{ .plantuml svg+listing }') as compiled:
assert [e.tag for e in compiled.document.xpath('/html/body/div/div/*')] == ['svg', 'pre']
def test_listing_and_svg(do_fence_test):
with do_fence_test('{ .plantuml listing+svg }') as compiled:
assert [e.tag for e in compiled.document.xpath('/html/body/div/div/*')] == ['pre', 'svg']
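# A plantuml-prefix fence sets lines that are prepended to every following plantuml block
# until the next plantuml-prefix fence replaces them; the title/footer counts below assert this.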
def test_prefix(do_compile_test):
with do_compile_test("""\
```plantuml-prefix
title Title 1
footer Footer 1
```
```plantuml
Participant foo
```
```plantuml
Participant bar
```
```plantuml-prefix
title Title 2
' no footer this time
```
```plantuml
Participant baz
```
""") as compiled:
text = compiled.document.xpath('//svg//text/text()')
assert text.count('Title 1') == 2
assert text.count('Footer 1') == 2
assert text.count('Title 2') == 1
def test_with_other_markdown(do_compile_test):
with do_compile_test("""\
# Heading
```plantuml
Participant foo
```
```python
# comment
```
""") as compiled:
assert compiled.document.xpath('//h1/text()') == ['Heading']
assert compiled.document.xpath('//svg//text/text()') == ['foo']
assert compiled.document.xpath('//pre//span[@class="c1"]/text()') == ['# comment']
def test_plantuml_syntax_error(do_compile_test):
with do_compile_test("""\
```plantuml
this line is bad
```
""", plantuml_continue_after_failure=True) as compiled:
text = compiled.document.xpath('//svg//text/text()')
assert '[From string (line 2) ]' in text
assert 'this line is bad' in text
assert 'Syntax Error?' in text
@pytest.mark.parametrize('line, expected', [
(
'```plantuml',
'```text',
),
(
'```.plantuml hl_lines="3 4"',
'```text hl_lines="3 4"',
),
(
'```{.plantuml}',
'```{.text}',
),
(
'```{ .plantuml #bar }',
'```{ .text anchor_ref=bar }',
),
(
'```{ .plantuml #bad<>&chars }',
'```{ .text anchor_ref=badchars }',
),
(
'```{ .plantuml #bar .foo linenos=y }',
'```{ .text anchor_ref=bar .foo linenos=y }',
),
])
def test_first_line_for_listing_block(line, expected):
match = PlantUmlMarkdownProcessor.FENCED_BLOCK_RE.search(line + '\n```')
assert match
assert first_line_for_listing_block(match) == expected
@fixture
def | (basic_compile_test):
def f(data: str, plantuml_continue_after_failure=False) -> CompileResult:
return basic_compile_test(
'.md',
data,
extra_config={
'PLANTUML_DEBUG': True,
'PLANTUML_CONTINUE_AFTER_FAILURE': plantuml_continue_after_failure,
'PLANTUML_EXEC': os.environ.get('PLANTUML_EXEC', 'plantuml').split(),
'PLANTUML_MARKDOWN_ARGS': [
'-chide footbox',
'-nometadata',
'-Sshadowing=false',
],
},
extra_plugins_dirs=[
V8_PLUGIN_PATH / 'plantuml',
V8_PLUGIN_PATH / 'plantuml_markdown',
]
)
return f
@fixture
def do_fence_test(do_compile_test):
def f(fence: str) -> CompileResult:
return do_compile_test("""\
```{}
Alice -> Bob : hello1
Bob -> Alice : hello2
```
""".format(fence))
return f
| do_compile_test |
test_cpp_standalone.py | import tempfile
from nose import with_setup, SkipTest
from nose.plugins.attrib import attr
from numpy.testing.utils import assert_allclose, assert_equal, assert_raises
from brian2 import *
from brian2.devices.device import reinit_devices, set_device, reset_device
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_cpp_standalone(with_output=False):
set_device('cpp_standalone', build_on_run=False)
##### Define the model
tau = 1*ms
eqs = '''
dV/dt = (-40*mV-V)/tau : volt (unless refractory)
'''
threshold = 'V>-50*mV'
reset = 'V=-60*mV'
refractory = 5*ms
N = 1000
G = NeuronGroup(N, eqs,
reset=reset,
threshold=threshold,
refractory=refractory,
name='gp')
G.V = '-i*mV'
M = SpikeMonitor(G)
S = Synapses(G, G, 'w : volt', on_pre='V += w')
S.connect('abs(i-j)<5 and i!=j')
S.w = 0.5*mV
S.delay = '0*ms'
net = Network(G, M, S)
net.run(100*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True, run=True,
with_output=with_output)
# we do an approximate equality here because depending on minor details of how it was compiled, the results
# may be slightly different (if -ffast-math is on)
assert len(M.i)>=17000 and len(M.i)<=18000
assert len(M.t) == len(M.i)
assert M.t[0] == 0.
assert M.t[-1] == 100*ms - defaultclock.dt
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_dt_changes_between_runs_standalone(with_output=False):
set_device('cpp_standalone', build_on_run=False)
defaultclock.dt = 0.1*ms
G = NeuronGroup(1, 'v:1')
mon = StateMonitor(G, 'v', record=True)
run(.5*ms)
defaultclock.dt = .5*ms
run(.5*ms)
defaultclock.dt = 0.1*ms
run(.5*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True, run=True,
with_output=True)
assert len(mon.t[:]) == 5 + 1 + 5
assert_allclose(mon.t[:],
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 1., 1.1, 1.2, 1.3, 1.4]*ms)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_multiple_connects(with_output=False):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(10, 'v:1')
S = Synapses(G, G, 'w:1')
S.connect(i=[0], j=[0])
S.connect(i=[1], j=[1])
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
run(0*ms)
device.build(directory=tempdir, compile=True, run=True,
with_output=True)
assert len(S) == 2 and len(S.w[:]) == 2
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_storing_loading(with_output=False):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(10, '''v : volt
x : 1
n : integer
b : boolean''')
v = np.arange(10)*volt
x = np.arange(10, 20)
n = np.arange(20, 30)
b = np.array([True, False]).repeat(5)
G.v = v
G.x = x
G.n = n
G.b = b
S = Synapses(G, G, '''v_syn : volt
x_syn : 1
n_syn : integer
b_syn : boolean''')
S.connect(j='i')
S.v_syn = v
S.x_syn = x
S.n_syn = n
S.b_syn = b
run(0*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True, run=True, with_output=True)
assert_allclose(G.v[:], v)
assert_allclose(S.v_syn[:], v)
assert_allclose(G.x[:], x)
assert_allclose(S.x_syn[:], x)
assert_allclose(G.n[:], n)
assert_allclose(S.n_syn[:], n)
assert_allclose(G.b[:], b)
assert_allclose(S.b_syn[:], b)
reset_device()
@attr('cpp_standalone', 'standalone-only', 'openmp')
@with_setup(teardown=reinit_devices)
def test_openmp_consistency(with_output=False):
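# Runs the same STDP network with the runtime device and with cpp_standalone using
# 0-4 OpenMP threads, then checks that weights, voltages, rates and spike counts all agree.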
previous_device = get_device()
n_cells = 100
n_recorded = 10
numpy.random.seed(42)
taum = 20 * ms
taus = 5 * ms
Vt = -50 * mV
Vr = -60 * mV
El = -49 * mV
fac = (60 * 0.27 / 10)
gmax = 20*fac
dApre = .01
taupre = 20 * ms
taupost = taupre
dApost = -dApre * taupre / taupost * 1.05
dApost *= 0.1*gmax
dApre *= 0.1*gmax
connectivity = numpy.random.randn(n_cells, n_cells)
sources = numpy.random.random_integers(0, n_cells-1, 10*n_cells)
# Only use one spike per time step (to rule out that a single source neuron
# has more than one spike in a time step)
times = numpy.random.choice(numpy.arange(10*n_cells), 10*n_cells,
replace=False)*ms
v_init = Vr + numpy.random.rand(n_cells) * (Vt - Vr)
eqs = Equations('''
dv/dt = (g-(v-El))/taum : volt
dg/dt = -g/taus : volt
''')
results = {}
for (n_threads, devicename) in [(0, 'runtime'),
(0, 'cpp_standalone'),
(1, 'cpp_standalone'),
(2, 'cpp_standalone'),
(3, 'cpp_standalone'),
(4, 'cpp_standalone')]:
set_device(devicename, build_on_run=False, with_output=False)
Synapses.__instances__().clear() | prefs.devices.cpp_standalone.openmp_threads = n_threads
P = NeuronGroup(n_cells, model=eqs, threshold='v>Vt', reset='v=Vr', refractory=5 * ms)
Q = SpikeGeneratorGroup(n_cells, sources, times)
P.v = v_init
P.g = 0 * mV
S = Synapses(P, P,
model = '''dApre/dt=-Apre/taupre : 1 (event-driven)
dApost/dt=-Apost/taupost : 1 (event-driven)
w : 1''',
pre = '''g += w*mV
Apre += dApre
w = w + Apost''',
post = '''Apost += dApost
w = w + Apre''')
S.connect()
S.w = fac*connectivity.flatten()
T = Synapses(Q, P, model = "w : 1", on_pre="g += w*mV")
T.connect(j='i')
T.w = 10*fac
spike_mon = SpikeMonitor(P)
rate_mon = PopulationRateMonitor(P)
state_mon = StateMonitor(S, 'w', record=range(n_recorded), dt=0.1*second)
v_mon = StateMonitor(P, 'v', record=range(n_recorded))
run(0.2 * second, report='text')
if devicename=='cpp_standalone':
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True,
run=True, with_output=with_output)
results[n_threads, devicename] = {}
results[n_threads, devicename]['w'] = state_mon.w
results[n_threads, devicename]['v'] = v_mon.v
results[n_threads, devicename]['s'] = spike_mon.num_spikes
results[n_threads, devicename]['r'] = rate_mon.rate[:]
for key1, key2 in [((0, 'runtime'), (0, 'cpp_standalone')),
((1, 'cpp_standalone'), (0, 'cpp_standalone')),
((2, 'cpp_standalone'), (0, 'cpp_standalone')),
((3, 'cpp_standalone'), (0, 'cpp_standalone')),
((4, 'cpp_standalone'), (0, 'cpp_standalone'))
]:
assert_allclose(results[key1]['w'], results[key2]['w'])
assert_allclose(results[key1]['v'], results[key2]['v'])
assert_allclose(results[key1]['r'], results[key2]['r'])
assert_allclose(results[key1]['s'], results[key2]['s'])
reset_device(previous_device)
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_timedarray(with_output=True):
set_device('cpp_standalone', build_on_run=False)
defaultclock.dt = 0.1*ms
ta1d = TimedArray(np.arange(10)*volt, dt=1*ms)
ta2d = TimedArray(np.arange(300).reshape(3, 100).T, dt=defaultclock.dt)
G = NeuronGroup(4, '''x = ta1d(t) : volt
y = ta2d(t, i) : 1''')
mon = StateMonitor(G, ['x', 'y'], record=True)
run(11*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True,
run=True, with_output=with_output)
for idx in xrange(4):
# x variable should have neuron independent values
assert_equal(mon[idx].x[:],
np.clip(np.arange(11).repeat(10), 0, 9)*volt)
for idx in xrange(3):
# y variable is neuron-specific
assert_equal(mon[idx].y[:],
np.clip(np.arange(110), 0, 99) + idx*100)
# the 2d array only has 3 columns, the last neuron should therefore contain
# only NaN
assert_equal(mon[3].y[:], np.nan)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_duplicate_names_across_nets(with_output=True):
set_device('cpp_standalone', build_on_run=False)
# In standalone mode, names have to be globally unique, not just unique
# per network
obj1 = BrianObject(name='name1')
obj2 = BrianObject(name='name2')
obj3 = BrianObject(name='name3')
obj4 = BrianObject(name='name1')
net1 = Network(obj1, obj2)
net2 = Network(obj3, obj4)
net1.run(0*ms)
net2.run(0*ms)
assert_raises(ValueError, lambda: device.build())
reset_device()
@attr('cpp_standalone', 'standalone-only', 'openmp')
@with_setup(teardown=reinit_devices)
def test_openmp_scalar_writes(with_output=False):
# Test that writing to a scalar variable only is done once in an OpenMP
# setting (see github issue #551)
set_device('cpp_standalone', build_on_run=False)
prefs.devices.cpp_standalone.openmp_threads = 4
G = NeuronGroup(10, 's : 1 (shared)')
G.run_regularly('s += 1')
run(defaultclock.dt)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, run=True, compile=True,
with_output=with_output)
assert_equal(G.s[:], 1.0)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_time_after_run(with_output=False):
set_device('cpp_standalone', build_on_run=False)
# Check that the clock and network time after a run is correct, even if we
# have not actually run the code yet (via build)
G = NeuronGroup(10, 'dv/dt = -v/(10*ms) : 1')
net = Network(G)
assert_allclose(defaultclock.dt, 0.1*ms)
assert_allclose(defaultclock.t, 0.*ms)
assert_allclose(G.t, 0.*ms)
assert_allclose(net.t, 0.*ms)
net.run(10*ms)
assert_allclose(defaultclock.t, 10.*ms)
assert_allclose(G.t, 10.*ms)
assert_allclose(net.t, 10.*ms)
net.run(10*ms)
assert_allclose(defaultclock.t, 20.*ms)
assert_allclose(G.t, 20.*ms)
assert_allclose(net.t, 20.*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, run=True, compile=True,
with_output=with_output)
# Everything should of course still be accessible
assert_allclose(defaultclock.t, 20.*ms)
assert_allclose(G.t, 20.*ms)
assert_allclose(net.t, 20.*ms)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_array_cache(with_output=False):
# Check that variables are only accessible from Python when they should be
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(10, '''dv/dt = -v / (10*ms) : 1
w : 1
x : 1
y : 1
z : 1 (shared)''',
threshold='v>1')
S = Synapses(G, G, 'weight: 1', on_pre='w += weight')
S.connect(p=0.2)
S.weight = 7
# All neurongroup values should be known
assert_allclose(G.v, 0)
assert_allclose(G.w, 0)
assert_allclose(G.x, 0)
assert_allclose(G.y, 0)
assert_allclose(G.z, 0)
assert_allclose(G.i, np.arange(10))
# But the synaptic variable is not -- we don't know the number of synapses
assert_raises(NotImplementedError, lambda: S.weight[:])
# Setting variables with explicit values should not change anything
G.v = np.arange(10)+1
G.w = 2
G.y = 5
G.z = 7
assert_allclose(G.v, np.arange(10)+1)
assert_allclose(G.w, 2)
assert_allclose(G.y, 5)
assert_allclose(G.z, 7)
# But setting with code should invalidate them
G.x = 'i*2'
assert_raises(NotImplementedError, lambda: G.x[:])
# Make sure that the array cache does not allow to use incorrectly sized
# values to pass
assert_raises(ValueError, lambda: setattr(G, 'w', [0, 2]))
assert_raises(ValueError, lambda: G.w.__setitem__(slice(0, 4), [0, 2]))
run(10*ms)
# v is now no longer known without running the network
assert_raises(NotImplementedError, lambda: G.v[:])
# Neither is w, it is updated in the synapse
assert_raises(NotImplementedError, lambda: G.w[:])
# However, no code touches y or z
assert_allclose(G.y, 5)
assert_allclose(G.z, 7)
# i is read-only anyway
assert_allclose(G.i, np.arange(10))
# After actually running the network, everything should be accessible
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, run=True, compile=True,
with_output=with_output)
assert all(G.v > 0)
assert all(G.w > 0)
assert_allclose(G.x, np.arange(10)*2)
assert_allclose(G.y, 5)
assert_allclose(G.z, 7)
assert_allclose(G.i, np.arange(10))
assert_allclose(S.weight, 7)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_active_flag_standalone(with_output=True):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(1, 'dv/dt = 1/ms : 1')
mon = StateMonitor(G, 'v', record=0)
mon.active = False
run(1*ms)
mon.active = True
G.active = False
run(1*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir)
# Monitor should start recording at 1ms
# Neurongroup should not integrate after 1ms (but should have integrated before)
assert_allclose(mon[0].t[0], 1*ms)
assert_allclose(mon[0].v, 1.0)
if __name__=='__main__':
# Print the debug output when testing this file only but not when running
# via nose test
for t in [
test_cpp_standalone,
test_dt_changes_between_runs_standalone,
test_multiple_connects,
test_storing_loading,
test_openmp_consistency,
test_timedarray,
test_duplicate_names_across_nets,
test_openmp_scalar_writes,
test_time_after_run,
test_array_cache,
test_active_flag_standalone
]:
t(with_output=True)
reinit_devices() | if devicename=='cpp_standalone':
reinit_devices() |
build.go | package porter
import (
"bytes"
"context"
"fmt"
"os"
"strings"
"github.com/deislabs/porter/pkg/config"
cxt "github.com/deislabs/porter/pkg/context"
"github.com/deislabs/porter/pkg/mixin"
"github.com/docker/cli/cli/command"
cliflags "github.com/docker/cli/cli/flags"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/registry"
"github.com/pkg/errors"
)
func (p *Porter) Build() error {
err := p.Config.LoadManifest()
if err != nil {
return err
}
if err := p.prepareDockerFilesystem(); err != nil {
return fmt.Errorf("unable to copy mixins: %s", err)
}
if err := p.generateDockerFile(); err != nil {
return fmt.Errorf("unable to generate Dockerfile: %s", err)
}
digest, err := p.buildInvocationImage(context.Background())
if err != nil {
return errors.Wrap(err, "unable to build CNAB invocation image")
}
taggedImage, err := p.rewriteImageWithDigest(p.Config.Manifest.Image, digest)
if err != nil {
return fmt.Errorf("unable to regenerate tag: %s", err)
}
return p.buildBundle(taggedImage, digest)
}
func (p *Porter) generateDockerFile() error {
lines, err := p.buildDockerFile()
if err != nil {
return errors.Wrap(err, "error generating the Dockerfile")
}
fmt.Fprintf(p.Out, "\nWriting Dockerfile =======>\n")
contents := strings.Join(lines, "\n")
fmt.Fprintln(p.Out, contents)
err = p.Config.FileSystem.WriteFile("Dockerfile", []byte(contents), 0644)
return errors.Wrap(err, "couldn't write the Dockerfile")
}
func (p *Porter) buildDockerFile() ([]string, error) {
fmt.Fprintf(p.Out, "\nGenerating Dockerfile =======>\n")
lines := make([]string, 0, 10)
lines = append(lines, p.buildFromSection()...)
lines = append(lines, p.buildCopySSL())
mixinLines, err := p.buildMixinsSection()
if err != nil {
return nil, errors.Wrap(err, "error generating Dockefile content for mixins")
}
lines = append(lines, mixinLines...)
// Defer cnab/porter.yaml copy lines until very last, as these perhaps more subject to change
lines = append(lines, p.buildCNABSection()...)
lines = append(lines, p.buildPorterSection()...)
lines = append(lines, p.buildCMDSection())
for _, line := range lines {
fmt.Fprintln(p.Out, line)
}
return lines, nil
}
func (p *Porter) buildFromSection() []string {
return []string{
`FROM quay.io/deis/lightweight-docker-go:v0.2.0`,
`FROM debian:stretch`,
}
}
func (p *Porter) buildPorterSection() []string {
return []string{
`COPY porter.yaml /cnab/app/porter.yaml`,
}
}
func (p *Porter) buildCNABSection() []string {
return []string{
`COPY cnab/ /cnab/`,
}
}
func (p *Porter) buildCMDSection() string {
return `CMD ["/cnab/app/run"]`
}
func (p *Porter) buildCopySSL() string {
return `COPY --from=0 /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt`
}
func (p *Porter) buildMixinsSection() ([]string, error) {
lines := make([]string, 0)
for _, m := range p.Manifest.Mixins {
mixinDir, err := p.GetMixinDir(m)
if err != nil {
return nil, err
}
r := mixin.NewRunner(m, mixinDir, false)
r.Command = "build"
r.Input = "" // TODO: let the mixin know about which steps will be executed so that it can be more selective about copying into the invocation image
// Copy the existing context and tweak to pipe the output differently
mixinStdout := &bytes.Buffer{}
var mixinContext cxt.Context
mixinContext = *p.Context
mixinContext.Out = mixinStdout // mixin stdout -> dockerfile lines
mixinContext.Err = p.Context.Out // mixin stderr -> logs
r.Context = &mixinContext
err = r.Validate()
if err != nil {
return nil, err
}
err = r.Run()
if err != nil {
return nil, err
}
l := strings.Split(mixinStdout.String(), "\n")
lines = append(lines, l...)
}
return lines, nil
}
func (p *Porter) prepareDockerFilesystem() error {
fmt.Printf("Copying dependencies ===> \n")
for _, dep := range p.Manifest.Dependencies {
err := p.copyDependency(dep.Name)
if err != nil {
return err
}
}
fmt.Printf("Copying porter runtime ===> \n")
pr, err := p.GetPorterRuntimePath()
if err != nil {
return err
}
err = p.CopyFile(pr, "cnab/app/porter-runtime")
if err != nil {
return err
}
fmt.Printf("Copying mixins ===> \n")
for _, mixin := range p.Manifest.Mixins {
err := p.copyMixin(mixin)
if err != nil {
return err
}
}
return nil
}
func (p *Porter) copyDependency(bundle string) error {
fmt.Printf("Copying bundle dependency %s ===> \n", bundle)
bundleDir, err := p.GetBundleDir(bundle)
if err != nil {
return err
}
err = p.Context.CopyDirectory(bundleDir, "cnab/app/bundles", true)
return errors.Wrapf(err, "could not copy bundle directory contents for %s", bundle)
}
func (p *Porter) copyMixin(mixin string) error {
fmt.Printf("Copying mixin %s ===> \n", mixin)
mixinDir, err := p.GetMixinDir(mixin)
if err != nil {
return err
}
err = p.Context.CopyDirectory(mixinDir, "cnab/app/mixins", true)
return errors.Wrapf(err, "could not copy mixin directory contents for %s", mixin)
}
func (p *Porter) buildInvocationImage(ctx context.Context) (string, error) {
fmt.Printf("\nStarting Invocation Image Build =======> \n")
path, err := os.Getwd()
if err != nil {
return "", errors.Wrap(err, "could not get current working directory")
}
buildOptions := types.ImageBuildOptions{
SuppressOutput: false,
PullParent: false,
Tags: []string{p.Config.Manifest.Image},
Dockerfile: "Dockerfile",
}
tar, err := archive.TarWithOptions(path, &archive.TarOptions{})
if err != nil {
return "", err
}
cli, err := command.NewDockerCli()
if err != nil {
return "", errors.Wrap(err, "could not create new docker client")
}
if err := cli.Initialize(cliflags.NewClientOptions()); err != nil {
return "", err
}
response, err := cli.Client().ImageBuild(context.Background(), tar, buildOptions)
if err != nil {
return "", err
}
termFd, _ := term.GetFdInfo(os.Stdout)
// Setting this to false here because Moby calls os.Exit(1) all over the place, and this fails on WSL (only)
// when Term is true.
isTerm := false
err = jsonmessage.DisplayJSONMessagesStream(response.Body, os.Stdout, termFd, isTerm, nil)
if err != nil {
return "", errors.Wrap(err, "failed to stream docker build stdout")
}
ref, err := reference.ParseNormalizedNamed(p.Config.Manifest.Image)
if err != nil {
return "", err
}
// Resolve the Repository name from fqn to RepositoryInfo
repoInfo, err := registry.ParseRepositoryInfo(ref)
if err != nil {
return "", err
}
authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index)
encodedAuth, err := command.EncodeAuthToBase64(authConfig)
if err != nil {
return "", err
}
options := types.ImagePushOptions{
All: true,
RegistryAuth: encodedAuth,
}
pushResponse, err := cli.Client().ImagePush(ctx, p.Config.Manifest.Image, options)
if err != nil {
return "", errors.Wrap(err, "docker push failed")
}
defer pushResponse.Close()
err = jsonmessage.DisplayJSONMessagesStream(pushResponse, os.Stdout, termFd, isTerm, nil)
if err != nil {
if strings.HasPrefix(err.Error(), "denied") {
return "", errors.Wrap(err, "docker push authentication failed")
}
return "", errors.Wrap(err, "failed to stream docker push stdout")
}
dist, err := cli.Client().DistributionInspect(ctx, p.Config.Manifest.Image, encodedAuth)
if err != nil {
return "", errors.Wrap(err, "unable to inspect docker image")
}
return string(dist.Descriptor.Digest), nil
}
func (p *Porter) rewriteImageWithDigest(InvocationImage string, digest string) (string, error) {
ref, err := reference.Parse(InvocationImage)
if err != nil {
return "", fmt.Errorf("unable to parse docker image: %s", err)
}
named, ok := ref.(reference.Named)
if !ok {
return "", fmt.Errorf("docker image %s is not a named reference", InvocationImage)
}
return fmt.Sprintf("%s@%s", named.Name(), digest), nil
}
func (p *Porter) buildBundle(invocationImage string, digest string) error {
fmt.Printf("\nGenerating Bundle File with Invocation Image %s =======> \n", invocationImage)
bundle := Bundle{
Name: p.Config.Manifest.Name,
Description: p.Config.Manifest.Description,
Version: p.Config.Manifest.Version,
}
image := InvocationImage{
Image: invocationImage,
ImageType: "docker",
}
image.Digest = digest
bundle.InvocationImages = []InvocationImage{image}
bundle.Parameters = p.generateBundleParameters()
bundle.Credentials = p.generateBundleCredentials()
return p.WriteFile(bundle, 0644)
}
func (p *Porter) generateBundleParameters() map[string]ParameterDefinition {
params := map[string]ParameterDefinition{}
for _, param := range append(p.Manifest.Parameters, p.buildDefaultPorterParameters()...) {
fmt.Printf("Generating parameter definition %s ====>\n", param.Name)
p := ParameterDefinition{
DataType: param.DataType,
DefaultValue: param.DefaultValue,
AllowedValues: param.AllowedValues,
MinValue: param.MinValue,
MaxValue: param.MaxValue,
MinLength: param.MinLength,
MaxLength: param.MaxLength,
Sensitive: param.Sensitive,
}
// If the default is empty, set required to true.
if param.DefaultValue == nil {
p.Required = true
}
if param.Metadata.Description != "" {
p.Metadata = ParameterMetadata{Description: param.Metadata.Description}
}
if param.Destination != nil {
p.Destination = &Location{
EnvironmentVariable: param.Destination.EnvironmentVariable,
Path: param.Destination.Path,
}
} else {
p.Destination = &Location{
EnvironmentVariable: strings.ToUpper(param.Name),
}
}
params[param.Name] = p
}
return params
}
func (p *Porter) buildDefaultPorterParameters() []config.ParameterDefinition {
return []config.ParameterDefinition{
{
Name: "porter-debug",
Destination: &config.Location{
EnvironmentVariable: "PORTER_DEBUG",
},
DataType: "bool",
DefaultValue: false,
Metadata: config.ParameterMetadata{
Description: "Print debug information from Porter when executing the bundle"},
},
}
}
func (p *Porter) generateBundleCredentials() map[string]Location {
params := map[string]Location{}
for _, cred := range p.Manifest.Credentials {
fmt.Printf("Generating credential %s ====>\n", cred.Name)
l := Location{
Path: cred.Path,
EnvironmentVariable: cred.EnvironmentVariable,
}
params[cred.Name] = l
}
return params
}
external_table_test.go | package tests
import (
"context"
"fmt"
"testing"
"time"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/external"
"github.com/stretchr/testify/assert"
)
func TestExternalTable(t *testing.T) {
table1, err := external.NewTable("external_table_1",
external.Column("col1", "UInt8"),
external.Column("col2", "String"),
external.Column("col3", "DateTime"),
)
if assert.NoError(t, err) {
for i := 0; i < 10; i++ {
assert.NoError(t, table1.Append(uint8(i), fmt.Sprintf("value_%d", i), time.Now()))
}
}
table2, err := external.NewTable("external_table_2",
external.Column("col1", "UInt8"),
external.Column("col2", "String"),
external.Column("col3", "DateTime"),
)
if assert.NoError(t, err) {
for i := 0; i < 10; i++ {
assert.NoError(t, table2.Append(uint8(i), fmt.Sprintf("value_%d", i), time.Now()))
}
}
conn, err := clickhouse.Open(&clickhouse.Options{
Addr: []string{"127.0.0.1:9000"},
Auth: clickhouse.Auth{
Database: "default",
Username: "default",
Password: "",
},
Compression: &clickhouse.Compression{
Method: clickhouse.CompressionLZ4,
},
// Debug: true,
})
if assert.NoError(t, err) {
ctx := clickhouse.Context(context.Background(),
clickhouse.WithExternalTable(table1, table2),
)
if rows, err := conn.Query(ctx, "SELECT * FROM external_table_1"); assert.NoError(t, err) {
for rows.Next() {
var (
col1 uint8
col2 string
col3 time.Time
)
if err := rows.Scan(&col1, &col2, &col3); assert.NoError(t, err) {
t.Logf("row: col1=%d, col2=%s, col3=%s\n", col1, col2, col3)
}
}
rows.Close()
}
var count uint64
if err := conn.QueryRow(ctx, "SELECT COUNT(*) FROM external_table_1").Scan(&count); assert.NoError(t, err) {
assert.Equal(t, uint64(10), count)
}
if err := conn.QueryRow(ctx, "SELECT COUNT(*) FROM external_table_2").Scan(&count); assert.NoError(t, err) {
assert.Equal(t, uint64(10), count)
}
if err := conn.QueryRow(ctx, "SELECT COUNT(*) FROM (SELECT * FROM external_table_1 UNION ALL SELECT * FROM external_table_2)").Scan(&count); assert.NoError(t, err) {
assert.Equal(t, uint64(20), count)
}
}
}
Color.ts | const boxOrange = 6
const boxRed = 7
const boxBlue = 8
const boxGreen = 9
const boxGrey = 10
const targetOrange = 25
const targetRed = 38
const targetBlue = 51
const targetGreen = 64
const targetGrey = 77
export {
boxOrange,
boxRed,
boxBlue,
boxGreen,
boxGrey,
targetOrange,
targetRed,
targetBlue,
targetGreen,
targetGrey,
}
flask1.py | from flask import Flask
# Import Flask
#
app = Flask(__name__)# create a Flask object
# Register a view function for GET requests to the URL /
# The @ syntax is called a decorator
# The way Flask maps URLs to view functions is called URL dispatch.
# The code below means: when a client requests /, the helloworld function is executed
# A function registered with the route decorator is called a view function
@app.route("/")
def helloworld():# view function
return "Hello world flask"
# Run Flask
# host='0.0.0.0' makes the server reachable from outside
# port = port setting, debug = debug setting
if __name__=="__main__":
#app.run(host='0.0.0.0', port=5000, debug=True)
app.run(host='0.0.0.0')
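# A minimal sketch of a second, parameterized route (hypothetical endpoint, not
# part of the original script), following the same decorator pattern described above:
# @app.route("/user/<name>")
# def hello_user(name):
#     return "Hello " + name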
|
|
main.rs | use std::io::Read;
#[derive(Clone, Copy)]
enum Arg {
Val(i32),
Reg(usize),
}
#[derive(Clone, Copy)]
enum Instruction {
CPY(Arg, Arg),
INC(Arg),
DEC(Arg),
JNZ(Arg, Arg),
TGL(Arg),
}
fn parse_reg(arg: &str) -> usize {
arg.chars().next().unwrap() as usize - 'a' as usize
}
fn parse_arg(arg: &str) -> Arg {
if arg.chars().all(|x| x.is_ascii_alphabetic()) {
Arg::Reg(parse_reg(arg))
} else {
Arg::Val(arg.parse().unwrap())
}
}
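// Illustrative behavior of the helpers above: parse_arg("a") yields Arg::Reg(0),
// while parse_arg("-5") fails the all-alphabetic check and yields Arg::Val(-5).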
fn get_val(regs: &[i32; 4], arg: &Arg) -> i32 {
match arg {
Arg::Reg(x) => regs[*x],
Arg::Val(x) => *x,
}
}
fn main() {
let mut input = String::new();
std::io::stdin().read_to_string(&mut input).unwrap();
let mut instructions: Vec<_> = input.lines().map(|line| {
let mut words = line.split_whitespace();
match words.next().unwrap() {
"cpy" => {
let source = words.next().unwrap();
let target = words.next().unwrap();
Instruction::CPY(parse_arg(source), parse_arg(target))
}
"inc" => {
Instruction::INC(parse_arg(words.next().unwrap()))
}
"dec" => {
Instruction::DEC(parse_arg(words.next().unwrap()))
}
"jnz" => {
let source = words.next().unwrap();
let offset = words.next().unwrap();
Instruction::JNZ(parse_arg(source), parse_arg(offset))
}
"tgl" => {
let source = words.next().unwrap();
Instruction::TGL(parse_arg(source))
}
_ => { panic!("") }
}
}).collect();
let mut regs = [0; 4];
regs[0] = 7;
let mut pc = 0;
while let Some(instr) = instructions.get(pc) {
pc += 1;
match instr {
Instruction::CPY(source, target) => {
if let &Arg::Reg(reg) = target {
regs[reg] = get_val(®s, source);
}
}
Instruction::INC(target) => {
if let &Arg::Reg(reg) = target {
regs[reg] += 1;
}
}
Instruction::DEC(target) => {
if let &Arg::Reg(reg) = target {
regs[reg] -= 1;
}
}
Instruction::JNZ(source, offset) => {
if get_val(®s, source) != 0 {
pc = (pc as isize - 1 + get_val(®s, offset) as isize) as usize;
}
}
Instruction::TGL(source) => {
let index = pc as isize - 1 + get_val(®s, source) as isize;
if index >= 0 && (index as usize) < instructions.len() {
let index = index as usize;
instructions[index] = match instructions[index] {
Instruction::CPY(arg1, arg2) => Instruction::JNZ(arg1, arg2),
Instruction::INC(arg) => Instruction::DEC(arg),
Instruction::DEC(arg) => Instruction::INC(arg),
Instruction::JNZ(arg1, arg2) => Instruction::CPY(arg1, arg2),
Instruction::TGL(arg) => Instruction::INC(arg),
};
}
}
}
}
println!("{}", regs[0]);
}
deploy.py | import itertools
import toposort
from populus.utils.contracts import (
compute_direct_dependency_graph,
compute_recursive_contract_dependencies,
)
def compute_deploy_order(dependency_graph):
"""
Given a dictionary that maps contract to their dependencies,
determine the overall dependency ordering for that set of contracts.
"""
return toposort.toposort_flatten(dict(dependency_graph))
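# Illustrative sketch with hypothetical contract names (dependencies always come
# before the contracts that link against them):
#   compute_deploy_order({"Token": {"SafeMath"}, "SafeMath": set()})
#   -> ["SafeMath", "Token"]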
def get_deploy_order(contracts_to_deploy, compiled_contracts):
# Extract any dependencies that exist due to library linking.
dependency_graph = compute_direct_dependency_graph(compiled_contracts.values())
global_deploy_order = compute_deploy_order(dependency_graph)
# Compute the full set of dependencies needed to deploy the desired
# contracts.
all_deploy_dependencies = set(itertools.chain.from_iterable(
compute_recursive_contract_dependencies(contract_name, dependency_graph)
for contract_name in contracts_to_deploy
))
all_contracts_to_deploy = all_deploy_dependencies.union(contracts_to_deploy)
# Now compute the order that the contracts should be deployed based on
# their dependencies.
deploy_order = tuple(
contract_name
for contract_name
in global_deploy_order
if contract_name in all_contracts_to_deploy
)
return deploy_order
|
test_benchmark_topi_conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing topi conv2d_transpose operator for VTA"""
import json
import os
import pytest
import numpy as np
from collections import namedtuple
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from tvm.contrib import utils
from tvm.contrib.pickle_memoize import memoize
from tvm import topi
import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
Workload = namedtuple(
"Conv2DTransposeWorkload",
[
"batch",
"height",
"width",
"in_filter",
"out_filter",
"hkernel",
"wkernel",
"hpad",
"wpad",
"hstride",
"wstride",
"o_hpad",
"o_wpad",
],
)
# Get batch info from env
env = vta.get_env()
# DCGAN workloads
dcgan_wklds = [
# dcgan
("DCGAN.CT1", Workload(env.BATCH, 4, 4, 1024, 512, 4, 4, 1, 1, 2, 2, 0, 0)),
("DCGAN.CT2", Workload(env.BATCH, 8, 8, 512, 256, 4, 4, 1, 1, 2, 2, 0, 0)),
("DCGAN.CT3", Workload(env.BATCH, 16, 16, 256, 128, 4, 4, 1, 1, 2, 2, 0, 0)),
]
# FIXME: we need a custom clip operator to circumvent a pattern detection limitation
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
# Helper function to get factors
def _find_factors(n):
factors = []
for f in range(1, n + 1):
if n % f == 0:
factors.append(f)
return factors
def run_conv2d_transpose(
env, remote, wl, target, check_correctness=True, print_ir=False, samples=4
):
# Workload assertions
assert wl.hpad == wl.wpad
# Perform packing only if we are targeting the accelerator
if "arm_cpu" in target.keys:
data_pack = False
layout = "NCHW"
fcompute = topi.arm_cpu.conv2d_transpose_nchw
fschedule = topi.arm_cpu.schedule_conv2d_transpose_nchw
elif "vta" in target.keys:
data_pack = True
layout = "NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN)
fcompute = vta.top.conv2d_transpose_packed
fschedule = vta.top.schedule_conv2d_transpose_packed
# Derive shapes depending upon packing
a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)
w_shape = (wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)
if data_pack:
data_shape = (
wl.batch // env.BATCH,
wl.in_filter // env.BLOCK_IN,
wl.height,
wl.width,
env.BATCH,
env.BLOCK_IN,
)
kernel_shape = (
wl.out_filter // env.BLOCK_OUT,
wl.in_filter // env.BLOCK_IN,
wl.hkernel,
wl.wkernel,
env.BLOCK_OUT,
env.BLOCK_IN,
)
else:
data_shape = a_shape
kernel_shape = w_shape
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))
# Define base computation schedule
with target:
res = fcompute(
data, kernel, (wl.hstride, wl.wstride), padding, env.acc_dtype, (wl.o_hpad, wl.o_wpad)
)
res = topi.right_shift(res, env.WGT_WIDTH)
res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
res = topi.cast(res, env.out_dtype)
# Derive base schedule
s = fschedule([res])
if print_ir:
print(vta.lower(s, [data, kernel, res], simple_mode=True))
# Derive number of ops
fout_height = (wl.height - 1) * wl.hstride - 2 * wl.hpad + wl.hkernel + wl.o_hpad
fout_width = (wl.width - 1) * wl.wstride - 2 * wl.wpad + wl.wkernel + wl.o_wpad
num_ops = (
2
* wl.batch
* fout_height
* fout_width
* wl.hkernel
* wl.wkernel
* wl.out_filter
* wl.in_filter
)
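# Worked example for DCGAN.CT1 above: fout_height = (4 - 1) * 2 - 2 * 1 + 4 + 0 = 8,
# so the 4x4 input is upsampled to an 8x8 output, matching the next workload's input size.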
# @memoize("vta.tests.test_benchmark_topi.conv2d.verify_nhwc")
def get_ref_data():
# derive min max for act and wgt types (max non inclusive)
a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
w_np = np.random.randint(
w_min, w_max, size=(wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)
).astype(kernel.dtype)
r_np = tvm.topi.testing.conv2d_transpose_nchw_python(
a_np.astype(env.acc_dtype),
w_np.astype(env.acc_dtype),
(wl.hstride, wl.wstride),
wl.hpad,
(wl.o_hpad, wl.o_wpad),
).astype(env.acc_dtype)
return a_np, w_np, r_np
# Data in original format
data_np, kernel_np, res_ref = get_ref_data()
if data_pack:
data_np = data_np.reshape(
wl.batch // env.BATCH,
env.BATCH,
wl.in_filter // env.BLOCK_IN,
env.BLOCK_IN,
wl.height,
wl.width,
).transpose((0, 2, 4, 5, 1, 3))
kernel_np = kernel_np.reshape(
wl.in_filter // env.BLOCK_IN,
env.BLOCK_IN,
wl.out_filter // env.BLOCK_OUT,
env.BLOCK_OUT,
wl.hkernel,
wl.wkernel,
).transpose((2, 0, 4, 5, 3, 1))
kernel_np = np.flip(kernel_np, 2)
kernel_np = np.flip(kernel_np, 3)
# Build
if "vta" in target.keys:
mod = vta.build(
s,
[data, kernel, res],
target=target,
target_host=env.target_host,
name="conv2d_transpose",
)
else:
mod = tvm.build(
s,
[data, kernel, res],
target=target,
target_host=env.target_host,
name="conv2d_transpose",
)
temp = utils.tempdir()
mod.save(temp.relpath("conv2d_transpose.o"))
remote.upload(temp.relpath("conv2d_transpose.o"))
f = remote.load_module("conv2d_transpose.o")
ctx = remote.context(str(target))
res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)
data_arr = tvm.nd.array(data_np, ctx)
kernel_arr = tvm.nd.array(kernel_np, ctx)
res_arr = tvm.nd.array(res_np, ctx)
time_f = f.time_evaluator("conv2d_transpose", ctx, number=samples)
# In vta sim mode, collect simulator runtime statistics
stats = {}
cost = None
if env.TARGET in ["sim", "tsim"]:
# Check if we're in local RPC mode (allows us to rebuild the
# runtime on the fly when varying the VTA designs)
local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
if local_rpc:
if env.TARGET == "sim":
remote.get_function("vta.simulator.profiler_clear")()
else:
remote.get_function("vta.tsim.profiler_clear")()
cost = time_f(data_arr, kernel_arr, res_arr)
if env.TARGET == "sim":
stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
else:
stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
else:
simulator.clear_stats()
cost = time_f(data_arr, kernel_arr, res_arr)
stats = simulator.stats()
else:
cost = time_f(data_arr, kernel_arr, res_arr)
# Check correctness
correct = False
if check_correctness:
res_orig = res_arr.asnumpy()
if data_pack:
res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(
wl.batch, wl.out_filter, fout_height, fout_width
)
res_ref = res_ref >> env.WGT_WIDTH
res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
res_ref = res_ref.astype(env.out_dtype)
correct = np.allclose(res_orig, res_ref)
gops = (num_ops / cost.mean) / float(10 ** 9)
status = "PASSED" if correct else "FAILED"
if "arm_cpu" in target.keys:
device = "CPU"
elif "vta" in target.keys:
device = "VTA"
print("%s CONV2D TEST %s: Time cost = %g sec/op, %g GOPS" % (device, status, cost.mean, gops))
return correct, cost, stats
@pytest.mark.parametrize("device", ["vta", "arm_cpu"])
def test_conv2d_transpose(device):
def _run(env, remote):
if device == "vta":
target = env.target
if env.TARGET not in ["sim", "tsim"]:
assert tvm.runtime.enabled("rpc")
program_fpga(remote, bitstream=None)
reconfig_runtime(remote)
elif device == "arm_cpu":
target = env.target_vta_cpu
with autotvm.tophub.context(target): # load pre-tuned schedule parameters
for _, wl in dcgan_wklds:
print(wl)
run_conv2d_transpose(env, remote, wl, target)
vta.testing.run(_run)
if __name__ == "__main__":
test_conv2d_transpose(device="arm_cpu")
test_conv2d_transpose(device="vta")
system_service-remote.go | // Autogenerated by Thrift Compiler (facebook)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// @generated
package main
import (
"flag"
"fmt"
"math"
"net"
"net/url"
"os"
"strconv"
"strings"
thrift "github.com/facebook/fbthrift-go"
"github.com/h3copen/h3cfibservice/gen-go/platform"
)
func Usage() {
fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, "\nFunctions:")
fmt.Fprintln(os.Stderr, " getAllLinks()")
fmt.Fprintln(os.Stderr, " getAllNeighbors()")
fmt.Fprintln(os.Stderr, " void addIfaceAddresses(string iface, addrs)")
fmt.Fprintln(os.Stderr, " void removeIfaceAddresses(string iface, addrs)")
fmt.Fprintln(os.Stderr, " void syncIfaceAddresses(string iface, i16 family, i16 scope, addrs)")
fmt.Fprintln(os.Stderr, " getIfaceAddresses(string iface, i16 family, i16 scope)")
fmt.Fprintln(os.Stderr)
os.Exit(0)
}
func main() {
flag.Usage = Usage
var host string
var port int
var protocol string
var urlString string
var framed bool
var useHttp bool
var parsedUrl url.URL
var trans thrift.Transport
_ = strconv.Atoi
_ = math.Abs
flag.Usage = Usage
flag.StringVar(&host, "h", "localhost", "Specify host")
flag.IntVar(&port, "p", 9090, "Specify port")
flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
flag.StringVar(&urlString, "u", "", "Specify the url")
flag.BoolVar(&framed, "framed", false, "Use framed transport")
flag.BoolVar(&useHttp, "http", false, "Use http")
flag.Parse()
if len(urlString) > 0 {
parsedUrl, err := url.Parse(urlString)
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
host = parsedUrl.Host
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http"
} else if useHttp {
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
}
cmd := flag.Arg(0)
var err error
if useHttp {
trans, err = thrift.NewHTTPPostClient(parsedUrl.String())
} else {
portStr := fmt.Sprint(port)
if strings.Contains(host, ":") {
host, portStr, err = net.SplitHostPort(host)
if err != nil {
fmt.Fprintln(os.Stderr, "error with host:", err)
os.Exit(1)
}
}
trans, err = thrift.NewSocket(thrift.SocketAddr(net.JoinHostPort(host, portStr)))
if err != nil {
fmt.Fprintln(os.Stderr, "error resolving address:", err)
os.Exit(1)
}
if framed {
trans = thrift.NewFramedTransport(trans)
}
}
if err != nil {
fmt.Fprintln(os.Stderr, "Error creating transport", err)
os.Exit(1)
}
defer trans.Close()
var protocolFactory thrift.ProtocolFactory
switch protocol {
case "compact":
protocolFactory = thrift.NewCompactProtocolFactory()
break
case "simplejson":
protocolFactory = thrift.NewSimpleJSONProtocolFactory()
break
case "json":
protocolFactory = thrift.NewJSONProtocolFactory()
break
case "binary", "":
protocolFactory = thrift.NewBinaryProtocolFactoryDefault()
break
default:
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
Usage()
os.Exit(1)
}
client := platform.NewSystemServiceClientFactory(trans, protocolFactory)
if err := trans.Open(); err != nil {
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
os.Exit(1)
}
switch cmd {
case "getAllLinks":
if flag.NArg() - 1 != 0 {
fmt.Fprintln(os.Stderr, "GetAllLinks requires 0 args")
flag.Usage()
}
fmt.Print(client.GetAllLinks())
fmt.Print("\n")
break
case "getAllNeighbors":
if flag.NArg() - 1 != 0 {
fmt.Fprintln(os.Stderr, "GetAllNeighbors requires 0 args")
flag.Usage()
}
fmt.Print(client.GetAllNeighbors())
fmt.Print("\n")
break
case "addIfaceAddresses":
if flag.NArg() - 1 != 2 {
fmt.Fprintln(os.Stderr, "AddIfaceAddresses requires 2 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
arg35 := flag.Arg(2)
mbTrans36 := thrift.NewMemoryBufferLen(len(arg35))
defer mbTrans36.Close()
_, err37 := mbTrans36.WriteString(arg35)
if err37 != nil {
Usage()
return
}
factory38 := thrift.NewSimpleJSONProtocolFactory()
jsProt39 := factory38.GetProtocol(mbTrans36)
containerStruct1 := platform.NewSystemServiceAddIfaceAddressesArgs()
err40 := containerStruct1.ReadField2(jsProt39)
if err40 != nil {
Usage()
return
}
argvalue1 := containerStruct1.Addrs
value1 := argvalue1
fmt.Print(client.AddIfaceAddresses(value0, value1))
fmt.Print("\n")
break
case "removeIfaceAddresses":
if flag.NArg() - 1 != 2 {
fmt.Fprintln(os.Stderr, "RemoveIfaceAddresses requires 2 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
arg42 := flag.Arg(2)
mbTrans43 := thrift.NewMemoryBufferLen(len(arg42))
defer mbTrans43.Close()
_, err44 := mbTrans43.WriteString(arg42)
if err44 != nil {
Usage()
return
}
factory45 := thrift.NewSimpleJSONProtocolFactory()
jsProt46 := factory45.GetProtocol(mbTrans43)
containerStruct1 := platform.NewSystemServiceRemoveIfaceAddressesArgs()
err47 := containerStruct1.ReadField2(jsProt46)
if err47 != nil {
Usage()
return
}
argvalue1 := containerStruct1.Addrs
value1 := argvalue1
fmt.Print(client.RemoveIfaceAddresses(value0, value1))
fmt.Print("\n")
break
case "syncIfaceAddresses":
if flag.NArg() - 1 != 4 {
fmt.Fprintln(os.Stderr, "SyncIfaceAddresses requires 4 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
tmp1, err49 := (strconv.Atoi(flag.Arg(2)))
if err49 != nil {
Usage()
return
}
argvalue1 := byte(tmp1)
value1 := argvalue1
tmp2, err50 := (strconv.Atoi(flag.Arg(3)))
if err50 != nil {
Usage()
return
}
argvalue2 := byte(tmp2)
value2 := argvalue2
arg51 := flag.Arg(4)
mbTrans52 := thrift.NewMemoryBufferLen(len(arg51))
defer mbTrans52.Close()
_, err53 := mbTrans52.WriteString(arg51)
if err53 != nil {
Usage()
return
}
factory54 := thrift.NewSimpleJSONProtocolFactory()
jsProt55 := factory54.GetProtocol(mbTrans52)
containerStruct3 := platform.NewSystemServiceSyncIfaceAddressesArgs()
err56 := containerStruct3.ReadField4(jsProt55)
if err56 != nil {
Usage()
return
}
argvalue3 := containerStruct3.Addrs
value3 := argvalue3
fmt.Print(client.SyncIfaceAddresses(value0, value1, value2, value3))
fmt.Print("\n")
break
case "getIfaceAddresses":
if flag.NArg() - 1 != 3 {
fmt.Fprintln(os.Stderr, "GetIfaceAddresses requires 3 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
tmp1, err58 := (strconv.Atoi(flag.Arg(2)))
if err58 != nil {
Usage()
return
}
argvalue1 := byte(tmp1)
value1 := argvalue1
tmp2, err59 := (strconv.Atoi(flag.Arg(3)))
if err59 != nil {
Usage()
return
}
argvalue2 := byte(tmp2)
value2 := argvalue2
fmt.Print(client.GetIfaceAddresses(value0, value1, value2))
fmt.Print("\n")
break
case "":
Usage()
break
default:
fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
}
}
main.rs | use async_ftp::FtpStream;
use lazy_static::*;
use libunftp::Server;
use unftp_sbe_gcs::CloudStorage;
use more_asserts::assert_ge;
use path_abs::PathInfo;
use pretty_assertions::assert_eq;
use slog::Drain;
use slog::*;
use std::{
io::{Cursor, Read},
path::PathBuf,
process::{Child, Command},
str,
time::Duration,
};
use tokio::{macros::support::Future, sync::Mutex};
use unftp_sbe_gcs::options::AuthMethod;
/*
FIXME: this is just MVP tests. need to add:
- deleting_directory_deletes_files_in_it() and/or deleting_directory_fails_if_contains_file()
- ...
*/
lazy_static! {
static ref DOCKER: Mutex<Child> = initialize_docker();
}
// FIXME: auto-allocate port
const ADDR: &str = "127.0.0.1:1234";
const GCS_BASE_URL: &str = "http://localhost:9081";
const GCS_BUCKET: &str = "test-bucket";
pub fn initialize_docker() -> Mutex<Child> {
let buf = std::env::current_dir().unwrap();
let current_dir = buf.display();
Command::new("docker").arg("stop").arg("fake-gcs").status().unwrap();
Command::new("docker").arg("rm").arg("fake-gcs").status().unwrap();
let mut command = Command::new("docker");
command
.arg("run")
.arg("-d")
.arg("--name")
.arg("fake-gcs")
.arg("-v")
.arg(format!("{}/tests/resources/data:/data", current_dir))
.arg("-p")
.arg("9081:9081")
.arg("fsouza/fake-gcs-server")
.arg("-scheme")
.arg("http")
.arg("-port")
.arg("9081");
println!("docker command: {:?}", command);
let result = Mutex::new(command.spawn().expect("docker failed"));
// FIXME: on linux, `docker -d` returns extremely quickly, but container startup continues in background. Replace this stupid wait with checking container status (a sort of startup probe)
std::thread::sleep(Duration::new(10, 0));
result
}
#[tokio::test(flavor = "current_thread")]
async fn newly_created_dir_is_empty() {
run_test(async {
let mut ftp_stream = FtpStream::connect(ADDR).await.unwrap();
ftp_stream.login("anonymous", "").await.unwrap();
ftp_stream.mkdir("newly_created_dir_is_empty").await.unwrap();
ftp_stream.cwd("newly_created_dir_is_empty").await.unwrap();
let list = ftp_stream.list(None).await.unwrap();
assert_eq!(list.len(), 0)
})
.await;
}
#[tokio::test(flavor = "current_thread")]
async fn creating_directory_with_file_in_it() {
run_test(async {
let mut ftp_stream = FtpStream::connect(ADDR).await.unwrap();
ftp_stream.login("anonymous", "").await.unwrap();
ftp_stream.mkdir("creating_directory_with_file_in_it").await.unwrap();
ftp_stream.cwd("creating_directory_with_file_in_it").await.unwrap();
let content = b"Hello from this test!\n";
let mut reader = Cursor::new(content);
ftp_stream.put("greeting.txt", &mut reader).await.unwrap();
let list_in = ftp_stream.list(None).await.unwrap();
assert_eq!(list_in.len(), 1);
assert!(list_in[0].ends_with(" greeting.txt"));
// FIXME: `CWD ..` does nothing in GCS ATM (TODO)
// ftp_stream.cwd("..").await.unwrap();
ftp_stream.cdup().await.unwrap();
let list_out = ftp_stream.list(None).await.unwrap();
assert_ge!(list_out.len(), 1);
assert!(list_out.iter().any(|t| t.ends_with("creating_directory_with_file_in_it")))
})
.await;
}
#[tokio::test(flavor = "current_thread")]
async fn file_sizes() {
run_test(async {
let mut ftp_stream = FtpStream::connect(ADDR).await.unwrap();
ftp_stream.login("anonymous", "").await.unwrap();
ftp_stream.mkdir("file_sizes").await.unwrap();
ftp_stream.cwd("file_sizes").await.unwrap();
ftp_stream.put("10 bytes", &mut Cursor::new(b"1234567890")).await.unwrap();
ftp_stream.put("12 bytes", &mut Cursor::new(b"123456789012")).await.unwrap();
ftp_stream.put("17 bytes", &mut Cursor::new(b"12345678901234567")).await.unwrap();
let list = ftp_stream.list(None).await.unwrap();
assert_eq!(list.len(), 3);
list.iter().for_each(|f| {
println!("{}", f);
let vec: Vec<&str> = f.split_whitespace().collect();
// "coincidentally", file name matches file size
assert_eq!(vec[3], vec[7]);
});
})
.await;
}
// FIXME: `move async` is beta in rust 1.48, hence the `impl Future`
async fn run_test(test: impl Future<Output = ()>) {
let mut child = DOCKER.lock().await;
let decorator = slog_term::TermDecorator::new().stderr().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).build().fuse();
tokio::spawn(
Server::new(Box::new(move || {
CloudStorage::with_api_base(
GCS_BASE_URL,
GCS_BUCKET,
PathBuf::from("/unftp"),
AuthMethod::ServiceAccountKey(b"unftp_test".to_vec()),
)
}))
.logger(Some(Logger::root(drain, o!())))
.listen(ADDR),
);
tokio::time::sleep(Duration::new(1, 0)).await;
test.await;
let mut stdout = String::new();
let mut stderr = String::new();
child.stdout.as_mut().map(|s| s.read_to_string(&mut stdout));
child.stderr.as_mut().map(|s| s.read_to_string(&mut stderr));
println!("stdout: {}", stdout);
println!("stderr: {}", stderr);
// FIXME: stop docker container (atm there is no mechanism in cargo test for cleanup hooks)
}
organization.rs | use async_graphql::dataloader::*;
use sqlx::postgres::PgPool;
use sqlx::types::Uuid;
use std::collections::HashMap;
use crate::prelude::*;
use crate::schema::{Organization, Viewer};
#[derive(sqlx::FromRow, Clone, Debug)]
pub struct Row {
id: Uuid,
name: String,
login: String,
default_repository_id: Uuid,
}
impl Row {
fn to_organization(&self) -> Organization {
Organization::Selected {
id: ID(self.id.to_string()),
name: self.name.to_owned(),
login: self.login.to_owned(),
default_repository_id: ID(self.default_repository_id.to_string()),
}
}
}
pub struct OrganizationLoader {
pool: PgPool,
viewer: Viewer,
}
impl OrganizationLoader {
pub fn new(viewer: Viewer, pool: PgPool) -> Self {
Self { viewer, pool }
}
}
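// Illustrative wiring sketch (assumed async-graphql setup; the exact DataLoader
// constructor signature depends on the async-graphql version in use):
//   let loader = DataLoader::new(OrganizationLoader::new(viewer, pool.clone()), tokio::spawn);
//   let org = loader.load_one("<organization uuid>".to_string()).await?;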
#[async_trait::async_trait]
impl Loader<String> for OrganizationLoader {
type Value = Organization;
type Error = Error;
async fn load(&self, ids: &[String]) -> Result<HashMap<String, Self::Value>> {
log::debug!("batch load organizations: {:?}", ids);
let rows = sqlx::query_as::<_, Row>(
r#"select
o.id,
o.name,
o.login,
r.id as default_repository_id
from organizations o
join organization_members om on o.id = om.organization_id
join repositories r on r.organization_id = o.id and r.system
where o.id = any($1::uuid[]) and om.user_id = any($2::uuid[])"#,
)
.bind(&ids)
.bind(&self.viewer.query_ids)
.fetch_all(&self.pool)
.await
.map_err(Error::from)?;
Ok(rows
.iter()
.map(|r| (r.id.to_string(), r.to_organization()))
.collect::<HashMap<_, _>>())
}
}
pub struct OrganizationByLoginLoader {
pool: PgPool,
viewer: Viewer,
}
impl OrganizationByLoginLoader {
pub fn new(viewer: Viewer, pool: PgPool) -> Self {
Self { viewer, pool }
}
}
#[async_trait::async_trait]
impl Loader<String> for OrganizationByLoginLoader {
type Value = Organization;
type Error = Error;
async fn load(&self, logins: &[String]) -> Result<HashMap<String, Self::Value>> {
log::debug!("batch load organizations by login: {:?}", logins);
let rows = sqlx::query_as::<_, Row>(
r#"select
o.id,
o.name,
o.login,
r.id as default_repository_id
from organizations o
join organization_members om on o.id = om.organization_id
join repositories r on r.organization_id = o.id and r.system
where o.login = any($1) and om.user_id = any($2::uuid[])"#,
)
.bind(&logins)
.bind(&self.viewer.query_ids)
.fetch_all(&self.pool)
.await
.map_err(Error::from)?;
Ok(rows
.iter()
.map(|r| (r.id.to_string(), r.to_organization()))
.collect::<HashMap<_, _>>())
}
}
0006_person_upload.py | # Generated by Django 2.1.1 on 2018-09-26 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AboutModel', '0005_auto_20180926_1639'),
]
operations = [
migrations.AddField(
model_name='person',
name='upload',
field=models.FileField(default='', upload_to='media/'),
),
]
tmpcli.go | package main
import (
"flag"
"log"
"os"
"github.com/odk-/cme/part2/container"
"github.com/odk-/cme/part2/registry"
"github.com/odk-/cme/part2/storage"
)
var containerName = flag.String("n", "", "name of new container [required].")
var storageRootPath = flag.String("d", "", "location of image and container files [optional].")
var imageName = flag.String("i", "", "name of image to run. Docker naming compatible [required].")
var insecureRegistry = flag.Bool("http", false, "If set registry will use http [optional].")
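// Example invocation (hypothetical image and container names), using the flags above:
//   tmpcli -n mycontainer -i library/alpine:latest -d /var/lib/cme -http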
func main() {
// parse flags and check if all required info was provided
flag.Parse()
if *containerName == "" || *imageName == "" {
flag.Usage()
os.Exit(1)
}
// initialize all packages
if *storageRootPath != "" {
storage.SetStorageRootPath(*storageRootPath)
}
registry.InsecureRegistry(*insecureRegistry)
err := storage.InitStorage()
if err != nil {
log.Println(err)
}
// run actual container (for now it will only download an image and mount it, outputting the path to the merged rootfs)
err = container.RunContainer(*imageName, *containerName)
if err != nil {
log.Println(err)
}
}
|
javatest.js | var java = require('java');
// java.classpath.push('jodconverter-2.2.2.jar');
java.classpath.push('./jodconverter-2.2.2');
//java.classpath.push("commons-io.jar");
var File = java.import('java.io.File');
var inputFile = new File('test.doc');
var outputFile = new File('out.docx');
java.import("java.util.ArrayList");
var SocketOpenOfficeConnection = java.newInstanceSync('SocketOpenOfficeConnection');
// var SocketOpenOfficeConnection = java.import('com.artofsolving.jodconverter.openoffice.connection.SocketOpenOfficeConnection');
// var connection = new SocketOpenOfficeConnection(8100);
// connection.connect();
// var OpenOfficeDocumentConverter = java.newInstanceSync('com.artofsolving.jodconverter.openoffice.converter.OpenOfficeDocumentConverter');
// var converter = new OpenOfficeDocumentConverter(connection);
// converter.convert(inputFile, outputFile);
// connection.disconnect();
soft_actor.py | from rlkit.torch.sac.policies import TanhGaussianPolicy
# from rlkit.torch.sac.sac import SoftActorCritic
from rlkit.torch.networks import FlattenMlp
import numpy as np
from .rl_algorithm import RL_algorithm
from rlkit.torch.sac.sac import SACTrainer as SoftActorCritic_rlkit
import rlkit.torch.pytorch_util as ptu
import torch
import utils
# networks = {individual:, population:}
class SoftActorCritic(RL_algorithm):
def __init__(self, config, env, replay, networks):
""" Basically a wrapper class for SAC from rlkit.
Args:
config: Configuration dictionary
env: Environment
replay: Replay buffer
networks: dict containing two sub-dicts, 'individual' and 'population'
which contain the networks.
"""
super().__init__(config, env, replay, networks)
self._variant_pop = config['rl_algorithm_config']['algo_params_pop']
self._variant_spec = config['rl_algorithm_config']['algo_params']
self._ind_qf1 = networks['individual']['qf1']
self._ind_qf2 = networks['individual']['qf2']
self._ind_qf1_target = networks['individual']['qf1_target']
self._ind_qf2_target = networks['individual']['qf2_target']
self._ind_policy = networks['individual']['policy']
self._pop_qf1 = networks['population']['qf1']
self._pop_qf2 = networks['population']['qf2']
self._pop_qf1_target = networks['population']['qf1_target']
self._pop_qf2_target = networks['population']['qf2_target']
self._pop_policy = networks['population']['policy']
self._batch_size = config['rl_algorithm_config']['batch_size']
self._nmbr_indiv_updates = config['rl_algorithm_config']['indiv_updates']
self._nmbr_pop_updates = config['rl_algorithm_config']['pop_updates']
self._algorithm_ind = SoftActorCritic_rlkit(
env=self._env,
policy=self._ind_policy,
qf1=self._ind_qf1,
qf2=self._ind_qf2,
target_qf1=self._ind_qf1_target,
target_qf2=self._ind_qf2_target,
use_automatic_entropy_tuning = False,
**self._variant_spec
)
self._algorithm_pop = SoftActorCritic_rlkit(
env=self._env,
policy=self._pop_policy,
qf1=self._pop_qf1,
qf2=self._pop_qf2,
target_qf1=self._pop_qf1_target,
target_qf2=self._pop_qf2_target,
use_automatic_entropy_tuning = False,
**self._variant_pop
)
# self._algorithm_ind.to(ptu.device)
# self._algorithm_pop.to(ptu.device)
def episode_init(self):
""" Initializations to be done before the first episode.
In this case basically creates a fresh instance of SAC for the
individual networks and copies the values of the target network.
"""
self._algorithm_ind = SoftActorCritic_rlkit(
env=self._env,
policy=self._ind_policy,
qf1=self._ind_qf1,
qf2=self._ind_qf2,
target_qf1=self._ind_qf1_target,
target_qf2=self._ind_qf2_target,
use_automatic_entropy_tuning = False,
# alt_alpha = self._alt_alpha,
**self._variant_spec
)
if self._config['rl_algorithm_config']['copy_from_gobal']:
utils.copy_pop_to_ind(networks_pop=self._networks['population'], networks_ind=self._networks['individual'])
# We have only to do this because the version of rlkit which we use
# creates internally a target network
# vf_dict = self._algorithm_pop.target_vf.state_dict()
# self._algorithm_ind.target_vf.load_state_dict(vf_dict)
# self._algorithm_ind.target_vf.eval()
# self._algorithm_ind.to(ptu.device)
def single_train_step(self, train_ind=True, train_pop=False):
""" A single training step.
Args:
train_ind: Boolean. If true the individual networks will be trained.
train_pop: Boolean. If true the population networks will be trained.
"""
if train_ind:
# Get only samples from the species buffer
self._replay.set_mode('species')
# self._algorithm_ind.num_updates_per_train_call = self._variant_spec['num_updates_per_epoch']
# self._algorithm_ind._try_to_train()
for _ in range(self._nmbr_indiv_updates):
batch = self._replay.random_batch(self._batch_size)
self._algorithm_ind.train(batch)
if train_pop:
# Get only samples from the population buffer
self._replay.set_mode('population')
# self._algorithm_pop.num_updates_per_train_call = self._variant_pop['num_updates_per_epoch']
# self._algorithm_pop._try_to_train()
for _ in range(self._nmbr_pop_updates):
batch = self._replay.random_batch(self._batch_size)
self._algorithm_pop.train(batch)
@staticmethod
def create_networks(env, config):
""" Creates all networks necessary for SAC.
These networks have to be created before instantiating this class and
used in the constructor.
Args:
config: A configuration dictionary containing population and
individual networks
Returns:
A dictionary which contains the networks.
"""
network_dict = {
'individual' : SoftActorCritic._create_networks(env=env, config=config),
'population' : SoftActorCritic._create_networks(env=env, config=config),
}
return network_dict
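# Illustrative usage sketch (hypothetical env/config/replay objects), tying the
# helpers in this class together:
#   networks = SoftActorCritic.create_networks(env, config)
#   agent = SoftActorCritic(config, env, replay, networks)
#   policy = SoftActorCritic.get_policy_network(networks['individual'])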
@staticmethod
def _create_networks(env, config):
""" Creates all networks necessary for SAC.
These networks have to be created before instantiating this class and
used in the constructor.
TODO: Maybe this should be reworked one day...
Args:
config: A configuration dictionary.
Returns:
A dictionary which contains the networks.
"""
obs_dim = int(np.prod(env.observation_space.shape))
action_dim = int(np.prod(env.action_space.shape))
net_size = config['rl_algorithm_config']['net_size']
hidden_sizes = [net_size] * config['rl_algorithm_config']['network_depth']
# hidden_sizes = [net_size, net_size, net_size]
qf1 = FlattenMlp(
hidden_sizes=hidden_sizes,
input_size=obs_dim + action_dim,
output_size=1,
).to(device=ptu.device)
qf2 = FlattenMlp(
hidden_sizes=hidden_sizes,
input_size=obs_dim + action_dim,
output_size=1,
).to(device=ptu.device)
qf1_target = FlattenMlp(
hidden_sizes=hidden_sizes,
input_size=obs_dim + action_dim,
output_size=1,
).to(device=ptu.device)
qf2_target = FlattenMlp(
hidden_sizes=hidden_sizes,
input_size=obs_dim + action_dim,
output_size=1,
).to(device=ptu.device)
policy = TanhGaussianPolicy(
hidden_sizes=hidden_sizes,
obs_dim=obs_dim,
action_dim=action_dim,
).to(device=ptu.device)
clip_value = 1.0
for p in qf1.parameters():
p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
for p in qf2.parameters():
p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
for p in policy.parameters():
p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
return {'qf1' : qf1, 'qf2' : qf2, 'qf1_target' : qf1_target, 'qf2_target' : qf2_target, 'policy' : policy}
@staticmethod
def get_q_network(networks):
""" Returns the q network from a dict of networks.
This method extracts the q-network from the dictionary of networks
created by the function create_networks.
Args:
networks: Dict containing the networks.
Returns:
The q-network as torch object.
"""
return networks['qf1']
@staticmethod
def get_policy_network(networks):
""" Returns the policy network from a dict of networks.
This method extracts the policy network from the dictionary of networks
created by the function create_networks.
Args:
networks: Dict containing the networks.
Returns:
The policy network as torch object.
"""
return networks['policy']
kao_utils.js | $traceurRuntime.ModuleStore.getAnonymousModule(function() {
"use strict";
angular.module("kao.utils", []).factory("KaoDefer", function($q) {
function KaoDefer() {
var defer = $q.defer();
defer.promise.success = function(callback) {
defer.promise.then(function() {
callback.apply(this, arguments);
});
return defer.promise;
};
defer.promise.error = function(callback) {
defer.promise.then(null, function() {
callback.apply(this, arguments);
});
return defer.promise;
};
return defer;
}
return KaoDefer;
}).factory("KaoPromise", function(KaoDefer) {
function KaoPromise(promise, resolveWith) {
var deferred = KaoDefer();
promise.success(function(data) {
var resolveData = typeof resolveWith === "function" ? resolveWith(data) : void 0;
if (!(typeof resolveData !== "undefined" && resolveData !== null)) {
resolveData = data;
}
deferred.resolve(resolveData);
}).error(function(error) {
deferred.reject(error);
});
return deferred.promise;
}
return KaoPromise;
}).directive("dynamicDirective", function($compile) {
return {
restrict: "E",
replace: true,
link: function(scope, element, attrs) {
if (attrs.directive) {
var dom = "<" + attrs.directive + ">" + "</" + attrs.directive + ">";
var el = $compile(dom)(scope);
element.append(el);
}
}
};
}).directive("transcludePlaceholder", function() {
return {
restrict: "A",
replace: true,
controller: function($transclude) {
this.$transclude = $transclude;
},
link: function(scope, element, attrs, controller) {
var attach = function(clone) {
for (var i = 0; i < clone.length; i++) {
var el = angular.element(clone[i]);
if (el.attr("fills-transclude") === attrs.transcludePlaceholder) {
element.empty();
element.append(el);
}
}
};
controller.$transclude(function(clone) {
attach(clone);
});
}
};
}).directive("kaoHeader", function() {
return {
restrict: "E",
replace: true,
transclude: true,
scope: {headerTitle: "@"},
template: "<div class=\"col-md-12 text-center\"> <div class=\"col-md-2\" transclude-placeholder=\"left\"></div> <h1 class=\"col-md-8\">{{headerTitle}}</h1> <div class=\"col-md-2\" transclude-placeholder=\"right\"></div> </div>"
};
});
return {};
});
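// Illustrative usage sketch (hypothetical consumer module), assuming the
// "kao.utils" module registered above is loaded on the page:
//   angular.module("app", ["kao.utils"]).run(function(KaoDefer) {
//     var deferred = KaoDefer();
//     deferred.promise.success(function() { /* resolved */ });
//     deferred.resolve();
//   });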