errors2.rs | // errors2.rs
// Say we're writing a game where you can buy items with tokens. All items cost
// 5 tokens, and whenever you purchase items there is a processing fee of 1
// token. A player of the game will type in how many items they want to buy,
// and the `total_cost` function will calculate the total number of tokens.
// Since the player typed in the quantity, though, we get it as a string-- and
// they might have typed anything, not just numbers!
// Right now, this function isn't handling the error case at all (and isn't
// handling the success case properly either). What we want to do is:
// if we call the `parse` function on a string that is not a number, that
// function will return a `ParseIntError`, and in that case, we want to
// immediately return that error from our function and not try to multiply
// and add.
// There are at least two ways to implement this that are both correct-- but
// one is a lot shorter! Execute `rustlings hint errors2` for hints to both ways.
use std::num::ParseIntError;
pub fn total_cost(item_quantity: &str) -> Result<i32, ParseIntError> {
let processing_fee = 1;
let cost_per_item = 5;
// match item_quantity.parse::<i32>() {
// Ok(qty) => Ok(qty * cost_per_item + processing_fee),
// Err(e) => Err(e),
// }
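// The `?` operator used below is the shorter of the two approaches: it
// unwraps an `Ok` value and returns early from `total_cost` with the
// `ParseIntError` otherwise, which is exactly what the commented-out
// `match` above spells out by hand.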
Ok(item_quantity.parse::<i32>()? * cost_per_item + processing_fee)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn item_quantity_is_a_valid_number() {
assert_eq!(total_cost("34"), Ok(171));
}
#[test]
fn item_quantity_is_an_invalid_number() {
    assert_eq!(
        total_cost("beep boop").unwrap_err().to_string(),
        "invalid digit found in string"
    );
}
}
bot.js | 'use strict';
const AWS = require('aws-sdk');
const Slack = require('slack');
/**
* Handles the HTTP request, calls the bot lambda and responds to the request with data
* @async
* @param {Object} data
* @return {Object}
*/
module.exports.run = async ( data ) =>
{
const dataObject = JSON.parse( data.body );
// The response we will return to Slack
let response = {
statusCode: 200,
body : {},
// Tell slack we don't want retries, to avoid multiple triggers of this lambda
headers : { 'X-Slack-No-Retry': 1 }
};
try {
// If the Slack retry header is present, ignore the call to avoid triggering the lambda multiple times
if ( !( 'X-Slack-Retry-Num' in data.headers ) )
{
switch ( dataObject.type )
{
case 'url_verification':
response.body = verifyCall( dataObject );
break;
case 'event_callback':
await handleMessage( dataObject.event );
response.body = { ok: true };
break;
default:
response.statusCode = 400;
response.body = 'Empty request';
break;
}
}
}
catch( err )
{
response.statusCode = 500;
response.body = JSON.stringify( err );
}
finally
{
return response;
}
}
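/*
 * Illustrative url_verification payload (shape per Slack's event docs, values
 * elided): { "token": "...", "challenge": "...", "type": "url_verification" }
 * verifyCall() below must echo the `challenge` value back to Slack.
 */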
/**
* Verifies the URL with a challenge - https://api.slack.com/events/url_verification
* @param {Object} data The event data
*/
function verifyCall( data )
{
if ( data.token === process.env.VERIFICATION_TOKEN )
{
return data.challenge;
}
else {
throw 'Verification failed';
}
}
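// Note: the string thrown here is caught by the try/catch in `run` above and
// surfaces as a 500 response.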
/**
* Processes the message and executes an action based on the message received
* @async
* @param {Object} message The Slack message object
*/
async function handleMessage( message )
{
// Ignore messages posted by bots (including this one) to avoid reply loops
if ( !message.bot_id )
{
// Gets the command from the message
let command = parseMessage( message.text );
// Executes different commands based on the specified instruction
switch ( command )
{
case 'invalidate_cdn':
const invalidationData = await invalidateDistribution();
await sendSlackMessage( message.channel,
`Sir/Madam, I've just invalidated the cache, this is the invalidation ID. *${invalidationData.Invalidation.Id}*` );
break;
default:
await sendSlackMessage( message.channel,
`Sir/Madam, I don't understand what you need. Please use \`@${process.env.BOT_NAME} invalidate_cdn\` to clear the CDN cache.` );
break;
}
}
}
/**
* Sends a message to Slack
* @param {String} channel
* @param {String} message
* @return {Promise}
*/
function sendSlackMessage( channel, message )
{
const params = {
token : process.env.BOT_TOKEN,
channel: channel,
text : message
};
return Slack.chat.postMessage( params );
}
/**
* Parses the command/intent from the text of a message received by the bot
* @param {String} message
* @return {String}
*/
function parseMessage( message )
{
return message.split( ' ', 2 ).pop();
}
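// e.g. parseMessage( '<@U123ABC> invalidate_cdn' ) === 'invalidate_cdn'
// (the mention/user ID shown is a made-up placeholder)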
/**
* Creates an invalidation in the configured CloudFront distribution and returns the invalidation ID
* @return {Promise|String}
*/
function invalidateDistribution()
{
const CloudFront = new AWS.CloudFront();
// Invalidation parameters
const params = {
DistributionId: process.env.CDN_DISTRIBUTION,
InvalidationBatch: {
CallerReference: Date.now() + '',
Paths: {
Quantity: '1',
Items: [
'/*'
]
}
}
};
return new Promise( ( resolve, reject ) =>
{
// Call the CloudFront wrapper to invalidate the CDN cache
CloudFront.createInvalidation( params, ( err, data ) =>
{
if ( err ) reject( err );
else resolve( data );
} );
} );
}
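// Design note: aws-sdk v2 request objects also expose a `.promise()` method,
// so this manual Promise wrapper could equivalently be written as
// `CloudFront.createInvalidation( params ).promise()`.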
protoc-gen-rust.rs | #![crate_type = "bin"]
extern crate protobuf;
use protobuf::codegen;
fn main() {
codegen::protoc_gen_rust_main();
}
test_domains.py | import pytest
from dolib.client import AsyncClient, Client
from dolib.models import Domain
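# Note: the `vcr` and `block_network` markers below (likely provided by the
# pytest-recording plugin) replay pre-recorded HTTP cassettes and block real
# network access, so these CRUD tests never hit the live DigitalOcean API.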
@pytest.mark.vcr
@pytest.mark.block_network()
def test_crud_domains(client: Client) -> None:
    domain = Domain(name="test.dolib.io")
    # create domain
    created_domain = client.domains.create(domain=domain)
    assert created_domain.name == "test.dolib.io"
    # read domain
    read_domain = client.domains.get(name=domain.name)
    assert read_domain.name == domain.name
    assert read_domain.ttl > 0
    assert len(read_domain.zone_file) > 0
    # list domains
    domains = client.domains.all()
    assert len(domains) > 0
    # create domain record
    record = Domain.Record(type="A", name="@", data="8.8.8.8")
    record = client.domains.create_record(name=domain.name, record=record)
    assert record.id > 0
    assert record.ttl == 1800
    # update domain record
    record.name = "test"
    record.ttl = 60
    record = client.domains.update_record(name=domain.name, record=record)
    assert record.ttl == 60
    assert record.name == "test"
    # read domain records
    records = client.domains.records(name=domain.name)
    len_records = len(records)
    assert len_records > 0
    filtered_records = client.domains.records(
        name=domain.name, record_name="test.test.dolib.io", record_type="A"
    )
    assert len(filtered_records) == 1
    # delete domain record
    client.domains.delete_record(name=domain.name, record=record)
    # delete domain
    client.domains.delete(domain=created_domain)
@pytest.mark.vcr
@pytest.mark.block_network()
@pytest.mark.asyncio
async def test_async_crud_domains(async_client: AsyncClient) -> None:
    domain = Domain(name="test.dolib.io")
    # create domain
    created_domain = await async_client.domains.create(domain=domain)
    assert created_domain.name == "test.dolib.io"
    # read domain
    read_domain = await async_client.domains.get(name=domain.name)
    assert read_domain.name == domain.name
    assert read_domain.ttl > 0
    assert len(read_domain.zone_file) > 0
    # list domains
    domains = await async_client.domains.all()
    assert len(domains) > 0
    # create domain record
    record = Domain.Record(type="A", name="@", data="8.8.8.8")
    record = await async_client.domains.create_record(name=domain.name, record=record)
    assert record.id > 0
    assert record.ttl == 1800
    # update domain record
    record.name = "test"
    record.ttl = 60
    record = await async_client.domains.update_record(name=domain.name, record=record)
    assert record.ttl == 60
    assert record.name == "test"
    # read domain records
    records = await async_client.domains.records(name=domain.name)
    len_records = len(records)
    assert len_records > 0
    filtered_records = await async_client.domains.records(
        name=domain.name, record_name="test.test.dolib.io", record_type="A"
    )
    assert len(filtered_records) == 1
    # delete domain record
    await async_client.domains.delete_record(name=domain.name, record=record)
    # delete domain
    await async_client.domains.delete(domain=created_domain)
craft_new.rs | use std::{env, fs};
use std::path::Path;
use std::collections::BTreeMap;
use rustc_serialize::{Decodable, Decoder};
use git2::Config as GitConfig;
use util::{GitRepo, HgRepo, CraftResult, human, ChainError, internal, Config, paths};
use workspace::Workspace;
use toml;
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum VersionControl {
Git,
Hg,
NoVcs,
}
pub struct NewOptions<'a> {
pub version_control: Option<VersionControl>,
pub bin: bool,
pub lib: bool,
pub path: &'a str,
pub name: Option<&'a str>,
}
struct SourceFileInformation {
relative_path: String,
target_name: String,
bin: bool,
}
struct MkOptions<'a> {
version_control: Option<VersionControl>,
path: &'a Path,
name: &'a str,
source_files: Vec<SourceFileInformation>,
bin: bool,
}
impl Decodable for VersionControl {
fn decode<D: Decoder>(d: &mut D) -> Result<VersionControl, D::Error> {
Ok(match &d.read_str()?[..] {
"git" => VersionControl::Git,
"hg" => VersionControl::Hg,
"none" => VersionControl::NoVcs,
n => {
let err = format!("could not decode '{}' as version control", n);
return Err(d.error(&err));
}
})
}
}
impl<'a> NewOptions<'a> {
pub fn new(version_control: Option<VersionControl>,
bin: bool,
lib: bool,
path: &'a str,
name: Option<&'a str>)
-> NewOptions<'a> {
// default to lib
let is_lib = if !bin { true } else { lib };
NewOptions {
version_control: version_control,
bin: bin,
lib: is_lib,
path: path,
name: name,
}
}
}
struct CraftNewConfig {
name: Option<String>,
email: Option<String>,
version_control: Option<VersionControl>,
}
fn get_name<'a>(path: &'a Path, opts: &'a NewOptions) -> CraftResult<&'a str> {
if let Some(name) = opts.name {
return Ok(name);
}
if path.file_name().is_none() {
bail!("cannot auto-detect project name from path {:?} ; use --name to override",
path.as_os_str());
}
let dir_name = path.file_name()
.and_then(|s| s.to_str())
.chain_error(|| {
human(&format!("cannot create a project with a non-unicode name: {:?}",
path.file_name().unwrap()))
})?;
Ok(dir_name)
}
fn check_name(name: &str) -> CraftResult<()> {
// Ban keywords
let blacklist = ["chest", "test", "true", "auto", "break", "case", "char", "const", "continue", "default", "do",
"double", "else", "enum", "extern", "float", "for", "goto", "if", "int", "long", "register",
"return", "short", "signed", "sizeof", "static", "struct", "switch", "typedef", "union",
"unsigned", "void", "volatile", "while"];
if blacklist.contains(&name) {
bail!("The name `{}` cannot be used as a chest name\n\
use --name to override chest name",
name)
}
for c in name.chars() {
if c.is_alphanumeric() {
continue;
}
if c == '_' || c == '-' {
continue;
}
bail!("Invalid character `{}` in chest name: `{}`\n\
use --name to override chest name",
c,
name)
}
Ok(())
}
fn detect_source_paths_and_types(project_path: &Path,
project_name: &str,
detected_files: &mut Vec<SourceFileInformation>)
-> CraftResult<()> {
let path = project_path;
let name = project_name;
enum H {
Bin,
Lib,
Detect,
}
struct Test {
proposed_path: String,
handling: H,
}
let tests = vec![
Test { proposed_path: format!("src/main.c"), handling: H::Bin },
Test { proposed_path: format!("main.c"), handling: H::Bin },
Test { proposed_path: format!("src/{}.c", name), handling: H::Detect },
Test { proposed_path: format!("{}.c", name), handling: H::Detect },
Test { proposed_path: format!("src/lib.c"), handling: H::Lib },
Test { proposed_path: format!("lib.c"), handling: H::Lib },
];
for i in tests {
let pp = i.proposed_path;
// path/pp does not exist or is not a file
if !fs::metadata(&path.join(&pp)).map(|x| x.is_file()).unwrap_or(false) {
continue;
}
let sfi = match i.handling {
H::Bin => {
SourceFileInformation {
relative_path: pp,
target_name: project_name.to_string(),
bin: true,
}
}
H::Lib => {
SourceFileInformation {
relative_path: pp,
target_name: project_name.to_string(),
bin: false,
}
}
H::Detect => {
let content = paths::read(&path.join(pp.clone()))?;
let isbin = content.contains("int main");
SourceFileInformation {
relative_path: pp,
target_name: project_name.to_string(),
bin: isbin,
}
}
};
detected_files.push(sfi);
}
// Check for duplicate lib attempt
let mut previous_lib_relpath: Option<&str> = None;
let mut duplicates_checker: BTreeMap<&str, &SourceFileInformation> = BTreeMap::new();
for i in detected_files {
if i.bin {
if let Some(x) = BTreeMap::get::<str>(&duplicates_checker, i.target_name.as_ref()) {
bail!("\
multiple possible binary sources found:
{}
{}
cannot automatically generate Craft.toml as the main target would be ambiguous",
&x.relative_path,
&i.relative_path);
}
duplicates_checker.insert(i.target_name.as_ref(), i);
} else {
if let Some(plp) = previous_lib_relpath {
return Err(human(format!("cannot have a project with multiple libraries, found both `{}` and `{}`",
plp,
i.relative_path)));
}
previous_lib_relpath = Some(&i.relative_path);
}
}
Ok(())
}
fn plan_new_source_file(bin: bool, project_name: String) -> SourceFileInformation {
if bin {
SourceFileInformation {
relative_path: "src/main.c".to_string(),
target_name: project_name,
bin: true,
}
} else {
SourceFileInformation {
relative_path: "src/lib.c".to_string(),
target_name: project_name,
bin: false,
}
}
}
pub fn new(opts: NewOptions, config: &Config) -> CraftResult<()> {
let path = config.cwd().join(opts.path);
if fs::metadata(&path).is_ok() {
bail!("destination `{}` already exists", path.display())
}
if opts.lib && opts.bin {
bail!("can't specify both lib and binary outputs");
}
let name = get_name(&path, &opts)?;
check_name(name)?;
let mkopts = MkOptions {
version_control: opts.version_control,
path: &path,
name: name,
source_files: vec![plan_new_source_file(opts.bin, name.to_string())],
bin: opts.bin,
};
mk(config, &mkopts).chain_error(|| {
human(format!("Failed to create project `{}` at `{}`",
name,
path.display()))
})
}
pub fn init(opts: NewOptions, config: &Config) -> CraftResult<()> {
let path = config.cwd().join(opts.path);
let crafttoml_path = path.join("Craft.toml");
if fs::metadata(&crafttoml_path).is_ok() {
bail!("`craft init` cannot be run on existing Craft projects")
}
if opts.lib && opts.bin {
bail!("can't specify both lib and binary outputs");
}
let name = get_name(&path, &opts)?;
check_name(name)?;
let mut src_paths_types = vec![];
detect_source_paths_and_types(&path, name, &mut src_paths_types)?;
if src_paths_types.len() == 0 {
src_paths_types.push(plan_new_source_file(opts.bin, name.to_string()));
} else {
// --bin option may be ignored if lib.c or src/lib.c present
// Maybe when doing `craft init --bin` inside a library project stub,
// user may mean "initialize for library, but also add binary target"
}
let mut version_control = opts.version_control;
if version_control == None {
let mut num_detected_vsces = 0;
if fs::metadata(&path.join(".git")).is_ok() {
version_control = Some(VersionControl::Git);
num_detected_vsces += 1;
}
if fs::metadata(&path.join(".hg")).is_ok() {
version_control = Some(VersionControl::Hg);
num_detected_vsces += 1;
}
// if none exists, maybe create git, like in `craft new`
if num_detected_vsces > 1 {
bail!("both .git and .hg directories found and the ignore file can't be filled in as a result, specify \
--vcs to override detection");
}
}
let mkopts = MkOptions {
version_control: version_control,
path: &path,
name: name,
bin: src_paths_types.iter().any(|x| x.bin),
source_files: src_paths_types,
};
mk(config, &mkopts).chain_error(|| {
human(format!("Failed to create project `{}` at `{}`",
name,
path.display()))
})
}
fn existing_vcs_repo(path: &Path, cwd: &Path) -> bool {
GitRepo::discover(path, cwd).is_ok() || HgRepo::discover(path, cwd).is_ok()
}
fn mk(config: &Config, opts: &MkOptions) -> CraftResult<()> {
let path = opts.path;
let name = opts.name;
let cfg = global_config(config)?;
let mut ignore = "target\n".to_string();
let in_existing_vcs_repo = existing_vcs_repo(path.parent().unwrap(), config.cwd());
if !opts.bin {
ignore.push_str("Craft.lock\n");
}
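// Precedence for the chosen VCS: an explicit option always wins; otherwise
// being inside an existing repository suppresses creating a new one, then the
// global config applies, and git is the final default.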
let vcs = match (opts.version_control, cfg.version_control, in_existing_vcs_repo) {
(None, None, false) => VersionControl::Git,
(None, Some(option), false) => option,
(Some(option), _, _) => option,
(_, _, true) => VersionControl::NoVcs,
};
match vcs {
VersionControl::Git => {
if !fs::metadata(&path.join(".git")).is_ok() {
GitRepo::init(path, config.cwd())?;
}
paths::append(&path.join(".gitignore"), ignore.as_bytes())?;
}
VersionControl::Hg => {
if !fs::metadata(&path.join(".hg")).is_ok() {
HgRepo::init(path, config.cwd())?;
}
paths::append(&path.join(".hgignore"), ignore.as_bytes())?;
}
VersionControl::NoVcs => {
fs::create_dir_all(path)?;
}
};
let (author_name, email) = discover_author()?;
// Hoo boy, sure glad we've got exhaustiveness checking behind us.
let author = match (cfg.name, cfg.email, author_name, email) {
(Some(name), Some(email), _, _) |
(Some(name), None, _, Some(email)) |
(None, Some(email), name, _) |
(None, None, name, Some(email)) => format!("{} <{}>", name, email),
(Some(name), None, _, None) |
(None, None, name, None) => name,
};
let mut crafttoml_path_specifier = String::new();
// Calculate which [lib] and [[bin]] sections we need to append to Craft.toml
for i in &opts.source_files {
if i.bin {
if i.relative_path != "src/main.c" {
crafttoml_path_specifier.push_str(&format!(r#"
[[bin]]
name = "{}"
path = {}
"#,
i.target_name,
toml::Value::String(i.relative_path.clone())));
}
} else if i.relative_path != "src/lib.c" {
crafttoml_path_specifier.push_str(&format!(r#"
[lib]
name = "{}"
path = {}
"#,
i.target_name,
toml::Value::String(i.relative_path.clone())));
}
}
// Create Craft.toml file with necessary [lib] and [[bin]] sections, if needed
paths::write(&path.join("Craft.toml"),
format!(r#"[package]
name = "{}"
version = "0.1.0"
authors = [{}]
[dependencies]
{}"#,
name,
toml::Value::String(author),
crafttoml_path_specifier)
.as_bytes())?;
// Create all specified source files
// (with respective parent directories)
// if they don't exist
for i in &opts.source_files {
let path_of_source_file = path.join(i.relative_path.clone());
if let Some(src_dir) = path_of_source_file.parent() {
fs::create_dir_all(src_dir)?;
}
let default_file_content: &[u8] = if i.bin {
b"\
#include <stdio.h>
int main(void) {
printf(\"Hello, world!\");
}
"
} else {
b""
};
if !fs::metadata(&path_of_source_file).map(|x| x.is_file()).unwrap_or(false) {
paths::write(&path_of_source_file, default_file_content)?;
}
}
if let Err(e) = Workspace::new(&path.join("Craft.toml"), config) {
let msg = format!("compiling this new chest may not work due to invalid workspace configuration\n\n{}",
e);
config.shell().warn(msg)?;
}
Ok(())
}
fn get_environment_variable(variables: &[&str]) -> Option<String> {
variables.iter()
.filter_map(|var| env::var(var).ok())
.next()
}
fn discover_author() -> CraftResult<(String, Option<String>)> {
let git_config = GitConfig::open_default().ok();
let git_config = git_config.as_ref();
let name_variables = ["CRAFT_NAME", "GIT_AUTHOR_NAME", "GIT_COMMITTER_NAME", "USER", "USERNAME", "NAME"];
let name = get_environment_variable(&name_variables[0..3])
.or_else(|| git_config.and_then(|g| g.get_string("user.name").ok()))
.or_else(|| get_environment_variable(&name_variables[3..]));
let name = match name {
Some(name) => name,
None => {
let username_var = if cfg!(windows) { "USERNAME" } else { "USER" };
bail!("could not determine the current user, please set ${}",
username_var)
}
};
let email_variables = ["CRAFT_EMAIL", "GIT_AUTHOR_EMAIL", "GIT_COMMITTER_EMAIL", "EMAIL"];
let email = get_environment_variable(&email_variables[0..3])
.or_else(|| git_config.and_then(|g| g.get_string("user.email").ok()))
.or_else(|| get_environment_variable(&email_variables[3..]));
let name = name.trim().to_string();
let email = email.map(|s| s.trim().to_string());
Ok((name, email))
}
fn global_config(config: &Config) -> CraftResult<CraftNewConfig> {
let name = config.get_string("craft-new.name")?.map(|s| s.val);
let email = config.get_string("craft-new.email")?.map(|s| s.val);
let vcs = config.get_string("craft-new.vcs")?;
let vcs = match vcs.as_ref().map(|p| (&p.val[..], &p.definition)) {
Some(("git", _)) => Some(VersionControl::Git),
Some(("hg", _)) => Some(VersionControl::Hg),
Some(("none", _)) => Some(VersionControl::NoVcs),
Some((s, p)) => {
return Err(internal(format!("invalid configuration for key `craft-new.vcs`, unknown vcs `{}` (found in \
{})",
s,
p)))
}
None => None,
};
Ok(CraftNewConfig {
name: name,
email: email,
version_control: vcs,
})
components_skeleton_index.md.653a9b7a.lean.js | var J=Object.defineProperty;var b=Object.getOwnPropertySymbols;var K=Object.prototype.hasOwnProperty,M=Object.prototype.propertyIsEnumerable;var w=(c,a,s)=>a in c?J(c,a,{enumerable:!0,configurable:!0,writable:!0,value:s}):c[a]=s,A=(c,a)=>{for(var s in a||(a={}))K.call(a,s)&&w(c,s,a[s]);if(b)for(var s of b(a))M.call(a,s)&&w(c,s,a[s]);return c};import{_ as O,V as x,r as C,c as Q,a as v,w as m,b as D,d as t,e as n,o as R}from"./app.e2d85a78.js";const W={name:"component-doc",components:{"render-demo-0":function(){const{resolveComponent:c,createVNode:a,openBlock:s,createElementBlock:o}=x;function g(h,i){const r=c("d-skeleton");return s(),o("div",null,[a(r,{row:3})])}return A({render:g},{})}(),"render-demo-1":function(){const{resolveComponent:c,createVNode:a,createTextVNode:s,createElementVNode:o,withCtx:g,openBlock:F,createElementBlock:h}=x,i={class:"skeleton-btn-groups"},r={class:"skeleton-btn"},y=s(" \u5C55\u793A\u9AA8\u67B6\u5C4F\uFF1A "),B={class:"skeleton-btn"},E=s(" \u52A8\u753B\uFF1A "),f={class:"skeleton-btn"},k=s(" \u663E\u793A\u5934\u50CF\uFF1A "),q={class:"skeleton-btn"},V=s(" \u663E\u793A\u6807\u9898\uFF1A "),N={class:"skeleton-btn"},P=s(" \u663E\u793A\u6BB5\u843D\uFF1A "),U={class:"skeleton-btn"},T=s(" \u5934\u50CF\u5706\u89D2\uFF1A "),j={class:"skeleton-btn"},$=s(" \u6BB5\u843D\u548C\u6807\u9898\u5706\u89D2\uFF1A "),S=o("div",null,[o("div",null,"row one"),o("div",null,"row two"),o("div",null,"row three"),o("div",null,"row four")],-1);function z(e,u){const p=c("d-switch"),_=c("d-skeleton");return F(),h("div",null,[o("div",i,[o("div",r,[y,a(p,{checked:e.loading,"onUpdate:checked":u[0]||(u[0]=l=>e.loading=l)},null,8,["checked"])]),o("div",B,[E,a(p,{checked:e.animate,"onUpdate:checked":u[1]||(u[1]=l=>e.animate=l)},null,8,["checked"])]),o("div",f,[k,a(p,{checked:e.avatar,"onUpdate:checked":u[2]||(u[2]=l=>e.avatar=l)},null,8,["checked"])]),o("div",q,[V,a(p,{checked:e.title,"onUpdate:checked":u[3]||(u[3]=l=>e.title=l)},null,8,["checked"])]),o("div",N,[P,a(p,{checked:e.paragraph,"onUpdate:checked":u[4]||(u[4]=l=>e.paragraph=l)},null,8,["checked"])]),o("div",U,[T,a(p,{checked:e.roundAvatar,"onUpdate:checked":u[5]||(u[5]=l=>e.roundAvatar=l)},null,8,["checked"])]),o("div",j,[$,a(p,{checked:e.round,"onUpdate:checked":u[6]||(u[6]=l=>e.round=l)},null,8,["checked"])])]),a(_,{row:3,animate:e.animate,avatar:e.avatar,"avatar-shape":e.roundAvatar?"":"square",title:e.title,paragraph:e.paragraph,loading:e.loading,round:e.round},{default:g(()=>[S]),_:1},8,["animate","avatar","avatar-shape","title","paragraph","loading","round"])])}const{defineComponent:I,ref:d}=x,L=I({setup(){const e=d(!0),u=d(!0),p=d(!0),_=d(!0),l=d(!0),G=d(!0),H=d(!1);return{loading:e,animate:u,avatar:p,title:_,paragraph:l,roundAvatar:G,round:H}}});return A({render:z},L)}(),"render-demo-2":function(){const{resolveComponent:c,createVNode:a,createTextVNode:s,openBlock:o,createElementBlock:g}=x,F=s(" \xA0 "),h=s(" \xA0 "),i=s(" \xA0 "),r=s(" \xA0 ");function y(E,f){const k=c("d-skeleton-item");return o(),g("div",null,[a(k,{shape:"avatar",style:{"margin-left":"55px",width:"80px",height:"80px"}}),F,a(k,{shape:"image"}),h,a(k,{shape:"title"}),i,a(k,{shape:"paragraph",row:3,"row-width":["75%","50%"]}),r,a(k,{shape:"button"})])}return A({render:y},{})}()}},lt='{"title":"Skeleton \u9AA8\u67B6\u5C4F","description":"","frontmatter":{},"headers":[{"level":3,"title":"\u4F55\u65F6\u4F7F\u7528","slug":"\u4F55\u65F6\u4F7F\u7528"},{"level":3,"title":"\u57FA\u672C\u7528\u6CD5","slug":"\u57FA\u672C\u7528\u6CD5"},{"level":3,"title":"\u590D\u6742\u7EC4\u5408","slug":"\u590D\u6742\u7EC4\u5408"},{"level":3,"title":"\u7EC6\u7C92\u5EA6\u6A21\u5F0F","slug":"\u7EC6\u7C92\u5EA6\u6A21\u5F0F"},{"level":3,"title":"d-skeleton Props","slug":"d-skeleton-props"},{"level":3,"title":"d-skeleton__avatar Props","slug":"d-skeleton-avatar-props"},{"level":3,"title":"d-skeleton__title Props","slug":"d-skeleton-title-props"},{"level":3,"title":"d-skeleton__paragraph Props","slug":"d-skeleton-paragraph-props"},{"level":3,"title":"d-skeleton-item Props","slug":"d-skeleton-item-props"},{"level":3,"title":"d-skeleton-item__avatar Props","slug":"d-skeleton-item-avatar-props"}],"relativePath":"components/skeleton/index.md","lastUpdated":1639287489668}',X=D('<h1 id="skeleton-\u9AA8\u67B6\u5C4F" tabindex="-1">Skeleton \u9AA8\u67B6\u5C4F <a class="header-anchor" href="#skeleton-\u9AA8\u67B6\u5C4F" aria-hidden="true">#</a></h1><p>\u7528\u4E8E\u5728\u5185\u5BB9\u52A0\u8F7D\u8FC7\u7A0B\u4E2D\u5C55\u793A\u4E00\u7EC4\u5360\u4F4D\u56FE\u5F62\u3002</p><h3 id="\u4F55\u65F6\u4F7F\u7528" tabindex="-1">\u4F55\u65F6\u4F7F\u7528 <a class="header-anchor" href="#\u4F55\u65F6\u4F7F\u7528" aria-hidden="true">#</a></h3><p>\u5728\u9700\u8981\u7B49\u5F85\u52A0\u8F7D\u5185\u5BB9\u7684\u4F4D\u7F6E\u8BBE\u7F6E\u4E00\u4E2A\u9AA8\u67B6\u5C4F\uFF0C\u67D0\u4E9B\u573A\u666F\u4E0B\u6BD4 Loading \u7684\u89C6\u89C9\u6548\u679C\u66F4\u597D\u3002</p><h3 id="\u57FA\u672C\u7528\u6CD5" tabindex="-1">\u57FA\u672C\u7528\u6CD5 <a class="header-anchor" href="#\u57FA\u672C\u7528\u6CD5" aria-hidden="true">#</a></h3><p>\u6700\u57FA\u672C\u7684\u5360\u4F4D\u6548\u679C\u3002</p>',6),Y=t("div",{class:"language-vue"},[t("pre",null,[t("code",null,[t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("template")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-skeleton")]),n(),t("span",{class:"token attr-name"},":row"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("3"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("template")]),t("span",{class:"token punctuation"},">")]),n(`
`)])])],-1),Z=t("h3",{id:"\u590D\u6742\u7EC4\u5408",tabindex:"-1"},[n("\u590D\u6742\u7EC4\u5408 "),t("a",{class:"header-anchor",href:"#\u590D\u6742\u7EC4\u5408","aria-hidden":"true"},"#")],-1),tt=t("div",{class:"language-vue"},[t("pre",null,[t("code",null,[t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("template")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),n(),t("span",{class:"token attr-name"},"class"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("skeleton-btn-groups"),t("span",{class:"token punctuation"},'"')]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),n(),t("span",{class:"token attr-name"},"class"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("skeleton-btn"),t("span",{class:"token punctuation"},'"')]),t("span",{class:"token punctuation"},">")]),n(`
\u5C55\u793A\u9AA8\u67B6\u5C4F\uFF1A
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-switch")]),n(),t("span",{class:"token attr-name"},[t("span",{class:"token namespace"},"v-model:"),n("checked")]),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("loading"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),n(),t("span",{class:"token attr-name"},"class"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("skeleton-btn"),t("span",{class:"token punctuation"},'"')]),t("span",{class:"token punctuation"},">")]),n(`
\u52A8\u753B\uFF1A
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-switch")]),n(),t("span",{class:"token attr-name"},[t("span",{class:"token namespace"},"v-model:"),n("checked")]),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("animate"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),n(),t("span",{class:"token attr-name"},"class"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("skeleton-btn"),t("span",{class:"token punctuation"},'"')]),t("span",{class:"token punctuation"},">")]),n(`
\u663E\u793A\u5934\u50CF\uFF1A
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-switch")]),n(),t("span",{class:"token attr-name"},[t("span",{class:"token namespace"},"v-model:"),n("checked")]),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("avatar"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),n(),t("span",{class:"token attr-name"},"class"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("skeleton-btn"),t("span",{class:"token punctuation"},'"')]),t("span",{class:"token punctuation"},">")]),n(`
\u663E\u793A\u6807\u9898\uFF1A
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-switch")]),n(),t("span",{class:"token attr-name"},[t("span",{class:"token namespace"},"v-model:"),n("checked")]),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("title"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),n(),t("span",{class:"token attr-name"},"class"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("skeleton-btn"),t("span",{class:"token punctuation"},'"')]),t("span",{class:"token punctuation"},">")]),n(`
\u663E\u793A\u6BB5\u843D\uFF1A
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-switch")]),n(),t("span",{class:"token attr-name"},[t("span",{class:"token namespace"},"v-model:"),n("checked")]),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("paragraph"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),n(),t("span",{class:"token attr-name"},"class"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("skeleton-btn"),t("span",{class:"token punctuation"},'"')]),t("span",{class:"token punctuation"},">")]),n(`
\u5934\u50CF\u5706\u89D2\uFF1A
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-switch")]),n(),t("span",{class:"token attr-name"},[t("span",{class:"token namespace"},"v-model:"),n("checked")]),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("roundAvatar"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),n(),t("span",{class:"token attr-name"},"class"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("skeleton-btn"),t("span",{class:"token punctuation"},'"')]),t("span",{class:"token punctuation"},">")]),n(`
\u6BB5\u843D\u548C\u6807\u9898\u5706\u89D2\uFF1A
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-switch")]),n(),t("span",{class:"token attr-name"},[t("span",{class:"token namespace"},"v-model:"),n("checked")]),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("round"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-skeleton")]),n(),t("span",{class:"token attr-name"},":row"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("3"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token attr-name"},":animate"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("animate"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token attr-name"},":avatar"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("avatar"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token attr-name"},":avatar-shape"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("roundAvatar?"),t("span",{class:"token punctuation"},"'"),t("span",{class:"token punctuation"},"'"),n(":"),t("span",{class:"token punctuation"},"'"),n("square"),t("span",{class:"token punctuation"},"'"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token attr-name"},":title"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("title"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token attr-name"},":paragraph"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("paragraph"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token attr-name"},":loading"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("loading"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token attr-name"},":round"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("round"),t("span",{class:"token punctuation"},'"')]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),t("span",{class:"token punctuation"},">")]),n("row one"),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),t("span",{class:"token punctuation"},">")]),n("row two"),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),t("span",{class:"token punctuation"},">")]),n("row three"),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("div")]),t("span",{class:"token punctuation"},">")]),n("row four"),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("div")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("d-skeleton")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("template")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("script")]),t("span",{class:"token punctuation"},">")]),t("span",{class:"token script"},[t("span",{class:"token language-javascript"},[n(`
`),t("span",{class:"token keyword"},"import"),n(),t("span",{class:"token punctuation"},"{"),n(" defineComponent"),t("span",{class:"token punctuation"},","),n(" ref "),t("span",{class:"token punctuation"},"}"),n(),t("span",{class:"token keyword"},"from"),n(),t("span",{class:"token string"},"'vue'"),n(`
`),t("span",{class:"token keyword"},"export"),n(),t("span",{class:"token keyword"},"default"),n(),t("span",{class:"token function"},"defineComponent"),t("span",{class:"token punctuation"},"("),t("span",{class:"token punctuation"},"{"),n(`
`),t("span",{class:"token function"},"setup"),n(),t("span",{class:"token punctuation"},"("),t("span",{class:"token punctuation"},")"),n(),t("span",{class:"token punctuation"},"{"),n(`
`),t("span",{class:"token keyword"},"const"),n(" loading "),t("span",{class:"token operator"},"="),n(),t("span",{class:"token function"},"ref"),t("span",{class:"token punctuation"},"("),t("span",{class:"token boolean"},"true"),t("span",{class:"token punctuation"},")"),n(`
`),t("span",{class:"token keyword"},"const"),n(" animate "),t("span",{class:"token operator"},"="),n(),t("span",{class:"token function"},"ref"),t("span",{class:"token punctuation"},"("),t("span",{class:"token boolean"},"true"),t("span",{class:"token punctuation"},")"),n(`
`),t("span",{class:"token keyword"},"const"),n(" avatar "),t("span",{class:"token operator"},"="),n(),t("span",{class:"token function"},"ref"),t("span",{class:"token punctuation"},"("),t("span",{class:"token boolean"},"true"),t("span",{class:"token punctuation"},")"),n(`
`),t("span",{class:"token keyword"},"const"),n(" title "),t("span",{class:"token operator"},"="),n(),t("span",{class:"token function"},"ref"),t("span",{class:"token punctuation"},"("),t("span",{class:"token boolean"},"true"),t("span",{class:"token punctuation"},")"),n(`
`),t("span",{class:"token keyword"},"const"),n(" paragraph "),t("span",{class:"token operator"},"="),n(),t("span",{class:"token function"},"ref"),t("span",{class:"token punctuation"},"("),t("span",{class:"token boolean"},"true"),t("span",{class:"token punctuation"},")"),n(`
`),t("span",{class:"token keyword"},"const"),n(" roundAvatar "),t("span",{class:"token operator"},"="),n(),t("span",{class:"token function"},"ref"),t("span",{class:"token punctuation"},"("),t("span",{class:"token boolean"},"true"),t("span",{class:"token punctuation"},")"),n(`
`),t("span",{class:"token keyword"},"const"),n(" round "),t("span",{class:"token operator"},"="),n(),t("span",{class:"token function"},"ref"),t("span",{class:"token punctuation"},"("),t("span",{class:"token boolean"},"false"),t("span",{class:"token punctuation"},")"),n(`
`),t("span",{class:"token keyword"},"return"),n(),t("span",{class:"token punctuation"},"{"),n(`
loading`),t("span",{class:"token punctuation"},","),n(`
animate`),t("span",{class:"token punctuation"},","),n(`
avatar`),t("span",{class:"token punctuation"},","),n(`
title`),t("span",{class:"token punctuation"},","),n(`
paragraph`),t("span",{class:"token punctuation"},","),n(`
roundAvatar`),t("span",{class:"token punctuation"},","),n(`
round
`),t("span",{class:"token punctuation"},"}"),n(`
`),t("span",{class:"token punctuation"},"}"),n(`
`),t("span",{class:"token punctuation"},"}"),t("span",{class:"token punctuation"},")"),n(`
`)])]),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("script")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("style")]),t("span",{class:"token punctuation"},">")]),t("span",{class:"token style"},[t("span",{class:"token language-css"},[n(`
`),t("span",{class:"token selector"},".skeleton-btn-groups"),t("span",{class:"token punctuation"},"{"),n(`
`),t("span",{class:"token property"},"display"),t("span",{class:"token punctuation"},":"),n(" flex"),t("span",{class:"token punctuation"},";"),n(`
`),t("span",{class:"token property"},"margin-bottom"),t("span",{class:"token punctuation"},":"),n(" 1rem"),t("span",{class:"token punctuation"},";"),n(`
`),t("span",{class:"token punctuation"},"}"),n(`
`),t("span",{class:"token selector"},".skeleton-btn"),t("span",{class:"token punctuation"},"{"),n(`
`),t("span",{class:"token property"},"display"),t("span",{class:"token punctuation"},":"),n(" flex"),t("span",{class:"token punctuation"},";"),n(`
`),t("span",{class:"token property"},"flex-direction"),t("span",{class:"token punctuation"},":"),n(" column"),t("span",{class:"token punctuation"},";"),n(`
`),t("span",{class:"token property"},"justify-content"),t("span",{class:"token punctuation"},":"),n(" space-between"),t("span",{class:"token punctuation"},";"),n(`
`),t("span",{class:"token punctuation"},"}"),n(`
`)])]),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("style")]),t("span",{class:"token punctuation"},">")]),n(`
`)])])],-1),nt=t("h3",{id:"\u7EC6\u7C92\u5EA6\u6A21\u5F0F",tabindex:"-1"},[n("\u7EC6\u7C92\u5EA6\u6A21\u5F0F "),t("a",{class:"header-anchor",href:"#\u7EC6\u7C92\u5EA6\u6A21\u5F0F","aria-hidden":"true"},"#")],-1),at=t("p",null,"\u63D0\u4F9B\u7EC6\u7C92\u5EA6\u7684\u9AA8\u67B6\u5C4F\u5143\u7D20\uFF0C\u7ED9\u4E88\u5F00\u53D1\u8005\u66F4\u7075\u6D3B\u7684\u5B9A\u5236\u80FD\u529B\u3002",-1),st=t("div",{class:"language-vue"},[t("pre",null,[t("code",null,[t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("template")]),t("span",{class:"token punctuation"},">")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-skeleton-item")]),n(),t("span",{class:"token attr-name"},":shape"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),t("span",{class:"token punctuation"},"'"),n("avatar"),t("span",{class:"token punctuation"},"'"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token special-attr"},[t("span",{class:"token attr-name"},"style"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),t("span",{class:"token value css language-css"},[t("span",{class:"token property"},"margin-left"),t("span",{class:"token punctuation"},":"),n("55px"),t("span",{class:"token punctuation"},";"),t("span",{class:"token property"},"width"),t("span",{class:"token punctuation"},":"),n("80px"),t("span",{class:"token punctuation"},";"),t("span",{class:"token property"},"height"),t("span",{class:"token punctuation"},":"),n("80px"),t("span",{class:"token punctuation"},";")]),t("span",{class:"token punctuation"},'"')])]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-skeleton-item")]),n(),t("span",{class:"token attr-name"},":shape"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),t("span",{class:"token punctuation"},"'"),n("image"),t("span",{class:"token punctuation"},"'"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-skeleton-item")]),n(),t("span",{class:"token attr-name"},":shape"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),t("span",{class:"token punctuation"},"'"),n("title"),t("span",{class:"token punctuation"},"'"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-skeleton-item")]),n(),t("span",{class:"token attr-name"},":shape"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),t("span",{class:"token punctuation"},"'"),n("paragraph"),t("span",{class:"token punctuation"},"'"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token attr-name"},":row"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("3"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token attr-name"},":row-width"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),n("["),t("span",{class:"token punctuation"},"'"),n("75%"),t("span",{class:"token punctuation"},"'"),n(","),t("span",{class:"token punctuation"},"'"),n("50%"),t("span",{class:"token punctuation"},"'"),n("]"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"<"),n("d-skeleton-item")]),n(),t("span",{class:"token attr-name"},":shape"),t("span",{class:"token attr-value"},[t("span",{class:"token punctuation attr-equals"},"="),t("span",{class:"token punctuation"},'"'),t("span",{class:"token punctuation"},"'"),n("button"),t("span",{class:"token punctuation"},"'"),t("span",{class:"token punctuation"},'"')]),n(),t("span",{class:"token punctuation"},"/>")]),n(`
`),t("span",{class:"token tag"},[t("span",{class:"token tag"},[t("span",{class:"token punctuation"},"</"),n("template")]),t("span",{class:"token punctuation"},">")]),n(`
`)])])],-1),et=D('<h3 id="d-skeleton-props" tabindex="-1">d-skeleton Props <a class="header-anchor" href="#d-skeleton-props" aria-hidden="true">#</a></h3><table><thead><tr><th style="text-align:center;">\u53C2\u6570</th><th style="text-align:center;">\u7C7B\u578B</th><th style="text-align:center;">\u9ED8\u8BA4</th><th style="text-align:left;">\u8BF4\u660E</th></tr></thead><tbody><tr><td style="text-align:center;">loading</td><td style="text-align:center;"><code>boolean</code></td><td style="text-align:center;"><code>true</code></td><td style="text-align:left;">\u662F\u5426\u663E\u793A\u9AA8\u67B6\u5C4F\uFF0C\u4F20 <code>false</code> \u65F6\u4F1A\u5C55\u793A\u5B50\u7EC4\u4EF6\u5185\u5BB9</td></tr><tr><td style="text-align:center;">animate</td><td style="text-align:center;"><code>boolean</code></td><td style="text-align:center;"><code>true</code></td><td style="text-align:left;">\u662F\u5426\u5F00\u542F\u52A8\u753B</td></tr><tr><td style="text-align:center;">avatar</td><td style="text-align:center;"><code>boolean</code></td><td style="text-align:center;"><code>false</code></td><td style="text-align:left;">\u662F\u5426\u663E\u793A\u5934\u50CF\u5360\u4F4D\u56FE</td></tr><tr><td style="text-align:center;">title</td><td style="text-align:center;"><code>boolean</code></td><td style="text-align:center;"><code>true</code></td><td style="text-align:left;">\u662F\u5426\u663E\u793A\u6807\u9898\u5360\u4F4D\u56FE</td></tr><tr><td style="text-align:center;">paragraph</td><td style="text-align:center;"><code>boolean</code></td><td style="text-align:center;"><code>true</code></td><td style="text-align:left;">\u662F\u5426\u663E\u793A\u6BB5\u843D\u5360\u4F4D\u56FE</td></tr><tr><td style="text-align:center;">round</td><td style="text-align:center;"><code>boolean</code></td><td style="text-align:center;"><code>false</code></td><td style="text-align:left;">\u662F\u5426\u5C06\u6807\u9898\u548C\u6BB5\u843D\u663E\u793A\u4E3A\u5706\u89D2\u98CE\u683C</td></tr></tbody></table><h3 id="d-skeleton-avatar-props" tabindex="-1">d-skeleton__avatar Props <a class="header-anchor" href="#d-skeleton-avatar-props" aria-hidden="true">#</a></h3><table><thead><tr><th style="text-align:center;">\u53C2\u6570</th><th style="text-align:center;">\u7C7B\u578B</th><th style="text-align:center;">\u9ED8\u8BA4</th><th style="text-align:left;">\u8BF4\u660E</th></tr></thead><tbody><tr><td style="text-align:center;">avatar-size</td><td style="text-align:center;"><code>number | string</code></td><td style="text-align:center;"><code>40px</code></td><td style="text-align:left;">\u5934\u50CF\u5360\u4F4D\u56FE\u5927\u5C0F</td></tr><tr><td style="text-align:center;">avatar-shape</td><td style="text-align:center;"><code>string</code></td><td style="text-align:center;"><code>round</code></td><td style="text-align:left;">\u5934\u50CF\u5360\u4F4D\u56FE\u5F62\u72B6\uFF0C\u53EF\u9009\u503C\u4E3A<code>square</code></td></tr></tbody></table><h3 id="d-skeleton-title-props" tabindex="-1">d-skeleton__title Props <a class="header-anchor" href="#d-skeleton-title-props" aria-hidden="true">#</a></h3><table><thead><tr><th style="text-align:center;">\u53C2\u6570</th><th style="text-align:center;">\u7C7B\u578B</th><th style="text-align:center;">\u9ED8\u8BA4</th><th style="text-align:left;">\u8BF4\u660E</th></tr></thead><tbody><tr><td style="text-align:center;">title-width</td><td style="text-align:center;"><code>number | string</code></td><td style="text-align:center;"><code>40%</code></td><td style="text-align:left;">\u8BBE\u7F6E\u6807\u9898\u5360\u4F4D\u56FE\u7684\u5BBD\u5EA6</td></tr></tbody></table><h3 id="d-skeleton-paragraph-props" tabindex="-1">d-skeleton__paragraph Props <a class="header-anchor" href="#d-skeleton-paragraph-props" aria-hidden="true">#</a></h3><table><thead><tr><th style="text-align:center;">\u53C2\u6570</th><th style="text-align:center;">\u7C7B\u578B</th><th style="text-align:center;">\u9ED8\u8BA4</th><th style="text-align:left;">\u8BF4\u660E</th></tr></thead><tbody><tr><td style="text-align:center;">row</td><td style="text-align:center;"><code>number</code></td><td style="text-align:center;"><code>0</code></td><td style="text-align:left;">\u6BB5\u843D\u5360\u4F4D\u56FE\u884C\u6570</td></tr><tr><td style="text-align:center;">row-width</td><td style="text-align:center;"><code>number | string | (number | string)[]</code></td><td style="text-align:center;"><code>["100%"]</code></td><td style="text-align:left;">\u6BB5\u843D\u5360\u4F4D\u56FE\u5BBD\u5EA6\uFF0C\u53EF\u4F20\u6570\u7EC4\u6765\u8BBE\u7F6E\u6BCF\u4E00\u884C\u7684\u5BBD\u5EA6</td></tr></tbody></table><h3 id="d-skeleton-item-props" tabindex="-1">d-skeleton-item Props <a class="header-anchor" href="#d-skeleton-item-props" aria-hidden="true">#</a></h3><p>\u7EC6\u7C92\u5EA6\u6A21\u5F0F</p><table><thead><tr><th style="text-align:center;">\u53C2\u6570</th><th style="text-align:center;">\u7C7B\u578B</th><th style="text-align:center;">\u9ED8\u8BA4</th><th style="text-align:left;">\u8BF4\u660E</th></tr></thead><tbody><tr><td style="text-align:center;">shape</td><td style="text-align:center;"><code>string</code></td><td style="text-align:center;">-</td><td style="text-align:left;">\u53EF\u9009\u503C\u4E3A<code>avatar</code>,<code>image</code>,<code>title</code>,<code>paragraph</code>,<code>button</code>\u3002</td></tr><tr><td style="text-align:center;">animate</td><td style="text-align:center;"><code>boolean</code></td><td style="text-align:center;"><code>true</code></td><td style="text-align:left;">\u662F\u5426\u5F00\u542F\u52A8\u753B</td></tr></tbody></table><blockquote><p>paragraph \u7684 API \u4E0E\u9ED8\u8BA4\u6A21\u5F0F\u76F8\u540C\u3002</p></blockquote><h3 id="d-skeleton-item-avatar-props" tabindex="-1">d-skeleton-item__avatar Props <a class="header-anchor" href="#d-skeleton-item-avatar-props" aria-hidden="true">#</a></h3><table><thead><tr><th style="text-align:center;">\u53C2\u6570</th><th style="text-align:center;">\u7C7B\u578B</th><th style="text-align:center;">\u9ED8\u8BA4</th><th style="text-align:left;">\u8BF4\u660E</th></tr></thead><tbody><tr><td style="text-align:center;">avatar-shape</td><td style="text-align:center;"><code>string</code></td><td style="text-align:center;"><code>round</code></td><td style="text-align:left;">\u5934\u50CF\u5360\u4F4D\u56FE\u5F62\u72B6\uFF0C\u53EF\u9009\u503C\u4E3A<code>square</code></td></tr></tbody></table>',14);function ot(c,a,s,o,g,F){const h=C("render-demo-0"),i=C("demo"),r=C("render-demo-1"),y=C("render-demo-2");return R(),Q("div",null,[X,v(i,{sourceCode:`<template>
<d-skeleton :row="3" />
</template>
`},{highlight:m(()=>[Y]),default:m(()=>[v(h)]),_:1}),Z,v(i,{sourceCode:`<template>
<div class="skeleton-btn-groups">
<div class="skeleton-btn">
\u5C55\u793A\u9AA8\u67B6\u5C4F\uFF1A
<d-switch v-model:checked="loading" />
</div>
<div class="skeleton-btn">
\u52A8\u753B\uFF1A
<d-switch v-model:checked="animate" />
</div>
<div class="skeleton-btn">
\u663E\u793A\u5934\u50CF\uFF1A
<d-switch v-model:checked="avatar" />
</div>
<div class="skeleton-btn">
\u663E\u793A\u6807\u9898\uFF1A
<d-switch v-model:checked="title" />
</div>
<div class="skeleton-btn">
\u663E\u793A\u6BB5\u843D\uFF1A
<d-switch v-model:checked="paragraph" />
</div>
<div class="skeleton-btn">
\u5934\u50CF\u5706\u89D2\uFF1A
<d-switch v-model:checked="roundAvatar" />
</div>
<div class="skeleton-btn">
\u6BB5\u843D\u548C\u6807\u9898\u5706\u89D2\uFF1A
<d-switch v-model:checked="round" />
</div>
</div>
<d-skeleton :row="3" :animate="animate" :avatar="avatar" :avatar-shape="roundAvatar?'':'square'" :title="title" :paragraph="paragraph" :loading="loading" :round="round">
<div>
<div>row one</div>
<div>row two</div>
<div>row three</div>
<div>row four</div>
</div>
</d-skeleton>
</template>
<script>
import { defineComponent, ref } from 'vue'
export default defineComponent({
setup () {
const loading = ref(true)
const animate = ref(true)
const avatar = ref(true)
const title = ref(true)
const paragraph = ref(true)
const roundAvatar = ref(true)
const round = ref(false)
return {
loading,
animate,
avatar,
title,
paragraph,
roundAvatar,
round
}
}
})
<\/script>
<style>
.skeleton-btn-groups{
display: flex;
margin-bottom: 1rem;
}
.skeleton-btn{
display: flex;
flex-direction: column;
justify-content: space-between;
}
</style>
`},{highlight:m(()=>[tt]),default:m(()=>[v(r)]),_:1}),nt,at,v(i,{sourceCode:`<template>
<d-skeleton-item :shape="'avatar'" style="margin-left:55px;width:80px;height:80px;" />
\xA0 <d-skeleton-item :shape="'image'" />
\xA0 <d-skeleton-item :shape="'title'" />
\xA0 <d-skeleton-item :shape="'paragraph'" :row="3" :row-width="['75%','50%']" />
\xA0 <d-skeleton-item :shape="'button'" />
</template>
`},{highlight:m(()=>[st]),default:m(()=>[v(y)]),_:1}),et])}var pt=O(W,[["render",ot]]);export{lt as __pageData,pt as default};
RUN.py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 13 00:33:14 2019
@author: Amanda
Development of a Machine Vision Based Yield Monitor for Shallot Onions and Carrot crops
Precision Agriculture and Sensor Systems (PASS) Research Group
McGill University, Department of Bioresource Engineering
RUN.py --- This is a yield monitoring program for the master's thesis of
Amanda Boatswain Jacques. This is the main operation file.
"""
from YieldMonitor import YieldMonitor
import time
import cv2
import argparse
| # cd to the correct execution folder
# >> cd Desktop/yield_monitor
# The next line activates the "yield_monitor" conda env and runs this main
# file "RUN.py" when the file icon is double-clicked.
subprocess.run("activate yield_monitor && python RUN.py", shell=True)
# Execute the yield monitoring program and initialize the camera source
YM = YieldMonitor("conf.json")
time.sleep(5)
YM.pretty_print("[INFO] RUNNING", "Yield Monitor initialized!")
# Perform size calibration
YM.calibrate_monitor()
# Initialize the GPS sensor
YM.init_gps()
# Run the program, to exit press "Ctrl + C"
YM.run()
# Save the data to disk
YM.save_log()
# Close the program
YM.close() | # activate the conda environment
import subprocess
|
IpAcquire.py | from config.SourceUrl import getUrl
from ip.Ip2Db import insert
import threading
import requests
from Log import log
|
def acquireIp():
    aUrl = getUrl()
    log.info('Fetching proxy IPs from: {}'.format(aUrl))
    try:
        response = requests.get(aUrl, headers=header, timeout=5)
        if response.status_code == 200:
            parseHtml(response.text)
    except Exception:
        # traceback.print_exc()
        log.error('IP request failed: {}'.format(aUrl))

def parseHtml(html):
    # The provider returns a bytes-style repr, so strip the quotes and every
    # 'b'; removing 'b' also turns '<br/>' into '<r/>', which is dropped next.
    html = html.replace('\'', '').replace('b', '').replace('<r/>', '').replace('\r', '')
    ips = html.split("\n")
    for ip in ips:
        ip = ip.strip()
        if 'false' in ip:
            log.warning('Your plan has reached its daily limit')
            return
        elif '' == ip:
            return
        else:
            if '.' in ip:
                threading.Thread(target=insert, args=(ip,)).start() | header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
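# Minimal usage sketch (assumptions: getUrl() points at a provider returning
# one "ip:port" entry per line, and ip.Ip2Db.insert accepts that string):
#     acquireIp()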
|
mongoose-tests.ts | import * as mongoose from 'mongoose';
// dummy variables
var cb = function () {};
/*
* Most of these tests are taken directly from the examples
* in the Mongoose API documentation.
*
* http://mongoosejs.com/docs/guide.html
* http://mongoosejs.com/docs/api.html
*/
/*
* section index.js
* http://mongoosejs.com/docs/api.html#index-js
*/
var connectUri = 'mongodb://user:pass@localhost:port/database';
const connection1: Promise<mongoose.Mongoose> = mongoose.connect(connectUri);
const connection2: Promise<mongoose.Mongoose> = mongoose.connect(connectUri, {
user: 'larry',
pass: 'housan',
config: {
autoIndex: true,
},
mongos: true,
bufferCommands: false,
useNewUrlParser: true,
useFindAndModify: true,
useCreateIndex: true
});
const connection3: null = mongoose.connect(connectUri, function (error) {
error.stack;
});
var mongooseConnection: mongoose.Connection = mongoose.createConnection();
mongooseConnection.dropDatabase().then(()=>{});
mongooseConnection.dropCollection('foo').then(()=>{});
mongoose.createConnection(connectUri).then((conn)=> {
return conn.collections;
}, () => {
});
mongoose.createConnection(connectUri).open('');
mongoose.createConnection(connectUri, {
db: {
native_parser: true
}
}).open('');
const dcWithCallback: null = mongoose.disconnect(cb);
const dcPromise: Promise<void> = mongoose.disconnect();
mongoose.get('test');
mongoose.model('Actor', new mongoose.Schema({
name: String
}), 'collectionName', true).find({});
mongoose.model('Actor').find({});
mongoose.modelNames()[0].toLowerCase();
mongoose.models.Actor.findOne({}).exec();
new (new mongoose.Mongoose(9, 8, 7)).Mongoose(1, 2, 3).connect('');
mongoose.plugin(cb, {}).connect('');
mongoose.set('test', 'value');
mongoose.set('debug', function(collectionName: any, methodName: any, arg1: any, arg2: any) {});
mongoose.STATES.hasOwnProperty('');
mongoose.connection.on('error', cb);
new mongoose.mongo.MongoError('error').stack;
mongoose.SchemaTypes.String;
mongoose.SchemaTypes.ObjectId;
mongoose.SchemaTypes.Decimal128;
mongoose.Types.ObjectId;
mongoose.Types.Decimal128;
mongoose.version.toLowerCase();
const sslConnections: {[key: string]: mongoose.Connection} = {
basic: mongoose.createConnection(connectUri, {ssl: true}),
customCA: mongoose.createConnection(
connectUri,
{
ssl: true,
sslCA: [new Buffer('ca string')],
sslCRL: [new Buffer('crl buffer')],
sslCert: 'ssl cert',
sslKey: new Buffer('ssl private key'),
sslPass: 'ssl password',
servername: 'localhost',
checkServerIdentity: true,
ciphers: 'ciphers',
ecdhCurve: 'ecdhCurve',
}
),
};
/*
* section collection.js
* http://mongoosejs.com/docs/api.html#collection-js
*
* section drivers/node-mongodb-native/collection.js
* http://mongoosejs.com/docs/api.html#drivers-node-mongodb-native-collection-js
*/
var coll1 = <mongoose.Collection> {};
coll1.$format(999).toLowerCase();
coll1.$print('name', 'i', [1, 2, 3]);
coll1.getIndexes();
/* inherited properties */
coll1.collectionName;
coll1.conn;
coll1.name;
coll1.ensureIndex();
coll1.find({});
coll1.insert({}, {});
var coll2 = new mongoose.Collection('', new mongoose.Connection(mongoose));
coll2.$format(999).toLowerCase();
/* inherited properties */
coll2.initializeOrderedBulkOp;
coll2.indexExists;
/*
* section connection.js
* http://mongoosejs.com/docs/api.html#connection-js
*
* section section drivers/node-mongodb-native/connection.js
* http://mongoosejs.com/docs/api.html#drivers-node-mongodb-native-connection-js
*/
var conn1: mongoose.Connection = mongoose.createConnection('mongodb://user:pass@localhost:port/database');
conn1 = new mongoose.Connection(mongoose);
conn1.open('mongodb://localhost/test', 'myDb', 27017, {
replset: null,
config: {
autoIndex: false
}
}, function (err) {}).open('');
conn1.openSet('mongodb://localhost/test', 'db', {
replset: null,
mongos: true
}, function (err) {}).then(cb).catch(cb);
conn1.openUri('mongodb://localhost/test', 'myDb', 27017, {
replset: null,
config: {
autoIndex: false
}
}, function (err) {}).open('');
conn1.close().then(function () {}).catch(function (err) {});
conn1.close(true).then(function () {}).catch(function (err) {});
conn1.close(function (err) {});
conn1.close(true, function (err) {});
conn1.collection('name').$format(999);
conn1.model('myModel', new mongoose.Schema({}), 'myCol').find();
conn1.models.myModel.findOne().exec();
interface IStatics {
staticMethod1: (a: number) => string;
}
conn1.modelNames()[0].toLowerCase();
conn1.config.hasOwnProperty('');
conn1.db.bufferMaxEntries;
conn1.collections['coll'].$format(999);
conn1.readyState.toFixed();
conn1.useDb('myDb').useDb('');
mongoose.Connection.STATES.hasOwnProperty('');
/* inherited properties */
conn1.on('data', cb);
conn1.addListener('close', cb);
// The connection returned by useDb is *not* thenable.
// From https://github.com/DefinitelyTyped/DefinitelyTyped/pull/26057#issuecomment-396150819
const getDB = async (tenant: string)=> {
return conn1.useDb(tenant);
};
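// A small usage sketch of the wrapper above (the tenant name is hypothetical):
// since useDb is synchronous, the promise resolves straight to the Connection.
getDB('tenant1').then((conn: mongoose.Connection) => { conn.readyState.toFixed(); });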
/*
* section error.js
* http://mongoosejs.com/docs/api.html#error-js
*/
var mongooseError: mongoose.Error = new mongoose.Error('error');
/* inherited properties */
mongooseError.message;
mongooseError.name;
mongooseError.stack;
/* static properties */
mongoose.Error.messages.hasOwnProperty('');
mongoose.Error.Messages.hasOwnProperty('');
/*
* section error/cast.js
* https://mongoosejs.com/docs/api.html#mongooseerror_MongooseError.CastError
*/
var castError: mongoose.Error.CastError = new mongoose.Error.CastError('', '', '');
castError.setModel('foo');
castError.stringValue;
castError.kind;
castError.path;
castError.value;
/* inherited properties */
castError.name;
castError.stack;
castError.message;
/*
* section error/validator.js
* https://mongoosejs.com/docs/api.html#mongooseerror_MongooseError.ValidatorError
*/
var validatorError: mongoose.Error.ValidatorError = new mongoose.Error.ValidatorError({ foo: 'bar' })
validatorError.properties;
validatorError.kind;
validatorError.path;
validatorError.value;
validatorError.toString().toLowerCase();
validatorError.formatMessage('foo', {});
validatorError.formatMessage('foo', (bar: any)=>{ return bar; });
/* inherited properties */
validatorError.name;
validatorError.stack;
validatorError.message;
/*
* section error/validation.js
* https://mongoosejs.com/docs/api.html#mongooseerror_MongooseError.ValidationError
*/
var doc = <mongoose.MongooseDocument> {};
var validationError: mongoose.Error.ValidationError = new mongoose.Error.ValidationError(doc);
validationError.toString().toLowerCase();
validationError.inspect();
validationError.toJSON().hasOwnProperty('');
validationError.addError('foo', validatorError)
/* inherited properties */
validationError.name;
validationError.stack;
validationError.message;
/*
* section error/parallelSave.js
* https://mongoosejs.com/docs/api.html#mongooseerror_MongooseError.ParallelSaveError
*/
var parallelSaveError: mongoose.Error.ParallelSaveError = new mongoose.Error.ParallelSaveError(doc);
/* inherited properties */
parallelSaveError.name;
parallelSaveError.stack;
parallelSaveError.message;
/*
* section error/overwriteModel.js
* https://mongoosejs.com/docs/api.html#mongooseerror_MongooseError.OverwriteModelError
*/
var overwriteModelError: mongoose.Error.OverwriteModelError = new mongoose.Error.OverwriteModelError('foo');
/* inherited properties */
overwriteModelError.name;
overwriteModelError.stack;
overwriteModelError.message;
/*
* section error/missingSchema.js
* https://mongoosejs.com/docs/api.html#mongooseerror_MongooseError.MissingSchemaError
*/
var missingSchemaError: mongoose.Error.MissingSchemaError = new mongoose.Error.MissingSchemaError('foo');
/* inherited properties */
missingSchemaError.name;
missingSchemaError.stack;
missingSchemaError.message;
/*
* section error/divergentArray.js
 * https://mongoosejs.com/docs/api.html#mongooseerror_MongooseError.DivergentArrayError
 */
var divergentArrayError: mongoose.Error.DivergentArrayError = new mongoose.Error.DivergentArrayError(['foo','bar']);
/* inherited properties */
divergentArrayError.name;
divergentArrayError.stack;
divergentArrayError.message;
const pluralize = mongoose.pluralize();
const plural: string = pluralize('foo');
/*
* section querycursor.js
* http://mongoosejs.com/docs/api.html#querycursor-js
*/
var querycursor = <mongoose.QueryCursor<any>> {};
querycursor.close(function (error, result) {
result.execPopulate();
}).catch(cb);
querycursor.eachAsync(function (doc) {
doc.execPopulate();
}, function (err) {}).catch(cb);
querycursor.next(cb).catch(cb);
/* inherited properties */
querycursor.pause();
querycursor.pipe(process.stdout);
/* practical example */
var QCModel = mongoose.model('QC', new mongoose.Schema({name: String}));
QCModel.find({}).cursor({}).on('data', function (doc: any) {
doc.depopulate('name');
}).on('error', function (error: any) {
throw error;
}).close().then(cb).catch(cb);
querycursor.map(function (doc) {
doc.foo = "bar";
return doc;
}).on('data', function (doc: any) {
console.log(doc.foo);
});
querycursor.map(function (doc) {
doc.foo = "bar";
return doc;
}).next(function (error, doc) {
console.log(doc.foo);
});
QCModel.watch().once('change', (change: any) => {
console.log(change);
});
QCModel.watch({
maxAwaitTimeMS: 10
}).once('change', (change: any) => {
console.log(change);
});
/*
* section virtualtype.js
* http://mongoosejs.com/docs/api.html#virtualtype-js
*/
var virtualtype: mongoose.VirtualType = new mongoose.VirtualType({}, 'hello');
virtualtype.applyGetters({}, {});
virtualtype.applySetters({}, {});
virtualtype.get(cb).get(cb);
virtualtype.set(cb).set(cb);
/*
* section schema.js
* http://mongoosejs.com/docs/api.html#schema-js
*/
var schema: mongoose.Schema = new mongoose.Schema({
name: String,
binary: Buffer,
living: Boolean,
updated: { type: Date, default: Date.now },
age: { type: Number, min: 18, max: 65 },
mixed: mongoose.Schema.Types.Mixed,
_someId: mongoose.Schema.Types.ObjectId,
someDecimal:mongoose.Schema.Types.Decimal128,
array: [],
ofString: [String],
ofNumber: [Number],
ofDates: [Date],
ofBuffer: [Buffer],
ofBoolean: [Boolean],
ofMixed: [mongoose.Schema.Types.Mixed],
ofObjectId: [mongoose.Schema.Types.ObjectId],
nested: {
stuff: { type: String, lowercase: true, trim: true }
}
});
schema.add({
mixedArray: {
type: [mongoose.Schema.Types.Mixed],
required: true
}
}, 'prefix');
schema.eachPath(function (path, type) {
path.toLowerCase();
type.sparse(true);
}).eachPath(cb);
schema.get('path');
schema.index({
name: 1,
binary: -1
}).index({}, {});
schema.indexes().slice();
schema.method('name', cb).method({
m1: cb,
m2: cb
});
schema.path('a', mongoose.Schema.Types.Buffer).path('a');
schema.pathType('m1').toLowerCase();
schema.plugin(function (schema: mongoose.Schema, opts?: any) {
schema.get('path');
if (opts) {
opts.hasOwnProperty('');
}
}).plugin(cb, {opts: true});
/* `.pre` hook tests */
interface PreHookTestDocumentInterface extends mongoose.Document {}
interface PreHookTestQueryInterface<T> extends mongoose.Query<T> {}
interface PreHookTestAggregateInterface<T> extends mongoose.Aggregate<T> {}
interface PreHookTestModelInterface<T extends mongoose.Document> extends mongoose.Model<T> {}
// This array ensures that every test case below returns a value of type mongoose.Schema
const preHookTestSchemaArr: mongoose.Schema[] = [];
// testing order:
// serial with default value and returning void
// serial with a type argument and returning a promise
// parallel with default value and returning void
// parallel with a type argument and returning a promise
// Document
preHookTestSchemaArr.push(
schema.pre("init", function (next) {
const isDefaultType: mongoose.Document = this;
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre<PreHookTestDocumentInterface>("init", function (next) {
const isSpecificType: PreHookTestDocumentInterface = this;
return Promise.resolve("");
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre("init", true, function (next, done) {
const isDefaultType: mongoose.Document = this;
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre<PreHookTestDocumentInterface>("init", true, function (next, done) {
const isSpecificType: PreHookTestDocumentInterface = this;
return Promise.resolve("");
}, err => {})
);
// Query
preHookTestSchemaArr.push(
schema.pre("count", function (next) {
const isDefaultType: mongoose.Query<any> = this;
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre<PreHookTestQueryInterface<number>>("count", function (next) {
const isSpecificType: PreHookTestQueryInterface<number> = this;
return Promise.resolve("");
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre("count", true, function (next, done) {
const isDefaultType: mongoose.Query<any> = this;
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre<PreHookTestQueryInterface<number>>("count", true, function (next, done) {
const isSpecificType: PreHookTestQueryInterface<number> = this;
return Promise.resolve("");
}, err => {})
);
// Aggregate
preHookTestSchemaArr.push(
schema.pre("aggregate", function(next) {
const isDefaultType: mongoose.Aggregate<any> = this;
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre<PreHookTestAggregateInterface<number>>("aggregate", function(next) {
const isSpecificType: PreHookTestAggregateInterface<number> = this;
return Promise.resolve("")
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre("aggregate", true, function(next, done) {
const isDefaultType: mongoose.Aggregate<any> = this;
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre<PreHookTestAggregateInterface<number>>("aggregate", true, function(next, done) {
const isSpecificType: PreHookTestAggregateInterface<number> = this;
return Promise.resolve("")
}, err => {})
);
// Model<Document>
preHookTestSchemaArr.push(
schema.pre("insertMany", function(next, docs) {
const isDefaultType: mongoose.Model<mongoose.Document> = this;
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre<PreHookTestModelInterface<PreHookTestDocumentInterface>>("insertMany", function(next, docs) {
const isSpecificType: PreHookTestModelInterface<PreHookTestDocumentInterface> = this;
return Promise.resolve("")
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre("insertMany", true, function(next, done, docs) {
const isDefaultType: mongoose.Model<mongoose.Document> = this;
}, err => {})
);
preHookTestSchemaArr.push(
schema.pre<PreHookTestModelInterface<PreHookTestDocumentInterface>>("insertMany", true, function(next, done, docs) {
const isSpecificType: PreHookTestModelInterface<PreHookTestDocumentInterface> = this;
return Promise.resolve("")
}, err => {})
);
schema
.post('save', function (error: mongoose.Error, doc: mongoose.Document, next: (err?: mongoose.NativeError) => void) {
error.stack;
doc.model;
next.apply;
})
.post('save', function (doc: mongoose.Document, next: (err?: mongoose.NativeError) => void) {
doc.model;
next(new Error());
})
.post('save', function (doc: mongoose.Document) {
doc.model;
});
schema.queue('m1', [1, 2, 3]).queue('m2', [[]]);
schema.remove('path');
schema.remove(['path1', 'path2', 'path3']);
schema.requiredPaths(true)[0].toLowerCase();
schema.set('id', true).set('id');
schema.static('static', cb).static({
s1: cb,
s2: cb
});
schema.virtual('virt', {}).applyGetters({}, {});
schema.virtualpath('path').applyGetters({}, {});
/* static properties */
mongoose.Schema.indexTypes[0].toLowerCase();
mongoose.Schema.reserved.hasOwnProperty('');
/* inherited properties */
schema.addListener('e', cb);
/* practical examples */
var animalSchema = new mongoose.Schema({
name: String,
type: String
});
animalSchema.methods.findSimilarTypes = function (cb: any) {
  return this.model('Animal').find({ type: this.type }, cb);
};
var Animal: any = mongoose.model('Animal', animalSchema);
var dog: any = new Animal({type: 'dog'});
dog['findSimilarTypes'](function (err: any, dogs: any) {
console.log(dogs);
});
new mongoose.Schema({
title: String,
author: String,
body: String,
comments: [{ body: String, date: Date }],
date: { type: Date, default: Date.now },
hidden: Boolean,
meta: {
votes: Number,
favs: Number,
text: String
},
meta2: {
text: mongoose.Schema.Types.Number,
select: {
type: String
}
}
});
new mongoose.Schema({ name: { type: String, index: true }});
new mongoose.Schema({ loc: { type: [Number], index: 'hashed' }});
new mongoose.Schema({ loc: { type: [Number], index: '2d', sparse: true }});
new mongoose.Schema({ loc: { type: [Number], index: { type: '2dsphere', sparse: true }}});
new mongoose.Schema({ date: { type: Date, index: { unique: true, expires: '1d' }}});
new mongoose.Schema({ born: { type: Date, required: '{PATH} is required!' }});
new mongoose.Schema({ born: { type: Date, required: function() {
return this.age >= 18;
}}});
new mongoose.Schema({ state: { type: String, enum: ['opening', 'open', 'closing', 'closed'] }});
new mongoose.Schema({ state: { type: String, enum: {
values: ['opening', 'open', 'closing', 'closed'],
message: 'enum validator failed for path `{PATH}` with value `{VALUE}`'
}}});
new mongoose.Schema({ name: { type: String, match: /^a/ }});
new mongoose.Schema({ name: { type: String, match: [
/\.html$/, "That file doesn't end in .html ({VALUE})"
]}});
new mongoose.Schema({
createdAt: {type: Date, expires: 60 * 60 * 24}
});
new mongoose.Schema({ createdAt: { type: Date, expires: '1.5h' }});
new mongoose.Schema({ d: { type: Date, max: new Date('2014-01-01') }});
new mongoose.Schema({ d: { type: Date, max: [
new Date('2014-01-01'),
'The value of path `{PATH}` ({VALUE}) exceeds the limit ({MAX}).'
]}});
new mongoose.Schema({d: {type: Date, min: [
new Date('1970-01-01'),
'The value of path `{PATH}` ({VALUE}) is beneath the limit ({MIN}).'
]}});
new mongoose.Schema({
integerOnly: {
type: Number,
get: (v: number) => Math.round(v),
set: (v: number) => Math.round(v),
validate: {
isAsync: false,
validator: (val: number): boolean => {
return false;
}
}
},
asyncValidated: {
type: Number,
validate: {
isAsync: true,
validator: (val: number, done): void => {
setImmediate(done, true);
}
}
},
promiseValidated: {
type: Number,
validate: {
validator: async (val: number) => {
return val === 2;
}
}
},
});
new mongoose.Schema({ name: { type: String, validate: [
{ validator: () => {return true}, msg: 'uh oh' },
{ validator: () => {return true}, msg: 'failed' }
]}});
animalSchema.statics.findByName = function(name: any, cb: any) {
return this.find({ name: new RegExp(name, 'i') }, cb);
};
Animal['findByName']('fido', function(err: any, animals: any) {
console.log(animals);
});
animalSchema.virtual('name.full').get(function () {
return this.name.first + ' ' + this.name.last;
});
var childSchema = new mongoose.Schema({ name: String });
var parentSchema = new mongoose.Schema({
children: [childSchema],
child: childSchema,
name: {
index: true,
required: true
}
});
new mongoose.Schema({
eggs: {
type: Number,
min: [6, 'Too few eggs'],
max: 12
},
bacon: {
type: Number,
required: [true, 'Why no bacon?']
},
drink: {
type: String,
enum: ['Coffee', 'Tea']
}
});
(new mongoose.Schema({})).plugin<any>(function (schema: mongoose.Schema, options: any) {
schema.add({ lastMod: Date })
schema.pre('save', function (next: Function) {
(this as any).lastMod = new Date
next()
})
if (options && options['index']) {
schema.path('lastMod').index(options['index'])
}
}, { index: true }).plugin<any>(function (schema: mongoose.Schema, options: any) {
schema.add({ lastMod: Date })
schema.pre('save', function (next: Function) {
(this as any).lastMod = new Date
next()
})
if (options && options['index']) {
schema.path('lastMod').index(options['index'])
}
}, {index: true});
new mongoose.Schema({foo: String}, {strict: 'throw'});
export default function(schema: mongoose.Schema) {
schema.pre('init', function(this: mongoose.Document, next: (err?: Error) => void): void {
console.log('success!');
});
}
// plugins
function MyPlugin(schema: mongoose.Schema, opts?: string) {
}
new mongoose.Schema({})
.plugin(MyPlugin)
interface PluginOption {
modelName: string;
timestamp: string;
}
function logger(modelName: string, timestamp: string) {
// call special logger with options
}
function | (schema: mongoose.Schema, options: PluginOption) {
if (options) {
schema.pre('save', function (next: Function) {
logger(options.modelName, options.timestamp)
})
}
}
new mongoose.Schema({})
.plugin<PluginOption>(AwesomeLoggerPlugin, {modelName: 'Executive', timestamp: 'yyyy/MM/dd'})
mongoose.plugin<PluginOption>(AwesomeLoggerPlugin, {modelName: 'Executive', timestamp: 'yyyy/MM/dd'})
/*
* section document.js
* http://mongoosejs.com/docs/api.html#document-js
*/
var doc = <mongoose.MongooseDocument> {};
doc.$isDefault('path').valueOf();
const docDotDepopulate: mongoose.MongooseDocument = doc.depopulate('path');
doc.equals(doc).valueOf();
doc.execPopulate().then(function (arg) {
arg.execPopulate();
}).catch(function (err) {});
doc.get('path', Number);
doc.init(doc).init(doc, {});
doc.inspect();
doc.invalidate('path', new Error('hi'), 999).toString();
doc.isDirectModified('path').valueOf();
doc.isInit('path').valueOf();
doc.isModified('path').valueOf();
doc.isSelected('path').valueOf();
doc.markModified('path');
doc.modifiedPaths()[0].toLowerCase();
doc.populate(function (err, doc) {
doc.populate('path', function (err, doc) {
doc.populate({
path: 'path',
select: 'path',
match: {}
});
});
});
doc.populated('path');
doc.set('path', 999, {}).set({ path: 999 });
doc.toJSON({
getters: true,
virtuals: false
});
doc.toObject({
transform: function (doc, ret, options) {
doc.toObject();
}
});
doc.toString().toLowerCase();
doc.unmarkModified('path');
doc.update(doc, cb).cursor();
doc.update(doc, {
safe: true,
upsert: true
}, cb).cursor();
doc.validate({}, function (err) {});
doc.validate().then(null).catch(null);
doc.validateSync(['path1', 'path2']).stack;
/* practical examples */
var MyModel = mongoose.model('test', new mongoose.Schema({
name: {
type: String,
alias: 'foo',
default: 'Val '
}
}));
doc = new MyModel();
doc.$isDefault('name');
MyModel.findOne().populate('author').exec(function (err, doc) {
if (doc) {
doc.depopulate('author');
}
});
MyModel.replaceOne({foo: 'bar'}, {qux: 'baz'}).where();
MyModel.replaceOne({foo: 'bar'}, {qux: 'baz'}, (err, raw) => {})
MyModel.bulkWrite([{foo:'bar'}]).then(r => {
console.log(r.deletedCount);
});
MyModel.bulkWrite([], (err, res) => {
console.log(res.modifiedCount)
})
doc.populate('path');
doc.populate({path: 'hello'});
doc.populate('path', cb)
doc.populate({path: 'hello'}, cb);
doc.populate(cb);
doc.populate({path: 'hello'}).execPopulate().catch(cb);
doc.update({$inc: {wheels:1}}, { w: 1 }, cb);
const ImageSchema = new mongoose.Schema({
name: {type: String, required: true},
id: {type: Number, unique: true, required: true, index: true},
}, { id: false });
const clonedSchema: mongoose.Schema = new mongoose.Schema().clone();
interface ImageDoc extends mongoose.Document {
name: string,
id: number
}
const ImageModel = mongoose.model<ImageDoc>('image', ImageSchema);
ImageModel.findOne({}, function(err, doc) {
if (doc) {
doc.name;
doc.id;
}
});
/*
* section types/subdocument.js
* http://mongoosejs.com/docs/api.html#types-subdocument-js
*/
// The constructor is a private API, but we'll use it here for testing
var subdocument: mongoose.Types.Subdocument = new mongoose.Types.Subdocument();
subdocument.ownerDocument().errors;
subdocument.remove({}, function (err) {
return 6;
});
/* inherited properties */
subdocument.execPopulate();
/*
* section types/array.js
* http://mongoosejs.com/docs/api.html#types-array-js
*/
var mongooseArray: mongoose.Types.Array<string> = new mongoose.Types.Array<string>();
mongooseArray.$shift().toLowerCase();
mongooseArray.remove().$shift();
mongooseArray.$pop().toLowerCase();
mongooseArray.addToSet('hi', 9, 9, '4')[0].toLowerCase();
mongooseArray.indexOf({name: 'obj'}).toFixed();
mongooseArray.inspect();
mongooseArray.nonAtomicPush(9, 8, 'hi').toFixed();
mongooseArray.pop().toLowerCase();
mongooseArray.pull(5, 4, 'hi').$shift();
mongooseArray.push([]).toFixed();
mongooseArray.set(1, 'hi').$shift();
mongooseArray.shift().toLowerCase();
mongooseArray.sort(function (a, b) {
return a.length - b.length;
}).unshift();
mongooseArray.splice(4, 1).unshift();
mongooseArray.toObject({depopulate: true}).unshift();
mongooseArray.unshift(2, 4, 'hi').toFixed();
/* inherited properties */
mongooseArray.concat();
mongooseArray.length;
/* practical examples */
interface MySubEntity extends mongoose.Types.Subdocument {
property1: string;
property2: string;
}
interface MyEntity extends mongoose.Document {
sub: mongoose.Types.Array<MySubEntity>
}
var myEntity = <MyEntity> {};
var subDocArray = myEntity.sub.filter(sd => {
sd.property1;
sd.property2.toLowerCase();
return true;
});
/*
* section types/documentarray.js
* http://mongoosejs.com/docs/api.html#types-documentarray-js
*/
// The constructor is a private API, but we'll use it here for testing
var documentArray: mongoose.Types.DocumentArray<mongoose.MongooseDocument> =
new mongoose.Types.DocumentArray();
documentArray.create({}).errors;
documentArray.id(new Buffer('hi'));
documentArray.inspect();
documentArray.toObject({}).length;
/* inherited from mongoose.Types.Array */
documentArray.$shift();
/* inherited from Native Array */
documentArray.concat();
/* practical example */
interface MySubEntity1 extends mongoose.Types.Subdocument {
property1: string;
property2: string;
}
interface MyEntity1 extends mongoose.Document {
sub: mongoose.Types.DocumentArray<MySubEntity>
}
var newEnt = <MyEntity1> {};
var newSub: MySubEntity1 = newEnt.sub.create({ property1: "example", property2: "example" });
/*
* section types/buffer.js
* http://mongoosejs.com/docs/api.html#types-buffer-js
*/
var mongooseBuffer: mongoose.Types.Buffer = new mongoose.Types.Buffer('hello');
mongooseBuffer.copy(mongooseBuffer, 1, 2, 3).toFixed();
mongooseBuffer.copy(new Buffer('hi')).toFixed();
mongooseBuffer.equals(new Buffer('hi')).valueOf();
mongooseBuffer.subtype(123);
mongooseBuffer.toObject().value();
mongooseBuffer.write('world', 3, 2, 1).toFixed();
/* inherited properties */
mongooseBuffer.compare(mongooseBuffer);
/* inherited static properties */
mongoose.Types.Buffer.from([1, 2, 3]);
/*
* section types/decimal128.js
* http://mongoosejs.com/docs/api.html#types-decimal128-js
*/
var decimal128: mongoose.Types.Decimal128 = mongoose.Types.Decimal128.fromString('123.45678901234567');
decimal128 = new mongoose.Types.Decimal128(new Buffer('12345'));
/* practical examples */
export interface ILargeValuesSchema extends mongoose.MongooseDocument {
sum: mongoose.Schema.Types.Decimal128;
}
export var LargeValuesSchema = new mongoose.Schema({
sum: {
type: mongoose.Schema.Types.Decimal128,
required: true
}
});
/*
* section types/objectid.js
* http://mongoosejs.com/docs/api.html#types-objectid-js
*/
var objectId: mongoose.Types.ObjectId = mongoose.Types.ObjectId.createFromHexString('0x1234');
objectId = new mongoose.Types.ObjectId(12345);
objectId = mongoose.Types.ObjectId(12345);
objectId.getTimestamp();
/* practical examples */
export interface IManagerSchema extends mongoose.MongooseDocument {
user: mongoose.Schema.Types.ObjectId;
}
export var ManagerSchema = new mongoose.Schema({
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true
}
});
/*
* section types/embedded.js
* http://mongoosejs.com/docs/api.html#types-embedded-js
*/
var embeddedDocument: mongoose.Types.Embedded = new mongoose.Types.Embedded();
embeddedDocument.inspect().hasOwnProperty('');
embeddedDocument.invalidate('hi', new Error('bleh')).valueOf();
embeddedDocument.ownerDocument().execPopulate();
embeddedDocument.parent().execPopulate();
embeddedDocument.parentArray().$shift();
embeddedDocument.remove().invalidate('hi', new Error('hi'));
embeddedDocument.markModified('path');
/* inherited properties */
embeddedDocument.execPopulate();
/*
* section query.js
* http://mongoosejs.com/docs/api.html#query-js
*/
var query = <mongoose.Query<mongoose.MongooseDocument[]>> {};
query.$where('').$where(cb);
query.all(99).all('path', 99);
query.and([{ color: 'green' }, { status: 'ok' }]).and([]);
query.batchSize(100).batchSize(100);
var lowerLeft = [40.73083, -73.99756]
var upperRight = [40.741404, -73.988135]
query.where('loc').within().box(lowerLeft, upperRight)
query.box({ ll : lowerLeft, ur : upperRight }).box({});
var queryModel = mongoose.model('QModel')
query.cast(new queryModel(), {}).hasOwnProperty('');
query.catch(cb).catch(cb);
query.center({}).center({});
query.centerSphere({ center: [50, 50], radius: 10 }).centerSphere('path', {});
query.circle({ center: [50, 50], radius: 10 }).circle('path');
query.collation({ locale: 'en_US', strength: 1 });
query.comment('comment').comment('comment');
query.where({color: 'black'}).count(function (err, count) {
count.toFixed();
}).then(function (res) {
res.toFixed();
}).catch(function (err) {});
query.cursor().close();
query.distinct('field', {}, cb);
query.distinct('field', {});
query.distinct('field', cb);
query.distinct('field');
query.distinct(cb);
query.distinct();
query.elemMatch('comment', {
author: 'autobot',
votes: {$gte: 5}
}).elemMatch('comment', function (elem) {
elem.where('author').equals('autobot');
elem.where('votes').gte(5);
});
query.where('age').equals(49);
query.exec('find', function (err, res) {
res[0].execPopulate();
}).then(function (arg) {
arg[0].execPopulate();
}).catch(cb);
query.where('name').exists().exists('age', false);
query.find({name: 'aa'}, function (err, res) {
res[0].execPopulate();
}).find();
query.findOne(function (err, res) {
res.execPopulate();
}).findOne();
query.findOneAndRemove({name: 'aa'}, {
rawResult: true
}, function (err, doc) {
doc.execPopulate();
}).findOneAndRemove();
query.findOneAndUpdate({name: 'aa'}, {name: 'bb'}, {
});
query.findOneAndUpdate({name: 'aa'}, {name: 'bb'}, {
rawResult: true
}, cb);
query.findOneAndUpdate({name: 'aa'}, {name: 'bb'}, cb);
query.findOneAndUpdate({name: 'aa'}, {name: 'bb'});
query.findOneAndUpdate({}, {}, { upsert: true, new: true });
query.findOneAndUpdate({name: 'bb'}, cb);
query.findOneAndUpdate({name: 'bb'});
query.findOneAndUpdate(cb);
query.findOneAndUpdate().then(function (doc) {
doc.execPopulate();
}).catch(cb);
var polyA = [[[ 10, 20 ], [ 10, 40 ], [ 30, 40 ], [ 30, 20 ]]]
query.where('loc').within().geometry({ type: 'Polygon', coordinates: polyA })
var polyB = [[ 0, 0 ], [ 1, 1 ]]
query.where('loc').within().geometry({ type: 'LineString', coordinates: polyB })
var polyC = [ 0, 0 ]
query.where('loc').within().geometry({ type: 'Point', coordinates: polyC })
query.where('loc').intersects().geometry({ type: 'Point', coordinates: polyC })
query.getQuery();
query.getUpdate();
query.find().where('age').gt(21);
query.find().gt('age', 21);
query.find().where('age').gte(21);
query.find().gte('age', 21);
query.hint({ indexA: 1, indexB: -1}).hint({});
query.in([1, 2, 3]).in('num', [1, 2, 3]);
query.where('path').intersects().geometry({
type: 'LineString'
, coordinates: [[180.0, 11.0], [180, 9.0]]
});
query.where('path').intersects({
type: 'LineString'
, coordinates: [[180.0, 11.0], [180, 9.0]]
});
query.find().lean().exec(function (err: any, docs: any) {
docs[0];
});
query.limit(20).limit(20);
query.find().where('age').lt(21);
query.find().lt('age', 21);
query.find().where('age').lte(21);
query.find().lte('age', 21);
query.maxDistance('path', 21).maxDistance(21);
query.maxscan(100).maxScan(100);
query.maxScan(100).maxScan(100);
query.merge(query).merge({});
query.mod([1, 2]).mod([5, 6]);
query.find().where('age').ne(21);
query.find().ne('age', 21);
query.where('loc').near({ center: [10, 10] });
query.where('loc').near({ center: [10, 10], maxDistance: 5 });
query.where('loc').near({ center: [10, 10], maxDistance: 5, spherical: true });
query.near('loc', { center: [10, 10], maxDistance: 5 });
query.where('loc').nearSphere({ center: [10, 10], maxDistance: 5 });
query.find().where('age').in([20, 21]);
query.find().in('age', [20, 21]);
query.nor([{ color: 'green' }, { status: 'ok' }]).nor([]);
query.or([{ color: 'red' }, { status: 'emergency' }]).or([]);
query.where('loc').within().polygon([10,20], [13, 25], [7,15]);
query.polygon('loc', [10,20], [13, 25], [7,15]);
query.findOne().populate('owner').exec(function (err, kitten) {
kitten.execPopulate();
});
query.find().populate({
path: 'owner'
, select: 'name'
, match: { color: 'black' }
, options: { sort: { name: -1 }}
}).exec(function (err, kittens) {
kittens[0].execPopulate();
});
query.find().populate('owner', 'name', null, {sort: { name: -1 }}).exec(function (err, kittens) {
kittens[0].execPopulate();
});
query.read('primary', []).read('primary');
query.readConcern('majority').readConcern('m');
query.regex(/re/).regex('path', /re/);
query.remove({}, cb);
query.remove({});
query.remove(cb);
query.remove();
query.select('a b');
query.select('-c -d');
query.select({ a: 1, b: 1 });
query.select({ c: 0, d: 0 });
query.select('+path');
query.selected();
query.selectedExclusively();
query.selectedInclusively();
query.setOptions({
tailable: true,
batchSize: true,
lean: false
});
query.setQuery({ age: 5 });
query.size(0).size('age', 0);
query.skip(100).skip(100);
query.slaveOk().slaveOk(false);
query.slice('comments', 5);
query.slice('comments', -5);
query.slice('comments', [10, 5]);
query.where('comments').slice(5);
query.where('comments').slice([-10, 5]);
query.snapshot().snapshot(true);
query.sort({ field: 'asc', test: -1 });
query.sort('field -test');
query.tailable().tailable(false);
query.then(cb).catch(cb);
(new (query.toConstructor())(1, 2, 3)).toConstructor();
query.update({}, doc, {
}, cb);
query.update({}, doc, {
});
query.update({}, doc, cb);
query.update({}, doc);
query.update(doc, cb);
query.update(doc);
query.update(cb);
query.update(true);
query.update();
query.where('age').gte(21).lte(65)
.where('name', /^vonderful/i)
.where('friends').slice(10)
.exec(cb);
query.where('path').within().box({})
query.where('path').within().circle({})
query.where('path').within().geometry({type: 'c', coordinates: []});
query.where('loc').within({ center: [50,50], radius: 10, unique: true, spherical: true });
query.where('loc').within({ box: [[40.73, -73.9], [40.7, -73.988]] });
query.where('loc').within({ polygon: [[],[],[],[]] });
query.where('loc').within([], [], []);
query.where('loc').within([], []);
query.where('loc').within({ type: 'LineString', coordinates: [] });
mongoose.Query.use$geoWithin = false;
/* practical example */
query.
find({
occupation: /host/,
'name.last': 'Ghost',
age: { $gt: 17, $lt: 66 },
likes: { $in: ['vaporizing', 'talking'] }
}).
limit(10).
sort({ occupation: -1 }).
select({ name: 1, occupation: 1 }).
exec(cb).then(cb).catch(cb);
query.
find({ occupation: /host/ }).
where('name.last').equals('Ghost').
where('age').gt(17).lt(66).
where('likes').in(['vaporizing', 'talking']).
limit(10).
sort('-occupation').
select('name occupation').
exec(cb).then(cb).catch(cb);
/*
* section schema/array.js
* http://mongoosejs.com/docs/api.html#schema-array-js
*/
var schemaArray: mongoose.Schema.Types.Array = new mongoose.Schema.Types.Array('key', new mongoose.SchemaType('hi'), {});
schemaArray.checkRequired('hello').valueOf();
/** static properties */
mongoose.Schema.Types.Array.schemaName.toLowerCase();
/** inherited properties */
schemaArray.sparse(true);
/*
* section schema/string.js
* http://mongoosejs.com/docs/api.html#schema-string-js
*/
var MongoDocument = <mongoose.Document> {};
var schemastring: mongoose.Schema.Types.String = new mongoose.Schema.Types.String('hello');
schemastring.checkRequired(234, MongoDocument).valueOf();
schemastring.enum(['hi', 'a', 'b']).enum('hi').enum({});
schemastring.lowercase().lowercase();
schemastring.match(/re/, 'error').match(/re/);
schemastring.maxlength(999, 'error').maxlength(999);
schemastring.minlength(999, 'error').minlength(999);
schemastring.trim().trim();
schemastring.uppercase().uppercase();
/* static properties */
mongoose.Schema.Types.String.schemaName.toLowerCase();
/* inherited properties */
schemastring.sparse(true);
/*
* section schema/documentarray.js
* http://mongoosejs.com/docs/api.html#schema-documentarray-js
*/
var documentarray: mongoose.Schema.Types.DocumentArray = new mongoose.Schema.Types.DocumentArray('key', new mongoose.Schema());
/* static properties */
mongoose.Schema.Types.DocumentArray.schemaName.toLowerCase();
/* inherited properties */
documentarray.sparse(true);
/*
* section schema/number.js
* http://mongoosejs.com/docs/api.html#schema-number-js
*/
var schemanumber: mongoose.Schema.Types.Number = new mongoose.Schema.Types.Number('num', {});
schemanumber.checkRequired(999, MongoDocument).valueOf();
schemanumber.max(999, 'error').max(999);
schemanumber.min(999, 'error').min(999);
/* static properties */
mongoose.Schema.Types.Number.schemaName.toLowerCase();
/* inherited properties */
schemanumber.sparse(true);
/*
* section schema/date.js
* http://mongoosejs.com/docs/api.html#schema-date-js
*/
var schemadate: mongoose.Schema.Types.Date = new mongoose.Schema.Types.Date('99');
schemadate.checkRequired([], MongoDocument).valueOf();
schemadate.expires(99).expires('now');
schemadate.max(new Date(), 'error').max(new Date(''));
schemadate.min(new Date(), 'error').min(new Date(''));
/* static properties */
mongoose.Schema.Types.Date.schemaName.toLowerCase();
/* inherited properties */
schemadate.sparse(true);
/*
* section schema/buffer.js
* http://mongoosejs.com/docs/api.html#schema-buffer-js
*/
var schemabuffer: mongoose.Schema.Types.Buffer = new mongoose.Schema.Types.Buffer('99');
schemabuffer.checkRequired(999, MongoDocument).valueOf();
/* static properties */
mongoose.Schema.Types.Buffer.schemaName.toLowerCase();
/* inherited properties */
schemabuffer.sparse(true);
/*
* section schema/boolean.js
* http://mongoosejs.com/docs/api.html#schema-boolean-js
*/
var schemaboolean: mongoose.Schema.Types.Boolean = new mongoose.Schema.Types.Boolean('99');
schemaboolean.checkRequired(99).valueOf();
/* static properties */
mongoose.Schema.Types.Boolean.schemaName.toLowerCase();
/* inherited properties */
schemaboolean.sparse(true);
/*
* section schema/objectid.js
* http://mongoosejs.com/docs/api.html#schema-objectid-js
*/
var schemaobjectid: mongoose.Schema.Types.ObjectId = new mongoose.Schema.Types.ObjectId('99');
schemaobjectid.auto(true).auto(false);
schemaobjectid.checkRequired(99, MongoDocument).valueOf();
/* static properties */
mongoose.Schema.Types.ObjectId.schemaName.toLowerCase();
/* inherited properties */
schemaobjectid.sparse(true);
/*
* section schema/mixed.js
* http://mongoosejs.com/docs/api.html#schema-mixed-js
*/
var schemamixed: mongoose.Schema.Types.Mixed = new mongoose.Schema.Types.Mixed('99');
/* static properties */
mongoose.Schema.Types.Mixed.schemaName.toLowerCase();
/* inherited properties */
schemamixed.sparse(true);
/*
* section schema/embedded.js
* http://mongoosejs.com/docs/api.html#schema-embedded-js
*/
var schemaembedded: mongoose.Schema.Types.Embedded =
new mongoose.Schema.Types.Embedded(new mongoose.Schema(), '99');
/* inherited properties */
schemaembedded.sparse(true);
/*
* section aggregate.js
* http://mongoosejs.com/docs/api.html#aggregate-js
*/
var aggregate: mongoose.Aggregate<Object[]>;
aggregate = mongoose.model('ex').aggregate([{ $match: { age: { $gte: 21 }}}]);
aggregate = new mongoose.Aggregate<Object[]>();
aggregate = new mongoose.Aggregate<Object[]>({ $project: { a: 1, b: 1 } });
aggregate = new mongoose.Aggregate<Object[]>({ $project: { a: 1, b: 1 } }, { $skip: 5 });
aggregate = new mongoose.Aggregate<Object[]>([{ $project: { a: 1, b: 1 } }, { $skip: 5 }]);
aggregate.addCursorFlag('flag', true).addCursorFlag('', false);
aggregate.allowDiskUse(true).allowDiskUse(false, []);
aggregate.append({ $project: { field: 1 }}, { $limit: 2 });
aggregate.append([{ $match: { daw: 'Logic Audio X' }} ]);
aggregate.collation({ locale: 'en_US', strength: 1 });
aggregate.count('countName');
aggregate.facet({ fieldA: [{ a: 1 }], fieldB: [{ b: 1 }] });
aggregate.cursor({ batchSize: 1000 }).exec().each(cb);
aggregate.exec().then(cb).catch(cb);
aggregate.option({foo: 'bar'}).exec();
const aggregateDotPipeline: any[] = aggregate.pipeline();
aggregate.explain(cb).then(cb).catch(cb);
aggregate.group({ _id: "$department" }).group({ _id: "$department" });
aggregate.limit(10).limit(10);
var lookupOpt = {
from: 'users', localField:
'userId', foreignField: '_id',
as: 'users'
};
aggregate.lookup(lookupOpt).lookup(lookupOpt);
aggregate.match({
department: {$in: [ "sales", "engineering"]}
});
aggregate.model(new (mongoose.model('xx'))()).model(null);
aggregate.near({
near: [40.724, -73.997],
distanceField: "dist.calculated",
maxDistance: 0.008,
query: { type: "public" },
includeLocs: "dist.location",
uniqueDocs: true,
num: 5
});
aggregate.project("a b -_id");
aggregate.project({a: 1, b: 1, _id: 0});
aggregate.project({
newField: '$b.nested'
, plusTen: { $add: ['$val', 10]}
, sub: {
name: '$a'
}
})
aggregate.project({ salary_k: { $divide: [ "$salary", 1000 ]}});
aggregate.read('primaryPreferred').read('pp');
aggregate.replaceRoot("user");
aggregate.replaceRoot({x: {$concat: ['$this', '$that']}});
aggregate.sample(3).sample(3);
aggregate.skip(10).skip(10);
aggregate.sort({ field: 'asc', test: -1 });
aggregate.sort('field -test');
aggregate.then(cb).catch(cb);
aggregate.unwind("tags").unwind('tags');
aggregate.unwind("a", "b", "c").unwind('tag1', 'tag2');
aggregate.unwind(
{
path: "tags",
includeArrayIndex: "idx",
preserveNullAndEmptyArrays: true
})
.unwind({
path: "tags",
includeArrayIndex: "idx",
preserveNullAndEmptyArrays: true
});
aggregate.unwind(
{
path: "a",
includeArrayIndex: "idx",
preserveNullAndEmptyArrays: true
}, {
path: "b",
includeArrayIndex: "idx",
preserveNullAndEmptyArrays: true
}, {
path: "c",
includeArrayIndex: "idx",
preserveNullAndEmptyArrays: true
})
.unwind({
path: "tag1",
includeArrayIndex: "idx",
preserveNullAndEmptyArrays: true
}, {
path: "tag2",
includeArrayIndex: "idx",
preserveNullAndEmptyArrays: true
});
/*
* section schematype.js
* http://mongoosejs.com/docs/api.html#schematype-js
*/
new mongoose.SchemaType('hello', 9, 'hello' );
var STSchema = new mongoose.Schema({
mixed: mongoose.Schema.Types.Mixed
});
var schematype = schema.path('mixed');
schematype.default('default');
STSchema.path('born').get(cb).get(cb);
STSchema.path('name').index(true).index({ unique: true, sparse: true });
schematype.required(true, 'mess').required(true);
schematype.select(true).select(false);
STSchema.path('name').set(cb).set(cb);
schematype.sparse(true).sparse(true);
schematype.text(true).text(true);
schematype.unique(true).unique(true);
schematype.validate(/re/)
.validate({}, 'error')
.validate(cb, 'try', 'tri');
/*
* section promise.js
* http://mongoosejs.com/docs/api.html#promise-js
*/
var mongopromise = new mongoose.Promise();
mongopromise = new mongoose.Promise(function (err: any, arg: any) {
arg.sparse(true);
err.stack;
});
mongopromise = new mongoose.Promise(function (err: any, arg1: any, arg2: any) {
arg1.sparse(true);
arg2.sparse(true);
err.stack;
});
mongopromise.addBack(function (err: any, arg: any) {
err.stack;
arg.sparse(true);
}).addBack(function (err: any, arg1: any, arg2: any) {
err.stack;
arg1.sparse(true);
arg2.sparse(true);
});
mongopromise.addCallback(function (arg: any) {
arg.sparse(true);
}).addCallback(function (arg1: any, arg2: any) {
arg1.sparse(true);
arg2.sparse(true);
});
mongopromise.addErrback(function (err: any) {
err.stack;
}).addErrback(function () {});
mongopromise.catch(function (err: any) {
err.stack;
}).catch(function () {});
mongopromise.end();
mongopromise.error(999).error([]);
mongopromise.on('init', function () {}).on('init', function () {});
mongopromise.reject({}).reject('').reject(new Error('hi'));
mongopromise.resolve(new Error('hi'), {}).resolve();
mongopromise.then(function (arg: any) {
arg.sparse(true);
}, function (err: any) {
err.stack;
}).then(function (arg1: any, arg2: any) {
arg1.sparse(true);
arg2.sparse(true);
});
mongopromise.complete(new mongoose.SchemaType('')).complete(
new mongoose.SchemaType(''),
new mongoose.SchemaType('')
);
/* static properties */
mongoose.Promise.ES6(function (complete: Function, error: Function) {
complete.apply(this);
error.apply(this);
});
/* inherited properties */
mongopromise.chain(mongopromise);
mongoose.Promise.FAILURE;
/* practical example */
mongoose.model('')
.findOne({})
.exec()
.then(function (arg) {
if (arg) {
arg.save;
}
return 1;
}).then(function (num) {
num.toFixed;
return new Promise<string>((resolve, reject) => {
resolve('string');
});
}).then(function (str) {
str.toLowerCase
return (mongoose.model('')).findOne({}).exec();
}).then(function (arg) {
if (arg) {
arg.save;
}
return 1;
}).catch(function (err) {
return 1;
}).then(function (arg) {
arg.toFixed;
return new Promise<{a: string, b: number}>((resolve, reject) => {
resolve({a: 'hi', b: 29});
});
}).then(function (arg) {
arg.a.toLowerCase;
arg.b.toFixed;
});
mongoose.model('').findOne({})
.then(function (arg) {
if (arg) {
arg.save;
}
return 2;
}).then(function (num) {
num.toFixed;
return new Promise<string>((resolve, reject) => {
resolve('str');
});
}).then(function (str) {
str.toLowerCase;
});
mongoose.model('').aggregate([])
.then(function (arg) {
return 2;
}).then(function (num) {
num.toFixed;
return new Promise<string>((resolve, reject) => {
resolve('str');
});
}).then(function (str) {
str.toLowerCase;
});
/* pluggable promise */
(<any>mongoose).Promise = Promise;
require('mongoose').Promise = Promise;
mongoose.Promise.race;
mongoose.Promise.all;
mongoose.model('').findOne()
.exec().then(cb);
/*
* section model.js
* http://mongoosejs.com/docs/api.html#model-js
*/
var MongoModel = mongoose.model('MongoModel', new mongoose.Schema({
name: String,
type: {
type: mongoose.Schema.Types.Mixed,
required: true
}
}), 'myCollection', true);
MongoModel.init().then(cb);
MongoModel.find({}).$where('indexOf("val") !== -1').exec(function (err, docs) {
docs[0].save();
docs[0].__v;
});
MongoModel.findById(999, function (err, doc) {
var handleSave = function(err: Error, product: mongoose.Document) {};
if (!doc) {
return;
}
doc.increment();
doc.save(handleSave).then(cb).catch(cb);
doc.save({ validateBeforeSave: false }, handleSave).then(cb).catch(cb);
doc.save({ safe: true }, handleSave).then(cb).catch(cb);
doc.save({ safe: { w: 2, j: true } }, handleSave).then(cb).catch(cb);
doc.save({ safe: { w: 'majority', wtimeout: 10000 } }, handleSave).then(cb).catch(cb);
  // test if TypeScript can infer the types of (err, product, numAffected)
doc.save(function(err, product) { product.save(); })
.then(function(p) { p.save() }).catch(cb);
doc.save({ validateBeforeSave: false }, function(err, product) {
product.save();
}).then(function(p) { p.save() }).catch(cb);
});
MongoModel = (new MongoModel()).model('MongoModel');
var mongoModel = new MongoModel();
mongoModel.remove(function (err, product) {
if (err) throw(err);
MongoModel.findById(product._id, function (err, product) {
if (product) {
product.id.toLowerCase();
product.remove();
}
});
});
mongoModel.save().then(function (product) {
product.save().then(cb).catch(cb);
});
MongoModel.aggregate(
[
{ $group: { _id: null, maxBalance: { $max: '$balance' }}},
{ $project: { _id: 0, maxBalance: 1 }}
],
cb
);
MongoModel.aggregate([])
.group({ _id: null, maxBalance: { $max: '$balance' } })
.exec(cb);
MongoModel.count({ type: 'jungle' }, function (err, count) {
count.toFixed();
});
MongoModel.create({
type: 'jelly bean'
}, {
type: 'snickers'
}, cb).then(function (a) {
a.save();
})
MongoModel.create([{ type: 'jelly bean' }, {
type: 'snickers'
}], function (err, candies) {
var jellybean = candies[0];
var snickers = candies[1];
}).then(function (arg) {
arg[0].save();
arg[1].save();
});
MongoModel.distinct('url', { clicks: {$gt: 100}}, function (err, result) {
});
MongoModel.distinct('url').exec(cb);
MongoModel.ensureIndexes({}, cb);
MongoModel.find({ name: 'john', age: { $gte: 18 }});
MongoModel.find({ name: 'john', age: { $gte: 18 }}, function (err, docs) {
docs[0].remove();
docs[1].execPopulate();
});
MongoModel.find({ name: /john/i }, 'name friends', function (err, docs) { })
MongoModel.find({ name: /john/i }, null, { skip: 10 })
MongoModel.find({ name: /john/i }, null, { skip: 10 }, function (err, docs) {});
MongoModel.find({ name: /john/i }, null, { skip: 10 }).exec(function (err, docs) {});
MongoModel.findById(999, function (err, adventure) {});
MongoModel.findById(999).exec(cb);
MongoModel.findById(999, 'name length', function (err, adventure) {
if (adventure) {
adventure.save();
}
});
MongoModel.findById(999, 'name length').exec(cb);
MongoModel.findById(999, '-length').exec(function (err, adventure) {
if (adventure) {
adventure.addListener('click', cb);
}
});
MongoModel.findById(999, 'name', { lean: true }, function (err, doc) {});
MongoModel.findById(999, 'name').lean().exec(function (err, doc) {});
MongoModel.findByIdAndRemove(999, {}, cb);
MongoModel.findByIdAndRemove(999, {});
MongoModel.findByIdAndRemove(999, cb);
MongoModel.findByIdAndRemove(999);
MongoModel.findByIdAndRemove();
MongoModel.findByIdAndUpdate(999, {}, {}, cb);
MongoModel.findByIdAndUpdate(999, {}, {});
MongoModel.findByIdAndUpdate(999, {}, { upsert: true, new: true });
MongoModel.findByIdAndUpdate(999, {}, cb);
MongoModel.findByIdAndUpdate(999, {});
MongoModel.findByIdAndUpdate();
MongoModel.findOne({ type: 'iphone' }, function (err, adventure) {});
MongoModel.findOne({ type: 'iphone' }).exec(function (err, adventure) {});
MongoModel.findOne({ type: 'iphone' }, 'name', function (err, adventure) {});
MongoModel.findOne({ type: 'iphone' }, 'name').exec(function (err, adventure) {});
MongoModel.findOne({ type: 'iphone' }, 'name', { lean: true }, cb);
MongoModel.findOne({ type: 'iphone' }, 'name', { lean: true }).exec(cb);
MongoModel.findOne({ type: 'iphone' }).select('name').lean().exec(cb);
interface ModelUser {
_id: any;
name: string;
abctest: string;
}
MongoModel.findOne({ type: 'iphone' }).select('name').lean().exec()
.then(function(doc: ModelUser) {
doc._id;
doc.name;
doc.abctest;
});
MongoModel.findOneAndRemove({}, {}, cb);
MongoModel.findOneAndRemove({}, {});
MongoModel.findOneAndRemove({}, cb);
MongoModel.findOneAndRemove({});
MongoModel.findOneAndRemove();
MongoModel.findOneAndUpdate({}, {}, {}, cb);
MongoModel.findOneAndUpdate({}, {}, {});
MongoModel.findOneAndUpdate({}, {}, { upsert: true, new: true });
MongoModel.findOneAndUpdate({}, {}, cb);
MongoModel.findOneAndUpdate({}, {});
MongoModel.findOneAndUpdate();
MongoModel.geoSearch({ type : "house" }, {
near: [10, 10], maxDistance: 5
}, function(err, res) {
res[0].remove();
});
MongoModel.hydrate({
_id: '54108337212ffb6d459f854c',
type: 'jelly bean'
}).execPopulate();
MongoModel.insertMany([
{ name: 'Star Wars' },
{ name: 'The Empire Strikes Back' }
], function(error, docs) {});
MongoModel.insertMany({name: 'Star Wars'}, function(error, doc) {});
MongoModel.mapReduce({
map: cb,
reduce: cb
}, function (err, results) {
console.log(results)
}).then(function (model) {
return model.find().where('value').gt(10).exec();
}).then(function (docs) {
console.log(docs);
}).then(null, cb);
MongoModel.findById(999, function (err, user) {
if (!user) {
return;
}
var opts = [
{ path: 'company', match: { x: 1 }, select: 'name' }
, { path: 'notes', options: { limit: 10 }, model: 'override' }
]
MongoModel.populate(user, opts, cb);
MongoModel.populate(user, opts, function (err, user) {
console.log(user);
});
});
MongoModel.find(999, function (err, users) {
var opts = [{ path: 'company', match: { x: 1 }, select: 'name' }]
var promise = MongoModel.populate(users, opts);
promise.then(console.log);
});
MongoModel.populate({
name: 'Indiana Jones',
weapon: 389
}, {
path: 'weapon',
model: 'Weapon'
}, cb);
var users = [{ name: 'Indiana Jones', weapon: 389 }]
users.push({ name: 'Batman', weapon: 8921 })
MongoModel.populate(users, { path: 'weapon' }, function (err, users) {
users.forEach(cb);
});
MongoModel.remove({ title: 'baby born from alien father' }, cb);
MongoModel.remove({_id: '999'}).exec().then(cb).catch(cb);
MongoModel.update({ age: { $gt: 18 } }, { oldEnough: true }, cb);
MongoModel.update({ name: 'Tobi' }, { ferret: true }, { multi: true }, cb);
MongoModel.where('age').gte(21).lte(65).exec(cb);
MongoModel.where('age').gte(21).lte(65).where('name', /^b/i);
new (mongoModel.base.model(''))();
mongoModel.baseModelName && mongoModel.baseModelName.toLowerCase();
mongoModel.collection.$format(99);
mongoModel.collection.initializeOrderedBulkOp;
mongoModel.collection.findOne;
mongoModel.db.openSet('');
mongoModel.discriminators;
mongoModel.modelName.toLowerCase();
MongoModel = mongoModel.base.model('new', mongoModel.schema);
/* inherited properties */
MongoModel.modelName;
mongoModel.modelName;
MongoModel.collection;
mongoModel.collection;
mongoModel._id;
mongoModel.execPopulate();
mongoModel.on('data', cb);
mongoModel.addListener('event', cb);
MongoModel.findOne({ title: /timex/i })
.populate('_creator', 'name')
.exec(function (err, story) {
if (story) {
story.execPopulate();
}
});
MongoModel.find({
id: 999
})
.populate({
path: 'fans',
match: { age: { $gte: 21 }},
select: 'name -_id',
options: { limit: 5 }
})
.exec();
/* practical example */
interface Location extends mongoose.Document {
name: string;
address: string;
rating: number;
facilities: string[];
coords: number[];
openingTimes: any[];
reviews: any[];
};
const locationSchema = new mongoose.Schema({
name: { type: String, required: true },
address: String,
rating: { type: Number, "default": 0, min: 0, max: 5 },
facilities: [String],
coords: { type: [Number], index: "2dsphere" },
openingTimes: [mongoose.Schema.Types.Mixed],
reviews: [mongoose.SchemaTypes.Mixed]
});
var LocModel = mongoose.model<Location>("Location", locationSchema);
LocModel.findById(999)
.select("-reviews -rating")
.exec(function (err, location) {
if (!location) {
return;
}
location.name = 'blah';
location.address = 'blah';
location.reviews.forEach(review => {});
location.facilities.forEach(facility => {
facility.toLowerCase();
});
});
LocModel.find()
.select('-reviews -rating')
.exec(function (err, locations) {
locations.forEach(location => {
location.name = 'blah';
location.address = 'blah';
location.reviews.forEach(review => {});
location.facilities.forEach(facility => {
facility.toLowerCase();
});
});
});
LocModel.find({}).$where('')
.exec(function (err, locations) {
locations[0].name;
locations[1].openingTimes;
});
LocModel.count({})
.exec(function (err, count) {
count.toFixed();
});
LocModel.distinct('')
.select('-review')
.exec(function (err, distinct) {
distinct.concat;
})
.then(cb).catch(cb);
LocModel.findByIdAndRemove()
.exec(function (err, doc) {
if (!doc) {
return;
}
doc.addListener;
doc.openingTimes;
});
LocModel.findByIdAndUpdate()
.select({})
.exec(function (err, location) {
if (location) {
location.reviews;
}
});
LocModel.findOne({}, function (err, doc) {
if (doc) {
doc.openingTimes;
}
});
LocModel.findOneAndRemove()
.exec(function (err, location) {
if (location) {
location.name;
}
});
LocModel.findOneAndUpdate().exec().then(function (arg) {
if (arg) {
arg.openingTimes;
}
});
LocModel.geoSearch({}, {
near: [1, 2],
maxDistance: 22
}, function (err, res) { res[0].openingTimes; });
interface IStatics {
staticMethod2: (a: number) => string;
}
interface MyDocument extends mongoose.Document {
prop: string;
method: () => void;
}
interface MyModel extends mongoose.Model<MyDocument> {
staticProp: string;
staticMethod: () => void;
}
interface ModelStruct {
doc: MyDocument;
model: MyModel;
method1: (callback: (model: MyModel, doc: MyDocument) => void) => MyModel;
}
var modelStruct1 = <ModelStruct> {};
var myModel1: MyModel;
var myDocument1: MyDocument;
modelStruct1.method1(function (myModel1, myDocument1) {
myModel1.staticProp;
myModel1.staticMethod();
myDocument1.prop;
myDocument1.method();
}).staticProp.toLowerCase();
var mySchema = new mongoose.Schema({});
export var Final: MyModel = <MyModel>mongoose.connection.model<MyDocument>('Final', mySchema);
Final.findOne(function (err: any, doc: MyDocument) {
doc.save();
doc.remove();
doc.model('');
});
export var Final2: MyModel = mongoose.model<MyDocument, MyModel>('Final2', mySchema);
Final2.staticMethod();
Final2.staticProp;
var final2 = new Final2();
final2.prop;
final2.method;
interface ibase extends mongoose.Document {
username: string;
}
interface extended extends ibase {
email: string;
}
const base: mongoose.Model<ibase> = mongoose.model<ibase>('testfour')
const extended: mongoose.Model<extended> = base.discriminator<extended>('extendedS', schema);
const x = new extended({
username: 'hi', // required in baseSchema
  email: 'beddiw', // required in extendedSchema
});
new mongoose.Schema({}, {
timestamps: {
createdAt: 'foo',
updatedAt: 'bar'
}
});
new mongoose.Schema({}, {
collation: {
strength: 1,
locale: 'en_US'
}
});
new mongoose.Schema({}, {
toObject: {
versionKey: false
},
toJSON: {
depopulate: true
}
})
const aggregatePrototypeGraphLookup: mongoose.Aggregate<any> = MyModel.aggregate([]).graphLookup({});
const addFieldsAgg: mongoose.Aggregate<any> = aggregatePrototypeGraphLookup.addFields({})
MyModel.findById('foo').then((doc: mongoose.Document) => {
const a: boolean = doc.isDirectSelected('bar');
const b: boolean = doc.isDeleted();
doc.isDeleted(true);
});
MyModel.translateAliases({});
const queryPrototypeError: Error | null = MyModel.findById({}).error();
const queryProrotypeErrorSetUnset: mongoose.Query<any> = MyModel.findById({}).error(null).error(new Error('foo'));
MyModel.createIndexes().then(() => {});
MyModel.createIndexes((err: any): void => {}).then(() => {});
mongoose.connection.createCollection('foo').then(() => {});
mongoose.connection.createCollection('foo', {wtimeout: 5}).then(() => {});
mongoose.connection.createCollection('foo', {wtimeout: 5}, (err: Error, coll): void => {coll.collectionName}).then(() => {});
const db = mongoose.connection;
const User = mongoose.model('User', new mongoose.Schema({ name: String }));
let session: mongoose.ClientSession;
mongoose.connection.createCollection('users').
then(() => db.startSession()).
then(_session => {
session = _session;
session.startTransaction();
User.findOne({ name: 'foo' }).session(session);
session.commitTransaction();
return User.create({ name: 'foo' });
});
const Event = db.model('Event', new mongoose.Schema({ createdAt: Date }), 'Event');
db.createCollection('users').
then(() => db.startSession()).
then(_session => {
session = _session;
return User.create({ name: 'foo' });
}).
then(() => {
session.startTransaction();
return User.findOne({ name: 'foo' }).session(session).exec();
}).
then(() => {
session.commitTransaction();
return User.findOne({ name: 'bar' }).exec();
}).
catch(() => {
session.abortTransaction();
});
db.createCollection('Event').
then(() => db.startSession()).
then(_session => {
session = _session;
session.startTransaction();
return Event.insertMany([
{ createdAt: new Date('2018-06-01') },
{ createdAt: new Date('2018-06-02') },
{ createdAt: new Date('2017-06-01') },
{ createdAt: new Date('2017-05-31') }
], { session: session });
}).
then(() => Event.aggregate([
{
$group: {
_id: {
month: { $month: '$createdAt' },
year: { $year: '$createdAt' }
},
count: { $sum: 1 }
}
},
{ $sort: { count: -1, '_id.year': -1, '_id.month': -1 } }
]).session(session).exec()).
then((res: any) => {
session.commitTransaction();
});
/** https://mongoosejs.com/docs/transactions.html */
const Customer = db.model('Customer', new mongoose.Schema({ name: String }));
db.createCollection('customers').
then(() => db.startSession()).
then(_session => {
session = _session;
// Start a transaction
session.startTransaction();
// This `create()` is part of the transaction because of the `session`
// option.
return Customer.create([{ name: 'Test' }], { session: session });
}).
// Transactions execute in isolation, so unless you pass a `session`
// to `findOne()` you won't see the document until the transaction
// is committed.
then((customer: mongoose.Document[]) => Customer.findOne({ name: 'Test' }).exec()).
// This `findOne()` will return the doc, because passing the `session`
// means this `findOne()` will run as part of the transaction.
then(() => Customer.findOne({ name: 'Test' }).session(session).exec()).
// Once the transaction is committed, the write operation becomes
// visible outside of the transaction.
then(() => session.commitTransaction()).
then(() => Customer.findOne({ name: 'Test' }).exec())
/**
* https://mongoosejs.com/docs/guide.html#writeConcern
*/
new mongoose.Schema({ name: String }, {
writeConcern: {
w: 'majority',
j: true,
wtimeout: 1000
}
});
/* Query helpers: https://mongoosejs.com/docs/guide.html#query-helpers */
interface Animal2 extends mongoose.Document {
name: string;
type: string;
tags: string[];
}
var animal2Schema = new mongoose.Schema({
name: String,
type: String,
tags: { type: [String], index: true } // field level
});
let animal2QueryHelpers = {
byName<Q extends mongoose.DocumentQuery<any, Animal2>>(this: Q, name: string) {
return this.where({ name: new RegExp(name, 'i') });
}
};
animal2Schema.query = animal2QueryHelpers;
var Animal2 = mongoose.model<Animal2, mongoose.Model<Animal2, typeof animal2QueryHelpers>>('Animal', animal2Schema);
Animal2.find().byName('fido').exec(function(err, animals) {
console.log(animals);
});
Animal2.findOne().byName('fido').exec(function(err, animal) {
console.log(animal);
});
| AwesomeLoggerPlugin |
todo.actions.ts | // This file contains the actions of the Redux store
import { Injectable } from '@angular/core';
import { Action } from '@ngrx/store';
import { ToDoItem } from '../models/todo.model';
// These are the string constants of the actions
export const ADD_TODO = '[Todo] ADD_TODO';
export const REMOVE_TODO = '[Todo] REMOVE_TODO';
export const CHANGE_TODO = '[Todo] CHANGE_TODO';
export class AddTodo implements Action {
  // This implements the add item action
readonly type = ADD_TODO;
constructor(public payload: ToDoItem) {}
}
export class RemoveTodo implements Action {
// This implements the remove item action
readonly type = REMOVE_TODO;
constructor(public payload: ToDoItem) {}
}
export class | implements Action {
// This implements the change item action
readonly type = CHANGE_TODO;
constructor(public payload: ToDoItem) {}
}
// Export of all the actions
export type Actions = AddTodo | RemoveTodo | ChangeTodo; | ChangeTodo |
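// Illustrative reducer sketch (not part of this file): shows how the exported
// `Actions` union narrows inside a switch on `action.type`. The array state
// shape and identity-based removal are assumptions for the example.
export function todoReducer(state: ToDoItem[] = [], action: Actions): ToDoItem[] {
  switch (action.type) {
    case ADD_TODO:
      return [...state, action.payload];
    case REMOVE_TODO:
      // real code would likely match on an item identifier instead
      return state.filter(item => item !== action.payload);
    default:
      return state;
  }
}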
api_op_ListMultipartUploads.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package glacier
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
glaciercust "github.com/aws/aws-sdk-go-v2/service/glacier/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/glacier/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// This operation lists in-progress multipart uploads for the specified vault. An
// in-progress multipart upload is a multipart upload that has been initiated by an
// InitiateMultipartUpload request, but has not yet been completed or aborted. The
// list returned in the List Multipart Upload response has no guaranteed order. The
// List Multipart Uploads operation supports pagination. By default, this operation
// returns up to 50 multipart uploads in the response. You should always check the
// response for a marker at which to continue the list; if there are no more items
// the marker is null. To return a list of multipart uploads that begins at a
// specific upload, set the marker request parameter to the value you obtained from
// a previous List Multipart Upload request. You can also limit the number of
// uploads returned in the response by specifying the limit parameter in the
// request. Note the difference between this operation and listing parts
// (ListParts). The List Multipart Uploads operation lists all multipart uploads
// for a vault and does not require a multipart upload ID. The List Parts operation
// requires a multipart upload ID since parts are associated with a single upload.
// An AWS account has full permission to perform all operations (actions). However,
// AWS Identity and Access Management (IAM) users don't have any permissions by
// default. You must grant them explicit permission to perform specific actions.
// For more information, see Access Control Using AWS Identity and Access
// Management (IAM)
// (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
// For conceptual information and the underlying REST API, see Working with
// Archives in Amazon S3 Glacier
// (https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
// and List Multipart Uploads
// (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html)
// in the Amazon Glacier Developer Guide.
func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) {
if params == nil {
params = &ListMultipartUploadsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListMultipartUploads", params, optFns, addOperationListMultipartUploadsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListMultipartUploadsOutput)
out.ResultMetadata = metadata
return out, nil
}
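// Illustrative sketch (not generated code): manual marker-based pagination as
// described in the comment above. The vault name is a placeholder; "-" selects
// the account of the signing credentials.
func listAllUploadsExample(ctx context.Context, c *Client) ([]types.UploadListElement, error) {
	accountID, vault := "-", "example-vault"
	input := &ListMultipartUploadsInput{AccountId: &accountID, VaultName: &vault}
	var all []types.UploadListElement
	for {
		out, err := c.ListMultipartUploads(ctx, input)
		if err != nil {
			return nil, err
		}
		all = append(all, out.UploadsList...)
		if out.Marker == nil { // a nil marker means there are no more pages
			return all, nil
		}
		input.Marker = out.Marker // continue from the returned marker
	}
}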
// Provides options for retrieving list of in-progress multipart uploads for an
// Amazon Glacier vault.
type ListMultipartUploadsInput struct {
// The AccountId value is the AWS account ID of the account that owns the vault.
// You can either specify an AWS account ID or optionally a single '-' (hyphen), in
// which case Amazon S3 Glacier uses the AWS account ID associated with the
// credentials used to sign the request. If you use an account ID, do not include
// any hyphens ('-') in the ID.
//
// This member is required.
AccountId *string
// The name of the vault.
//
// This member is required.
VaultName *string
// Specifies the maximum number of uploads returned in the response body. If this
// value is not specified, the List Uploads operation returns up to 50 uploads.
Limit *int32
// An opaque string used for pagination. This value specifies the upload at which
// the listing of uploads should begin. Get the marker value from a previous List
// Uploads response. You need only include the marker if you are continuing the
// pagination of results started in a previous List Uploads request.
Marker *string
}
// Contains the Amazon S3 Glacier response to your request.
type ListMultipartUploadsOutput struct {
// An opaque string that represents where to continue pagination of the results.
// You use the marker in a new List Multipart Uploads request to obtain more
// uploads in the list. If there are no more uploads, this value is null.
Marker *string
// A list of in-progress multipart uploads.
UploadsList []types.UploadListElement
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addOperationListMultipartUploadsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpListMultipartUploads{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListMultipartUploads{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMultipartUploads(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = glaciercust.AddTreeHashMiddleware(stack); err != nil {
return err
}
if err = glaciercust.AddGlacierAPIVersionMiddleware(stack, ServiceAPIVersion); err != nil {
return err
}
if err = glaciercust.AddDefaultAccountIDMiddleware(stack, setDefaultAccountID); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// ListMultipartUploadsAPIClient is a client that implements the
// ListMultipartUploads operation.
type ListMultipartUploadsAPIClient interface {
ListMultipartUploads(context.Context, *ListMultipartUploadsInput, ...func(*Options)) (*ListMultipartUploadsOutput, error)
}
var _ ListMultipartUploadsAPIClient = (*Client)(nil)
// ListMultipartUploadsPaginatorOptions is the paginator options for
// ListMultipartUploads
type ListMultipartUploadsPaginatorOptions struct {
// Specifies the maximum number of uploads returned in the response body. If this
// value is not specified, the List Uploads operation returns up to 50 uploads.
Limit int32
// Set to true if pagination should stop if the service returns a pagination token
// that matches the most recent token provided to the service.
StopOnDuplicateToken bool
}
// ListMultipartUploadsPaginator is a paginator for ListMultipartUploads
type ListMultipartUploadsPaginator struct {
options ListMultipartUploadsPaginatorOptions
client ListMultipartUploadsAPIClient
params *ListMultipartUploadsInput
nextToken *string
firstPage bool
}
// NewListMultipartUploadsPaginator returns a new ListMultipartUploadsPaginator
func NewListMultipartUploadsPaginator(client ListMultipartUploadsAPIClient, params *ListMultipartUploadsInput, optFns ...func(*ListMultipartUploadsPaginatorOptions)) *ListMultipartUploadsPaginator {
if params == nil {
params = &ListMultipartUploadsInput{}
}
options := ListMultipartUploadsPaginatorOptions{}
if params.Limit != nil {
options.Limit = *params.Limit
}
for _, fn := range optFns {
fn(&options)
}
return &ListMultipartUploadsPaginator{
options: options,
client: client,
params: params,
firstPage: true,
}
}
// HasMorePages returns a boolean indicating whether more pages are available
func (p *ListMultipartUploadsPaginator) HasMorePages() bool {
return p.firstPage || p.nextToken != nil
}
// NextPage retrieves the next ListMultipartUploads page.
func (p *ListMultipartUploadsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) {
if !p.HasMorePages() {
return nil, fmt.Errorf("no more pages available")
}
params := *p.params
params.Marker = p.nextToken
var limit *int32
if p.options.Limit > 0 {
limit = &p.options.Limit
}
params.Limit = limit
result, err := p.client.ListMultipartUploads(ctx, &params, optFns...)
if err != nil {
return nil, err
}
p.firstPage = false
prevToken := p.nextToken
p.nextToken = result.Marker
if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken |
return result, nil
}
func newServiceMetadataMiddleware_opListMultipartUploads(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "glacier",
OperationName: "ListMultipartUploads",
}
}
| {
p.nextToken = nil
} |
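// Illustrative sketch (not generated code): draining every page with the
// paginator defined above.
func drainUploadsExample(ctx context.Context, client ListMultipartUploadsAPIClient, params *ListMultipartUploadsInput) error {
	p := NewListMultipartUploadsPaginator(client, params)
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		fmt.Printf("page with %d uploads\n", len(page.UploadsList))
	}
	return nil
}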
apiRequest.js | function | (method, target, data = null) {
const url = `https://lamp.cse.fau.edu/~cen4010_s21_g01/api-v1.php${target}`
const init = {
method,
headers: {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
}
if (['POST', 'PUT', 'PATCH', 'DELETE'].includes(method)) {
init['body'] = JSON.stringify({ data })
}
return fetch(url, init)
}
export default apiRequest
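// Usage sketch (illustrative): the '/books' target and payload shape are
// hypothetical; only the wrapper itself is defined in this module.
// apiRequest('GET', '/books').then(res => res.json()).then(console.log)
// apiRequest('POST', '/books', { title: 'Dune' }).then(res => res.json())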
| apiRequest |
InstanceService.ts | import { store, useDispatch } from '../../store'
import { client } from '../../feathers'
import { AlertService } from '../../common/services/AlertService'
import { accessAuthState } from '../../user/services/AuthService'
import { createState, useState } from '@hookstate/core'
import { Instance } from '@xrengine/common/src/interfaces/Instance'
import { InstanceResult } from '@xrengine/common/src/interfaces/InstanceResult'
//State
export const INSTNCE_PAGE_LIMIT = 100
| const state = createState({
instances: [] as Array<Instance>,
skip: 0,
limit: INSTNCE_PAGE_LIMIT,
total: 0,
retrieving: false,
fetched: false,
updateNeeded: true,
lastFetched: Date.now()
})
store.receptors.push((action: InstanceActionType): any => {
state.batch((s) => {
switch (action.type) {
case 'INSTANCES_RETRIEVED':
return s.merge({
instances: action.instanceResult.data,
skip: action.instanceResult.skip,
limit: action.instanceResult.limit,
total: action.instanceResult.total,
retrieving: false,
fetched: true,
updateNeeded: false,
lastFetched: Date.now()
})
case 'INSTANCE_REMOVED_ROW':
s.merge({ updateNeeded: true })
}
}, action.type)
})
export const accessInstanceState = () => state
export const useInstanceState = () => useState(state) as any as typeof state
//Service
export const InstanceService = {
fetchAdminInstances: async (incDec?: 'increment' | 'decrement', search: string | null = null) => {
const dispatch = useDispatch()
{
const skip = accessInstanceState().skip.value
const limit = accessInstanceState().limit.value
const user = accessAuthState().user
try {
if (user.userRole.value === 'admin') {
const instances = await client.service('instance').find({
query: {
$sort: {
createdAt: -1
},
$skip: skip,
$limit: limit,
action: 'admin',
search: search
}
})
dispatch(InstanceAction.instancesRetrievedAction(instances))
}
} catch (err) {
AlertService.dispatchAlertError(err)
}
}
},
removeInstance: async (id: string) => {
const dispatch = useDispatch()
{
const result = await client.service('instance').patch(id, {
ended: true
})
dispatch(InstanceAction.instanceRemovedAction(result))
}
}
}
if (globalThis.process.env['VITE_OFFLINE_MODE'] !== 'true') {
client.service('instance').on('removed', (params) => {
store.dispatch(InstanceAction.instanceRemovedAction(params.instance))
})
}
//Action
export const InstanceAction = {
instancesRetrievedAction: (instanceResult: InstanceResult) => {
return {
type: 'INSTANCES_RETRIEVED' as const,
instanceResult: instanceResult
}
},
instanceRemovedAction: (instance: Instance) => {
return {
type: 'INSTANCE_REMOVED_ROW' as const,
instance: instance
}
}
}
export type InstanceActionType = ReturnType<typeof InstanceAction[keyof typeof InstanceAction]> | |
test_by_address.py | from __future__ import unicode_literals | try:
from unittest import mock
except ImportError:
import mock
try:
from .base import BaseTestCase, BasePlatformTestCase
except (ValueError, ImportError):
from pynextcaller.tests.base import BaseTestCase, BasePlatformTestCase
ADDRESS_JSON_RESULT_EXAMPLE = '''
{
"records": [
{
"id": "97d949a413f4ea8b85e9586e1f2d9a",
"first_name": "Jerry",
"last_name": "Seinfeld",
"name": "Jerry Seinfeld",
"language": "English",
"fraud_threat": "low",
"spoof": "false",
"phone": [
{
"number": "2125558383",
"carrier": "Verizon Wireless",
"line_type": "LAN"
}
],
"address": [
{
"city": "New York",
"extended_zip": "",
"country": "USA",
"line2": "Apt 5a",
"line1": "129 West 81st Street",
"state": "NY",
"zip_code": "10024"
}
],
"email": "[email protected]",
"social_links": [
{
"followers": 1,
"type": "twitter",
"url": "https://twitter.com/nextcaller"
},
{
"type": "facebook",
"url": "https://www.facebook.com/nextcaller"
},
{
"type": "linkedin",
"url": "https://www.linkedin.com/company/next-caller"
}
],
"age": "45-54",
"gender": "Male",
"household_income": "50k-75k",
"marital_status": "Single",
"presence_of_children": "No",
"home_owner_status": "Rent",
"market_value": "350k-500k",
"length_of_residence": "12 Years",
"high_net_worth": "No",
"occupation": "Entertainer",
"education": "Completed College",
"department": "not specified"
}
]
}
'''
WRONG_ADDRESS_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
}
WRONG_ADDRESS_ZIP_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
'state': 'NY',
'zip_code': '1002',
}
WRONG_ADDRESS_FIELDS_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
'state': 'NY',
'zip_code': '10024',
'test_field': 'xx',
}
ADDRESS_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
'state': 'NY',
'zip_code': '10024',
}
class AddressTestCase(BaseTestCase):
def test_address_by_not_full_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name, WRONG_ADDRESS_DATA)
def test_address_by_wrong_zip(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name, WRONG_ADDRESS_ZIP_DATA)
def test_address_by_wrong_fields(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name, WRONG_ADDRESS_FIELDS_DATA)
def test_by_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
res = self.client.get_by_address_name(ADDRESS_DATA)
self.assertTrue(res['records'])
self.assertEqual(res['records'][0]['email'], '[email protected]')
self.assertEqual(res['records'][0]['first_name'], 'Jerry')
self.assertEqual(res['records'][0]['last_name'], 'Seinfeld')
class PlatformAddressTestCase(BasePlatformTestCase):
def test_address_by_not_full_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name,
WRONG_ADDRESS_DATA, self.platform_username)
def test_address_by_wrong_zip(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name,
WRONG_ADDRESS_ZIP_DATA, self.platform_username)
def test_address_by_wrong_fields(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name,
WRONG_ADDRESS_FIELDS_DATA, self.platform_username)
def test_by_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
res = self.client.get_by_address_name(ADDRESS_DATA, self.platform_username)
self.assertTrue(res['records'])
self.assertEqual(res['records'][0]['email'], '[email protected]')
self.assertEqual(res['records'][0]['first_name'], 'Jerry')
self.assertEqual(res['records'][0]['last_name'], 'Seinfeld')
if __name__ == '__main__':
unittest.main() | import unittest |
storage.go | package users
import (
"sync"
"time"
"github.com/onedss/filebrowser/errors"
)
// StorageBackend is the interface to implement for a users storage.
type StorageBackend interface {
GetBy(interface{}) (*User, error)
Gets() ([]*User, error)
Save(u *User) error
Update(u *User, fields ...string) error
DeleteByID(uint) error
DeleteByUsername(string) error
}
type Store interface {
Get(baseScope string, id interface{}) (user *User, err error)
Gets(baseScope string) ([]*User, error)
Update(user *User, fields ...string) error
Save(user *User) error
Delete(id interface{}) error
LastUpdate(id uint) int64
}
// Storage is a users storage.
type Storage struct {
back StorageBackend
updated map[uint]int64
mux sync.RWMutex
}
// NewStorage creates a users storage from a backend.
func NewStorage(back StorageBackend) *Storage |
// Get allows you to get a user by its ID or username. The provided
// id must be a string for username lookup or a uint for id lookup. If id
// is neither, an ErrInvalidDataType will be returned.
func (s *Storage) Get(baseScope string, id interface{}) (user *User, err error) {
user, err = s.back.GetBy(id)
if err != nil {
return
}
if err := user.Clean(baseScope); err != nil {
return nil, err
}
return
}
// Gets gets a list of all users.
func (s *Storage) Gets(baseScope string) ([]*User, error) {
users, err := s.back.Gets()
if err != nil {
return nil, err
}
for _, user := range users {
if err := user.Clean(baseScope); err != nil { //nolint:govet
return nil, err
}
}
return users, err
}
// Update updates a user in the database.
func (s *Storage) Update(user *User, fields ...string) error {
err := user.Clean("", fields...)
if err != nil {
return err
}
err = s.back.Update(user, fields...)
if err != nil {
return err
}
s.mux.Lock()
s.updated[user.ID] = time.Now().Unix()
s.mux.Unlock()
return nil
}
// Save saves the user in a storage.
func (s *Storage) Save(user *User) error {
if err := user.Clean(""); err != nil {
return err
}
return s.back.Save(user)
}
// Delete allows you to delete a user by its ID or username. The provided
// id must be a string for username lookup or a uint for id lookup. If id
// is neither, an ErrInvalidDataType will be returned.
func (s *Storage) Delete(id interface{}) error {
switch id := id.(type) {
case string:
user, err := s.back.GetBy(id)
if err != nil {
return err
}
if user.ID == 1 {
return errors.ErrRootUserDeletion
}
return s.back.DeleteByUsername(id)
case uint:
if id == 1 {
return errors.ErrRootUserDeletion
}
return s.back.DeleteByID(id)
default:
return errors.ErrInvalidDataType
}
}
// LastUpdate gets the timestamp of the last update for a user.
func (s *Storage) LastUpdate(id uint) int64 {
s.mux.RLock()
defer s.mux.RUnlock()
if val, ok := s.updated[id]; ok {
return val
}
return 0
}
| {
return &Storage{
back: back,
updated: map[uint]int64{},
}
} |
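// Illustrative sketch (not part of this file): the id argument may be a
// username (string) or an ID (uint); the "/" base scope is a placeholder.
func storageUsageExample(s *Storage) error {
	if _, err := s.Get("/", "alice"); err != nil { // username lookup
		return err
	}
	if _, err := s.Get("/", uint(42)); err != nil { // id lookup
		return err
	}
	// Deleting user ID 1 is always rejected to protect the root user.
	if err := s.Delete(uint(1)); err != errors.ErrRootUserDeletion {
		return err
	}
	return nil
}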
people_extras.py | from django import template
register = template.Library()
def fontawesome(icon_name, size=""):
"""
Generate fontawesome syntax for HTML.
Usage:
{% fontawesome "iconname" %} |
Size values are: lg, 2x, 3x, 4x, 5x
"""
if len(size) > 0:
size = "fa-%s" % size
return '<i class="fa fa-%s %s"></i>' % (icon_name, size)
register.simple_tag(fontawesome) | {% fontawesome "iconname" "size" %} |
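# Rendered output sketch (illustrative; note the trailing space when no size
# is given, since the format string always interpolates both fields):
# fontawesome("camera")       -> '<i class="fa fa-camera "></i>'
# fontawesome("camera", "2x") -> '<i class="fa fa-camera fa-2x"></i>'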
013.py | import os
def solve():
filepath = os.path.join(os.path.dirname(__file__), '013_numbers.txt')
with open(filepath) as f:
numbers = [int(x) for x in f]
| return int(str(sum(numbers))[:10])
if __name__ == '__main__':
print(solve()) | |
actions.js | /* -------------------------------------------------------------------------- */
/* Copyright 2002-2021, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
define(function(require) {
var Sunstone = require("sunstone");
var Notifier = require("utils/notifier");
var OpenNebulaSupport = require("opennebula/support");
var SupportUtils = require("./utils/common");
var RESOURCE = "Support";
var TAB_ID = require("./tabId");
var CREATE_DIALOG_ID = require("./form-panels/create/formPanelId");
var UPLOAD_DIALOG_ID = require("./dialogs/upload/dialogId");
var majorVersion = function(version){
var r = 0;
if(version && version.length){
var major = version.substring(0, version.lastIndexOf("."));
if(major && major.length){
r = parseFloat(major);
}
}
return r;
};
var minorVersion = function(version){
var r = 0;
if(version && version.length){
var minor = version.substring(version.lastIndexOf(".")+1);
if(minor && minor.length){
r = parseFloat(minor);
}
}
return r;
};
var _actions = {
"Support.list" : {
type: "list",
call: OpenNebulaSupport.list,
callback: function(req, list, res){
SupportUtils.showSupportList();
$(".support_open_value").text(res.open_requests);
$(".support_pending_value").text(res.pending_requests);
var elements = [];
if(res.REQUEST_POOL.REQUEST){
elements = res.REQUEST_POOL.REQUEST;
}
Sunstone.getDataTable(TAB_ID).updateView(req, elements);
},
error: function(request, error_json) {
if (error_json.error.http_status=="401") {
SupportUtils.stopIntervalRefresh();
}
SupportUtils.showSupportConnect();
}
},
"Support.refresh" : {
type: "custom",
call: function() {
var tab = $("#" + TAB_ID);
if (Sunstone.rightInfoVisible(tab)) {
Sunstone.runAction(RESOURCE+".show", Sunstone.rightInfoResourceId(tab));
} else {
Sunstone.getDataTable(TAB_ID).waitingNodes();
Sunstone.runAction(RESOURCE+".list", {force: true});
}
},
error: function(request, error_json) {
SupportUtils.showSupportConnect();
}
},
"Support.show" : {
type: "single",
call: OpenNebulaSupport.show,
callback: function(request, response) {
//Sunstone.getDataTable(TAB_ID).updateElement(request, response);
if (Sunstone.rightInfoVisible($("#"+TAB_ID))) {
Sunstone.insertPanels(TAB_ID, response);
}
},
error: function(request, error_json) {
SupportUtils.showSupportConnect();
}
}, | type: "create",
call: OpenNebulaSupport.create,
callback: function(request, response){
Sunstone.resetFormPanel(TAB_ID, CREATE_DIALOG_ID);
Sunstone.hideFormPanel(TAB_ID);
Sunstone.runAction("Support.refresh");
},
error: function(request, error_json){
if (error_json.error.http_status=="403") {
Sunstone.hideFormPanelLoading(TAB_ID);
Notifier.notifyError(error_json.error.message);
} else {
Sunstone.hideFormPanel(TAB_ID);
SupportUtils.showSupportConnect();
}
}
},
"Support.create_dialog" : {
type: "custom",
call: function(){
Sunstone.showFormPanel(TAB_ID, CREATE_DIALOG_ID, "create");
}
},
"Support.update" : {
type: "single",
call: OpenNebulaSupport.update,
callback: function(request, response){
Sunstone.runAction("Support.refresh");
Notifier.notifyMessage("Comment added correctly");
},
error: function(request, response){
Sunstone.runAction("Support.refresh");
//Notifier.onError(request, response);
Notifier.notifyError("Comment failed to be added");
}
},
"Support.signout" : {
type: "single",
call: function() {
$.ajax({
url: "support/credentials",
type: "DELETE",
dataType: "text",
success: function(){
SupportUtils.showSupportConnect();
Sunstone.runAction("Support.refresh");
},
error: function(response){
if (response.status=="401") {
Notifier.notifyError("Support credentials are incorrect");
} else {
Notifier.notifyError(response.responseText);
}
}
});
}
},
"Support.upload" : {
type: "single",
call: function() {
var selected_nodes = Sunstone.getDataTable(TAB_ID).elements();
var resource_id = "" + selected_nodes[0];
Sunstone.getDialog(UPLOAD_DIALOG_ID).setParams({requestId: resource_id});
Sunstone.getDialog(UPLOAD_DIALOG_ID).reset();
Sunstone.getDialog(UPLOAD_DIALOG_ID).show();
}
}
};
return _actions;
}); | "Support.create" : { |
utils.py | # -*- coding: utf-8 -*-
import os
import sys
import functools
DATABASE_FILE = "version"
def MakeDirs(dirname):
dirname = os.path.abspath(dirname)
dirname = dirname.replace("\\","/")
dirnames = dirname.split("/")
destdir = ""
destdir = os.path.join(dirnames[0] + "/",dirnames[1])
if not os.path.exists(destdir):
os.mkdir(destdir)
for name in dirnames[2:]:
destdir=os.path.join(destdir,name)
if not os.path.exists(destdir):
os.mkdir(destdir)
def get_relative_name(module_path,path_list = sys.path):
path = os.path.dirname(module_path)
recent_path = ''
while True:
# when we reach a sys.path entry or a root path (such as / or c:\\), break out of the loop
if PathsContainPath(path_list,path) or os.path.dirname(path) == path:
recent_path = path
break
path = os.path.dirname(path)
path_name = module_path.replace(recent_path + os.sep,'').split('.')[0]
if os.name == 'nt':
path_name = path_name.replace(os.sep,'/')
parts = path_name.split('/')
if parts[-1] == "__init__":
relative_module_name = '.'.join(parts[0:-1])
is_package = True
else:
relative_module_name = '.'.join(parts)
is_package = False
return relative_module_name,is_package
def strcmp(str1,str2):
i = 0
while i<len(str1) and i<len(str2):
if str1[i] != str2[i]:
if str1[i] == '_':
return 1
elif str2[i] == '_':
return -1
outcome = py_cmp(str1[i],str2[i])
return outcome
i += 1
return py_cmp(len(str1),len(str2))
def CmpMember(x,y):
if strcmp(x.lower() , y.lower()) == 1:
return 1
return -1
def CmpMember2(x,y):
if x.startswith("_") and not y.startswith("_"):
return 1
elif y.startswith("_") and not x.startswith("_"):
return -1
if x.lower() > y.lower():
return 1
return -1
def CompareDatabaseVersion_(new_version,old_version):
new_verions = new_version.split(".")
old_versions = old_version.split(".")
for i,v in enumerate(new_verions):
if i >= len(old_versions):
return 1
if int(v) > int(old_versions[i]):
return 1
return 0
def IsNoneOrEmpty(value):
if value is None:
return True
elif value == "":
return True
return False
def IsPython3():
if sys.version_info[0] >= 3:
return True
return False
def IsPython2():
if sys.version_info[0] == 2:
return True
return False
def ComparePath(path1,path2):
if os.name == 'nt':
path1 = path1.replace("/",os.sep).rstrip(os.sep)
path2 = path2.replace("/",os.sep).rstrip(os.sep)
return path1.lower() == path2.lower()
return path1.rstrip(os.sep) == path2.rstrip(os.sep)
def PathsContainPath(path_list,path):
if os.name == 'nt':
for p in path_list:
if ComparePath(p,path):
return True
return False
return path in path_list
def CalcVersionValue(ver_str="0.0.0"):
"""Calculates a version value from the provided dot-formated string
1) SPECIFICATION: Version value calculation AA.BBB.CCC
- major values: < 1 (i.e 0.0.85 = 0.850)
- minor values: 1 - 999 (i.e 0.1.85 = 1.850)
- micro values: >= 1000 (i.e 1.1.85 = 1001.850)
@keyword ver_str: Version string to calculate value of
"""
ver_str = ''.join([char for char in ver_str
if char.isdigit() or char == '.'])
ver_lvl = ver_str.split(u".")
if len(ver_lvl) < 3:
return 0
major = int(ver_lvl[0]) * 1000
minor = int(ver_lvl[1])
if len(ver_lvl[2]) <= 2:
ver_lvl[2] += u'0'
micro = float(ver_lvl[2]) / 1000
return float(major) + float(minor) + micro
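# Worked examples of the specification above (illustrative):
# CalcVersionValue("0.0.85") == 0*1000 + 0 + 850/1000.0 == 0.85
# CalcVersionValue("0.1.85") == 0*1000 + 1 + 850/1000.0 == 1.85
# CalcVersionValue("1.1.85") == 1*1000 + 1 + 850/1000.0 == 1001.85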
def CompareCommonVersion(new_version,old_version):
'''
    Compare generic version strings. Returns 1 if the new version is greater than the old version, otherwise 0: 0 means up to date, 1 means an update is needed.
'''
def format_version(version_str):
'''
        Normalize the version string to at least three dot-separated parts, e.g. a version like x.x becomes x.x.0.
'''
if len(version_str.split('.')) == 2:
version_str += ".0"
return version_str
new_version = format_version(new_version)
old_version = format_version(old_version)
if CalcVersionValue(new_version) <= CalcVersionValue(old_version):
return 0
return 1
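# Illustrative: "1.2" normalizes to "1.2.0" before the values are compared.
# CompareCommonVersion("1.2", "1.1.9")   -> 1 (update needed)
# CompareCommonVersion("1.1.9", "1.1.9") -> 0 (up to date)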
def py_sorted(iter_obj,cmp_func):
if IsPython2():
sort_obj = sorted(iter_obj, cmp=cmp_func)
elif IsPython3():
sort_obj = sorted(iter_obj, key=functools.cmp_to_key(cmp_func))
return sort_obj
def py3_cmp(l,r):
if r < l:
return 1
if l < r:
return -1
return 0
# Python 3 has no built-in cmp function, so implement one ourselves
if IsPython2():
py_cmp = cmp
elif IsPython3():
py_cmp = py3_cmp
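# Illustrative: the same comparator works under Python 2 (cmp=) and
# Python 3 (functools.cmp_to_key), e.g.:
# py_sorted(["Bar", "_private", "foo"], CmpMember2) -> ["Bar", "foo", "_private"]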
def LoadDatabaseVersion(database_location):
with open(os.path.join(database_location,DATABASE_FILE)) as f:
return f.read()
def SaveDatabaseVersion(database_location,new_database_version):
with open(os.path.join(database_location,DATABASE_FILE),"w") as f:
f.write(new_database_version)
def NeedRenewDatabase(database_location,new_database_version):
if not os.path.exists(os.path.join(database_location,DATABASE_FILE)):
return True
old_database_version = LoadDatabaseVersion(database_location)
if 0 == CompareCommonVersion(new_database_version,old_database_version | return False
return True | ):
|
auth_client_adapter.go | // Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapter
import (
"context"
pb "lucastetreault/did-tangaroa/etcdserver/etcdserverpb"
grpc "google.golang.org/grpc"
)
type as2ac struct{ as pb.AuthServer }
func AuthServerToAuthClient(as pb.AuthServer) pb.AuthClient {
return &as2ac{as}
}
func (s *as2ac) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (*pb.AuthEnableResponse, error) {
return s.as.AuthEnable(ctx, in)
}
func (s *as2ac) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (*pb.AuthDisableResponse, error) {
return s.as.AuthDisable(ctx, in)
}
func (s *as2ac) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (*pb.AuthenticateResponse, error) {
return s.as.Authenticate(ctx, in)
}
func (s *as2ac) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (*pb.AuthRoleAddResponse, error) {
return s.as.RoleAdd(ctx, in)
}
func (s *as2ac) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (*pb.AuthRoleDeleteResponse, error) {
return s.as.RoleDelete(ctx, in)
}
func (s *as2ac) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (*pb.AuthRoleGetResponse, error) {
return s.as.RoleGet(ctx, in)
}
func (s *as2ac) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (*pb.AuthRoleListResponse, error) {
return s.as.RoleList(ctx, in)
}
|
func (s *as2ac) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleGrantPermissionResponse, error) {
return s.as.RoleGrantPermission(ctx, in)
}
func (s *as2ac) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (*pb.AuthUserDeleteResponse, error) {
return s.as.UserDelete(ctx, in)
}
func (s *as2ac) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (*pb.AuthUserAddResponse, error) {
return s.as.UserAdd(ctx, in)
}
func (s *as2ac) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (*pb.AuthUserGetResponse, error) {
return s.as.UserGet(ctx, in)
}
func (s *as2ac) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (*pb.AuthUserListResponse, error) {
return s.as.UserList(ctx, in)
}
func (s *as2ac) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserGrantRoleResponse, error) {
return s.as.UserGrantRole(ctx, in)
}
func (s *as2ac) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserRevokeRoleResponse, error) {
return s.as.UserRevokeRole(ctx, in)
}
func (s *as2ac) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*pb.AuthUserChangePasswordResponse, error) {
return s.as.UserChangePassword(ctx, in)
} | func (s *as2ac) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleRevokePermissionResponse, error) {
return s.as.RoleRevokePermission(ctx, in)
} |
utils.rs | use crate::constants::*;
use crate::errors::*;
use log::*;
use nix::sys::signal::{kill, SIGTERM};
use nix::unistd::Pid;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
#[macro_export]
macro_rules! fatal {
($msg:tt) => {{
error!("{} in file {} line {}", $msg, file!(), line!());
clean();
std::process::exit(1)
}};
}
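// Illustrative use of the macro above (hypothetical call site): logs the
// message with file/line, removes the pid/socket files via clean(), then
// exits with status 1.
// fn bind_or_die() {
//     fatal!("could not bind socket");
// }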
| pub fn clean() {
debug!("remove pid and socket file");
let _ = remove_file(OUT_DIR.join(PID_FILE));
let _ = remove_file(OUT_DIR.join(SOCKET_FILE));
}
pub fn clean_and_exit() {
clean();
debug!("exit ...");
std::process::exit(0);
}
pub fn remove_file<P: AsRef<Path>>(p: P) -> Result {
if p.as_ref().exists() {
std::fs::remove_file(p).map_err(|e| {
error!("{}", e);
Error::RemoveFileError
})
} else {
Err(Error::FileNotExistError)
}
}
pub fn read_file<P: AsRef<Path>>(p: P) -> Result<String> {
File::open(p)
.map_err(|e| {
error!("open file error {}", e);
Error::OpenFileError
})
.and_then(|mut f: File| {
let mut buf = String::new();
f.read_to_string(&mut buf).map_err(|e| {
error!("read to string error {}", e);
Error::ReadToStringError
})?;
Ok(buf)
})
}
pub fn kill_server() -> Result {
let pid = read_file(OUT_DIR.join(PID_FILE))?
.parse::<i32>()
.map_err(|e| {
error!("{}", e);
Error::ParseError
})?;
kill(Pid::from_raw(pid), SIGTERM).map_err(|e| {
error!("{}", e);
Error::KillError
})?;
clean();
Ok(())
} | |
update_number_status.go | package cloudcallcenter
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// UpdateNumberStatus invokes the cloudcallcenter.UpdateNumberStatus API synchronously
// api document: https://help.aliyun.com/api/cloudcallcenter/updatenumberstatus.html
func (client *Client) UpdateNumberStatus(request *UpdateNumberStatusRequest) (response *UpdateNumberStatusResponse, err error) {
response = CreateUpdateNumberStatusResponse()
err = client.DoAction(request, response)
return
}
// UpdateNumberStatusWithChan invokes the cloudcallcenter.UpdateNumberStatus API asynchronously
// api document: https://help.aliyun.com/api/cloudcallcenter/updatenumberstatus.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) UpdateNumberStatusWithChan(request *UpdateNumberStatusRequest) (<-chan *UpdateNumberStatusResponse, <-chan error) {
responseChan := make(chan *UpdateNumberStatusResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.UpdateNumberStatus(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// UpdateNumberStatusWithCallback invokes the cloudcallcenter.UpdateNumberStatus API asynchronously
// api document: https://help.aliyun.com/api/cloudcallcenter/updatenumberstatus.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) UpdateNumberStatusWithCallback(request *UpdateNumberStatusRequest, callback func(response *UpdateNumberStatusResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *UpdateNumberStatusResponse
var err error
defer close(result)
response, err = client.UpdateNumberStatus(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// UpdateNumberStatusRequest is the request struct for api UpdateNumberStatus
type UpdateNumberStatusRequest struct {
*requests.RpcRequest
NumberStatus *[]UpdateNumberStatusNumberStatus `position:"Query" name:"numberStatus" type:"Repeated"`
}
// UpdateNumberStatusNumberStatus is a repeated param struct in UpdateNumberStatusRequest
type UpdateNumberStatusNumberStatus struct {
Number string `name:"Number"`
Inbound string `name:"Inbound"`
Enable string `name:"Enable"`
RamId string `name:"RamId"`
Tenant string `name:"Tenant"`
}
// UpdateNumberStatusResponse is the response struct for api UpdateNumberStatus
type UpdateNumberStatusResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
Success bool `json:"Success" xml:"Success"`
Code string `json:"Code" xml:"Code"`
Message string `json:"Message" xml:"Message"`
HttpStatusCode int `json:"HttpStatusCode" xml:"HttpStatusCode"`
ResultList ResultList `json:"resultList" xml:"resultList"`
}
// CreateUpdateNumberStatusRequest creates a request to invoke UpdateNumberStatus API
func CreateUpdateNumberStatusRequest() (request *UpdateNumberStatusRequest) {
request = &UpdateNumberStatusRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("CloudCallCenter", "2017-07-05", "UpdateNumberStatus", "", "")
request.Method = requests.POST
return
}
// CreateUpdateNumberStatusResponse creates a response to parse from UpdateNumberStatus response
func CreateUpdateNumberStatusResponse() (response *UpdateNumberStatusResponse) {
response = &UpdateNumberStatusResponse{
BaseResponse: &responses.BaseResponse{}, | }
return
} | |
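// Illustrative sketch (field values are placeholders): an asynchronous call
// via the channel-based variant defined above.
func exampleUpdateNumberStatusWithChan(client *Client) {
	request := CreateUpdateNumberStatusRequest()
	request.NumberStatus = &[]UpdateNumberStatusNumberStatus{
		{Number: "05712345678", Enable: "true"},
	}
	respChan, errChan := client.UpdateNumberStatusWithChan(request)
	select {
	case resp := <-respChan:
		_ = resp.Success // inspect the response
	case err := <-errChan:
		_ = err // handle the error
	}
}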
request_utils.py | import requests,base64
def request_download_file_by_url(download_url, file_name):
r = requests.get(download_url)
with open(file_name, 'wb') as f:
f.write(r.content)
def request_get_rss_news(rss_url):
try:
r = requests.get(rss_url) | print('The scraping job failed. See exception: ')
print(e) | # print(r.encoding)
print(r.text)
return print('The scraping job succeeded: ', r.status_code)
except Exception as e: |
modal-datetimepicker.ios.ts | import * as application from "tns-core-modules/application";
import * as frame from "tns-core-modules/ui/frame";
import { Label } from "tns-core-modules/ui/label/";
import { Page } from "tns-core-modules/ui/page";
class ButtonHandler extends NSObject {
public close(nativeButton: UIButton, nativeEvent: _UIEvent) {
picker.close();
}
public chooseDate(nativeButton: UIButton, nativeEvent: _UIEvent) {
picker.chooseDate();
}
public chooseTime(nativeButton: UIButton, nativeEvent: _UIEvent) {
picker.chooseTime();
}
public static ObjCExposedMethods = {
close: {
returns: interop.types.void,
params: [interop.types.id, interop.types.id]
},
chooseDate: {
returns: interop.types.void,
params: [interop.types.id, interop.types.id]
},
chooseTime: {
returns: interop.types.void,
params: [interop.types.id, interop.types.id]
}
};
}
const buttonHandler = ButtonHandler.new();
let myResolve;
export interface PickerOptions {
type?: string;
title?: string;
theme?: string;
maxDate?: Date;
minDate?: Date;
startingDate?: Date;
startingHour?: number;
startingMinute?: number;
cancelLabel?: string;
doneLabel?: string;
}
export interface PickerResponse {
day?: number;
month?: number;
year?: number;
hour?: number;
minute?: number;
}
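// Usage sketch (illustrative): both pickers resolve with a PickerResponse,
// or false when the user cancels.
// const dtPicker = new ModalDatetimepicker();
// dtPicker.pickDate({ theme: "dark", maxDate: new Date() })
//     .then(result => console.log(result));
// dtPicker.pickTime({ startingHour: 9, startingMinute: 30 })
//     .then(result => console.log(result));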
let window: UIWindow;
let effectView: UIVisualEffectView; // this view blurs the background
let pickerHolderView: UIView; // this is the view that holds the picker
let bottomContentContainer: UIView; // this view holds the picker and the action buttons.
let topContentContainer: UIView; // this is the view the holds the title.
let titleLabel: UILabel;
let minMaxLabel: UILabel;
let datePickerView: UIDatePicker;
export class ModalDatetimepicker {
constructor() {}
public pickDate(options?: PickerOptions) {
if (!options) options = {};
options.type = "date";
return this.show(options);
}
public pickTime(options?: PickerOptions) {
if (!options) options = {};
options.type = "time";
return this.show(options);
}
private show(options: PickerOptions) {
return new Promise((resolve, reject) => {
myResolve = resolve;
if (!options.type) options.type = "date";
if (!options.theme) options.theme = "dark";
if (!options.title) {
if (options.type === "date") {
options.title = "Choose A Date";
} else {
options.title = "Choose A Time";
}
}
let startingDate = new Date();
if (options.type === "date") {
if (
options.startingDate &&
typeof options.startingDate.getMonth !== "function"
) {
reject("startingDate must be a Date.");
}
else if (options.startingDate) {
startingDate = options.startingDate;
}
} else {
if (options.startingHour) {
startingDate.setHours(options.startingHour);
}
if (options.startingMinute) {
startingDate.setMinutes(options.startingMinute);
}
}
if (
options.minDate &&
typeof options.minDate.getMonth !== "function"
) {
reject("minDate must be a Date.");
}
if (
options.maxDate &&
typeof options.maxDate.getMonth !== "function"
) {
reject("maxDate must be a Date.");
}
window = UIApplication.sharedApplication.keyWindow;
let containerBounds = window.bounds;
// blur the background of the application.
effectView = UIVisualEffectView.alloc().init();
effectView.frame = CGRectMake(
containerBounds.origin.x,
containerBounds.origin.y,
containerBounds.size.width,
containerBounds.size.height + 20
);
effectView.autoresizingMask =
UIViewAutoresizing.FlexibleWidth |
UIViewAutoresizing.FlexibleHeight;
window.addSubview(effectView);
window.bringSubviewToFront(effectView);
UIView.animateWithDurationAnimations(0.4, () => {
effectView.effect = UIBlurEffect.effectWithStyle(
options.theme === "light"
? UIBlurEffectStyle.Light
: UIBlurEffectStyle.Dark
);
});
bottomContentContainer = UIView.alloc().init();
bottomContentContainer.frame = CGRectMake(
10,
containerBounds.size.height - 320,
containerBounds.size.width - 20,
310
);
bottomContentContainer.autoresizingMask =
UIViewAutoresizing.FlexibleTopMargin |
UIViewAutoresizing.FlexibleWidth;
bottomContentContainer.autoresizesSubviews = true;
bottomContentContainer.transform = CGAffineTransformMakeTranslation(
0,
320
);
pickerHolderView = UIView.alloc().init();
pickerHolderView.backgroundColor = UIColor.whiteColor;
pickerHolderView.frame = CGRectMake(
0,
0,
containerBounds.size.width - 20,
270
);
pickerHolderView.layer.cornerRadius = 10;
pickerHolderView.layer.masksToBounds = true;
pickerHolderView.autoresizingMask =
UIViewAutoresizing.FlexibleWidth |
UIViewAutoresizing.FlexibleHeight;
pickerHolderView.layer.masksToBounds = false;
pickerHolderView.layer.shadowColor = UIColor.blackColor.CGColor;
pickerHolderView.layer.shadowOffset = CGSizeMake(2.0, 2.0);
pickerHolderView.layer.shadowOpacity = 0.5;
pickerHolderView.layer.shadowRadius = 8;
pickerHolderView.layer.shadowPath = UIBezierPath.bezierPathWithRect(
pickerHolderView.bounds
).CGPath;
let buttonContainer: UIView = UIView.alloc().initWithFrame(
CGRectMake(0, 270, containerBounds.size.width - 20, 40)
);
buttonContainer.autoresizingMask = UIViewAutoresizing.FlexibleWidth;
buttonContainer.autoresizesSubviews = true;
let cancelButton: UIButton = UIButton.buttonWithType(
UIButtonType.System
);
cancelButton.setTitleForState(options.cancelLabel || "Cancel", UIControlState.Normal);
cancelButton.addTargetActionForControlEvents(
buttonHandler,
"close",
UIControlEvents.TouchUpInside
);
cancelButton.frame = CGRectMake(
0,
0,
buttonContainer.bounds.size.width / 2,
40
);
cancelButton.setTitleColorForState(
UIColor.whiteColor,
UIControlState.Normal
);
cancelButton.titleLabel.font = UIFont.systemFontOfSize(18);
cancelButton.autoresizingMask = UIViewAutoresizing.FlexibleWidth;
buttonContainer.addSubview(cancelButton);
buttonContainer.bringSubviewToFront(cancelButton);
let doneButton: UIButton = UIButton.buttonWithType(
UIButtonType.System
);
doneButton.setTitleForState(options.doneLabel || "Done", UIControlState.Normal);
if (options.type === "date") {
doneButton.addTargetActionForControlEvents(
buttonHandler,
"chooseDate",
UIControlEvents.TouchUpInside
);
} else {
doneButton.addTargetActionForControlEvents(
buttonHandler,
"chooseTime",
UIControlEvents.TouchUpInside
);
}
doneButton.frame = CGRectMake(
buttonContainer.bounds.size.width / 2,
0,
buttonContainer.bounds.size.width / 2,
40
);
doneButton.setTitleColorForState(
UIColor.colorWithRedGreenBlueAlpha(0, 153, 255, 1),
UIControlState.Normal
);
doneButton.titleLabel.font = UIFont.boldSystemFontOfSize(18);
doneButton.autoresizingMask = UIViewAutoresizing.FlexibleWidth;
buttonContainer.addSubview(doneButton);
buttonContainer.bringSubviewToFront(doneButton);
bottomContentContainer.addSubview(buttonContainer);
bottomContentContainer.bringSubviewToFront(buttonContainer);
datePickerView = UIDatePicker.alloc().initWithFrame(
CGRectMake(0, 0, containerBounds.size.width - 20, 250)
);
datePickerView.datePickerMode =
options.type === "date"
? UIDatePickerMode.Date
: UIDatePickerMode.Time;
datePickerView.autoresizingMask = UIViewAutoresizing.FlexibleWidth;
datePickerView.date = startingDate;
if (options.minDate) datePickerView.minimumDate = options.minDate;
if (options.maxDate) datePickerView.maximumDate = options.maxDate;
pickerHolderView.addSubview(datePickerView);
pickerHolderView.bringSubviewToFront(datePickerView);
bottomContentContainer.addSubview(pickerHolderView);
bottomContentContainer.bringSubviewToFront(pickerHolderView); | true,
25
);
titleLabel.textAlignment = NSTextAlignment.Center;
titleLabel.frame = CGRectMake(
0,
20,
containerBounds.size.width,
containerBounds.size.height - 360
);
titleLabel.transform = CGAffineTransformMakeScale(0.8, 0.8);
titleLabel.adjustsFontForContentSizeCategory = true;
titleLabel.adjustsFontSizeToFitWidth = true;
titleLabel.layer.masksToBounds = false;
titleLabel.alpha = 0;
titleLabel.autoresizingMask =
UIViewAutoresizing.FlexibleHeight |
UIViewAutoresizing.FlexibleTopMargin |
UIViewAutoresizing.FlexibleWidth;
window.addSubview(titleLabel);
window.bringSubviewToFront(titleLabel);
window.addSubview(bottomContentContainer);
window.bringSubviewToFront(bottomContentContainer);
let animationOptions: UIViewAnimationOptions;
UIView.animateWithDurationDelayOptionsAnimationsCompletion(
0.4,
0,
UIViewAnimationOptions.CurveEaseOut,
() => {
bottomContentContainer.transform = CGAffineTransformMakeTranslation(
0,
0
);
titleLabel.transform = CGAffineTransformMakeScale(1, 1);
titleLabel.alpha = 1;
},
() => {
// console.dir("animation completed");
}
);
});
}
private labelFactory(text, color, shadow, size) {
window = UIApplication.sharedApplication.keyWindow;
let containerBounds = window.bounds;
let label = UILabel.alloc().init();
label.text = text;
label.font = UIFont.boldSystemFontOfSize(size);
label.textColor = color;
if (shadow) {
label.shadowColor = UIColor.colorWithRedGreenBlueAlpha(
0,
0,
0,
0.4
);
label.shadowOffset = CGSizeMake(2.0, 2.0);
label.layer.shadowRadius = 8.0;
label.layer.shadowOpacity = 0.5;
label.layer.masksToBounds = false;
label.layer.shouldRasterize = true;
}
return label;
}
public chooseDate() {
let pickedDate = new Date(datePickerView.date);
let response: PickerResponse = {
day: pickedDate.getDate(),
month: pickedDate.getMonth() + 1,
year: pickedDate.getFullYear()
};
this.close(response);
}
public chooseTime() {
let pickedDate = new Date(datePickerView.date);
let response: PickerResponse = {
hour: pickedDate.getHours(),
minute: pickedDate.getMinutes()
};
this.close(response);
}
public close(response?) {
if (!response) response = false;
UIView.animateWithDurationAnimationsCompletion(
0.3,
() => {
effectView.effect = null;
bottomContentContainer.transform = CGAffineTransformMakeTranslation(
0,
320
);
titleLabel.transform = CGAffineTransformMakeScale(0.8, 0.8);
titleLabel.alpha = 0;
},
() => {
effectView.removeFromSuperview();
bottomContentContainer.removeFromSuperview();
titleLabel.removeFromSuperview();
myResolve(response);
}
);
}
}
let picker = new ModalDatetimepicker(); |
titleLabel = this.labelFactory(
options.title,
UIColor.whiteColor, |
mod.rs | mod export;
mod import;
mod mongo;
mod postgres;
mod stdf;
mod store;
mod telemetry;
use crate::cli::export::SomeExportStrategy;
use crate::cli::export::{ExportParams, ExportStrategy};
use crate::cli::import::ImportStrategy;
use crate::cli::import::SomeImportStrategy;
use crate::cli::store::Store;
use anyhow::{Context, Result};
use std::fs::File;
use std::path::{Path, PathBuf};
use structopt::StructOpt;
use crate::cli::telemetry::TelemetryClient;
use rand::RngCore;
use synth_core::Name;
pub struct Cli {
store: Store,
args: CliArgs,
telemetry: TelemetryClient,
}
fn with_telemetry<F: FnOnce() -> Result<T>, T>(
command: &str,
tel_client: &TelemetryClient,
func: F,
) -> Result<T> {
match func() {
Ok(t) => {
let _ = tel_client.success(command);
Ok(t)
}
Err(e) => {
let _ = tel_client.failed(command);
Err(e)
}
}
}
impl Cli {
/// This is going to get confusing with the `init` command.
pub fn new(args: CliArgs, version: String, os: String) -> Result<Self> {
Ok(Self {
store: Store::init()?,
args,
telemetry: TelemetryClient::new(version, os),
})
}
fn derive_seed(random: bool, seed: Option<u64>) -> Result<u64> {
if random && seed.is_some() {
return Err(anyhow!(
"Cannot have the --random flag and --seed specified at the same time."
));
}
match random {
true => Ok(rand::thread_rng().next_u64()),
false => Ok(seed.unwrap_or(0)),
}
}
pub async fn | (self) -> Result<()> {
match self.args {
CliArgs::Generate {
ref namespace,
ref collection,
size,
ref to,
seed,
random,
} => with_telemetry("generate", &self.telemetry, || {
self.generate(
namespace.clone(),
collection.clone(),
size,
to.clone(),
Self::derive_seed(random, seed)?,
)
}),
CliArgs::Import {
ref namespace,
ref collection,
ref from,
} => with_telemetry("import", &self.telemetry, || {
self.import(namespace.clone(), collection.clone(), from.clone())
}),
CliArgs::Init {} => with_telemetry("init", &self.telemetry, || self.init()),
CliArgs::Telemetry(telemetry) => {
match telemetry {
TelemetryCommand::Enable => {
with_telemetry("telemetry::enable", &self.telemetry, telemetry::enable)
}
TelemetryCommand::Disable => {
with_telemetry("telemetry::disable", &self.telemetry, || {
telemetry::disable()
})
}
TelemetryCommand::Status => {
if telemetry::is_enabled() {
println!("Telemetry is enabled. To disable it run `synth telemetry disable`.");
} else {
println!(
"Telemetry is disabled. To enable it run `synth telemetry enable`."
);
}
Ok(())
}
}
}
}
}
fn init(&self) -> Result<()> {
match self.workspace_initialised() {
true => {
println!("Workspace already initialised");
std::process::exit(1)
}
false => {
let workspace_dir = ".synth";
let base_path = std::fs::canonicalize(".")?;
let result = std::fs::create_dir(workspace_dir).context(format!(
"Failed to initialize workspace at: {}",
base_path.join(workspace_dir).to_str().unwrap()
));
let config_path = ".synth/config.toml";
match result {
Ok(()) => {
File::create(config_path).context(format!(
"Failed to initialize workspace at: {}",
base_path.join(config_path).to_str().unwrap()
))?;
Ok(())
}
Err(ref e)
if e.downcast_ref::<std::io::Error>().unwrap().kind()
== std::io::ErrorKind::AlreadyExists =>
{
File::create(config_path).context(format!(
"Failed to initialize workspace at: {}",
base_path.join(config_path).to_str().unwrap()
))?;
Ok(())
}
_ => result,
}
}
}
}
fn workspace_initialised(&self) -> bool {
Path::new(".synth/config.toml").exists()
}
fn import(
&self,
path: PathBuf,
collection: Option<Name>,
import_strategy: Option<SomeImportStrategy>,
) -> Result<()> {
if !self.workspace_initialised() {
return Err(anyhow!(
"Workspace has not been initialised. To initialise the workspace run `synth init`."
));
}
if !path.is_relative() {
return Err(anyhow!(
"The namespace path `{}` is absolute. Only paths relative to an initialised workspace root are accepted.",
path.display()
));
}
// TODO: If ns exists and no collection: break
// If collection and ns exists and collection exists: break
if let Some(collection) = collection {
if self.store.collection_exists(&path, &collection) {
return Err(anyhow!(
"The collection `{}` already exists. Will not import into an existing collection.",
Store::relative_collection_path(&path, &collection).display()
));
} else {
let content = import_strategy
.unwrap_or_default()
.import_collection(&collection)?;
self.store
.save_collection_path(&path, collection, content)?;
Ok(())
}
} else if self.store.ns_exists(&path) {
Err(anyhow!(
"The namespace at `{}` already exists. Will not import into an existing namespace.",
path.display()
))
} else {
let ns = import_strategy.unwrap_or_default().import()?;
self.store.save_ns_path(path, ns)?;
Ok(())
}
}
fn generate(
&self,
ns_path: PathBuf,
collection: Option<Name>,
target: usize,
to: Option<SomeExportStrategy>,
seed: u64,
) -> Result<()> {
if !self.workspace_initialised() {
return Err(anyhow!(
"Workspace has not been initialised. To initialise the workspace run `synth init`."
));
}
let namespace = self
.store
.get_ns(ns_path.clone())
.context("Unable to open the namespace")?;
let params = ExportParams {
namespace,
collection_name: collection,
target,
seed,
};
to.unwrap_or_default()
.export(params)
.context(format!("At namespace {:?}", ns_path))
}
}
#[derive(StructOpt)]
#[structopt(name = "synth", about = "synthetic data engine on the command line")]
pub enum CliArgs {
#[structopt(about = "Initialise the workspace")]
Init {},
#[structopt(about = "Generate data from a namespace")]
Generate {
#[structopt(
help = "the namespace directory from which to generate",
parse(from_os_str)
)]
namespace: PathBuf,
#[structopt(long, help = "the specific collection from which to generate")]
collection: Option<Name>,
#[structopt(long, help = "the number of samples", default_value = "1")]
size: usize,
#[structopt(
long,
help = "The sink into which to generate data. Can be a postgres uri, a mongodb uri. If not specified, data will be written to stdout"
)]
to: Option<SomeExportStrategy>,
#[structopt(
long,
help = "an unsigned 64 bit integer seed to be used as a seed for generation"
)]
seed: Option<u64>,
#[structopt(
long,
help = "generation will use a random seed - this cannot be used with --seed"
)]
random: bool,
},
#[structopt(about = "Import data from an external source")]
Import {
#[structopt(
help = "The namespace directory into which to import",
parse(from_os_str)
)]
namespace: PathBuf,
#[structopt(
long,
help = "The name of a collection into which the data will be imported"
)]
collection: Option<Name>,
#[structopt(
long,
help = "The source from which to import data. Can be a postgres uri, a mongodb uri or a path to a JSON file / directory. If not specified, data will be read from stdin"
)]
from: Option<SomeImportStrategy>,
},
#[structopt(about = "Toggle anonymous usage data collection")]
Telemetry(TelemetryCommand),
}
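// Example invocations (illustrative; flags mirror the StructOpt definitions above):
//
//   synth init
//   synth generate my_namespace --size 100 --seed 42
//   synth import my_namespace --from postgres://user:pass@localhost:5432/db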
#[derive(StructOpt)]
pub enum TelemetryCommand {
#[structopt(about = "Enable anonymous usage data collection")]
Enable,
#[structopt(about = "Disable anonymous usage data collection")]
Disable,
#[structopt(about = "Check telemetry status")]
Status,
}
#[cfg(test)]
pub mod tests {
use super::*;
#[test]
fn test_derive_seed() {
assert_eq!(Cli::derive_seed(false, None).unwrap(), 0);
assert_eq!(Cli::derive_seed(false, Some(5)).unwrap(), 5);
assert!(Cli::derive_seed(true, Some(5)).is_err());
assert!(Cli::derive_seed(true, None).is_ok());
}
}
| run |
6-pandas_creating_series.py | #!/usr/bin/env python3
import numpy as np
import pandas as pd
def pretty_print(name, to_print):
|
orders = pd.Series(data=[300.50, 60, 123.40, 60, np.nan],
index=['Customer 1', 'Customer 2', 'Customer 3', 'Customer 4', 'Customer 5'])
pretty_print("orders", orders.to_string())
pretty_print("first row of orders", orders.head(n=1))
pretty_print("orders indexes", orders.index)
pretty_print("order types", orders.dtypes)
pretty_print("orders shape", orders.shape)
pretty_print("orders description with types", orders.describe())
pretty_print("orders sorted values", orders.sort_values())
pretty_print("orders counts of values", orders.value_counts())
pretty_print("orders check for null elements", orders.isnull())
| print(f'{name}:')
print(f'{to_print}\n\n') |
authorize_response.go | package fosite
import (
"net/http"
"net/url"
)
// AuthorizeResponse is an implementation of AuthorizeResponder
type AuthorizeResponse struct {
Header http.Header
Query url.Values
Fragment url.Values
code string
}
func | () *AuthorizeResponse {
return &AuthorizeResponse{
Header: http.Header{},
Query: url.Values{},
Fragment: url.Values{},
}
}
func (a *AuthorizeResponse) GetCode() string {
return a.code
}
func (a *AuthorizeResponse) GetHeader() http.Header {
return a.Header
}
func (a *AuthorizeResponse) AddHeader(key, value string) {
a.Header.Add(key, value)
}
func (a *AuthorizeResponse) GetQuery() url.Values {
return a.Query
}
func (a *AuthorizeResponse) GetFragment() url.Values {
return a.Fragment
}
func (a *AuthorizeResponse) AddQuery(key, value string) {
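// Capture the authorization code so GetCode can return it later; AddFragment mirrors this.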
if key == "code" {
a.code = value
}
a.Query.Add(key, value)
}
func (a *AuthorizeResponse) AddFragment(key, value string) {
if key == "code" {
a.code = value
}
a.Fragment.Add(key, value)
}
| NewAuthorizeResponse |
stop_retry_error.go | package retry
import "fmt"
func StopRetryWithError(err error) error {
if err == nil {
return nil
}
return stopRetryError{originError: err}
}
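// Example (illustrative; isPermanent is a hypothetical predicate):
//
//	if isPermanent(err) {
//		return retry.StopRetryWithError(err)
//	}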
| type stopRetryError struct {
originError error
}
func (err stopRetryError) Error() string {
return fmt.Sprintf("stop retry with error: %s", err.originError)
} | |
appEnum.ts | export const SIDE_BAR_MINI_WIDTH = 58;
export const SIDE_BAR_SHOW_TIT_MINI_WIDTH = 80;
export enum ContentEnum {
// auto width
FULL = 'full',
// fixed width
FIXED = 'fixed',
}
// app current theme
export enum ThemeModeEnum {
LIGHT = 'light-mode',
DARK = 'dark-mode',
SEMI_DARK = 'semi-dark-mode',
}
// menu theme enum
export enum ThemeEnum {
DARK = 'dark',
LIGHT = 'light',
}
/**
* Permission mode
*/
export enum PermissionModeEnum {
// role
ROLE = 'ROLE',
// back
BACK = 'BACK',
}
// Route switching animation
export enum RouterTransitionEnum {
ZOOM_FADE = 'zoom-fade', | ZOOM_OUT = 'zoom-out',
FADE_SIDE = 'fade-slide',
FADE = 'fade',
FADE_BOTTOM = 'fade-bottom',
FADE_SCALE = 'fade-scale',
} | |
references.go | // Copyright 2018 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encode
import (
"github.com/vmware/govmomi/vim25/types"
"github.com/vmware/vic/lib/apiservers/service/models"
)
func AsManagedObjectID(mobid string) string {
moref := new(types.ManagedObjectReference)
ok := moref.FromString(mobid)
if !ok {
return "" // TODO (#6717): Handle? (We probably don't want to let this fail the request, but may want to convey that something unexpected happened.)
}
return moref.Value
}
// common provides an interface for the relevant parts of object.Common
type common interface {
Reference() types.ManagedObjectReference
Name() string
}
func | (object common) models.ManagedObject {
return models.ManagedObject{
Name: object.Name(),
ID: object.Reference().Value,
}
}
| AsManagedObject |
sink_connector.rs | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
use std::collections::HashMap;
use std::fs::OpenOptions;
use std::time::Duration;
use anyhow::{anyhow, bail, Context};
use interchange::avro::get_debezium_transaction_schema;
use mz_avro::types::Value;
use rdkafka::admin::{AdminClient, AdminOptions, NewTopic, ResourceSpecifier, TopicReplication};
use rdkafka::client::DefaultClientContext;
use rdkafka::config::ClientConfig;
use rdkafka::{Message, Offset, TopicPartitionList};
use dataflow_types::{
AvroOcfSinkConnector, AvroOcfSinkConnectorBuilder, KafkaSinkConnector,
KafkaSinkConnectorBuilder, KafkaSinkConsistencyConnector, SinkConnector, SinkConnectorBuilder,
};
use expr::GlobalId;
use ore::collections::CollectionExt;
use rdkafka::consumer::{BaseConsumer, Consumer};
use repr::Timestamp;
use sql::kafka_util;
use crate::error::CoordError;
pub async fn build(
builder: SinkConnectorBuilder,
id: GlobalId,
) -> Result<SinkConnector, CoordError> {
match builder {
SinkConnectorBuilder::Kafka(k) => build_kafka(k, id).await,
SinkConnectorBuilder::AvroOcf(a) => build_avro_ocf(a, id),
}
}
/// Polls a message from a Kafka Source
fn | (
consumer: &mut BaseConsumer,
timeout: Duration,
) -> Result<Option<Vec<u8>>, anyhow::Error> {
if let Some(result) = consumer.poll(timeout) {
match result {
Ok(message) => match message.payload() {
Some(p) => Ok(Some(p.to_vec())),
None => {
bail!("unexpected null payload")
}
},
Err(err) => {
bail!("Failed to process message {}", err)
}
}
} else {
Ok(None)
}
}
// Retrieves the latest committed timestamp from the consistency topic
async fn get_latest_ts(
consistency_topic: &str,
consumer: &mut BaseConsumer,
timeout: Duration,
) -> Result<Option<Timestamp>, anyhow::Error> {
// ensure the consistency topic has exactly one partition
let partitions = kafka_util::get_partitions(&consumer, consistency_topic, timeout)
.with_context(|| {
format!(
"Unable to fetch metadata about consistency topic {}",
consistency_topic
)
})?;
if partitions.len() != 1 {
bail!(
"Consistency topic {} should contain a single partition, but instead contains {} partitions",
consistency_topic, partitions.len(),
);
}
let partition = partitions.into_element();
// Seek to the beginning of the topic; see the note below on why we scan forward
let mut tps = TopicPartitionList::new();
tps.add_partition(consistency_topic, partition);
tps.set_partition_offset(consistency_topic, partition, Offset::Beginning)?;
consumer.assign(&tps).with_context(|| {
format!(
"Error seeking in consistency topic {}:{}",
consistency_topic, partition
)
})?;
// We scan from the beginning and see if we can find an END record. We have
// to do it like this because Kafka Control Batches mess with offsets. We
// therefore cannot simply take the last offset from the back and expect an
// END message there. With a transactional producer, the OffsetTail(1) will
// not point to an END message but a control message. With aborted
// transactions, there might even be a lot of garbage at the end of the
// topic or in between.
let mut latest_message = None;
while let Some(message) = get_next_message(consumer, timeout)? {
latest_message = Some(message);
}
if latest_message.is_none() {
// fetch watermarks to distinguish between a read that timed out and an empty topic
match consumer.fetch_watermarks(consistency_topic, 0, timeout) {
Ok((lo, hi)) => {
if hi == 0 {
return Ok(None);
} else {
bail!(
"uninitialized consistency topic {}:{}, lo/hi: {}/{}",
consistency_topic,
partition,
lo,
hi
);
}
}
Err(e) => {
bail!(
"Failed to fetch metadata while reading from consistency topic: {}",
e
);
}
}
}
let latest_message = latest_message.expect("known to exist");
// the latest valid message should be an END message. If not, things have
// gone wrong!
let timestamp = decode_consistency_end_record(&latest_message, consistency_topic)?;
Ok(Some(timestamp))
}
fn decode_consistency_end_record(
bytes: &[u8],
consistency_topic: &str,
) -> Result<Timestamp, anyhow::Error> {
// The first 5 bytes are reserved for the schema id/schema registry information
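// (This matches the Confluent wire format: one zero magic byte plus a 4-byte big-endian schema id.)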
let mut bytes = &bytes[5..];
let record = mz_avro::from_avro_datum(get_debezium_transaction_schema(), &mut bytes)
.context("Failed to decode consistency topic message")?;
if let Value::Record(r) = record {
let m: HashMap<String, Value> = r.into_iter().collect();
let status = m.get("status");
let id = m.get("id");
match (status, id) {
(Some(Value::String(status)), Some(Value::String(id))) if status == "END" => {
if let Ok(ts) = id.parse::<u64>() {
Ok(Timestamp::from(ts))
} else {
bail!(
"Malformed consistency record, failed to parse timestamp {} in topic {}",
id,
consistency_topic
);
}
}
_ => {
bail!(
"Malformed consistency record in topic {}, expected END with a timestamp but record was {:?}, tried matching {:?} {:?}",
consistency_topic, m, status, id);
}
}
} else {
bail!("Failed to decode consistency topic message, was not a parseable record");
}
}
#[allow(clippy::too_many_arguments)]
async fn register_kafka_topic(
client: &AdminClient<DefaultClientContext>,
topic: &str,
mut partition_count: i32,
mut replication_factor: i32,
ccsr: &ccsr::Client,
value_schema: &str,
key_schema: Option<&str>,
succeed_if_exists: bool,
) -> Result<(Option<i32>, i32), CoordError> {
// if either partition count or replication factor should be defaulted to the broker's config
// (signaled by a value of -1), explicitly poll the broker to discover the defaults.
// Newer versions of Kafka can instead send create topic requests with -1 and have this happen
// behind the scenes, but this is unsupported and will result in errors on pre-2.4 Kafka
if partition_count == -1 || replication_factor == -1 {
let metadata = client
.inner()
.fetch_metadata(None, Duration::from_secs(5))
.with_context(|| {
format!(
"error fetching metadata when creating new topic {} for sink",
topic
)
})?;
if metadata.brokers().len() == 0 {
coord_bail!("zero brokers discovered in metadata request");
}
let broker = metadata.brokers()[0].id();
let configs = client
.describe_configs(
&[ResourceSpecifier::Broker(broker)],
&AdminOptions::new().request_timeout(Some(Duration::from_secs(5))),
)
.await
.with_context(|| {
format!(
"error fetching configuration from broker {} when creating new topic {} for sink",
broker,
topic
)
})?;
if configs.len() != 1 {
coord_bail!(
"error creating topic {} for sink: broker {} returned {} config results, but one was expected",
topic,
broker,
configs.len()
);
}
let config = configs.into_element().map_err(|e| {
anyhow!(
"error reading broker configuration when creating topic {} for sink: {}",
topic,
e
)
})?;
for entry in config.entries {
if entry.name == "num.partitions" && partition_count == -1 {
if let Some(s) = entry.value {
partition_count = s.parse::<i32>().with_context(|| {
format!(
"default partition count {} cannot be parsed into an integer",
s
)
})?;
}
} else if entry.name == "default.replication.factor" && replication_factor == -1 {
if let Some(s) = entry.value {
replication_factor = s.parse::<i32>().with_context(|| {
format!(
"default replication factor {} cannot be parsed into an integer",
s
)
})?;
}
}
}
if partition_count == -1 {
coord_bail!("default was requested for partition_count, but num.partitions was not found in broker config");
}
if replication_factor == -1 {
coord_bail!("default was requested for replication_factor, but default.replication.factor was not found in broker config");
}
}
let res = client
.create_topics(
&[NewTopic::new(
&topic,
partition_count,
TopicReplication::Fixed(replication_factor),
)],
&AdminOptions::new().request_timeout(Some(Duration::from_secs(5))),
)
.await
.with_context(|| format!("error creating new topic {} for sink", topic))?;
if res.len() != 1 {
coord_bail!(
"error creating topic {} for sink: \
kafka topic creation returned {} results, but exactly one result was expected",
topic,
res.len()
);
}
if let Err((_, e)) = res.into_element() {
// if the topic already exists and we reuse_existing, don't fail - instead proceed
// to read the schema
if !(succeed_if_exists && e == rdkafka::types::RDKafkaErrorCode::TopicAlreadyExists) {
coord_bail!("error creating topic {} for sink: {}", topic, e)
}
}
// Publish value schema for the topic.
//
// TODO(benesch): do we need to delete the Kafka topic if publishing the
// schema fails?
let value_schema_id = ccsr
.publish_schema(&format!("{}-value", topic), value_schema)
.await
.context("unable to publish value schema to registry in kafka sink")?;
let key_schema_id = if let Some(key_schema) = key_schema {
Some(
ccsr.publish_schema(&format!("{}-key", topic), key_schema)
.await
.context("unable to publish key schema to registry in kafka sink")?,
)
} else {
None
};
Ok((key_schema_id, value_schema_id))
}
async fn build_kafka(
builder: KafkaSinkConnectorBuilder,
id: GlobalId,
) -> Result<SinkConnector, CoordError> {
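// For sinks that are not exactly-once, append the sink id and a nonce so that
// re-created sinks write to a fresh topic instead of reusing an old one.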
let maybe_append_nonce = {
let exactly_once = builder.exactly_once;
let topic_suffix_nonce = builder.topic_suffix_nonce;
move |topic: &str| {
if exactly_once {
topic.to_string()
} else {
format!("{}-{}-{}", topic, id, topic_suffix_nonce)
}
}
};
let topic = maybe_append_nonce(&builder.topic_prefix);
// Configure the Kafka admin client and create the sink topic (partition count comes from the builder).
let mut config = ClientConfig::new();
config.set("bootstrap.servers", &builder.broker_addrs.to_string());
for (k, v) in builder.config_options.iter() {
// Explicitly reject the statistics interval option here because it's not
// properly supported for this client.
if k != "statistics.interval.ms" {
config.set(k, v);
}
}
let client = config
.create::<AdminClient<_>>()
.context("creating admin client failed")?;
let ccsr = builder.ccsr_config.build()?;
let (key_schema_id, value_schema_id) = register_kafka_topic(
&client,
&topic,
builder.partition_count,
builder.replication_factor,
&ccsr,
&builder.value_schema,
builder.key_schema.as_deref(),
builder.exactly_once,
)
.await
.context("error registering kafka topic for sink")?;
let consistency = if let Some(consistency_value_schema) = builder.consistency_value_schema {
let consistency_topic = maybe_append_nonce(
builder
.consistency_topic_prefix
.as_ref()
.expect("known to exist"),
);
// create consistency topic/schema and retrieve schema id
let (_, consistency_schema_id) = register_kafka_topic(
&client,
&consistency_topic,
1,
builder.replication_factor,
&ccsr,
&consistency_value_schema,
None,
builder.exactly_once,
)
.await
.context("error registering kafka consistency topic for sink")?;
// get latest committed timestamp from consistency topic
let gate_ts = if builder.exactly_once {
let mut consumer_config = config.clone();
consumer_config
.set("group.id", format!("materialize-bootstrap-{}", topic))
.set("isolation.level", "read_committed")
.set("enable.auto.commit", "false")
.set("auto.offset.reset", "earliest");
let mut consumer = consumer_config
.create::<BaseConsumer>()
.context("creating consumer client failed")?;
get_latest_ts(&consistency_topic, &mut consumer, Duration::from_secs(5))
.await
.context("error restarting from existing kafka consistency topic for sink")?
} else {
None
};
Some(KafkaSinkConsistencyConnector {
topic: consistency_topic,
schema_id: consistency_schema_id,
gate_ts,
})
} else {
None
};
Ok(SinkConnector::Kafka(KafkaSinkConnector {
key_schema_id,
value_schema_id,
topic,
topic_prefix: builder.topic_prefix,
addrs: builder.broker_addrs,
relation_key_indices: builder.relation_key_indices,
key_desc_and_indices: builder.key_desc_and_indices,
value_desc: builder.value_desc,
consistency,
exactly_once: builder.exactly_once,
transitive_source_dependencies: builder.transitive_source_dependencies,
fuel: builder.fuel,
config_options: builder.config_options,
}))
}
fn build_avro_ocf(
builder: AvroOcfSinkConnectorBuilder,
id: GlobalId,
) -> Result<SinkConnector, CoordError> {
let mut name = match builder.path.file_stem() {
None => coord_bail!(
"unable to read file name from path {}",
builder.path.display()
),
Some(stem) => stem.to_owned(),
};
name.push("-");
name.push(id.to_string());
name.push("-");
name.push(builder.file_name_suffix);
if let Some(extension) = builder.path.extension() {
name.push(".");
name.push(extension);
}
let path = builder.path.with_file_name(name);
// Try to create a new sink file
let _ = OpenOptions::new()
.append(true)
.create_new(true)
.open(&path)
.map_err(|e| {
anyhow!(
"unable to create avro ocf sink file {} : {}",
path.display(),
e
)
})?;
Ok(SinkConnector::AvroOcf(AvroOcfSinkConnector {
path,
value_desc: builder.value_desc,
}))
}
| get_next_message |
imagereview.go | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha1
import (
"errors"
internal "github.com/zoetrope/ssa-helper/applyconfigurations/internal"
v1 "github.com/zoetrope/ssa-helper/applyconfigurations/meta/v1"
imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// ImageReviewApplyConfiguration represents a declarative configuration of the ImageReview type for use
// with apply.
type ImageReviewApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
Spec *ImageReviewSpecApplyConfiguration `json:"spec,omitempty"`
Status *ImageReviewStatusApplyConfiguration `json:"status,omitempty"`
}
// ImageReview constructs a declarative configuration of the ImageReview type for use with
// apply.
func ImageReview(name string) *ImageReviewApplyConfiguration {
b := &ImageReviewApplyConfiguration{}
b.WithName(name)
b.WithKind("ImageReview")
b.WithAPIVersion("imagepolicy.k8s.io/v1alpha1")
return b
}
// ExtractImageReview extracts the applied configuration owned by fieldManager from
// imageReview. If no managedFields are found in imageReview for fieldManager, an
// ImageReviewApplyConfiguration is returned with only the Name, Namespace (if applicable),
// APIVersion and Kind populated. It is possible that no managed fields were found because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned any fields.
// imageReview must be an unmodified ImageReview API object that was retrieved from the Kubernetes API.
// ExtractImageReview provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
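//
// A hypothetical round trip (error handling and client wiring elided):
//
//	applyConfig, _ := ExtractImageReview(existing, "my-manager")
//	applyConfig.WithLabels(map[string]string{"reviewed": "true"})
//	// ...apply applyConfig back with a server-side-apply capable client.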
func ExtractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string) (*ImageReviewApplyConfiguration, error) {
return extractImageReview(imageReview, fieldManager, "")
}
// ExtractImageReviewStatus is the same as ExtractImageReview except
// that it extracts the status subresource applied configuration.
// Experimental!
func ExtractImageReviewStatus(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string) (*ImageReviewApplyConfiguration, error) {
return extractImageReview(imageReview, fieldManager, "status")
}
func extractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string, subresource string) (*ImageReviewApplyConfiguration, error) {
b := &ImageReviewApplyConfiguration{}
err := managedfields.ExtractInto(imageReview, internal.Parser().Type("io.k8s.api.imagepolicy.v1alpha1.ImageReview"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
b.WithName(imageReview.Name)
b.WithKind("ImageReview")
b.WithAPIVersion("imagepolicy.k8s.io/v1alpha1")
return b, nil
}
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithKind(value string) *ImageReviewApplyConfiguration {
b.Kind = &value
return b
}
// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithAPIVersion(value string) *ImageReviewApplyConfiguration {
b.APIVersion = &value
return b
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithName(value string) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Name = &value
return b
}
// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithGenerateName(value string) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.GenerateName = &value
return b
}
// WithNamespace sets the Namespace field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithNamespace(value string) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Namespace = &value
return b
}
// WithSelfLink sets the SelfLink field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SelfLink field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithSelfLink(value string) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.SelfLink = &value
return b
}
// WithUID sets the UID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the UID field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithUID(value types.UID) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.UID = &value
return b
}
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithResourceVersion(value string) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.ResourceVersion = &value
return b
}
// WithGeneration sets the Generation field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithGeneration(value int64) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.DeletionTimestamp = &value
return b
}
// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.DeletionGracePeriodSeconds = &value
return b
}
// WithLabels puts the entries into the Labels field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Labels field,
// overwriting existing map entries in the Labels field with the same key.
func (b *ImageReviewApplyConfiguration) WithLabels(entries map[string]string) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
if b.Labels == nil && len(entries) > 0 {
b.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
b.Labels[k] = v
}
return b
}
// WithAnnotations puts the entries into the Annotations field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Annotations field,
// overwriting existing map entries in the Annotations field with the same key.
func (b *ImageReviewApplyConfiguration) WithAnnotations(entries map[string]string) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
if b.Annotations == nil && len(entries) > 0 {
b.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
b.Annotations[k] = v
}
return b
}
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
func (b *ImageReviewApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
b.OwnerReferences = append(b.OwnerReferences, *values[i])
}
return b
}
// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Finalizers field.
func (b *ImageReviewApplyConfiguration) WithFinalizers(values ...string) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
b.Finalizers = append(b.Finalizers, values[i])
}
return b
}
// WithClusterName sets the ClusterName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ClusterName field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithClusterName(value string) *ImageReviewApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.ClusterName = &value
return b
}
func (b *ImageReviewApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
}
}
// WithSpec sets the Spec field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Spec field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithSpec(value *ImageReviewSpecApplyConfiguration) *ImageReviewApplyConfiguration {
b.Spec = value
return b
}
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
func (b *ImageReviewApplyConfiguration) WithStatus(value *ImageReviewStatusApplyConfiguration) *ImageReviewApplyConfiguration {
b.Status = value
return b
}
func (b *ImageReviewApplyConfiguration) Original() client.Object {
return &imagepolicyv1alpha1.ImageReview{}
}
func (b *ImageReviewApplyConfiguration) Extract(obj client.Object, fieldManager string, subresource string) (*ImageReviewApplyConfiguration, error) {
return extractImageReview(obj.(*imagepolicyv1alpha1.ImageReview), fieldManager, subresource)
}
func (b *ImageReviewApplyConfiguration) ObjectKey() (client.ObjectKey, error) {
if b.Name == nil |
return client.ObjectKey{
Name: *b.Name,
}, nil
}
| {
return client.ObjectKey{}, errors.New("The ImageReviewApplyConfiguration name should not be empty.")
} |
_inputs.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'DeploymentPropertiesArgs',
'IdentityArgs',
'ParametersLinkArgs',
'PlanArgs',
'SkuArgs',
'TemplateLinkArgs',
]
@pulumi.input_type
class DeploymentPropertiesArgs:
def __init__(__self__, *,
mode: pulumi.Input['DeploymentMode'],
parameters: Optional[Any] = None,
parameters_link: Optional[pulumi.Input['ParametersLinkArgs']] = None,
template: Optional[Any] = None,
template_link: Optional[pulumi.Input['TemplateLinkArgs']] = None):
"""
Deployment properties.
:param pulumi.Input['DeploymentMode'] mode: The deployment mode.
:param Any parameters: Deployment parameters. It can be a JObject or a well-formed JSON string. Use only one of Parameters or ParametersLink.
:param pulumi.Input['ParametersLinkArgs'] parameters_link: The parameters URI. Use only one of Parameters or ParametersLink.
:param Any template: The template content. It can be a JObject or a well-formed JSON string. Use only one of Template or TemplateLink.
:param pulumi.Input['TemplateLinkArgs'] template_link: The template URI. Use only one of Template or TemplateLink.
"""
pulumi.set(__self__, "mode", mode)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if parameters_link is not None:
pulumi.set(__self__, "parameters_link", parameters_link)
if template is not None:
pulumi.set(__self__, "template", template)
if template_link is not None:
pulumi.set(__self__, "template_link", template_link)
@property
@pulumi.getter
def mode(self) -> pulumi.Input['DeploymentMode']:
"""
The deployment mode.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: pulumi.Input['DeploymentMode']):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def parameters(self) -> Optional[Any]:
"""
Deployment parameters. It can be a JObject or a well-formed JSON string. Use only one of Parameters or ParametersLink.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[Any]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="parametersLink")
def parameters_link(self) -> Optional[pulumi.Input['ParametersLinkArgs']]:
"""
The parameters URI. Use only one of Parameters or ParametersLink.
"""
return pulumi.get(self, "parameters_link")
@parameters_link.setter
def parameters_link(self, value: Optional[pulumi.Input['ParametersLinkArgs']]):
pulumi.set(self, "parameters_link", value)
@property
@pulumi.getter
def template(self) -> Optional[Any]:
"""
The template content. It can be a JObject or a well-formed JSON string. Use only one of Template or TemplateLink.
"""
return pulumi.get(self, "template")
@template.setter
def template(self, value: Optional[Any]):
pulumi.set(self, "template", value)
@property
@pulumi.getter(name="templateLink")
def template_link(self) -> Optional[pulumi.Input['TemplateLinkArgs']]:
"""
The template URI. Use only one of Template or TemplateLink.
"""
return pulumi.get(self, "template_link")
@template_link.setter
def template_link(self, value: Optional[pulumi.Input['TemplateLinkArgs']]):
pulumi.set(self, "template_link", value)
@pulumi.input_type
class IdentityArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input['ResourceIdentityType']] = None):
"""
Identity for the resource.
:param pulumi.Input['ResourceIdentityType'] type: The identity type.
"""
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
"""
The identity type.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ParametersLinkArgs:
def __init__(__self__, *,
uri: pulumi.Input[str],
content_version: Optional[pulumi.Input[str]] = None):
"""
Entity representing the reference to the deployment parameters.
:param pulumi.Input[str] uri: URI referencing the template.
:param pulumi.Input[str] content_version: If included it must match the ContentVersion in the template.
"""
pulumi.set(__self__, "uri", uri)
if content_version is not None:
pulumi.set(__self__, "content_version", content_version)
@property
@pulumi.getter
def uri(self) -> pulumi.Input[str]:
"""
URI referencing the template.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: pulumi.Input[str]):
pulumi.set(self, "uri", value)
@property
@pulumi.getter(name="contentVersion")
def content_version(self) -> Optional[pulumi.Input[str]]:
"""
If included it must match the ContentVersion in the template.
"""
return pulumi.get(self, "content_version")
@content_version.setter
def content_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_version", value)
@pulumi.input_type
class PlanArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
promotion_code: Optional[pulumi.Input[str]] = None,
publisher: Optional[pulumi.Input[str]] = None):
"""
Plan for the resource.
:param pulumi.Input[str] name: The plan ID.
:param pulumi.Input[str] product: The offer ID.
:param pulumi.Input[str] promotion_code: The promotion code.
:param pulumi.Input[str] publisher: The publisher ID.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if product is not None:
pulumi.set(__self__, "product", product)
if promotion_code is not None:
pulumi.set(__self__, "promotion_code", promotion_code)
if publisher is not None:
pulumi.set(__self__, "publisher", publisher)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The plan ID.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def product(self) -> Optional[pulumi.Input[str]]:
"""
The offer ID.
"""
return pulumi.get(self, "product")
@product.setter
def product(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product", value)
@property
@pulumi.getter(name="promotionCode")
def promotion_code(self) -> Optional[pulumi.Input[str]]:
"""
The promotion code.
"""
return pulumi.get(self, "promotion_code")
@promotion_code.setter
def promotion_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "promotion_code", value)
@property
@pulumi.getter
def publisher(self) -> Optional[pulumi.Input[str]]:
"""
The publisher ID.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "publisher", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
capacity: Optional[pulumi.Input[int]] = None,
family: Optional[pulumi.Input[str]] = None,
model: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[str]] = None,
tier: Optional[pulumi.Input[str]] = None):
"""
Sku for the resource.
:param pulumi.Input[int] capacity: The sku capacity.
:param pulumi.Input[str] family: The sku family.
:param pulumi.Input[str] model: The sku model.
:param pulumi.Input[str] name: The sku name.
:param pulumi.Input[str] size: The sku size.
:param pulumi.Input[str] tier: The sku tier.
"""
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if family is not None:
pulumi.set(__self__, "family", family)
if model is not None:
pulumi.set(__self__, "model", model)
if name is not None:
pulumi.set(__self__, "name", name)
if size is not None:
pulumi.set(__self__, "size", size)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input[int]]:
"""
The sku capacity.
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def family(self) -> Optional[pulumi.Input[str]]:
"""
The sku family.
"""
return pulumi.get(self, "family")
@family.setter
def family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "family", value)
@property
@pulumi.getter
def | (self) -> Optional[pulumi.Input[str]]:
"""
The sku model.
"""
return pulumi.get(self, "model")
@model.setter
def model(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "model", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The sku name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[str]]:
"""
The sku size.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def tier(self) -> Optional[pulumi.Input[str]]:
"""
The sku tier.
"""
return pulumi.get(self, "tier")
@tier.setter
def tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tier", value)
@pulumi.input_type
class TemplateLinkArgs:
def __init__(__self__, *,
uri: pulumi.Input[str],
content_version: Optional[pulumi.Input[str]] = None):
"""
Entity representing the reference to the template.
:param pulumi.Input[str] uri: URI referencing the template.
:param pulumi.Input[str] content_version: If included it must match the ContentVersion in the template.
"""
pulumi.set(__self__, "uri", uri)
if content_version is not None:
pulumi.set(__self__, "content_version", content_version)
@property
@pulumi.getter
def uri(self) -> pulumi.Input[str]:
"""
URI referencing the template.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: pulumi.Input[str]):
pulumi.set(self, "uri", value)
@property
@pulumi.getter(name="contentVersion")
def content_version(self) -> Optional[pulumi.Input[str]]:
"""
If included it must match the ContentVersion in the template.
"""
return pulumi.get(self, "content_version")
@content_version.setter
def content_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_version", value)
| model |
update_runtime_config_test.go | package cmd_test
import (
"errors"
"github.com/cppforlife/go-patch/patch"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/cloudfoundry/bosh-cli/cmd"
fakecmd "github.com/cloudfoundry/bosh-cli/cmd/cmdfakes"
. "github.com/cloudfoundry/bosh-cli/cmd/opts"
boshdir "github.com/cloudfoundry/bosh-cli/director"
fakedir "github.com/cloudfoundry/bosh-cli/director/directorfakes"
boshtpl "github.com/cloudfoundry/bosh-cli/director/template"
fakeui "github.com/cloudfoundry/bosh-cli/ui/fakes"
)
var _ = Describe("UpdateRuntimeConfigCmd", func() {
var (
ui *fakeui.FakeUI
director *fakedir.FakeDirector
releaseUploader *fakecmd.FakeReleaseUploader
command UpdateRuntimeConfigCmd
)
BeforeEach(func() {
ui = &fakeui.FakeUI{}
director = &fakedir.FakeDirector{}
releaseUploader = &fakecmd.FakeReleaseUploader{
UploadReleasesStub: func(bytes []byte) ([]byte, error) { return bytes, nil },
}
command = NewUpdateRuntimeConfigCmd(ui, director, releaseUploader)
})
Describe("Run", func() {
var (
opts UpdateRuntimeConfigOpts
)
BeforeEach(func() {
opts = UpdateRuntimeConfigOpts{
Args: UpdateRuntimeConfigArgs{
RuntimeConfig: FileBytesArg{Bytes: []byte("runtime: config")},
},
Name: "angry-smurf",
}
})
act := func() error { return command.Run(opts) }
It("updates runtime config", func() {
err := act()
Expect(err).ToNot(HaveOccurred())
Expect(director.UpdateRuntimeConfigCallCount()).To(Equal(1))
name, bytes := director.UpdateRuntimeConfigArgsForCall(0)
Expect(name).To(Equal("angry-smurf"))
Expect(bytes).To(Equal([]byte("runtime: config\n")))
})
It("updates templated runtime config", func() {
opts.Args.RuntimeConfig = FileBytesArg{
Bytes: []byte("name1: ((name1))\nname2: ((name2))"),
}
| opts.VarsFiles = []boshtpl.VarsFileArg{
{Vars: boshtpl.StaticVariables(map[string]interface{}{"name1": "val1-from-file"})},
{Vars: boshtpl.StaticVariables(map[string]interface{}{"name2": "val2-from-file"})},
}
opts.OpsFiles = []OpsFileArg{
{
Ops: patch.Ops([]patch.Op{
patch.ReplaceOp{Path: patch.MustNewPointerFromString("/xyz?"), Value: "val"},
}),
},
}
err := act()
Expect(err).ToNot(HaveOccurred())
Expect(director.UpdateRuntimeConfigCallCount()).To(Equal(1))
name, bytes := director.UpdateRuntimeConfigArgsForCall(0)
Expect(name).To(Equal("angry-smurf"))
Expect(bytes).To(Equal([]byte("name1: val1-from-kv\nname2: val2-from-file\nxyz: val\n")))
})
It("uploads releases provided in the manifest after manifest has been interpolated", func() {
opts.Args.RuntimeConfig = FileBytesArg{
Bytes: []byte("before-upload-config: ((key))"),
}
opts.VarKVs = []boshtpl.VarKV{
{Name: "key", Value: "key-val"},
}
releaseUploader.UploadReleasesReturns([]byte("after-upload-config"), nil)
err := act()
Expect(err).ToNot(HaveOccurred())
bytes := releaseUploader.UploadReleasesArgsForCall(0)
Expect(bytes).To(Equal([]byte("before-upload-config: key-val\n")))
Expect(director.UpdateRuntimeConfigCallCount()).To(Equal(1))
name, bytes := director.UpdateRuntimeConfigArgsForCall(0)
Expect(name).To(Equal("angry-smurf"))
Expect(bytes).To(Equal([]byte("after-upload-config")))
})
It("returns error and does not deploy if uploading releases fails", func() {
opts.Args.RuntimeConfig = FileBytesArg{
Bytes: []byte(`
releases:
- name: capi
sha1: capi-sha1
url: https://capi-url
version: 1+capi
`),
}
releaseUploader.UploadReleasesReturns(nil, errors.New("fake-err"))
err := act()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("fake-err"))
Expect(director.UpdateRuntimeConfigCallCount()).To(Equal(0))
})
It("does not update if confirmation is rejected", func() {
ui.AskedConfirmationErr = errors.New("stop")
err := act()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("stop"))
Expect(director.UpdateRuntimeConfigCallCount()).To(Equal(0))
})
It("returns error if updating failed", func() {
director.UpdateRuntimeConfigReturns(errors.New("fake-err"))
err := act()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("fake-err"))
})
It("returns an error if diffing failed", func() {
director.DiffRuntimeConfigReturns(boshdir.ConfigDiff{}, errors.New("Fetching diff result"))
err := act()
Expect(err).To(HaveOccurred())
})
It("gets the diff from the deployment", func() {
diff := [][]interface{}{
[]interface{}{"some line that stayed", nil},
[]interface{}{"some line that was added", "added"},
[]interface{}{"some line that was removed", "removed"},
}
expectedDiff := boshdir.NewConfigDiff(diff)
director.DiffRuntimeConfigReturns(expectedDiff, nil)
err := act()
Expect(err).ToNot(HaveOccurred())
Expect(director.DiffRuntimeConfigCallCount()).To(Equal(1))
Expect(ui.Said).To(ContainElement(" some line that stayed\n"))
Expect(ui.Said).To(ContainElement("+ some line that was added\n"))
Expect(ui.Said).To(ContainElement("- some line that was removed\n"))
})
Context("when NoRedact option is passed", func() {
BeforeEach(func() {
opts = UpdateRuntimeConfigOpts{
Args: UpdateRuntimeConfigArgs{
RuntimeConfig: FileBytesArg{Bytes: []byte("runtime: config")},
},
Name: "angry-smurf",
NoRedact: true,
}
})
It("adds redact to api call", func() {
director.DiffRuntimeConfigReturns(boshdir.NewConfigDiff([][]interface{}{}), nil)
err := act()
Expect(err).ToNot(HaveOccurred())
_, _, noRedact := director.DiffRuntimeConfigArgsForCall(0)
Expect(noRedact).To(Equal(true))
})
})
})
}) | opts.VarKVs = []boshtpl.VarKV{
{Name: "name1", Value: "val1-from-kv"},
}
|
p_header.go | package backends
import (
"github.com/artpar/go-guerrilla/mail"
"strings"
"time"
)
type HeaderConfig struct {
PrimaryHost string `json:"primary_mail_host"`
}
// ----------------------------------------------------------------------------------
// Processor Name: header
// ----------------------------------------------------------------------------------
// Description : Adds delivery information headers to e.DeliveryHeader
// ----------------------------------------------------------------------------------
// Config Options: primary_mail_host (see HeaderConfig below)
// --------------:-------------------------------------------------------------------
// Input : e.Helo
// : e.RemoteIP
// : e.RcptTo
// : e.Hashes
// ----------------------------------------------------------------------------------
// Output : Sets e.DeliveryHeader with additional delivery info
// ----------------------------------------------------------------------------------
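//
// Illustrative backend config snippet (the processor-chain key and value are
// hypothetical; primary_mail_host maps to HeaderConfig):
//
//	"save_process": "HeadersParser|Header|Debugger",
//	"primary_mail_host": "mail.example.com",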
func init() {
processors["header"] = func() Decorator {
return Header()
}
}
// Generate the MTA delivery header
// Sets e.DeliveryHeader part of the envelope with the generated header
func Header() Decorator {
var config *HeaderConfig
Svc.AddInitializer(InitializeWith(func(backendConfig BackendConfig) error {
configType := BaseConfig(&HeaderConfig{})
bcfg, err := Svc.ExtractConfig(backendConfig, configType)
if err != nil {
return err
}
config = bcfg.(*HeaderConfig)
return nil
}))
return func(p Processor) Processor {
return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
if task == TaskSaveMail | else {
return p.Process(e, task)
}
})
}
}
| {
to := strings.TrimSpace(e.RcptTo[0].User) + "@" + config.PrimaryHost
hash := "unknown"
if len(e.Hashes) > 0 {
hash = e.Hashes[0]
}
protocol := "SMTP"
if e.ESMTP {
protocol = "E" + protocol
}
if e.TLS {
protocol = protocol + "S"
}
var addHead string
addHead += "Delivered-To: " + to + "\n"
addHead += "Received: from " + e.RemoteIP + " ([" + e.RemoteIP + "])\n"
if len(e.RcptTo) > 0 {
addHead += " by " + e.RcptTo[0].Host + " with " + protocol + " id " + hash + "@" + e.RcptTo[0].Host + ";\n"
}
addHead += " " + time.Now().Format(time.RFC1123Z) + "\n"
// save the result
e.DeliveryHeader = addHead
// next processor
return p.Process(e, task)
} |
merge.js |
var isScalar = require('is-scalar');
module.exports = merge;
// merge nextValue into origValue
// strictly same object and array
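// Examples (illustrative):
//   merge({a: 1, b: {c: 2}}, {b: {d: 3}}) // => {a: 1, b: {c: 2, d: 3}}
//   merge([1, 2, 3], [4, 5])              // mutates and returns [4, 5]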
function merge(origValue, nextValue) {
if(isScalar(nextValue)) return nextValue;
if(isArray(nextValue)) {
if(isArray(origValue)) {
origValue.splice.apply(origValue, [0, origValue.length].concat(nextValue));
return origValue;
}
return nextValue.slice(0);
}
if(isScalar(origValue)) return nextValue;
if(isArray(origValue)) return nextValue;
Object.keys(nextValue).forEach(function(key){
origValue[key] = merge(origValue[key], nextValue[key]);
});
return origValue;
}
function | (val) {
// `typeof` never returns 'array'; Array.isArray also handles cross-realm arrays
return Array.isArray(val) || val instanceof Array;
}
| isArray |
rpc_client.rs | //! Communication with a Solana node over RPC.
//!
//! Software that interacts with the Solana blockchain, whether querying its
//! state or submitting transactions, communicates with a Solana node over
//! [JSON-RPC], using the [`RpcClient`] type.
//!
//! [JSON-RPC]: https://www.jsonrpc.org/specification
#[allow(deprecated)]
use crate::rpc_deprecated_config::{
RpcConfirmedBlockConfig, RpcConfirmedTransactionConfig,
RpcGetConfirmedSignaturesForAddress2Config,
};
use {
crate::{
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
http_sender::HttpSender,
mock_sender::{MockSender, Mocks},
rpc_config::RpcAccountInfoConfig,
rpc_config::*,
rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter},
rpc_response::*,
rpc_sender::RpcSender,
},
bincode::serialize,
indicatif::{ProgressBar, ProgressStyle},
log::*,
serde_json::{json, Value},
solana_account_decoder::{
parse_token::{TokenAccountType, UiTokenAccount, UiTokenAmount},
UiAccount, UiAccountData, UiAccountEncoding,
},
solana_sdk::{
account::Account,
clock::{Epoch, Slot, UnixTimestamp, DEFAULT_MS_PER_SLOT, MAX_HASH_AGE_IN_SECONDS},
commitment_config::{CommitmentConfig, CommitmentLevel},
epoch_info::EpochInfo,
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, uses_durable_nonce, Transaction},
},
solana_transaction_status::{
EncodedConfirmedBlock, EncodedConfirmedTransaction, TransactionStatus, UiConfirmedBlock,
UiTransactionEncoding,
},
solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY,
std::{
cmp::min,
net::SocketAddr,
str::FromStr,
sync::RwLock,
thread::sleep,
time::{Duration, Instant},
},
};
#[derive(Default)]
pub struct RpcClientConfig {
commitment_config: CommitmentConfig,
confirm_transaction_initial_timeout: Option<Duration>,
}
impl RpcClientConfig {
fn with_commitment(commitment_config: CommitmentConfig) -> Self {
RpcClientConfig {
commitment_config,
..Self::default()
}
}
}
/// A client of a remote Solana node.
///
/// `RpcClient` communicates with a Solana node over [JSON-RPC], with the
/// [Solana JSON-RPC protocol][jsonprot]. It is the primary Rust interface for
/// querying and transacting with the network from external programs.
///
/// `RpcClient`s generally communicate over HTTP on port 8899, a typical server
/// URL being "http://localhost:8899".
///
/// By default, requests to confirm transactions are only completed once those
/// transactions are finalized, meaning they are definitely permanently
/// committed. Transactions can be confirmed with less finality by creating
/// `RpcClient` with an explicit [`CommitmentConfig`], or by calling the various
/// `_with_commitment` methods, like
/// [`RpcClient::confirm_transaction_with_commitment`].
///
/// Requests may timeout, in which case they return a [`ClientError`] where the
/// [`ClientErrorKind`] is [`ClientErrorKind::Reqwest`], and where the interior
/// [`reqwest::Error`](crate::client_error::reqwest::Error)s
/// [`is_timeout`](crate::client_error::reqwest::Error::is_timeout) method
/// returns `true`. The default timeout is 30 seconds, and may be changed by
/// calling an appropriate constructor with a `timeout` parameter.
///
/// `RpcClient` encapsulates an [`RpcSender`], which implements the underlying
/// RPC protocol. On top of `RpcSender` it adds methods for common tasks, while
/// re-exposing the underlying RPC sending functionality through the
/// [`send`][RpcClient::send] method.
///
/// [jsonprot]: https://docs.solana.com/developing/clients/jsonrpc-api
/// [JSON-RPC]: https://www.jsonrpc.org/specification
///
/// While `RpcClient` encapsulates an abstract `RpcSender`, it is most commonly
/// created with an [`HttpSender`], communicating over HTTP, usually on port
/// 8899. It can also be created with [`MockSender`] during testing.
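/// # Examples
///
/// A minimal construction sketch (the URL and commitment level here are
/// illustrative assumptions):
///
/// ```
/// # use solana_client::rpc_client::RpcClient;
/// # use solana_sdk::commitment_config::CommitmentConfig;
/// let url = "http://localhost:8899".to_string();
/// let client = RpcClient::new_with_commitment(url, CommitmentConfig::confirmed());
/// ```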
pub struct RpcClient {
sender: Box<dyn RpcSender + Send + Sync + 'static>,
config: RpcClientConfig,
node_version: RwLock<Option<semver::Version>>,
}
impl RpcClient {
/// Create an `RpcClient` from an [`RpcSender`] and an [`RpcClientConfig`].
///
/// This is the basic constructor, allowing construction with any type of
/// `RpcSender`. Most applications should use one of the other constructors,
/// such as [`new`] and [`new_mock`], which create an `RpcClient`
/// encapsulating an [`HttpSender`] and [`MockSender`] respectively.
fn new_sender<T: RpcSender + Send + Sync + 'static>(
sender: T,
config: RpcClientConfig,
) -> Self {
Self {
sender: Box::new(sender),
node_version: RwLock::new(None),
config,
}
}
/// Create an HTTP `RpcClient`.
///
/// The URL is an HTTP URL, usually for port 8899, as in
/// "http://localhost:8899".
///
/// The client has a default timeout of 30 seconds, and a default commitment
/// level of [`Finalized`](CommitmentLevel::Finalized).
///
/// # Examples
///
/// ```
/// # use solana_client::rpc_client::RpcClient;
/// let url = "http://localhost:8899".to_string();
/// let client = RpcClient::new(url);
/// ```
pub fn new(url: String) -> Self {
Self::new_with_commitment(url, CommitmentConfig::default())
}
/// Create an HTTP `RpcClient` with specified commitment level.
///
/// The URL is an HTTP URL, usually for port 8899, as in
/// "http://localhost:8899".
///
/// The client has a default timeout of 30 seconds, and a user-specified
/// [`CommitmentLevel`] via [`CommitmentConfig`].
///
/// # Examples
///
/// ```
/// # use solana_sdk::commitment_config::CommitmentConfig;
/// # use solana_client::rpc_client::RpcClient;
/// let url = "http://localhost:8899".to_string();
/// let commitment_config = CommitmentConfig::processed();
/// let client = RpcClient::new_with_commitment(url, commitment_config);
/// ```
pub fn new_with_commitment(url: String, commitment_config: CommitmentConfig) -> Self {
Self::new_sender(
HttpSender::new(url),
RpcClientConfig::with_commitment(commitment_config),
)
}
/// Create an HTTP `RpcClient` with specified timeout.
///
/// The URL is an HTTP URL, usually for port 8899, as in
/// "http://localhost:8899".
///
/// The client has a default commitment level of
/// [`Finalized`](CommitmentLevel::Finalized).
///
/// # Examples
///
/// ```
/// # use std::time::Duration;
/// # use solana_client::rpc_client::RpcClient;
/// let url = "http://localhost::8899".to_string();
/// let timeout = Duration::from_secs(1);
/// let client = RpcClient::new_with_timeout(url, timeout);
/// ```
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
Self::new_sender(
HttpSender::new_with_timeout(url, timeout),
RpcClientConfig::with_commitment(CommitmentConfig::default()),
)
}
/// Create an HTTP `RpcClient` with specified timeout and commitment level.
///
/// The URL is an HTTP URL, usually for port 8899, as in
/// "http://localhost:8899".
///
/// # Examples
///
/// ```
/// # use std::time::Duration;
/// # use solana_client::rpc_client::RpcClient;
/// # use solana_sdk::commitment_config::CommitmentConfig;
/// let url = "http://localhost::8899".to_string();
/// let timeout = Duration::from_secs(1);
/// let commitment_config = CommitmentConfig::processed();
/// let client = RpcClient::new_with_timeout_and_commitment(
/// url,
/// timeout,
/// commitment_config,
/// );
/// ```
pub fn new_with_timeout_and_commitment(
url: String,
timeout: Duration,
commitment_config: CommitmentConfig,
) -> Self {
Self::new_sender(
HttpSender::new_with_timeout(url, timeout),
RpcClientConfig::with_commitment(commitment_config),
)
}
/// Create an HTTP `RpcClient` with specified timeout and commitment level.
///
/// The URL is an HTTP URL, usually for port 8899, as in
/// "http://localhost:8899".
///
/// The `confirm_transaction_initial_timeout` argument specifies, when
/// confirming a transaction via one of the `_with_spinner` methods, like
/// [`RpcClient::send_and_confirm_transaction_with_spinner`], the amount of
/// time to allow for the server to initially process a transaction. In
/// other words, setting `confirm_transaction_initial_timeout` to a nonzero value allows
/// `RpcClient` to wait for confirmation of a transaction that the server
/// has not "seen" yet.
///
/// # Examples
///
/// ```
/// # use std::time::Duration;
/// # use solana_client::rpc_client::RpcClient;
/// # use solana_sdk::commitment_config::CommitmentConfig;
/// let url = "http://localhost::8899".to_string();
/// let timeout = Duration::from_secs(1);
/// let commitment_config = CommitmentConfig::processed();
/// let confirm_transaction_initial_timeout = Duration::from_secs(10);
/// let client = RpcClient::new_with_timeouts_and_commitment(
/// url,
/// timeout,
/// commitment_config,
/// confirm_transaction_initial_timeout,
/// );
/// ```
pub fn new_with_timeouts_and_commitment(
url: String,
timeout: Duration,
commitment_config: CommitmentConfig,
confirm_transaction_initial_timeout: Duration,
) -> Self {
Self::new_sender(
HttpSender::new_with_timeout(url, timeout),
RpcClientConfig {
commitment_config,
confirm_transaction_initial_timeout: Some(confirm_transaction_initial_timeout),
},
)
}
/// Create a mock `RpcClient`.
///
/// See the [`MockSender`] documentation for an explanation of
/// how it treats the `url` argument.
///
/// # Examples
///
/// ```
/// # use solana_client::rpc_client::RpcClient;
/// // Create an `RpcClient` that always succeeds
/// let url = "succeeds".to_string();
/// let successful_client = RpcClient::new_mock(url);
/// ```
///
/// ```
/// # use solana_client::rpc_client::RpcClient;
/// // Create an `RpcClient` that always fails
/// let url = "fails".to_string();
/// let fails_client = RpcClient::new_mock(url);
/// ```
pub fn new_mock(url: String) -> Self {
Self::new_sender(
MockSender::new(url),
RpcClientConfig::with_commitment(CommitmentConfig::default()),
)
}
/// Create a mock `RpcClient`.
///
/// See the [`MockSender`] documentation for an explanation of how it treats
/// the `url` argument.
///
/// # Examples
///
/// ```
/// # use solana_client::{
/// # rpc_client::RpcClient,
/// # rpc_request::RpcRequest,
/// # };
/// # use std::collections::HashMap;
/// # use serde_json::json;
/// use solana_client::rpc_response::{Response, RpcResponseContext};
///
/// // Create a mock with a custom response to the `GetBalance` request
/// let account_balance = 50;
/// let account_balance_response = json!(Response {
/// context: RpcResponseContext { slot: 1 },
/// value: json!(account_balance),
/// });
///
/// let mut mocks = HashMap::new();
/// mocks.insert(RpcRequest::GetBalance, account_balance_response);
/// let url = "succeeds".to_string();
/// let client = RpcClient::new_mock_with_mocks(url, mocks);
/// ```
pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
Self::new_sender(
MockSender::new_with_mocks(url, mocks),
RpcClientConfig::with_commitment(CommitmentConfig::default()),
)
}
/// Create an HTTP `RpcClient` from a [`SocketAddr`].
///
/// The client has a default timeout of 30 seconds, and a default commitment
/// level of [`Finalized`](CommitmentLevel::Finalized).
///
/// # Examples
///
/// ```
/// # use std::net::SocketAddr;
/// # use solana_client::rpc_client::RpcClient;
/// let addr = SocketAddr::from(([127, 0, 0, 1], 8899));
/// let client = RpcClient::new_socket(addr);
/// ```
pub fn new_socket(addr: SocketAddr) -> Self {
Self::new(get_rpc_request_str(addr, false))
}
/// Create an HTTP `RpcClient` from a [`SocketAddr`] with specified commitment level.
///
/// The client has a default timeout of 30 seconds, and a user-specified
/// [`CommitmentLevel`] via [`CommitmentConfig`].
///
/// # Examples
///
/// ```
/// # use std::net::SocketAddr;
/// # use solana_client::rpc_client::RpcClient;
/// # use solana_sdk::commitment_config::CommitmentConfig;
/// let addr = SocketAddr::from(([127, 0, 0, 1], 8899));
/// let commitment_config = CommitmentConfig::processed();
/// let client = RpcClient::new_socket_with_commitment(
/// addr,
/// commitment_config
/// );
/// ```
pub fn new_socket_with_commitment(
addr: SocketAddr,
commitment_config: CommitmentConfig,
) -> Self {
Self::new_with_commitment(get_rpc_request_str(addr, false), commitment_config)
}
/// Create an HTTP `RpcClient` from a [`SocketAddr`] with specified timeout.
///
/// The client has a default commitment level of [`Finalized`](CommitmentLevel::Finalized).
///
/// # Examples
///
/// ```
/// # use std::net::SocketAddr;
/// # use std::time::Duration;
/// # use solana_client::rpc_client::RpcClient;
/// let addr = SocketAddr::from(([127, 0, 0, 1], 8899));
/// let timeout = Duration::from_secs(1);
/// let client = RpcClient::new_socket_with_timeout(addr, timeout);
/// ```
pub fn new_socket_with_timeout(addr: SocketAddr, timeout: Duration) -> Self {
let url = get_rpc_request_str(addr, false);
Self::new_with_timeout(url, timeout)
}
fn get_node_version(&self) -> Result<semver::Version, RpcError> {
let r_node_version = self.node_version.read().unwrap();
if let Some(version) = &*r_node_version {
Ok(version.clone())
} else {
drop(r_node_version);
let mut w_node_version = self.node_version.write().unwrap();
let node_version = self.get_version().map_err(|e| {
RpcError::RpcRequestError(format!("cluster version query failed: {}", e))
})?;
let node_version = semver::Version::parse(&node_version.solana_core).map_err(|e| {
RpcError::RpcRequestError(format!("failed to parse cluster version: {}", e))
})?;
*w_node_version = Some(node_version.clone());
Ok(node_version)
}
}
pub fn commitment(&self) -> CommitmentConfig {
self.config.commitment_config
}
fn use_deprecated_commitment(&self) -> Result<bool, RpcError> {
Ok(self.get_node_version()? < semver::Version::new(1, 5, 5))
}
fn maybe_map_commitment(
&self,
requested_commitment: CommitmentConfig,
) -> Result<CommitmentConfig, RpcError> {
if matches!(
requested_commitment.commitment,
CommitmentLevel::Finalized | CommitmentLevel::Confirmed | CommitmentLevel::Processed
) && self.use_deprecated_commitment()?
{
return Ok(CommitmentConfig::use_deprecated_commitment(
requested_commitment,
));
}
Ok(requested_commitment)
}
#[allow(deprecated)]
fn maybe_map_request(&self, mut request: RpcRequest) -> Result<RpcRequest, RpcError> {
if self.get_node_version()? < semver::Version::new(1, 7, 0) {
request = match request {
RpcRequest::GetBlock => RpcRequest::GetConfirmedBlock,
RpcRequest::GetBlocks => RpcRequest::GetConfirmedBlocks,
RpcRequest::GetBlocksWithLimit => RpcRequest::GetConfirmedBlocksWithLimit,
RpcRequest::GetSignaturesForAddress => {
RpcRequest::GetConfirmedSignaturesForAddress2
}
RpcRequest::GetTransaction => RpcRequest::GetConfirmedTransaction,
_ => request,
};
}
Ok(request)
}
/// # Examples
///
/// ```
/// # use solana_client::{
/// # client_error::ClientError,
/// # rpc_client::RpcClient,
/// # rpc_config::RpcSimulateTransactionConfig,
/// # };
/// # use solana_sdk::{
/// # signature::Signature,
/// # signer::keypair::Keypair,
/// # hash::Hash,
/// # system_transaction,
/// # };
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
/// // Transfer lamports from some account to a random account
/// let key = Keypair::new();
/// let to = solana_sdk::pubkey::new_rand();
/// let lamports = 50;
/// # let recent_blockhash = Hash::default();
/// let tx = system_transaction::transfer(&key, &to, lamports, recent_blockhash);
/// let signature = rpc_client.send_transaction(&tx)?;
/// let confirmed = rpc_client.confirm_transaction(&signature)?;
/// assert!(confirmed);
/// # Ok::<(), ClientError>(())
/// ```
pub fn confirm_transaction(&self, signature: &Signature) -> ClientResult<bool> {
Ok(self
.confirm_transaction_with_commitment(signature, self.commitment())?
.value)
}
/// # Examples
///
/// ```
/// # use solana_client::{
/// # client_error::ClientError,
/// # rpc_client::RpcClient,
/// # rpc_config::RpcSimulateTransactionConfig,
/// # };
/// # use solana_sdk::{
/// # commitment_config::CommitmentConfig,
/// # signature::Signature,
/// # signer::keypair::Keypair,
/// # hash::Hash,
/// # system_transaction,
/// # };
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
/// // Transfer lamports from some account to a random account
/// let key = Keypair::new();
/// let to = solana_sdk::pubkey::new_rand();
/// let lamports = 50;
/// # let recent_blockhash = Hash::default();
/// let tx = system_transaction::transfer(&key, &to, lamports, recent_blockhash);
/// let signature = rpc_client.send_transaction(&tx)?;
/// let commitment_config = CommitmentConfig::confirmed();
/// let confirmed = rpc_client.confirm_transaction_with_commitment(
/// &signature,
/// commitment_config,
/// )?;
/// assert!(confirmed.value);
/// # Ok::<(), ClientError>(())
/// ```
pub fn confirm_transaction_with_commitment(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
) -> RpcResult<bool> {
let Response { context, value } = self.get_signature_statuses(&[*signature])?;
Ok(Response {
context,
value: value[0]
.as_ref()
.filter(|result| result.satisfies_commitment(commitment_config))
.map(|result| result.status.is_ok())
.unwrap_or_default(),
})
}
/// # Examples
///
/// ```
/// # use solana_client::{
/// # client_error::ClientError,
/// # rpc_client::RpcClient,
/// # };
/// # use solana_sdk::{
/// # signature::Signature,
/// # signer::keypair::Keypair,
/// # hash::Hash,
/// # system_transaction,
/// # };
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
/// // Transfer lamports from some account to a random account
/// let key = Keypair::new();
/// let to = solana_sdk::pubkey::new_rand();
/// let lamports = 50;
/// # let recent_blockhash = Hash::default();
/// let tx = system_transaction::transfer(&key, &to, lamports, recent_blockhash);
/// let signature = rpc_client.send_transaction(&tx)?;
/// let confirmed = rpc_client.confirm_transaction(&signature)?;
/// assert!(confirmed);
/// # Ok::<(), ClientError>(())
/// ```
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
self.send_transaction_with_config(
transaction,
RpcSendTransactionConfig {
preflight_commitment: Some(
self.maybe_map_commitment(self.commitment())?.commitment,
),
..RpcSendTransactionConfig::default()
},
)
}
fn default_cluster_transaction_encoding(&self) -> Result<UiTransactionEncoding, RpcError> {
if self.get_node_version()? < semver::Version::new(1, 3, 16) {
Ok(UiTransactionEncoding::Base58)
} else {
Ok(UiTransactionEncoding::Base64)
}
}
/// # Examples
///
/// ```
/// # use solana_client::{
/// # client_error::ClientError,
/// # rpc_client::RpcClient,
/// # rpc_config::RpcSendTransactionConfig,
/// # };
/// # use solana_sdk::{
/// # signature::Signature,
/// # signer::keypair::Keypair,
/// # hash::Hash,
/// # system_transaction,
/// # };
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
/// // Transfer lamports from some account to a random account
/// let key = Keypair::new();
/// let to = solana_sdk::pubkey::new_rand();
/// let lamports = 50;
/// # let recent_blockhash = Hash::default();
/// let tx = system_transaction::transfer(&key, &to, lamports, recent_blockhash);
/// let config = RpcSendTransactionConfig {
/// skip_preflight: true,
/// .. RpcSendTransactionConfig::default()
/// };
/// let signature = rpc_client.send_transaction_with_config(
/// &tx,
/// config,
/// )?;
/// let confirmed = rpc_client.confirm_transaction(&signature)?;
/// assert!(confirmed);
/// # Ok::<(), ClientError>(())
/// ```
pub fn send_transaction_with_config(
&self,
transaction: &Transaction,
config: RpcSendTransactionConfig,
) -> ClientResult<Signature> {
let encoding = if let Some(encoding) = config.encoding {
encoding
} else {
self.default_cluster_transaction_encoding()?
};
let preflight_commitment = CommitmentConfig {
commitment: config.preflight_commitment.unwrap_or_default(),
};
let preflight_commitment = self.maybe_map_commitment(preflight_commitment)?;
let config = RpcSendTransactionConfig {
encoding: Some(encoding),
preflight_commitment: Some(preflight_commitment.commitment),
..config
};
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
let signature_base58_str: String = match self.send(
RpcRequest::SendTransaction,
json!([serialized_encoded, config]),
) {
Ok(signature_base58_str) => signature_base58_str,
Err(err) => {
if let ClientErrorKind::RpcError(RpcError::RpcResponseError {
code,
message,
data,
}) = &err.kind
{
debug!("{} {}", code, message);
if let RpcResponseErrorData::SendTransactionPreflightFailure(
RpcSimulateTransactionResult {
logs: Some(logs), ..
},
) = data
{
for (i, log) in logs.iter().enumerate() {
debug!("{:>3}: {}", i + 1, log);
}
debug!("");
}
}
return Err(err);
}
};
let signature = signature_base58_str
.parse::<Signature>()
.map_err(|err| Into::<ClientError>::into(RpcError::ParseError(err.to_string())))?;
// A mismatching RPC response signature indicates an issue with the RPC node, and
// should not be passed along to confirmation methods. The transaction may or may
// not have been submitted to the cluster, so callers should verify the success of
// the correct transaction signature independently.
if signature != transaction.signatures[0] {
Err(RpcError::RpcRequestError(format!(
"RPC node returned mismatched signature {:?}, expected {:?}",
signature, transaction.signatures[0]
))
.into())
} else {
Ok(transaction.signatures[0])
}
}
/// # Examples
///
/// ```
/// # use solana_client::{
/// # client_error::ClientError,
/// # rpc_client::RpcClient,
/// # rpc_response::RpcSimulateTransactionResult,
/// # };
/// # use solana_sdk::{
/// # signature::Signature,
/// # signer::keypair::Keypair,
/// # hash::Hash,
/// # system_transaction,
/// # };
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
/// // Transfer lamports from some account to a random account
/// let key = Keypair::new();
/// let to = solana_sdk::pubkey::new_rand();
/// let lamports = 50;
/// # let recent_blockhash = Hash::default();
/// let tx = system_transaction::transfer(&key, &to, lamports, recent_blockhash);
/// let result = rpc_client.simulate_transaction(&tx)?;
/// assert!(result.value.err.is_none());
/// # Ok::<(), ClientError>(())
/// ```
pub fn simulate_transaction(
&self,
transaction: &Transaction,
) -> RpcResult<RpcSimulateTransactionResult> {
self.simulate_transaction_with_config(
transaction,
RpcSimulateTransactionConfig {
commitment: Some(self.commitment()),
..RpcSimulateTransactionConfig::default()
},
)
}
/// # Examples
///
/// ```
/// # use solana_client::{
/// # client_error::ClientError,
/// # rpc_client::RpcClient,
/// # rpc_config::RpcSimulateTransactionConfig,
/// # rpc_response::RpcSimulateTransactionResult,
/// # };
/// # use solana_sdk::{
/// # signature::Signature,
/// # signer::keypair::Keypair,
/// # hash::Hash,
/// # system_transaction,
/// # };
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
/// // Transfer lamports from some account to a random account
/// let key = Keypair::new();
/// let to = solana_sdk::pubkey::new_rand();
/// let lamports = 50;
/// # let recent_blockhash = Hash::default();
/// let tx = system_transaction::transfer(&key, &to, lamports, recent_blockhash);
/// let config = RpcSimulateTransactionConfig {
/// sig_verify: false,
/// .. RpcSimulateTransactionConfig::default()
/// };
/// let result = rpc_client.simulate_transaction_with_config(
/// &tx,
/// config,
/// )?;
/// assert!(result.value.err.is_none());
/// # Ok::<(), ClientError>(())
/// ```
pub fn simulate_transaction_with_config(
&self,
transaction: &Transaction,
config: RpcSimulateTransactionConfig,
) -> RpcResult<RpcSimulateTransactionResult> {
let encoding = if let Some(encoding) = config.encoding {
encoding
} else {
self.default_cluster_transaction_encoding()?
};
let commitment = config.commitment.unwrap_or_default();
let commitment = self.maybe_map_commitment(commitment)?;
let config = RpcSimulateTransactionConfig {
encoding: Some(encoding),
commitment: Some(commitment),
..config
};
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
self.send(
RpcRequest::SimulateTransaction,
json!([serialized_encoded, config]),
)
}
pub fn get_snapshot_slot(&self) -> ClientResult<Slot> {
self.send(RpcRequest::GetSnapshotSlot, Value::Null)
}
pub fn get_signature_status(
&self,
signature: &Signature,
) -> ClientResult<Option<transaction::Result<()>>> {
self.get_signature_status_with_commitment(signature, self.commitment())
}
pub fn get_signature_statuses(
&self,
signatures: &[Signature],
) -> RpcResult<Vec<Option<TransactionStatus>>> {
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
self.send(RpcRequest::GetSignatureStatuses, json!([signatures]))
}
pub fn get_signature_statuses_with_history(
&self,
signatures: &[Signature],
) -> RpcResult<Vec<Option<TransactionStatus>>> {
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
self.send(
RpcRequest::GetSignatureStatuses,
json!([signatures, {
"searchTransactionHistory": true
}]),
)
}
pub fn get_signature_status_with_commitment(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
) -> ClientResult<Option<transaction::Result<()>>> {
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
)?;
Ok(result.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment_config))
.map(|status_meta| status_meta.status))
}
pub fn get_signature_status_with_commitment_and_history(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
search_transaction_history: bool,
) -> ClientResult<Option<transaction::Result<()>>> {
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()], {
"searchTransactionHistory": search_transaction_history
}]),
)?;
Ok(result.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment_config))
.map(|status_meta| status_meta.status))
}
pub fn get_slot(&self) -> ClientResult<Slot> {
self.get_slot_with_commitment(self.commitment())
}
pub fn get_slot_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<Slot> {
self.send(
RpcRequest::GetSlot,
json!([self.maybe_map_commitment(commitment_config)?]),
)
}
pub fn get_block_height(&self) -> ClientResult<u64> {
self.get_block_height_with_commitment(self.commitment())
}
pub fn get_block_height_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
self.send(
RpcRequest::GetBlockHeight,
json!([self.maybe_map_commitment(commitment_config)?]),
)
}
pub fn get_slot_leaders(&self, start_slot: Slot, limit: u64) -> ClientResult<Vec<Pubkey>> {
self.send(RpcRequest::GetSlotLeaders, json!([start_slot, limit]))
.and_then(|slot_leaders: Vec<String>| {
slot_leaders
.iter()
.map(|slot_leader| {
Pubkey::from_str(slot_leader).map_err(|err| {
ClientErrorKind::Custom(format!(
"pubkey deserialization failed: {}",
err
))
.into()
})
})
.collect()
})
}
/// Get block production for the current epoch
pub fn get_block_production(&self) -> RpcResult<RpcBlockProduction> {
self.send(RpcRequest::GetBlockProduction, Value::Null)
}
pub fn get_block_production_with_config(
&self,
config: RpcBlockProductionConfig,
) -> RpcResult<RpcBlockProduction> {
self.send(RpcRequest::GetBlockProduction, json!(config))
}
pub fn get_stake_activation(
&self,
stake_account: Pubkey,
epoch: Option<Epoch>,
) -> ClientResult<RpcStakeActivation> {
self.send(
RpcRequest::GetStakeActivation,
json!([
stake_account.to_string(),
RpcEpochConfig {
epoch,
commitment: Some(self.commitment()),
}
]),
)
}
pub fn supply(&self) -> RpcResult<RpcSupply> {
self.supply_with_commitment(self.commitment())
}
pub fn supply_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> RpcResult<RpcSupply> {
self.send(
RpcRequest::GetSupply,
json!([self.maybe_map_commitment(commitment_config)?]),
)
}
pub fn get_largest_accounts_with_config(
&self,
config: RpcLargestAccountsConfig,
) -> RpcResult<Vec<RpcAccountBalance>> {
let commitment = config.commitment.unwrap_or_default();
let commitment = self.maybe_map_commitment(commitment)?;
let config = RpcLargestAccountsConfig {
commitment: Some(commitment),
..config
};
self.send(RpcRequest::GetLargestAccounts, json!([config]))
}
pub fn get_vote_accounts(&self) -> ClientResult<RpcVoteAccountStatus> {
self.get_vote_accounts_with_commitment(self.commitment())
}
pub fn get_vote_accounts_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<RpcVoteAccountStatus> {
self.get_vote_accounts_with_config(RpcGetVoteAccountsConfig {
commitment: Some(self.maybe_map_commitment(commitment_config)?),
..RpcGetVoteAccountsConfig::default()
})
}
pub fn get_vote_accounts_with_config(
&self,
config: RpcGetVoteAccountsConfig,
) -> ClientResult<RpcVoteAccountStatus> {
self.send(RpcRequest::GetVoteAccounts, json!([config]))
}
pub fn wait_for_max_stake(
&self,
commitment: CommitmentConfig,
max_stake_percent: f32,
) -> ClientResult<()> {
let mut current_percent;
loop {
let vote_accounts = self.get_vote_accounts_with_commitment(commitment)?;
let mut max = 0;
let total_active_stake = vote_accounts
.current
.iter()
.chain(vote_accounts.delinquent.iter())
.map(|vote_account| {
max = std::cmp::max(max, vote_account.activated_stake);
vote_account.activated_stake
})
.sum::<u64>();
current_percent = 100f32 * max as f32 / total_active_stake as f32;
if current_percent < max_stake_percent {
break;
}
info!(
"Waiting for stake to drop below {} current: {:.1}",
max_stake_percent, current_percent
);
sleep(Duration::from_secs(10));
}
Ok(())
}
pub fn get_cluster_nodes(&self) -> ClientResult<Vec<RpcContactInfo>> {
self.send(RpcRequest::GetClusterNodes, Value::Null)
}
pub fn get_block(&self, slot: Slot) -> ClientResult<EncodedConfirmedBlock> {
self.get_block_with_encoding(slot, UiTransactionEncoding::Json)
}
pub fn get_block_with_encoding(
&self,
slot: Slot,
encoding: UiTransactionEncoding,
) -> ClientResult<EncodedConfirmedBlock> {
self.send(
self.maybe_map_request(RpcRequest::GetBlock)?,
json!([slot, encoding]),
)
}
pub fn get_block_with_config(
&self,
slot: Slot,
config: RpcBlockConfig,
) -> ClientResult<UiConfirmedBlock> {
self.send(
self.maybe_map_request(RpcRequest::GetBlock)?,
json!([slot, config]),
)
}
#[deprecated(since = "1.7.0", note = "Please use RpcClient::get_block() instead")]
#[allow(deprecated)]
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<EncodedConfirmedBlock> {
self.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Json)
}
#[deprecated(
since = "1.7.0",
note = "Please use RpcClient::get_block_with_encoding() instead"
)]
#[allow(deprecated)]
pub fn get_confirmed_block_with_encoding(
&self,
slot: Slot,
encoding: UiTransactionEncoding,
) -> ClientResult<EncodedConfirmedBlock> {
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]))
}
#[deprecated(
since = "1.7.0",
note = "Please use RpcClient::get_block_with_config() instead"
)]
#[allow(deprecated)]
pub fn get_confirmed_block_with_config(
&self,
slot: Slot,
config: RpcConfirmedBlockConfig,
) -> ClientResult<UiConfirmedBlock> {
self.send(RpcRequest::GetConfirmedBlock, json!([slot, config]))
}
pub fn get_blocks(&self, start_slot: Slot, end_slot: Option<Slot>) -> ClientResult<Vec<Slot>> {
self.send(
self.maybe_map_request(RpcRequest::GetBlocks)?,
json!([start_slot, end_slot]),
)
}
pub fn get_blocks_with_commitment(
&self,
start_slot: Slot,
end_slot: Option<Slot>,
commitment_config: CommitmentConfig,
) -> ClientResult<Vec<Slot>> {
let json = if end_slot.is_some() {
json!([
start_slot,
end_slot,
self.maybe_map_commitment(commitment_config)?
])
} else {
json!([start_slot, self.maybe_map_commitment(commitment_config)?])
};
self.send(self.maybe_map_request(RpcRequest::GetBlocks)?, json)
}
pub fn get_blocks_with_limit(&self, start_slot: Slot, limit: usize) -> ClientResult<Vec<Slot>> {
self.send(
self.maybe_map_request(RpcRequest::GetBlocksWithLimit)?,
json!([start_slot, limit]),
)
}
pub fn get_blocks_with_limit_and_commitment(
&self,
start_slot: Slot,
limit: usize,
commitment_config: CommitmentConfig,
) -> ClientResult<Vec<Slot>> {
self.send(
self.maybe_map_request(RpcRequest::GetBlocksWithLimit)?,
json!([
start_slot,
limit,
self.maybe_map_commitment(commitment_config)?
]),
)
}
#[deprecated(since = "1.7.0", note = "Please use RpcClient::get_blocks() instead")]
#[allow(deprecated)]
pub fn get_confirmed_blocks(
&self,
start_slot: Slot,
end_slot: Option<Slot>,
) -> ClientResult<Vec<Slot>> {
self.send(
RpcRequest::GetConfirmedBlocks,
json!([start_slot, end_slot]),
)
}
#[deprecated(
since = "1.7.0",
note = "Please use RpcClient::get_blocks_with_commitment() instead"
)]
#[allow(deprecated)]
pub fn get_confirmed_blocks_with_commitment(
&self,
start_slot: Slot,
end_slot: Option<Slot>,
commitment_config: CommitmentConfig,
) -> ClientResult<Vec<Slot>> {
let json = if end_slot.is_some() {
json!([
start_slot,
end_slot,
self.maybe_map_commitment(commitment_config)?
])
} else {
json!([start_slot, self.maybe_map_commitment(commitment_config)?])
};
self.send(RpcRequest::GetConfirmedBlocks, json)
}
#[deprecated(
since = "1.7.0",
note = "Please use RpcClient::get_blocks_with_limit() instead"
)]
#[allow(deprecated)]
pub fn get_confirmed_blocks_with_limit(
&self,
start_slot: Slot,
limit: usize,
) -> ClientResult<Vec<Slot>> {
self.send(
RpcRequest::GetConfirmedBlocksWithLimit,
json!([start_slot, limit]),
)
}
#[deprecated(
since = "1.7.0",
note = "Please use RpcClient::get_blocks_with_limit_and_commitment() instead"
)]
#[allow(deprecated)]
pub fn get_confirmed_blocks_with_limit_and_commitment(
&self,
start_slot: Slot,
limit: usize,
commitment_config: CommitmentConfig,
) -> ClientResult<Vec<Slot>> {
self.send(
RpcRequest::GetConfirmedBlocksWithLimit,
json!([
start_slot,
limit,
self.maybe_map_commitment(commitment_config)?
]),
)
}
pub fn get_signatures_for_address(
&self,
address: &Pubkey,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
self.get_signatures_for_address_with_config(
address,
GetConfirmedSignaturesForAddress2Config::default(),
)
}
pub fn get_signatures_for_address_with_config(
&self,
address: &Pubkey,
config: GetConfirmedSignaturesForAddress2Config,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
let config = RpcSignaturesForAddressConfig {
before: config.before.map(|signature| signature.to_string()),
until: config.until.map(|signature| signature.to_string()),
limit: config.limit,
commitment: config.commitment,
};
let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
self.maybe_map_request(RpcRequest::GetSignaturesForAddress)?,
json!([address.to_string(), config]),
)?;
Ok(result)
}
#[deprecated(
since = "1.7.0",
note = "Please use RpcClient::get_signatures_for_address() instead"
)]
#[allow(deprecated)]
pub fn get_confirmed_signatures_for_address2(
&self,
address: &Pubkey,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
self.get_confirmed_signatures_for_address2_with_config(
address,
GetConfirmedSignaturesForAddress2Config::default(),
)
}
#[deprecated(
since = "1.7.0",
note = "Please use RpcClient::get_signatures_for_address_with_config() instead"
)]
#[allow(deprecated)]
pub fn get_confirmed_signatures_for_address2_with_config(
&self,
address: &Pubkey,
config: GetConfirmedSignaturesForAddress2Config,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
let config = RpcGetConfirmedSignaturesForAddress2Config {
before: config.before.map(|signature| signature.to_string()),
until: config.until.map(|signature| signature.to_string()),
limit: config.limit,
commitment: config.commitment,
};
let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
RpcRequest::GetConfirmedSignaturesForAddress2,
json!([address.to_string(), config]),
)?;
Ok(result)
}
pub fn get_transaction(
&self,
signature: &Signature,
encoding: UiTransactionEncoding,
) -> ClientResult<EncodedConfirmedTransaction> {
self.send(
self.maybe_map_request(RpcRequest::GetTransaction)?,
json!([signature.to_string(), encoding]),
)
}
pub fn get_transaction_with_config(
&self,
signature: &Signature,
config: RpcTransactionConfig,
) -> ClientResult<EncodedConfirmedTransaction> {
self.send(
self.maybe_map_request(RpcRequest::GetTransaction)?,
json!([signature.to_string(), config]),
)
}
#[deprecated(
since = "1.7.0",
note = "Please use RpcClient::get_transaction() instead"
)]
#[allow(deprecated)]
pub fn get_confirmed_transaction(
&self,
signature: &Signature,
encoding: UiTransactionEncoding,
) -> ClientResult<EncodedConfirmedTransaction> {
self.send(
RpcRequest::GetConfirmedTransaction,
json!([signature.to_string(), encoding]),
)
}
#[deprecated(
since = "1.7.0",
note = "Please use RpcClient::get_transaction_with_config() instead"
)]
#[allow(deprecated)]
pub fn get_confirmed_transaction_with_config(
&self,
signature: &Signature,
config: RpcConfirmedTransactionConfig,
) -> ClientResult<EncodedConfirmedTransaction> {
self.send(
RpcRequest::GetConfirmedTransaction,
json!([signature.to_string(), config]),
)
}
pub fn get_block_time(&self, slot: Slot) -> ClientResult<UnixTimestamp> {
let request = RpcRequest::GetBlockTime;
let response = self.sender.send(request, json!([slot]));
response
.map(|result_json| {
if result_json.is_null() {
return Err(RpcError::ForUser(format!("Block Not Found: slot={}", slot)).into());
}
let result = serde_json::from_value(result_json)
.map_err(|err| ClientError::new_with_request(err.into(), request))?;
trace!("Response block timestamp {:?} {:?}", slot, result);
Ok(result)
})
.map_err(|err| err.into_with_request(request))?
}
pub fn get_epoch_info(&self) -> ClientResult<EpochInfo> {
self.get_epoch_info_with_commitment(self.commitment())
}
pub fn get_epoch_info_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<EpochInfo> {
self.send(
RpcRequest::GetEpochInfo,
json!([self.maybe_map_commitment(commitment_config)?]),
)
}
pub fn get_leader_schedule(
&self,
slot: Option<Slot>,
) -> ClientResult<Option<RpcLeaderSchedule>> {
self.get_leader_schedule_with_commitment(slot, self.commitment())
}
pub fn get_leader_schedule_with_commitment(
&self,
slot: Option<Slot>,
commitment_config: CommitmentConfig,
) -> ClientResult<Option<RpcLeaderSchedule>> {
self.get_leader_schedule_with_config(
slot,
RpcLeaderScheduleConfig {
commitment: Some(self.maybe_map_commitment(commitment_config)?),
..RpcLeaderScheduleConfig::default()
},
)
}
pub fn get_leader_schedule_with_config(
&self,
slot: Option<Slot>,
config: RpcLeaderScheduleConfig,
) -> ClientResult<Option<RpcLeaderSchedule>> {
self.send(RpcRequest::GetLeaderSchedule, json!([slot, config]))
}
pub fn get_epoch_schedule(&self) -> ClientResult<EpochSchedule> {
self.send(RpcRequest::GetEpochSchedule, Value::Null)
}
pub fn get_recent_performance_samples(
&self,
limit: Option<usize>,
) -> ClientResult<Vec<RpcPerfSample>> {
self.send(RpcRequest::GetRecentPerformanceSamples, json!([limit]))
}
pub fn get_identity(&self) -> ClientResult<Pubkey> {
let rpc_identity: RpcIdentity = self.send(RpcRequest::GetIdentity, Value::Null)?;
rpc_identity.identity.parse::<Pubkey>().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Pubkey".to_string()).into(),
RpcRequest::GetIdentity,
)
})
}
pub fn get_inflation_governor(&self) -> ClientResult<RpcInflationGovernor> {
self.send(RpcRequest::GetInflationGovernor, Value::Null)
}
pub fn get_inflation_rate(&self) -> ClientResult<RpcInflationRate> {
self.send(RpcRequest::GetInflationRate, Value::Null)
}
pub fn get_inflation_reward(
&self,
addresses: &[Pubkey],
epoch: Option<Epoch>,
) -> ClientResult<Vec<Option<RpcInflationReward>>> {
let addresses: Vec<_> = addresses
.iter()
.map(|address| address.to_string())
.collect();
self.send(
RpcRequest::GetInflationReward,
json!([
addresses,
RpcEpochConfig {
epoch,
commitment: Some(self.commitment()),
}
]),
)
}
pub fn get_version(&self) -> ClientResult<RpcVersionInfo> {
self.send(RpcRequest::GetVersion, Value::Null)
}
pub fn minimum_ledger_slot(&self) -> ClientResult<Slot> {
self.send(RpcRequest::MinimumLedgerSlot, Value::Null)
}
pub fn send_and_confirm_transaction(
&self,
transaction: &Transaction,
) -> ClientResult<Signature> {
const SEND_RETRIES: usize = 1;
const GET_STATUS_RETRIES: usize = usize::MAX;
'sending: for _ in 0..SEND_RETRIES {
let signature = self.send_transaction(transaction)?;
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
let (recent_blockhash, ..) = self
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())?
.value;
recent_blockhash
} else {
transaction.message.recent_blockhash
};
for status_retry in 0..GET_STATUS_RETRIES {
match self.get_signature_status(&signature)? {
Some(Ok(_)) => return Ok(signature),
Some(Err(e)) => return Err(e.into()),
None => {
let fee_calculator = self
.get_fee_calculator_for_blockhash_with_commitment(
&recent_blockhash,
CommitmentConfig::processed(),
)?
.value;
if fee_calculator.is_none() {
// Blockhash was not found, for some reason
break 'sending;
} else if cfg!(not(test))
// Ignore sleep at last step.
&& status_retry < GET_STATUS_RETRIES
{
// Retry twice a second
sleep(Duration::from_millis(500));
continue;
}
}
}
}
}
Err(RpcError::ForUser(
"unable to confirm transaction. \
This can happen in situations such as transaction expiration \
and insufficient fee-payer funds"
.to_string(),
)
.into())
}
/// Note that `get_account` returns `Err(..)` if the account does not exist whereas
/// `get_account_with_commitment` returns `Ok(None)` if the account does not exist.
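/// # Examples
///
/// A sketch of the distinction, using the mock client (the mock's canned
/// responses are an assumption of this example):
///
/// ```
/// # use solana_client::{client_error::ClientError, rpc_client::RpcClient};
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
/// let pubkey = solana_sdk::pubkey::new_rand();
/// // Errors if the account does not exist:
/// let account = rpc_client.get_account(&pubkey)?;
/// // Yields `Ok(None)` instead when the account does not exist:
/// let maybe_account = rpc_client
///     .get_account_with_commitment(&pubkey, rpc_client.commitment())?
///     .value;
/// # Ok::<(), ClientError>(())
/// ```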
pub fn get_account(&self, pubkey: &Pubkey) -> ClientResult<Account> {
self.get_account_with_commitment(pubkey, self.commitment())?
.value
.ok_or_else(|| RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into())
}
pub fn get_account_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<Option<Account>> {
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64Zstd),
commitment: Some(self.maybe_map_commitment(commitment_config)?),
data_slice: None,
};
let response = self.sender.send(
RpcRequest::GetAccountInfo,
json!([pubkey.to_string(), config]),
);
response
.map(|result_json| {
if result_json.is_null() {
return Err(
RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into(),
);
}
let Response {
context,
value: rpc_account,
} = serde_json::from_value::<Response<Option<UiAccount>>>(result_json)?;
trace!("Response account {:?} {:?}", pubkey, rpc_account);
let account = rpc_account.and_then(|rpc_account| rpc_account.decode());
Ok(Response {
context,
value: account,
})
})
.map_err(|err| {
Into::<ClientError>::into(RpcError::ForUser(format!(
"AccountNotFound: pubkey={}: {}",
pubkey, err
)))
})?
}
pub fn get_max_retransmit_slot(&self) -> ClientResult<Slot> {
self.send(RpcRequest::GetMaxRetransmitSlot, Value::Null)
}
pub fn get_max_shred_insert_slot(&self) -> ClientResult<Slot> {
self.send(RpcRequest::GetMaxShredInsertSlot, Value::Null)
}
pub fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> ClientResult<Vec<Option<Account>>> {
Ok(self
.get_multiple_accounts_with_commitment(pubkeys, self.commitment())?
.value)
}
pub fn get_multiple_accounts_with_commitment(
&self,
pubkeys: &[Pubkey],
commitment_config: CommitmentConfig,
) -> RpcResult<Vec<Option<Account>>> {
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64Zstd),
commitment: Some(self.maybe_map_commitment(commitment_config)?),
data_slice: None,
};
let pubkeys: Vec<_> = pubkeys.iter().map(|pubkey| pubkey.to_string()).collect();
let response = self.send(RpcRequest::GetMultipleAccounts, json!([pubkeys, config]))?;
let Response {
context,
value: accounts,
} = serde_json::from_value::<Response<Vec<Option<UiAccount>>>>(response)?;
let accounts: Vec<Option<Account>> = accounts
.into_iter()
.map(|rpc_account| rpc_account.and_then(|a| a.decode()))
.collect();
Ok(Response {
context,
value: accounts,
})
}
pub fn get_account_data(&self, pubkey: &Pubkey) -> ClientResult<Vec<u8>> {
Ok(self.get_account(pubkey)?.data)
}
pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> ClientResult<u64> {
let request = RpcRequest::GetMinimumBalanceForRentExemption;
let minimum_balance_json = self
.sender
.send(request, json!([data_len]))
.map_err(|err| err.into_with_request(request))?;
let minimum_balance: u64 = serde_json::from_value(minimum_balance_json)
.map_err(|err| ClientError::new_with_request(err.into(), request))?;
trace!(
"Response minimum balance {:?} {:?}",
data_len,
minimum_balance
);
Ok(minimum_balance)
}
/// Request the balance of the account `pubkey`.
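/// # Examples
///
/// A minimal sketch against the mock client:
///
/// ```
/// # use solana_client::{client_error::ClientError, rpc_client::RpcClient};
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
/// let pubkey = solana_sdk::pubkey::new_rand();
/// let balance = rpc_client.get_balance(&pubkey)?;
/// # Ok::<(), ClientError>(())
/// ```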
pub fn get_balance(&self, pubkey: &Pubkey) -> ClientResult<u64> {
Ok(self
.get_balance_with_commitment(pubkey, self.commitment())?
.value)
}
pub fn get_balance_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<u64> {
self.send(
RpcRequest::GetBalance,
json!([
pubkey.to_string(),
self.maybe_map_commitment(commitment_config)?
]),
)
}
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> ClientResult<Vec<(Pubkey, Account)>> {
self.get_program_accounts_with_config(
pubkey,
RpcProgramAccountsConfig {
account_config: RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64Zstd),
..RpcAccountInfoConfig::default()
},
..RpcProgramAccountsConfig::default()
},
)
}
pub fn get_program_accounts_with_config(
&self,
pubkey: &Pubkey,
config: RpcProgramAccountsConfig,
) -> ClientResult<Vec<(Pubkey, Account)>> {
let commitment = config
.account_config
.commitment
.unwrap_or_else(|| self.commitment());
let commitment = self.maybe_map_commitment(commitment)?;
let account_config = RpcAccountInfoConfig {
commitment: Some(commitment),
..config.account_config
};
let config = RpcProgramAccountsConfig {
account_config,
..config
};
let accounts: Vec<RpcKeyedAccount> = self.send(
RpcRequest::GetProgramAccounts,
json!([pubkey.to_string(), config]),
)?;
parse_keyed_accounts(accounts, RpcRequest::GetProgramAccounts)
}
/// Request the transaction count.
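/// # Examples
///
/// A minimal sketch against the mock client:
///
/// ```
/// # use solana_client::{client_error::ClientError, rpc_client::RpcClient};
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
/// let count = rpc_client.get_transaction_count()?;
/// # Ok::<(), ClientError>(())
/// ```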
pub fn get_transaction_count(&self) -> ClientResult<u64> {
self.get_transaction_count_with_commitment(self.commitment())
}
pub fn get_transaction_count_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
self.send(
RpcRequest::GetTransactionCount,
json!([self.maybe_map_commitment(commitment_config)?]),
)
}
pub fn get_fees(&self) -> ClientResult<Fees> {
Ok(self.get_fees_with_commitment(self.commitment())?.value)
}
pub fn get_fees_with_commitment(&self, commitment_config: CommitmentConfig) -> RpcResult<Fees> {
let Response {
context,
value: fees,
} = self.send::<Response<RpcFees>>(
RpcRequest::GetFees,
json!([self.maybe_map_commitment(commitment_config)?]),
)?;
let blockhash = fees.blockhash.parse().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Hash".to_string()).into(),
RpcRequest::GetFees,
)
})?;
Ok(Response {
context,
value: Fees {
blockhash,
fee_calculator: fees.fee_calculator,
last_valid_block_height: fees.last_valid_block_height,
},
})
}
pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
let (blockhash, fee_calculator, _last_valid_slot) = self
.get_recent_blockhash_with_commitment(self.commitment())?
.value;
Ok((blockhash, fee_calculator))
}
pub fn get_recent_blockhash_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> RpcResult<(Hash, FeeCalculator, Slot)> {
let (context, blockhash, fee_calculator, last_valid_slot) = if let Ok(Response {
context,
value:
RpcFees {
blockhash,
fee_calculator,
last_valid_slot,
..
},
}) = self
.send::<Response<RpcFees>>(
RpcRequest::GetFees,
json!([self.maybe_map_commitment(commitment_config)?]),
) {
(context, blockhash, fee_calculator, last_valid_slot)
} else if let Ok(Response {
context,
value:
DeprecatedRpcFees {
blockhash,
fee_calculator,
last_valid_slot,
},
}) = self.send::<Response<DeprecatedRpcFees>>(
RpcRequest::GetFees,
json!([self.maybe_map_commitment(commitment_config)?]),
) {
(context, blockhash, fee_calculator, last_valid_slot)
} else if let Ok(Response {
context,
value:
RpcBlockhashFeeCalculator {
blockhash,
fee_calculator,
},
}) = self.send::<Response<RpcBlockhashFeeCalculator>>(
RpcRequest::GetRecentBlockhash,
json!([self.maybe_map_commitment(commitment_config)?]),
) {
(context, blockhash, fee_calculator, 0)
} else {
return Err(ClientError::new_with_request(
RpcError::ParseError("RpcBlockhashFeeCalculator or RpcFees".to_string()).into(),
RpcRequest::GetRecentBlockhash,
));
};
let blockhash = blockhash.parse().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Hash".to_string()).into(),
RpcRequest::GetRecentBlockhash,
)
})?;
Ok(Response {
context,
value: (blockhash, fee_calculator, last_valid_slot),
})
}
pub fn get_fee_calculator_for_blockhash(
&self,
blockhash: &Hash,
) -> ClientResult<Option<FeeCalculator>> {
Ok(self
.get_fee_calculator_for_blockhash_with_commitment(blockhash, self.commitment())?
.value)
}
pub fn get_fee_calculator_for_blockhash_with_commitment(
&self,
blockhash: &Hash,
commitment_config: CommitmentConfig,
) -> RpcResult<Option<FeeCalculator>> {
let Response { context, value } = self.send::<Response<Option<RpcFeeCalculator>>>(
RpcRequest::GetFeeCalculatorForBlockhash,
json!([
blockhash.to_string(),
self.maybe_map_commitment(commitment_config)?
]),
)?;
Ok(Response {
context,
value: value.map(|rf| rf.fee_calculator),
})
}
pub fn get_fee_rate_governor(&self) -> RpcResult<FeeRateGovernor> {
let Response {
context,
value: RpcFeeRateGovernor { fee_rate_governor },
} =
self.send::<Response<RpcFeeRateGovernor>>(RpcRequest::GetFeeRateGovernor, Value::Null)?;
Ok(Response {
context,
value: fee_rate_governor,
})
}
pub fn get_new_blockhash(&self, blockhash: &Hash) -> ClientResult<(Hash, FeeCalculator)> {
let mut num_retries = 0;
let start = Instant::now();
while start.elapsed().as_secs() < 5 {
if let Ok((new_blockhash, fee_calculator)) = self.get_recent_blockhash() {
if new_blockhash != *blockhash {
return Ok((new_blockhash, fee_calculator));
}
}
debug!("Got same blockhash ({:?}), will retry...", blockhash);
// Retry ~twice during a slot
sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT / 2));
num_retries += 1;
}
Err(RpcError::ForUser(format!(
"Unable to get new blockhash after {}ms (retried {} times), stuck at {}",
start.elapsed().as_millis(),
num_retries,
blockhash
))
.into())
}
pub fn get_first_available_block(&self) -> ClientResult<Slot> {
self.send(RpcRequest::GetFirstAvailableBlock, Value::Null)
}
pub fn get_genesis_hash(&self) -> ClientResult<Hash> {
let hash_str: String = self.send(RpcRequest::GetGenesisHash, Value::Null)?;
let hash = hash_str.parse().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Hash".to_string()).into(),
RpcRequest::GetGenesisHash,
)
})?;
Ok(hash)
}
pub fn get_health(&self) -> ClientResult<()> {
self.send::<String>(RpcRequest::GetHealth, Value::Null)
.map(|_| ())
}
pub fn get_token_account(&self, pubkey: &Pubkey) -> ClientResult<Option<UiTokenAccount>> {
Ok(self
.get_token_account_with_commitment(pubkey, self.commitment())?
.value)
}
pub fn get_token_account_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<Option<UiTokenAccount>> {
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::JsonParsed),
commitment: Some(self.maybe_map_commitment(commitment_config)?),
data_slice: None,
};
let response = self.sender.send(
RpcRequest::GetAccountInfo,
json!([pubkey.to_string(), config]),
);
response
.map(|result_json| {
if result_json.is_null() {
return Err(
RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into(),
);
}
let Response {
context,
value: rpc_account,
} = serde_json::from_value::<Response<Option<UiAccount>>>(result_json)?;
trace!("Response account {:?} {:?}", pubkey, rpc_account);
let response = {
if let Some(rpc_account) = rpc_account {
if let UiAccountData::Json(account_data) = rpc_account.data {
let token_account_type: TokenAccountType =
serde_json::from_value(account_data.parsed)?;
if let TokenAccountType::Account(token_account) = token_account_type {
return Ok(Response {
context,
value: Some(token_account),
});
}
}
}
Err(Into::<ClientError>::into(RpcError::ForUser(format!(
"Account could not be parsed as token account: pubkey={}",
pubkey
))))
};
response?
})
.map_err(|err| {
Into::<ClientError>::into(RpcError::ForUser(format!(
"AccountNotFound: pubkey={}: {}",
pubkey, err
)))
})?
}
pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<UiTokenAmount> {
Ok(self
.get_token_account_balance_with_commitment(pubkey, self.commitment())?
.value)
}
pub fn get_token_account_balance_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<UiTokenAmount> {
self.send(
RpcRequest::GetTokenAccountBalance,
json!([
pubkey.to_string(),
self.maybe_map_commitment(commitment_config)?
]),
)
}
pub fn get_token_accounts_by_delegate(
&self,
delegate: &Pubkey,
token_account_filter: TokenAccountsFilter,
) -> ClientResult<Vec<RpcKeyedAccount>> {
Ok(self
.get_token_accounts_by_delegate_with_commitment(
delegate,
token_account_filter,
self.commitment(),
)?
.value)
}
pub fn get_token_accounts_by_delegate_with_commitment(
&self,
delegate: &Pubkey,
token_account_filter: TokenAccountsFilter,
commitment_config: CommitmentConfig,
) -> RpcResult<Vec<RpcKeyedAccount>> {
let token_account_filter = match token_account_filter {
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
TokenAccountsFilter::ProgramId(program_id) => {
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
}
};
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::JsonParsed),
commitment: Some(self.maybe_map_commitment(commitment_config)?),
data_slice: None,
};
self.send(
RpcRequest::GetTokenAccountsByDelegate,
json!([delegate.to_string(), token_account_filter, config]),
)
}
pub fn get_token_accounts_by_owner(
&self,
owner: &Pubkey,
token_account_filter: TokenAccountsFilter,
) -> ClientResult<Vec<RpcKeyedAccount>> {
Ok(self
.get_token_accounts_by_owner_with_commitment(
owner,
token_account_filter,
self.commitment(),
)?
.value)
}
pub fn get_token_accounts_by_owner_with_commitment(
&self,
owner: &Pubkey,
token_account_filter: TokenAccountsFilter,
commitment_config: CommitmentConfig,
) -> RpcResult<Vec<RpcKeyedAccount>> {
let token_account_filter = match token_account_filter {
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
TokenAccountsFilter::ProgramId(program_id) => {
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
}
};
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::JsonParsed),
commitment: Some(self.maybe_map_commitment(commitment_config)?),
data_slice: None,
};
self.send(
RpcRequest::GetTokenAccountsByOwner,
json!([owner.to_string(), token_account_filter, config]),
)
}
pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
Ok(self
.get_token_supply_with_commitment(mint, self.commitment())?
.value)
}
pub fn get_token_supply_with_commitment(
&self,
mint: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<UiTokenAmount> {
self.send(
RpcRequest::GetTokenSupply,
json!([
mint.to_string(),
self.maybe_map_commitment(commitment_config)?
]),
)
}
pub fn request_airdrop(&self, pubkey: &Pubkey, lamports: u64) -> ClientResult<Signature> {
self.request_airdrop_with_config(
pubkey,
lamports,
RpcRequestAirdropConfig {
commitment: Some(self.commitment()),
..RpcRequestAirdropConfig::default()
},
)
}
pub fn request_airdrop_with_blockhash(
&self,
pubkey: &Pubkey,
lamports: u64,
recent_blockhash: &Hash,
) -> ClientResult<Signature> {
self.request_airdrop_with_config(
pubkey,
lamports,
RpcRequestAirdropConfig {
commitment: Some(self.commitment()),
recent_blockhash: Some(recent_blockhash.to_string()),
},
)
}
pub fn request_airdrop_with_config(
&self,
pubkey: &Pubkey,
lamports: u64,
config: RpcRequestAirdropConfig,
) -> ClientResult<Signature> {
let commitment = config.commitment.unwrap_or_default();
let commitment = self.maybe_map_commitment(commitment)?;
let config = RpcRequestAirdropConfig {
commitment: Some(commitment),
..config
};
self.send(
RpcRequest::RequestAirdrop,
json!([pubkey.to_string(), lamports, config]),
)
.and_then(|signature: String| {
Signature::from_str(&signature).map_err(|err| {
ClientErrorKind::Custom(format!("signature deserialization failed: {}", err)).into()
})
})
.map_err(|_| {
RpcError::ForUser(
"airdrop request failed. \
This can happen when the rate limit is reached."
.to_string(),
)
.into()
})
}
fn poll_balance_with_timeout_and_commitment(
&self,
pubkey: &Pubkey,
polling_frequency: &Duration,
timeout: &Duration,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
let now = Instant::now();
loop {
match self.get_balance_with_commitment(pubkey, commitment_config) {
Ok(bal) => {
return Ok(bal.value);
}
Err(e) => {
sleep(*polling_frequency);
if now.elapsed() > *timeout {
return Err(e);
}
}
};
}
}
pub fn poll_get_balance_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
self.poll_balance_with_timeout_and_commitment(
pubkey,
&Duration::from_millis(100),
&Duration::from_secs(1),
commitment_config,
)
}
pub fn wait_for_balance_with_commitment(
&self,
pubkey: &Pubkey,
expected_balance: Option<u64>,
commitment_config: CommitmentConfig,
) -> Option<u64> {
const LAST: usize = 30;
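        // Poll up to LAST times, returning early once the expected balance is observed.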
for run in 0..LAST {
let balance_result = self.poll_get_balance_with_commitment(pubkey, commitment_config);
if expected_balance.is_none() {
return balance_result.ok();
}
trace!(
"wait_for_balance_with_commitment [{}] {:?} {:?}",
run,
balance_result,
expected_balance
);
if let (Some(expected_balance), Ok(balance_result)) = (expected_balance, balance_result)
{
if expected_balance == balance_result {
return Some(balance_result);
}
}
}
None
}
/// Poll the server to confirm a transaction.
pub fn poll_for_signature(&self, signature: &Signature) -> ClientResult<()> {
self.poll_for_signature_with_commitment(signature, self.commitment())
}
/// Poll the server to confirm a transaction.
pub fn poll_for_signature_with_commitment(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
) -> ClientResult<()> {
let now = Instant::now();
loop {
if let Ok(Some(_)) =
self.get_signature_status_with_commitment(signature, commitment_config)
{
break;
}
if now.elapsed().as_secs() > 15 {
return Err(RpcError::ForUser(format!(
"signature not found after {} seconds",
now.elapsed().as_secs()
))
.into());
}
sleep(Duration::from_millis(250));
}
Ok(())
}
/// Poll the server to confirm a transaction.
pub fn poll_for_signature_confirmation(
&self,
signature: &Signature,
min_confirmed_blocks: usize,
) -> ClientResult<usize> {
let mut now = Instant::now();
let mut confirmed_blocks = 0;
loop {
let response = self.get_num_blocks_since_signature_confirmation(signature);
match response {
Ok(count) => {
if confirmed_blocks != count {
info!(
"signature {} confirmed {} out of {} after {} ms",
signature,
count,
min_confirmed_blocks,
now.elapsed().as_millis()
);
now = Instant::now();
confirmed_blocks = count;
}
if count >= min_confirmed_blocks {
break;
}
}
Err(err) => {
debug!("check_confirmations request failed: {:?}", err);
}
};
if now.elapsed().as_secs() > 20 {
info!(
"signature {} confirmed {} out of {} failed after {} ms",
signature,
confirmed_blocks,
min_confirmed_blocks,
now.elapsed().as_millis()
);
if confirmed_blocks > 0 {
return Ok(confirmed_blocks);
} else {
return Err(RpcError::ForUser(format!(
"signature not found after {} seconds",
now.elapsed().as_secs()
))
.into());
}
}
sleep(Duration::from_millis(250));
}
Ok(confirmed_blocks)
}
pub fn get_num_blocks_since_signature_confirmation(
&self,
signature: &Signature,
) -> ClientResult<usize> |
pub fn send_and_confirm_transaction_with_spinner(
&self,
transaction: &Transaction,
) -> ClientResult<Signature> {
self.send_and_confirm_transaction_with_spinner_and_commitment(
transaction,
self.commitment(),
)
}
pub fn send_and_confirm_transaction_with_spinner_and_commitment(
&self,
transaction: &Transaction,
commitment: CommitmentConfig,
) -> ClientResult<Signature> {
self.send_and_confirm_transaction_with_spinner_and_config(
transaction,
commitment,
RpcSendTransactionConfig {
preflight_commitment: Some(commitment.commitment),
..RpcSendTransactionConfig::default()
},
)
}
pub fn send_and_confirm_transaction_with_spinner_and_config(
&self,
transaction: &Transaction,
commitment: CommitmentConfig,
config: RpcSendTransactionConfig,
) -> ClientResult<Signature> {
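        // A durable-nonce transaction's message blockhash is a nonce that never expires,
        // so fetch a recent blockhash to drive the confirmation timeout instead.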
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
self.get_recent_blockhash_with_commitment(CommitmentConfig::processed())?
.value
.0
} else {
transaction.message.recent_blockhash
};
let signature = self.send_transaction_with_config(transaction, config)?;
self.confirm_transaction_with_spinner(&signature, &recent_blockhash, commitment)?;
Ok(signature)
}
pub fn confirm_transaction_with_spinner(
&self,
signature: &Signature,
recent_blockhash: &Hash,
commitment: CommitmentConfig,
) -> ClientResult<()> {
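        // Finalized commitment requires confirmations spanning the full lockout history;
        // lower commitments need only one.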
let desired_confirmations = if commitment.is_finalized() {
MAX_LOCKOUT_HISTORY + 1
} else {
1
};
let mut confirmations = 0;
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message(format!(
"[{}/{}] Finalizing transaction {}",
confirmations, desired_confirmations, signature,
));
let now = Instant::now();
let confirm_transaction_initial_timeout = self
.config
.confirm_transaction_initial_timeout
.unwrap_or_default();
let (signature, status) = loop {
// Get recent commitment in order to count confirmations for successful transactions
let status = self
.get_signature_status_with_commitment(signature, CommitmentConfig::processed())?;
if status.is_none() {
let blockhash_not_found = self
.get_fee_calculator_for_blockhash_with_commitment(
recent_blockhash,
CommitmentConfig::processed(),
)?
.value
.is_none();
if blockhash_not_found && now.elapsed() >= confirm_transaction_initial_timeout {
break (signature, status);
}
} else {
break (signature, status);
}
if cfg!(not(test)) {
sleep(Duration::from_millis(500));
}
};
if let Some(result) = status {
if let Err(err) = result {
return Err(err.into());
}
} else {
return Err(RpcError::ForUser(
"unable to confirm transaction. \
This can happen in situations such as transaction expiration \
and insufficient fee-payer funds"
.to_string(),
)
.into());
}
let now = Instant::now();
loop {
// Return when specified commitment is reached
// Failed transactions have already been eliminated, `is_some` check is sufficient
if self
.get_signature_status_with_commitment(signature, commitment)?
.is_some()
{
progress_bar.set_message("Transaction confirmed");
progress_bar.finish_and_clear();
return Ok(());
}
progress_bar.set_message(format!(
"[{}/{}] Finalizing transaction {}",
min(confirmations + 1, desired_confirmations),
desired_confirmations,
signature,
));
sleep(Duration::from_millis(500));
confirmations = self
.get_num_blocks_since_signature_confirmation(signature)
.unwrap_or(confirmations);
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
return Err(
RpcError::ForUser("transaction not finalized. \
This can happen when a transaction lands in an abandoned fork. \
Please retry.".to_string()).into(),
);
}
}
}
pub fn send<T>(&self, request: RpcRequest, params: Value) -> ClientResult<T>
where
T: serde::de::DeserializeOwned,
{
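        // Params must be a positional JSON array, or null when the request takes no parameters.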
assert!(params.is_array() || params.is_null());
let response = self
.sender
.send(request, params)
.map_err(|err| err.into_with_request(request))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_request(err.into(), request))
}
}
fn serialize_encode_transaction(
transaction: &Transaction,
encoding: UiTransactionEncoding,
) -> ClientResult<String> {
let serialized = serialize(transaction)
.map_err(|e| ClientErrorKind::Custom(format!("transaction serialization failed: {}", e)))?;
let encoded = match encoding {
UiTransactionEncoding::Base58 => bs58::encode(serialized).into_string(),
UiTransactionEncoding::Base64 => base64::encode(serialized),
_ => {
return Err(ClientErrorKind::Custom(format!(
"unsupported transaction encoding: {}. Supported encodings: base58, base64",
encoding
))
.into())
}
};
Ok(encoded)
}
#[derive(Debug, Default)]
pub struct GetConfirmedSignaturesForAddress2Config {
pub before: Option<Signature>,
pub until: Option<Signature>,
pub limit: Option<usize>,
pub commitment: Option<CommitmentConfig>,
}
fn new_spinner_progress_bar() -> ProgressBar {
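    // Spinner-style bar: the length passed to new() is never rendered, since the
    // template below shows only the tick and the message.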
let progress_bar = ProgressBar::new(42);
progress_bar
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
progress_bar.enable_steady_tick(100);
progress_bar
}
fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
if tls {
format!("https://{}", rpc_addr)
} else {
format!("http://{}", rpc_addr)
}
}
fn parse_keyed_accounts(
accounts: Vec<RpcKeyedAccount>,
request: RpcRequest,
) -> ClientResult<Vec<(Pubkey, Account)>> {
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
let pubkey = pubkey.parse().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Pubkey".to_string()).into(),
request,
)
})?;
pubkey_accounts.push((
pubkey,
account.decode().ok_or_else(|| {
ClientError::new_with_request(
RpcError::ParseError("Account from rpc".to_string()).into(),
request,
)
})?,
));
}
Ok(pubkey_accounts)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{client_error::ClientErrorKind, mock_sender::PUBKEY};
use assert_matches::assert_matches;
use jsonrpc_core::{futures::prelude::*, Error, IoHandler, Params};
use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder};
use serde_json::Number;
use solana_sdk::{
instruction::InstructionError, signature::Keypair, system_transaction,
transaction::TransactionError,
};
use std::{io, sync::mpsc::channel, thread};
#[test]
fn test_send() {
_test_send();
}
#[tokio::test(flavor = "current_thread")]
#[should_panic(expected = "can call blocking only when running on the multi-threaded runtime")]
async fn test_send_async_current_thread_should_panic() {
_test_send();
}
#[tokio::test(flavor = "multi_thread")]
async fn test_send_async_multi_thread() {
_test_send();
}
fn _test_send() {
let (sender, receiver) = channel();
thread::spawn(move || {
let rpc_addr = "0.0.0.0:0".parse().unwrap();
let mut io = IoHandler::default();
// Successful request
io.add_method("getBalance", |_params: Params| {
future::ok(Value::Number(Number::from(50)))
});
// Failed request
io.add_method("getRecentBlockhash", |params: Params| {
if params != Params::None {
future::err(Error::invalid_request())
} else {
future::ok(Value::String(
"deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx".to_string(),
))
}
});
let server = ServerBuilder::new(io)
.threads(1)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
.start_http(&rpc_addr)
.expect("Unable to start RPC server");
sender.send(*server.address()).unwrap();
server.wait();
});
let rpc_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(rpc_addr);
let balance: u64 = rpc_client
.send(
RpcRequest::GetBalance,
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]),
)
.unwrap();
assert_eq!(balance, 50);
let blockhash: String = rpc_client
.send(RpcRequest::GetRecentBlockhash, Value::Null)
.unwrap();
assert_eq!(blockhash, "deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx");
// Send erroneous parameter
let blockhash: ClientResult<String> =
rpc_client.send(RpcRequest::GetRecentBlockhash, json!(["parameter"]));
assert!(blockhash.is_err());
}
#[test]
fn test_send_transaction() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let signature = rpc_client.send_transaction(&tx);
assert_eq!(signature.unwrap(), tx.signatures[0]);
let rpc_client = RpcClient::new_mock("fails".to_string());
let signature = rpc_client.send_transaction(&tx);
assert!(signature.is_err());
// Test bad signature returned from rpc node
let rpc_client = RpcClient::new_mock("malicious".to_string());
let signature = rpc_client.send_transaction(&tx);
assert!(signature.is_err());
}
#[test]
fn test_get_recent_blockhash() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let expected_blockhash: Hash = PUBKEY.parse().unwrap();
let (blockhash, _fee_calculator) = rpc_client.get_recent_blockhash().expect("blockhash ok");
assert_eq!(blockhash, expected_blockhash);
let rpc_client = RpcClient::new_mock("fails".to_string());
assert!(rpc_client.get_recent_blockhash().is_err());
}
#[test]
fn test_custom_request() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let slot = rpc_client.get_slot().unwrap();
assert_eq!(slot, 0);
let custom_slot = rpc_client
.send::<Slot>(RpcRequest::Custom { method: "getSlot" }, Value::Null)
.unwrap();
assert_eq!(slot, custom_slot);
}
#[test]
fn test_get_signature_status() {
let signature = Signature::default();
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let status = rpc_client.get_signature_status(&signature).unwrap();
assert_eq!(status, Some(Ok(())));
let rpc_client = RpcClient::new_mock("sig_not_found".to_string());
let status = rpc_client.get_signature_status(&signature).unwrap();
assert_eq!(status, None);
let rpc_client = RpcClient::new_mock("account_in_use".to_string());
let status = rpc_client.get_signature_status(&signature).unwrap();
assert_eq!(status, Some(Err(TransactionError::AccountInUse)));
}
#[test]
fn test_send_and_confirm_transaction() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let result = rpc_client.send_and_confirm_transaction(&tx);
result.unwrap();
let rpc_client = RpcClient::new_mock("account_in_use".to_string());
let result = rpc_client.send_and_confirm_transaction(&tx);
assert!(result.is_err());
let rpc_client = RpcClient::new_mock("instruction_error".to_string());
let result = rpc_client.send_and_confirm_transaction(&tx);
assert_matches!(
result.unwrap_err().kind(),
ClientErrorKind::TransactionError(TransactionError::InstructionError(
0,
InstructionError::UninitializedAccount
))
);
let rpc_client = RpcClient::new_mock("sig_not_found".to_string());
let result = rpc_client.send_and_confirm_transaction(&tx);
if let ClientErrorKind::Io(err) = result.unwrap_err().kind() {
assert_eq!(err.kind(), io::ErrorKind::Other);
}
}
#[test]
fn test_rpc_client_thread() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
thread::spawn(move || rpc_client);
}
}
| {
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
)?;
let confirmations = result.value[0]
.clone()
.ok_or_else(|| {
ClientError::new_with_request(
ClientErrorKind::Custom("signature not found".to_string()),
RpcRequest::GetSignatureStatuses,
)
})?
.confirmations
.unwrap_or(MAX_LOCKOUT_HISTORY + 1);
Ok(confirmations)
} |
ISwaggerResponses.ts | import {IResponseOptions} from "@tsed/common";
import {Header, Schema} from "swagger-schema-official";
declare global {
namespace TsED {
interface ResponseHeader extends Header {}
interface ResponseOptions {
description: string;
schema?: Schema;
examples?: {[exampleName: string]: {}};
}
}
} | /**
* @deprecated
*/
export interface ISwaggerResponses extends IResponseOptions {} | |
broker.go | /*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"time"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/pkg/apis"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// BrokerOption enables further configuration of a Broker.
type BrokerOption func(*v1alpha1.Broker)
// NewBroker creates a Broker with BrokerOptions.
func NewBroker(name, namespace string, o ...BrokerOption) *v1alpha1.Broker {
b := &v1alpha1.Broker{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
}
for _, opt := range o {
opt(b)
}
b.SetDefaults(context.Background())
return b
}
// WithInitBrokerConditions initializes the Broker's conditions.
func WithInitBrokerConditions(b *v1alpha1.Broker) |
func WithBrokerDeletionTimestamp(b *v1alpha1.Broker) {
t := metav1.NewTime(time.Unix(1e9, 0))
b.ObjectMeta.SetDeletionTimestamp(&t)
}
// WithBrokerChannelProvisioner sets the Broker's ChannelTemplate provisioner.
func WithBrokerChannelProvisioner(provisioner *corev1.ObjectReference) BrokerOption {
return func(b *v1alpha1.Broker) {
b.Spec.ChannelTemplate = &v1alpha1.ChannelSpec{
Provisioner: provisioner,
}
}
}
// WithBrokerAddress sets the Broker's address.
func WithBrokerAddress(address string) BrokerOption {
return func(b *v1alpha1.Broker) {
b.Status.SetAddress(&apis.URL{
Scheme: "http",
Host: address,
})
}
}
// WithBrokerReady sets .Status to ready.
func WithBrokerReady(b *v1alpha1.Broker) {
b.Status = *v1alpha1.TestHelper.ReadyBrokerStatus()
}
// WithTriggerChannelFailed calls .Status.MarkTriggerChannelFailed on the Broker.
func WithTriggerChannelFailed(reason, msg string) BrokerOption {
return func(b *v1alpha1.Broker) {
b.Status.MarkTriggerChannelFailed(reason, msg)
}
}
// WithFilterFailed calls .Status.MarkFilterFailed on the Broker.
func WithFilterFailed(reason, msg string) BrokerOption {
return func(b *v1alpha1.Broker) {
b.Status.MarkFilterFailed(reason, msg)
}
}
// WithIngressFailed calls .Status.MarkIngressFailed on the Broker.
func WithIngressFailed(reason, msg string) BrokerOption {
return func(b *v1alpha1.Broker) {
b.Status.MarkIngressFailed(reason, msg)
}
}
// WithIngressChannelFailed calls .Status.MarkIngressChannelFailed on the Broker.
func WithIngressChannelFailed(reason, msg string) BrokerOption {
return func(b *v1alpha1.Broker) {
b.Status.MarkIngressChannelFailed(reason, msg)
}
}
// WithTriggerChannelReady calls .Status.PropagateTriggerChannelReadiness on the Broker.
func WithTriggerChannelReady() BrokerOption {
return func(b *v1alpha1.Broker) {
b.Status.PropagateTriggerChannelReadiness(v1alpha1.TestHelper.ReadyChannelStatus())
}
}
func WithFilterDeploymentAvailable() BrokerOption {
return func(b *v1alpha1.Broker) {
b.Status.PropagateFilterDeploymentAvailability(v1alpha1.TestHelper.AvailableDeployment())
}
}
func WithIngressDeploymentAvailable() BrokerOption {
return func(b *v1alpha1.Broker) {
b.Status.PropagateIngressDeploymentAvailability(v1alpha1.TestHelper.AvailableDeployment())
}
}
func WithBrokerIngressChannelReady() BrokerOption {
return func(b *v1alpha1.Broker) {
b.Status.PropagateIngressChannelReadiness(v1alpha1.TestHelper.ReadyChannelStatus())
}
}
func WithBrokerIngressSubscriptionFailed(reason, msg string) BrokerOption {
return func(b *v1alpha1.Broker) {
b.Status.MarkIngressSubscriptionFailed(reason, msg)
}
}
| {
b.Status.InitializeConditions()
} |
invitation.rs | use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use rocket::response::status::Created;
use rocket::State;
use rocket_contrib::json::Json;
use super::super::config_parser::Config;
use super::super::guards::bearer;
use super::super::guards::bearer::Claims;
use super::super::guards::permission::Permissions;
use super::super::models::invitation;
use super::super::models::invitation::Invitation;
use super::super::models::resource::{ActionType, ResourceType};
use super::super::storage::Database;
use super::Error;
#[post("/users/<user_id>/invitations")]
pub fn create_invitation(
db: State<Database>,
user_id: i64,
claims: Claims,
    permissions: Permissions,
) -> Result<Created<Json<Invitation>>, Error> {
    if permissions.contains(ResourceType::Application, ActionType::CREATE) {
if claims.uid == user_id {
let pg_conn = db.get_conn()?;
let code: String = thread_rng().sample_iter(&Alphanumeric).take(30).collect();
let new_invitation = invitation::create(&*pg_conn, claims.role_id, user_id, &code)?;
let url = String::from("/invitations");
Ok(Created(url, Some(Json(new_invitation))))
} else {
Err(Error::Privilege)
}
} else {
Err(Error::Forbidden)
}
}
#[get("/users/<user_id>/invitations")]
pub fn select_invitations(
db: State<Database>,
user_id: i64,
claims: Claims,
    permissions: Permissions,
) -> Result<Json<Vec<Invitation>>, Error> {
    if permissions.contains(ResourceType::Application, ActionType::SELECT) {
if claims.uid == user_id {
let pg_conn = db.get_conn()?;
let invitations = invitation::select(&*pg_conn, user_id)?;
Ok(Json(invitations))
} else {
Err(Error::Privilege)
}
} else {
Err(Error::Forbidden)
}
}
#[delete("/users/<user_id>/invitations/<invitation_id>")]
pub fn remove_invitation(
db: State<Database>,
user_id: i64,
invitation_id: i64,
claims: Claims,
    permissions: Permissions,
) -> Result<Json<Invitation>, Error> | {
    if permissions.contains(ResourceType::Application, ActionType::DELETE) {
if claims.uid == user_id {
let pg_conn = db.get_conn()?;
let removed_invitation = invitation::remove(&*pg_conn, invitation_id, user_id)?;
Ok(Json(removed_invitation))
} else {
Err(Error::Privilege)
}
} else {
Err(Error::Forbidden)
}
} |
|
CrissCrossResNet2D.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from einops import rearrange, repeat
class CrissCrossAttention(nn.Module):
def | (self, in_dim):
super(CrissCrossAttention, self).__init__()
self.query_conv = nn.Conv2d(in_channels=in_dim,
out_channels=in_dim // 8,
kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim,
out_channels=in_dim // 8,
kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim,
out_channels=in_dim,
kernel_size=1)
self.softmax = nn.Softmax(dim=3)
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, x):
device = x.device
b, _, h, w = x.shape
q = self.query_conv(x)
q_h = rearrange(q, "b c h w -> (b w) h c")
q_w = rearrange(q, "b c h w -> (b h) w c")
k = self.key_conv(x)
k_h = rearrange(k, "b c h w -> (b w) c h")
k_w = rearrange(k, "b c h w -> (b h) c w")
v = self.value_conv(x)
v_h = rearrange(v, "b c h w -> (b w) c h")
v_w = rearrange(v, "b c h w -> (b h) c w")
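        # Mask the diagonal of the height-direction energies with -inf so the centre
        # pixel is not attended twice (the width-direction branch already covers it).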
inf = repeat(torch.diag(
torch.tensor(float("-inf"), device=device).repeat(h), 0),
"h1 h2 -> (b w) h1 h2",
b=b,
w=w)
e_h = rearrange(torch.bmm(q_h, k_h) + inf,
"(b w) h1 h2 -> b h1 w h2",
b=b)
e_w = rearrange(torch.bmm(q_w, k_w), "(b h) w1 w2 -> b h w1 w2", b=b)
attn = self.softmax(torch.cat([e_h, e_w], 3))
attn_h, attn_w = attn.chunk(2, dim=-1)
attn_h = rearrange(attn_h, "b h1 w h2 -> (b w) h1 h2")
attn_w = rearrange(attn_w, "b h w1 w2 -> (b h) w1 w2")
out_h = torch.bmm(v_h, rearrange(attn_h, "bw h1 h2 -> bw h2 h1"))
out_h = rearrange(out_h, "(b w) c h -> b c h w", b=b)
out_w = torch.bmm(v_w, rearrange(attn_w, "bh w1 w2 -> bh w2 w1"))
out_w = rearrange(out_w, "(b h) c w -> b c h w", b=b)
return_attn = torch.stack([
rearrange(attn_h, "(b w) h1 h2 -> b h2 h1 w", b=b),
rearrange(attn_w, "(b h) w1 w2 -> b w2 h w1", b=b)
],
dim=1)
return self.gamma * (out_h + out_w) + x, return_attn
class RCCAModule(nn.Module):
def __init__(self, in_channels, kernel_size=3, return_attn=False):
super(RCCAModule, self).__init__()
self.return_attn = return_attn
inter_channels = in_channels // 4
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels,
inter_channels,
kernel_size=(kernel_size, kernel_size),
stride=(1, 1),
padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2),
bias=False), nn.BatchNorm2d(inter_channels), nn.ReLU())
self.cca = CrissCrossAttention(inter_channels)
self.conv2 = nn.Sequential(
nn.Conv2d(inter_channels,
in_channels,
kernel_size=(kernel_size, kernel_size),
stride=(1, 1),
padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2),
bias=False), nn.BatchNorm2d(in_channels), nn.ReLU())
def forward(self, x):
output = self.conv1(x)
attns = []
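        # Two recurrent criss-cross passes are enough for every pixel to gather
        # context from the entire image.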
for _ in range(2):
output, attn = checkpoint(self.cca, output)
attns.append(attn)
output = self.conv2(output)
if self.return_attn:
return output, attns
else:
return output
| __init__ |
tr-TR.js | $.extend(true,$.summernote.lang,{
'tr-TR':{ /* Turkish */
imageAttributes:{
dialogTitle: 'Resim Özellikleri',
tooltip: 'Resim Özellikleri',
tabImage: 'Yükleme',
src: 'Kaynak',
browse: 'Araştır',
title: 'Başlık',
alt: 'Alt. Metin',
      dimensions: 'Boyutlar',
      tabAttributes: 'Öznitelikler',
class: 'Sınıf',
style: 'Stil',
role: 'Rol',
      tabLink: 'Bağlantı',
linkHref: 'URL',
linkTarget: 'Hedef',
linkTargetInfo: 'Seçenekler: _self, _blank, _top, _parent',
linkClass: 'Sınıf',
linkStyle: 'Stil',
      linkRel: 'Rel',
linkRelInfo: 'Seçenekler: alternate, author, bookmark, help, license, next, nofollow, noreferrer, prefetch, prev, search, tag',
linkRole: 'Rol',
tabUpload: 'Yükleme',
upload: 'Yükleme',
tabBrowse: 'Araştır',
      editBtn: 'Tamam'
} | }
}); |
|
main.rs | fn main() | {
println!("Hello rrig!");
} |
|
version_mediatypes.py | # coding: utf-8
import pprint
import re
import six
class VersionMediatypes:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'type': 'str',
'base': 'str'
}
attribute_map = {
'type': 'type',
'base': 'base'
}
def __init__(self, type=None, base=None):
"""VersionMediatypes - a model defined in huaweicloud sdk"""
self._type = None
self._base = None
self.discriminator = None
self.type = type
self.base = base
@property
def type(self):
"""Gets the type of this VersionMediatypes.
        Media type.
:return: The type of this VersionMediatypes.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this VersionMediatypes.
        Media type.
:param type: The type of this VersionMediatypes.
:type: str
"""
self._type = type
@property
def base(self):
"""Gets the base of this VersionMediatypes.
        Base type.
:return: The base of this VersionMediatypes.
:rtype: str
"""
return self._base
@base.setter
def base(self, base):
"""Sets the base of this VersionMediatypes.
        Base type.
:param base: The base of this VersionMediatypes.
:type: str
"""
self._base = base
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` a | nt`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VersionMediatypes):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| nd `ppri |
utils.py | from datetime import datetime
def validate_dt(date):
""" Validate datetime string
:param date: The datetime string
:type date: str
:returns: True if the date is correct datetime string,
False otherwise
:rtype: bool
"""
pattern = '000101000000'
# letters in date
if not date.isdecimal():
return False
# at least year must be specified
if len(date) < 2 or len(date) > 12:
return False
if len(date) % 2 > 0:
return False
chk = date + pattern[len(date):]
try:
datetime.strptime(chk, '%y%m%d%H%M%S')
except ValueError:
return False
return True
if __name__ == '__main__':
| print('\nDate Validator Check --- START')
print('------------------------------\n')
dates = [
'99', '1312', '010212', '200229', '131024122203', '0',
'03014', '01021312121222', '201301', '200230', '310131271212'
]
for date in dates:
print('%-15s - %s' % (date,
'valid' if validate_dt(date) else 'invalid'))
print('\n----------------------------')
print('Date Validator Check --- END\n') |
|
queries.ts | import { attributeFragment } from "@saleor/attributes/queries";
import makeQuery from "@saleor/hooks/makeQuery";
import gql from "graphql-tag";
import { pageInfoFragment, TypedQuery } from "../queries";
import { ProductTypeCreateData } from "./types/ProductTypeCreateData";
import {
ProductTypeDetails,
ProductTypeDetailsVariables
} from "./types/ProductTypeDetails";
import {
ProductTypeList,
ProductTypeListVariables
} from "./types/ProductTypeList";
export const productTypeFragment = gql`
fragment ProductTypeFragment on ProductType {
id
name
hasVariants
isShippingRequired
taxType {
description
taxCode
}
} | ${attributeFragment}
${productTypeFragment}
fragment ProductTypeDetailsFragment on ProductType {
...ProductTypeFragment
productAttributes {
...AttributeFragment
}
variantAttributes {
...AttributeFragment
}
weight {
unit
value
}
}
`;
export const productTypeListQuery = gql`
${pageInfoFragment}
${productTypeFragment}
query ProductTypeList(
$after: String
$before: String
$first: Int
$last: Int
$filter: ProductTypeFilterInput
$sort: ProductTypeSortingInput
) {
productTypes(
after: $after
before: $before
first: $first
last: $last
filter: $filter
sortBy: $sort
) {
edges {
node {
...ProductTypeFragment
}
}
pageInfo {
...PageInfoFragment
}
}
}
`;
export const useProductTypeListQuery = makeQuery<
ProductTypeList,
ProductTypeListVariables
>(productTypeListQuery);
export const productTypeDetailsQuery = gql`
${productTypeDetailsFragment}
query ProductTypeDetails($id: ID!) {
productType(id: $id) {
...ProductTypeDetailsFragment
}
shop {
defaultWeightUnit
}
taxTypes {
taxCode
description
}
}
`;
export const TypedProductTypeDetailsQuery = TypedQuery<
ProductTypeDetails,
ProductTypeDetailsVariables
>(productTypeDetailsQuery);
export const productTypeCreateDataQuery = gql`
query ProductTypeCreateData {
shop {
defaultWeightUnit
}
taxTypes {
taxCode
description
}
}
`;
export const TypedProductTypeCreateDataQuery = TypedQuery<
ProductTypeCreateData,
{}
>(productTypeCreateDataQuery); | `;
export const productTypeDetailsFragment = gql` |
mail.py | import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from datetime import datetime
import logging
import json
with open('config.json', 'r') as config:
params = json.load(config)["params"]
# params
log_dir = params["log_dir"]
def | (Mail_sender_id, Mail_sender_password, receiver_mail_id, message_to_send, company, file):
try:
#The mail addresses and password
sender_address = Mail_sender_id
sender_pass = Mail_sender_password
receiver_address = receiver_mail_id
mail_content = message_to_send
#Setup the MIME
message = MIMEMultipart()
message['From'] = sender_address
message['To'] = receiver_address
message['Subject'] = f'Information regarding {company}'
#The subject line
#The body and the attachments for the mail
message.attach(MIMEText(mail_content, 'plain'))
attach_file_name = file
attach_file = open(attach_file_name, 'rb') # Open the file as binary mode
        payload = MIMEBase('application', 'octet-stream')
payload.set_payload((attach_file).read())
encoders.encode_base64(payload) #encode the attachment
#add payload header with filename
        payload.add_header('Content-Disposition', 'attachment', filename=attach_file_name)
message.attach(payload)
#Create SMTP session for sending the mail
session = smtplib.SMTP('smtp.gmail.com', 587) #use gmail with port
session.starttls() #enable security
session.login(sender_address, sender_pass) #login with mail_id and password
text = message.as_string()
session.sendmail(sender_address, receiver_address, text)
session.quit()
logging.basicConfig(filename=log_dir + 'clean_log_' + datetime.today().strftime("%d_%m_%Y"), filemode='w', format='%(asctime)s - %(message)s', level=logging.INFO)
logging.warning('Mail Sent')
print('Mail Sent')
except Exception as e:
        print(f'Could not send mail: {e}')
        logging.warning(f'Mail could not be sent: {e}')
rem.rs | use std::ops::Rem;
use num_traits::{CheckedRem, NumCast};
use crate::datatypes::PrimitiveType;
use crate::{
array::{Array, PrimitiveArray},
compute::{
arithmetics::{ArrayCheckedRem, ArrayRem},
arity::{binary, binary_checked, unary, unary_checked},
},
};
use strength_reduce::{
StrengthReducedU16, StrengthReducedU32, StrengthReducedU64, StrengthReducedU8,
};
use super::NativeArithmetics;
/// Remainder of two primitive arrays with the same type.
/// Panics if the divisor is zero of one pair of values overflows.
///
/// # Examples
/// ```
/// use arrow2::compute::arithmetics::basic::rem;
/// use arrow2::array::Int32Array;
///
/// let a = Int32Array::from(&[Some(10), Some(7)]);
/// let b = Int32Array::from(&[Some(5), Some(6)]);
/// let result = rem(&a, &b);
/// let expected = Int32Array::from(&[Some(0), Some(1)]);
/// assert_eq!(result, expected)
/// ```
pub fn rem<T>(lhs: &PrimitiveArray<T>, rhs: &PrimitiveArray<T>) -> PrimitiveArray<T>
where
T: NativeArithmetics + Rem<Output = T>,
{
binary(lhs, rhs, lhs.data_type().clone(), |a, b| a % b)
}
/// Checked remainder of two primitive arrays. If the result from the remainder
/// overflows, the result for the operation will change the validity array
/// making this operation None
///
/// # Examples
/// ```
/// use arrow2::compute::arithmetics::basic::checked_rem;
/// use arrow2::array::Int8Array;
///
/// let a = Int8Array::from(&[Some(-100i8), Some(10i8)]);
/// let b = Int8Array::from(&[Some(100i8), Some(0i8)]);
/// let result = checked_rem(&a, &b);
/// let expected = Int8Array::from(&[Some(-0i8), None]);
/// assert_eq!(result, expected);
/// ```
pub fn checked_rem<T>(lhs: &PrimitiveArray<T>, rhs: &PrimitiveArray<T>) -> PrimitiveArray<T>
where
T: NativeArithmetics + CheckedRem<Output = T>,
{
let op = move |a: T, b: T| a.checked_rem(&b);
binary_checked(lhs, rhs, lhs.data_type().clone(), op)
}
impl<T> ArrayRem<PrimitiveArray<T>> for PrimitiveArray<T>
where
T: NativeArithmetics + Rem<Output = T>,
{
fn rem(&self, rhs: &PrimitiveArray<T>) -> Self {
rem(self, rhs)
}
}
impl<T> ArrayCheckedRem<PrimitiveArray<T>> for PrimitiveArray<T>
where
T: NativeArithmetics + CheckedRem<Output = T>,
{
fn checked_rem(&self, rhs: &PrimitiveArray<T>) -> Self |
}
/// Remainder a primitive array of type T by a scalar T.
/// Panics if the divisor is zero.
///
/// # Examples
/// ```
/// use arrow2::compute::arithmetics::basic::rem_scalar;
/// use arrow2::array::Int32Array;
///
/// let a = Int32Array::from(&[None, Some(6), None, Some(7)]);
/// let result = rem_scalar(&a, &2i32);
/// let expected = Int32Array::from(&[None, Some(0), None, Some(1)]);
/// assert_eq!(result, expected)
/// ```
pub fn rem_scalar<T>(lhs: &PrimitiveArray<T>, rhs: &T) -> PrimitiveArray<T>
where
T: NativeArithmetics + Rem<Output = T> + NumCast,
{
let rhs = *rhs;
match T::PRIMITIVE {
PrimitiveType::UInt64 => {
let lhs = lhs.as_any().downcast_ref::<PrimitiveArray<u64>>().unwrap();
let rhs = rhs.to_u64().unwrap();
let reduced_rem = StrengthReducedU64::new(rhs);
// small hack to avoid a transmute of `PrimitiveArray<u64>` to `PrimitiveArray<T>`
let r = unary(lhs, |a| a % reduced_rem, lhs.data_type().clone());
(&r as &dyn Array)
.as_any()
.downcast_ref::<PrimitiveArray<T>>()
.unwrap()
.clone()
}
PrimitiveType::UInt32 => {
let lhs = lhs.as_any().downcast_ref::<PrimitiveArray<u32>>().unwrap();
let rhs = rhs.to_u32().unwrap();
let reduced_rem = StrengthReducedU32::new(rhs);
let r = unary(lhs, |a| a % reduced_rem, lhs.data_type().clone());
// small hack to avoid an unsafe transmute of `PrimitiveArray<u64>` to `PrimitiveArray<T>`
(&r as &dyn Array)
.as_any()
.downcast_ref::<PrimitiveArray<T>>()
.unwrap()
.clone()
}
PrimitiveType::UInt16 => {
let lhs = lhs.as_any().downcast_ref::<PrimitiveArray<u16>>().unwrap();
let rhs = rhs.to_u16().unwrap();
let reduced_rem = StrengthReducedU16::new(rhs);
let r = unary(lhs, |a| a % reduced_rem, lhs.data_type().clone());
// small hack to avoid an unsafe transmute of `PrimitiveArray<u16>` to `PrimitiveArray<T>`
(&r as &dyn Array)
.as_any()
.downcast_ref::<PrimitiveArray<T>>()
.unwrap()
.clone()
}
PrimitiveType::UInt8 => {
let lhs = lhs.as_any().downcast_ref::<PrimitiveArray<u8>>().unwrap();
let rhs = rhs.to_u8().unwrap();
let reduced_rem = StrengthReducedU8::new(rhs);
let r = unary(lhs, |a| a % reduced_rem, lhs.data_type().clone());
// small hack to avoid an unsafe transmute of `PrimitiveArray<u8>` to `PrimitiveArray<T>`
(&r as &dyn Array)
.as_any()
.downcast_ref::<PrimitiveArray<T>>()
.unwrap()
.clone()
}
_ => unary(lhs, |a| a % rhs, lhs.data_type().clone()),
}
}
/// Checked remainder of a primitive array of type T by a scalar T. If the
/// divisor is zero then the validity array is changed to None.
///
/// # Examples
/// ```
/// use arrow2::compute::arithmetics::basic::checked_rem_scalar;
/// use arrow2::array::Int8Array;
///
/// let a = Int8Array::from(&[Some(-100i8)]);
/// let result = checked_rem_scalar(&a, &100i8);
/// let expected = Int8Array::from(&[Some(0i8)]);
/// assert_eq!(result, expected);
/// ```
pub fn checked_rem_scalar<T>(lhs: &PrimitiveArray<T>, rhs: &T) -> PrimitiveArray<T>
where
T: NativeArithmetics + CheckedRem<Output = T>,
{
let rhs = *rhs;
let op = move |a: T| a.checked_rem(&rhs);
unary_checked(lhs, op, lhs.data_type().clone())
}
impl<T> ArrayRem<T> for PrimitiveArray<T>
where
T: NativeArithmetics + Rem<Output = T> + NumCast,
{
fn rem(&self, rhs: &T) -> Self {
rem_scalar(self, rhs)
}
}
impl<T> ArrayCheckedRem<T> for PrimitiveArray<T>
where
T: NativeArithmetics + CheckedRem<Output = T>,
{
fn checked_rem(&self, rhs: &T) -> Self {
checked_rem_scalar(self, rhs)
}
}
| {
checked_rem(self, rhs)
} |
inprocstream.go | /*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package scc
import (
"errors"
"fmt"
pb "github.com/mcc-github/blockchain-protos-go/peer"
)
type SendPanicFailure string
func (e SendPanicFailure) Error() string {
return fmt.Sprintf("send failure %s", string(e))
}
type inProcStream struct {
recv <-chan *pb.ChaincodeMessage
send chan<- *pb.ChaincodeMessage
}
func newInProcStream(recv <-chan *pb.ChaincodeMessage, send chan<- *pb.ChaincodeMessage) *inProcStream {
return &inProcStream{recv, send}
}
func (s *inProcStream) Send(msg *pb.ChaincodeMessage) (err error) {
defer func() {
if r := recover(); r != nil {
err = SendPanicFailure(fmt.Sprintf("%s", r))
return
}
}()
s.send <- msg
return
}
func (s *inProcStream) Recv() (*pb.ChaincodeMessage, error) {
msg, ok := <-s.recv
if !ok |
return msg, nil
}
func (s *inProcStream) CloseSend() error {
return nil
}
| {
return nil, errors.New("channel is closed")
} |
test_blue_analyse.py | # These tests check that the Blue Monitor action is working vs Abstract Red Actions.
# tests need to check that a range of inputs result in the correct changes to the state and return the correct obs
# tests should establish varying environmental states that results in these actions performing differently
import inspect
from CybORG import CybORG
from CybORG.Shared.Actions import DiscoverRemoteSystems, DiscoverNetworkServices, ExploitRemoteService, \
PrivilegeEscalate, Impact, Analyse
from CybORG.Shared.Enums import TrinaryEnum, FileType, Path
from CybORG.Tests.EphemeralPort import Win2008EphemeralPort, LinuxEphemeralPort
from CybORG.Tests.test_sim.test_Acceptance.test_reward_function import security_values, availability_values
import pytest
from CybORG.Agents import B_lineAgent
@pytest.mark.skip()
def test_blue_analyse_on_red_killchain(security_values, availability_values):
# create cyborg environment
path = str(inspect.getfile(CybORG))
path = path[:-10] + '/Shared/Scenarios/Scenario1b.yaml'
cyborg = CybORG(path, 'sim')
# Run blue monitor with no red activity.
blue_session = cyborg.get_observation('Blue')['Defender']['Sessions'][0]['ID']
blue_hosts = cyborg.get_action_space('Blue')['hostname']
def blue_analyse(expected_result, host, reward=0.0):
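        # Step Blue with an Analyse action on the host and assert the resulting
        # observation and reward match expectations.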
blue_action = Analyse(session=blue_session, agent='Blue', hostname=host)
results = cyborg.step('Blue', blue_action)
assert not results.done
assert results.reward == round(reward, 1)
assert results.observation == expected_result
windows_reverse_shell = {'Density': 0.9,
'File Name': 'cmd.exe',
'Known File': FileType.UNKNOWN,
'Known Path': Path.TEMP,
'Path': 'C:\\temp\\',
'Signed': False}
linux_reverse_shell = {'Density': 0.9,
'File Name': 'cmd.sh',
'Known File': FileType.UNKNOWN,
'Known Path': Path.TEMP,
'Path': '/tmp/',
'Signed': False}
windows_potato_exploit = {'Density': 0.9,
'File Name': 'escalate.exe',
'Known File': FileType.UNKNOWN,
'Known Path': Path.TEMP,
'Path': 'C:\\temp\\',
'Signed': False}
linux_kernel_exploit = {'Density': 0.9,
'File Name': 'escalate.sh',
'Known File': FileType.UNKNOWN,
'Known Path': Path.TEMP,
'Path': '/tmp/',
'Signed': False}
# test discover remote systems
# act on all subnets in action space
action_space = cyborg.get_action_space('Red')
initial_observation = cyborg.get_observation('Red')
session = list(action_space['session'].keys())[0]
for subnet in action_space["subnet"]:
# ping sweep leaves no artifacts for scenario1b
expected_observation = {'success': TrinaryEnum.TRUE}
action = DiscoverRemoteSystems(subnet=subnet, agent='Red', session=session)
cyborg.step(action=action, agent='Red')
# test if output of observation matches expected output
for host in blue_hosts:
blue_analyse(expected_observation, host)
# test discover network services
# act on ip addresses discovered in first subnet
addresses = [i for i in action_space["ip_address"]]
attacker_address = cyborg.environment_controller.hostname_ip_map['User0']
for address in addresses:
expected_observation = {'success': TrinaryEnum.TRUE}
action = DiscoverNetworkServices(agent='Red', ip_address=address, session=session)
cyborg.step(action=action, agent='Red')
# test if output of observation matches expected output
for host in blue_hosts:
blue_analyse(expected_observation, host)
# test exploit action by acting on all known ip_addresses
# own all hosts on the client subnet
reward = 0
for address in addresses:
action = ExploitRemoteService(agent='Red', ip_address=address, session=session)
cyborg.step(action=action, agent='Red')
# test if output of observation matches expected output
hostname = {v: i for i, v in cyborg.environment_controller.hostname_ip_map.items()}[address]
if not action_space["ip_address"][address]:
expected_observation = {'success': TrinaryEnum.TRUE}
elif address == cyborg.environment_controller.hostname_ip_map['User0'] or address == \
cyborg.environment_controller.hostname_ip_map['User1'] or address == \
cyborg.environment_controller.hostname_ip_map['User2']:
expected_observation = {hostname: {'Files': [windows_reverse_shell]},
'success': TrinaryEnum.TRUE}
reward += security_values[hostname][0]
elif address == cyborg.environment_controller.hostname_ip_map['User3'] or address == \
cyborg.environment_controller.hostname_ip_map['User4']:
expected_observation = {hostname: {'Files': [linux_reverse_shell]},
'success': TrinaryEnum.TRUE}
else:
raise NotImplementedError
blue_analyse(expected_observation, hostname, reward)
# priv esc on all hosts in client subnet
action_space = cyborg.get_action_space('Red')
hostnames = action_space['hostname']
count = 2
for hostname in hostnames:
action = PrivilegeEscalate(agent='Red', hostname=hostname, session=session)
cyborg.step(action=action, agent='Red')
if hostnames[hostname]:
last_action = cyborg.get_last_action('Red')
assert last_action == action
# test if output of observation matches expected output
if 'User' in hostname:
if '0' in hostname or '1' in hostname or '2' in hostname:
expected_observation = {hostname: {'Files': [windows_reverse_shell]},
'success': TrinaryEnum.TRUE}
else:
expected_observation = {hostname: {'Files': [linux_reverse_shell,
linux_kernel_exploit]},
'success': TrinaryEnum.TRUE}
reward += security_values[hostname][1]
else:
expected_observation = {'success': TrinaryEnum.TRUE}
blue_analyse(expected_observation, host=hostname, reward=reward)
# recon hosts in enterprise subnet
action_space = cyborg.get_action_space('Red')
addresses = [i for i in action_space["ip_address"]]
for address in addresses:
if not action_space["ip_address"][address] \
or address == cyborg.environment_controller.hostname_ip_map['User0'] \
or address == cyborg.environment_controller.hostname_ip_map['User1'] \
or address == cyborg.environment_controller.hostname_ip_map['User2'] \
or address == cyborg.environment_controller.hostname_ip_map['User3'] \
or address == cyborg.environment_controller.hostname_ip_map['User4']:
continue
else:
expected_observation = {'success': TrinaryEnum.TRUE}
hostname = {v: i for i, v in cyborg.environment_controller.hostname_ip_map.items()}[address]
action = DiscoverNetworkServices(agent='Red', ip_address=address, session=session)
result = cyborg.step(action=action, agent='Red')
# test if output of observation matches expected output
blue_analyse(expected_observation, hostname, reward)
# exploit hosts in enterprise subnet
action_space = cyborg.get_action_space('Red')
addresses = [i for i in action_space["ip_address"]]
for address in addresses:
hostname = {v: i for i, v in cyborg.environment_controller.hostname_ip_map.items()}[address]
if not action_space["ip_address"][address] \
or address == cyborg.environment_controller.hostname_ip_map['User0'] \
or address == cyborg.environment_controller.hostname_ip_map['User1'] \
or address == cyborg.environment_controller.hostname_ip_map['User2'] \
or address == cyborg.environment_controller.hostname_ip_map['User3'] \
or address == cyborg.environment_controller.hostname_ip_map['User4']:
continue
elif address == cyborg.environment_controller.hostname_ip_map['Enterprise0']:
expected_observation = {'success': TrinaryEnum.TRUE}
count += 1
elif address == cyborg.environment_controller.hostname_ip_map['Enterprise1']:
expected_observation = {hostname: {'Files': [windows_reverse_shell]},
'success': TrinaryEnum.TRUE}
count += 1
else:
raise ValueError
action = ExploitRemoteService(agent='Red', ip_address=address, session=session)
cyborg.step(action=action, agent='Red')
# test if output of observation matches expected output
blue_analyse(expected_observation, hostname, reward)
# priv esc hosts in enterprise subnet
enterprise_subnet = cyborg.environment_controller.subnet_cidr_map['Enterprise']
action_space = cyborg.get_action_space('Red')
hostnames = action_space['hostname']
count = 6
for hostname in hostnames:
if 'User' in hostname:
continue
action = PrivilegeEscalate(agent='Red', hostname=hostname, session=session)
result = cyborg.step(action=action, agent='Red')
# test if output of observation matches expected output
if hostname == 'Enterprise0':
expected_observation = {'Enterprise0': {'Files': [linux_kernel_exploit]},
'success': TrinaryEnum.TRUE}
reward += -1.0
elif hostname == 'Enterprise1':
expected_observation = {'Enterprise1': {'Files': [windows_reverse_shell,
windows_potato_exploit]},
'success': TrinaryEnum.TRUE}
reward += -1.0
else:
expected_observation = {'success': TrinaryEnum.TRUE}
blue_analyse(expected_observation, hostname, reward)
# scan enterprise subnet
action_space = cyborg.get_action_space('Red')
assert enterprise_subnet in action_space['subnet']
assert action_space['subnet'][enterprise_subnet]
action = DiscoverRemoteSystems(agent='Red', subnet=enterprise_subnet, session=session)
result = cyborg.step('Red', action)
expected_observation = {'success': TrinaryEnum.TRUE}
blue_analyse(expected_observation, 'Enterprise2', reward)
# scan the third enterprise host
enterprise2_ip = cyborg.environment_controller.hostname_ip_map['Enterprise2']
assert enterprise2_ip in action_space['ip_address']
assert action_space['ip_address'][enterprise2_ip]
action = DiscoverNetworkServices(agent='Red', ip_address=enterprise2_ip, session=session)
result = cyborg.step('Red', action)
expected_observation = {'success': TrinaryEnum.TRUE}
blue_analyse(expected_observation, 'Enterprise2', reward)
# exploit the 3rd enterprise host
action = ExploitRemoteService(agent='Red', ip_address=enterprise2_ip, session=session)
result = cyborg.step('Red', action)
expected_observation = {'Enterprise2': {'Files': [windows_reverse_shell]},
'success': TrinaryEnum.TRUE}
blue_analyse(expected_observation, 'Enterprise2', reward)
# priv esc on enterprise host
assert 'Enterprise2' in action_space['hostname']
assert action_space['hostname']['Enterprise2']
action = PrivilegeEscalate(agent='Red', hostname='Enterprise2', session=session)
result = cyborg.step('Red', action)
expected_observation = {'Enterprise2': {'Files': [windows_reverse_shell,
windows_potato_exploit]},
'success': TrinaryEnum.TRUE}
reward += -1.0
blue_analyse(expected_observation, 'Enterprise2', reward)
# scan the op server
op_server_ip = cyborg.environment_controller.hostname_ip_map['Op_Server0']
assert op_server_ip in action_space['ip_address']
assert action_space['ip_address'][op_server_ip]
action = DiscoverNetworkServices(agent='Red', ip_address=op_server_ip, session=session)
result = cyborg.step('Red', action)
expected_observation = {'success': TrinaryEnum.TRUE}
blue_analyse(expected_observation, 'Op_Server0', reward)
# exploit the op server
count = 9
action = ExploitRemoteService(agent='Red', ip_address=op_server_ip, session=session)
result = cyborg.step('Red', action)
expected_observation = {'success': TrinaryEnum.TRUE}
blue_analyse(expected_observation, 'Op_Server0', reward)
# priv esc on op server
action = PrivilegeEscalate(agent='Red', hostname='Op_Server0', session=session)
result = cyborg.step('Red', action)
expected_observation = {'Op_Server0': {'Files': [linux_kernel_exploit]},
'success': TrinaryEnum.TRUE}
reward += -1.0
blue_analyse(expected_observation, 'Op_Server0', reward)
action = Impact(agent='Red', hostname='Op_Server0', session=session)
result = cyborg.step('Red', action)
expected_observation = {'Op_Server0': {'Files': [linux_kernel_exploit]},
'success': TrinaryEnum.TRUE}
blue_analyse(expected_observation, 'Op_Server0', reward=reward-10.0)
@pytest.fixture()
def cyborg(request, agents={'Red': B_lineAgent}, seed=1):
|
def test_analyse_bug_aug19(cyborg):
cyborg.reset()
for i in range(10):
action = Analyse(session=0,agent='Blue',hostname='Enterprise0')
results = cyborg.step(action=action,agent='Blue')
obs = results.observation
for hostid, host in obs.items():
if hostid == 'success':
continue
if hostid != 'Enterprise0':
assert 'Processes' in host, f'repeats: {i}'
for process in host['Processes']:
assert 'Connections' in process
| path = str(inspect.getfile(CybORG))
path = path[:-10] + '/Shared/Scenarios/Scenario1b.yaml'
cyborg = CybORG(path, 'sim', agents=agents)
cyborg.set_seed(seed)
return cyborg |
clubs.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_weapon import Weapon
from ... import dice as D, material as M
class BaseClub(Weapon):
pass
class Club(BaseClub):
|
class Aklys(BaseClub):
def __init__(self):
super().__init__('aklys', weight=15,
damage=D.Dice.from_str('d3'), material=M.Iron, hit=0)
| def __init__(self):
super().__init__('club', weight=30, damage=D.Dice.from_str('d3'), material=M.Wood, hit=0) |
app.rs | use crate::vic::RASTER_LENGTH;
use crate::vic::TOTAL_HEIGHT;
use crate::C64;
use common::app::Controller;
use image::RgbaImage;
use piston::Event;
use piston::Loop;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
pub struct C64Controller {
c64: C64,
running: bool,
interrupted: Arc<AtomicBool>,
}
impl C64Controller {
pub fn new(c64: C64) -> Self |
fn run_frame(&mut self) {
if !self.running {
return;
}
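        // Emulate one full video frame: one tick per raster position.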
for _ in 0..RASTER_LENGTH * TOTAL_HEIGHT {
if let Err(e) = self.c64.tick() {
eprintln!("ERROR: {}. C64 halted.", e);
eprintln!("{}", self.display_machine_state());
self.running = false;
return;
}
}
}
}
impl Controller for C64Controller {
fn frame_image(&self) -> &RgbaImage {
self.c64.frame_image()
}
fn reset(&mut self) {
self.c64.reset();
self.running = true;
}
fn interrupted(&self) -> Arc<AtomicBool> {
self.interrupted.clone()
}
fn event(&mut self, event: &Event) {
match event {
Event::Loop(Loop::Update(_)) => {
self.run_frame();
}
_ => {}
}
}
fn display_machine_state(&self) -> String {
format!("{}\n{}", self.c64.cpu(), self.c64.cpu().memory())
}
}
#[cfg(test)]
mod tests {}
| {
Self {
c64,
running: false,
interrupted: Arc::new(AtomicBool::new(false)),
}
} |
sort_test.go | package migrate
import (
"sort"
. "gopkg.in/check.v1"
)
type SortSuite struct{}
var _ = Suite(&SortSuite{})
func (s *SortSuite) TestSortMigrations(c *C) {
var migrations = byId([]*Migration{
&Migration{Id: "10_abc", Up: nil, Down: nil},
&Migration{Id: "120_cde", Up: nil, Down: nil},
&Migration{Id: "1_abc", Up: nil, Down: nil},
&Migration{Id: "efg", Up: nil, Down: nil},
&Migration{Id: "2_cde", Up: nil, Down: nil},
&Migration{Id: "35_cde", Up: nil, Down: nil},
&Migration{Id: "3_efg", Up: nil, Down: nil},
&Migration{Id: "4_abc", Up: nil, Down: nil},
}) | c.Assert(migrations[1].Id, Equals, "2_cde")
c.Assert(migrations[2].Id, Equals, "3_efg")
c.Assert(migrations[3].Id, Equals, "4_abc")
c.Assert(migrations[4].Id, Equals, "10_abc")
c.Assert(migrations[5].Id, Equals, "35_cde")
c.Assert(migrations[6].Id, Equals, "120_cde")
c.Assert(migrations[7].Id, Equals, "efg")
} |
sort.Sort(migrations)
c.Assert(migrations, HasLen, 8)
c.Assert(migrations[0].Id, Equals, "1_abc") |
commands.py | import click
from achilles.model import AchillesModel
from achilles.utils import get_dataset_dim
@click.option(
"--gpu",
"-g",
metavar="",
default=None,
required=False,
show_default=True,
help="SET CUDA_VISIBLE_DEVICES to train model on specific" " GPU (e.g. 0 or 0,1)",
)
@click.option(
"--gpus",
metavar="",
default=1,
required=False,
show_default=True,
help="Build the model for distributed training across multiple GPUs",
)
@click.option(
"--threads",
"-t",
metavar="",
default=2,
required=False,
show_default=True,
help="Feed batches into training function using multiple processes",
)
@click.option(
"--batch_size",
"-b",
metavar="",
default=200,
required=False,
show_default=True,
help="Batch size for training, major determinant of RAM used on GPU",
)
@click.option(
"--epochs",
"-e",
metavar="",
default=100,
required=False,
show_default=True,
help="Number of epochs to train model for",
)
@click.option(
"--loss_function",
"--loss",
metavar="",
default="binary_crossentropy",
required=False,
show_default=True,
help="Compile model with loss function for training",
)
@click.option(
"--optimizer",
metavar="",
default="adam",
required=False,
show_default=True,
help="Compile model with optimizer for training",
)
@click.option(
"--recurrent_dropout",
metavar="",
default=0.2,
required=False,
show_default=True,
help="Internal dropout applied to LSTM layers",
)
@click.option(
"--dropout",
metavar="",
default=0.2,
required=False,
show_default=True,
help="Dropout applied to LSTM layers",
)
@click.option(
"--bidirectional",
metavar="",
is_flag=True,
required=False,
show_default=True,
help="Bidirectional LSTM",
)
@click.option(
"--units",
metavar="",
default=200,
required=False,
show_default=True,
help="Number of units per LSTMs",
)
@click.option(
"--channels",
metavar="",
default=256,
required=False,
show_default=True,
help="Number channels per Residual Block",
)
@click.option(
"--lstm",
metavar="",
default=1,
required=False,
show_default=True,
help="Number of stacked LSTMs connected to Residual Blocks",
)
@click.option(
"--residual_block",
metavar="",
default=1,
required=False,
show_default=True,
help="Number of stacked ResidualBlocks in initial layers",
)
@click.option(
"--activation",
"-a",
metavar="",
default="softmax",
required=False,
show_default=True,
help="Activation function applied to final fully connected " "classification layer",
)
@click.option(
"--verbose",
"-v",
metavar="",
is_flag=True,
required=False,
show_default=True,
help="Show training progress output and model architecture in Keras",
)
@click.option(
"--load",
"-l",
metavar="",
default="",
required=False,
show_default=True,
help="Trained model weights from Keras, HDF5 to"
" continue training, or re-train model",
)
@click.option(
"--outdir",
"-o",
metavar="",
default="training_model",
required=True,
show_default=True,
help="Output directory",
)
@click.option(
"--run_id",
"-i",
metavar="",
default="model",
required=True,
show_default=True,
help="Training run ID",
)
@click.option(
"--file",
"-f",
metavar="",
default=None,
required=True,
show_default=True,
help="Input training / validation HDF5 dataset",
)
@click.command()
def | (
file,
run_id,
outdir,
load,
verbose,
activation,
residual_block,
lstm,
channels,
units,
bidirectional,
dropout,
optimizer,
loss_function,
epochs,
batch_size,
threads,
gpus,
gpu,
):
"""Train neural network classifiers in Achilles"""
achilles = AchillesModel(data_file=file)
window_size = get_dataset_dim(file)[2] # 4D tensors
if load:
achilles.load_model(load)
else:
achilles.build(
window_size=window_size,
activation=activation,
gpus=gpus,
nb_residual_block=residual_block,
nb_channels=channels,
nb_rnn=lstm,
rnn_units=units,
dropout=dropout,
bidirectional=bidirectional,
)
# Compile model with loss function and optimizer
achilles.compile(optimizer=optimizer, loss=loss_function)
achilles.train(
epochs=epochs,
batch_size=batch_size,
workers=threads,
run_id=run_id,
outdir=outdir,
verbose=verbose,
gpu=gpu,
)
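# Example invocation (illustrative only: the dataset path, run ID and flag
# values are hypothetical, and this assumes the package exposes the command
# above as `train` on an `achilles` entry point):
#   achilles train -f training.hdf5 -i run1 -o training_model -b 200 -e 100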
collection.d.ts | import { WebElement } from 'selenium-webdriver';
import { ElementCondition } from './conditions';
import { Configuration } from './configuration';
import { Element } from './element';
import { Locator } from './locators/locator';
import { Assertable, Entity, Matchable } from './entity';
export declare class Collection extends Entity implements Assertable, Matchable {
private readonly locator;
protected readonly configuration: Configuration;
constructor(locator: Locator<Promise<WebElement[]>>, configuration: Configuration);
with(customConfig: Partial<Configuration>): Collection;
getAsCashedArray(): Promise<Element[]>;
elementAt(index: number): Element;
first(): Element;
filteredBy(...conditions: ElementCondition[]): Collection;
elementBy(...conditions: ElementCondition[]): Element;
getWebElements(): Promise<WebElement[]>;
toString(): string;
}
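// Usage sketch (illustrative only; the entry point and condition below are
// hypothetical stand-ins for this library's actual helpers):
//   const rows: Collection = browser.all('tr');
//   const firstVisible = rows.filteredBy(be.visible).first();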
error.go | package redis
import (
"context"
"io"
"net"
"strings"
"github.com/lilirui/redis/v8/internal/pool"
"github.com/lilirui/redis/v8/internal/proto"
)
// ErrClosed performs any operation on the closed client will return this error.
var ErrClosed = pool.ErrClosed
type Error interface {
error
// RedisError is a no-op function but
// serves to distinguish types that are Redis
// errors from ordinary errors: a type is a
// Redis error if it has a RedisError method.
RedisError()
}
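// Compile-time assertion that proto.RedisError satisfies the Error interface.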
var _ Error = proto.RedisError("")
func shouldRetry(err error, retryTimeout bool) bool {
switch err {
case io.EOF, io.ErrUnexpectedEOF:
return true
case nil, context.Canceled, context.DeadlineExceeded:
return false
}
if v, ok := err.(timeoutError); ok {
if v.Timeout() {
return retryTimeout
}
return true
}
s := err.Error()
if s == "ERR max number of clients reached" {
return true
}
if strings.HasPrefix(s, "LOADING ") {
return true
}
if strings.HasPrefix(s, "READONLY ") {
return true
}
if strings.HasPrefix(s, "CLUSTERDOWN ") {
return true
}
if strings.HasPrefix(s, "TRYAGAIN ") {
return true
}
return false
}
func isRedisError(err error) bool {
_, ok := err.(proto.RedisError)
return ok
}
func isBadConn(err error, allowTimeout bool, addr string) bool {
switch err {
case nil:
return false
case context.Canceled, context.DeadlineExceeded:
return true
}
if isRedisError(err) {
switch {
case isReadOnlyError(err):
// Close connections in read only state in case domain addr is used
// and domain resolves to a different Redis Server. See #790.
return true
case isMovedSameConnAddr(err, addr):
// Close connections when we are asked to move to the same addr
// of the connection. Force a DNS resolution when all connections
// of the pool are recycled
return true
default:
return false
}
}
if allowTimeout {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
return !netErr.Temporary()
}
}
return true
}
func isMovedError(err error) (moved bool, ask bool, addr string) {
if !isRedisError(err) {
return
}
s := err.Error()
switch {
case strings.HasPrefix(s, "MOVED "):
moved = true
case strings.HasPrefix(s, "ASK "):
ask = true
default:
return
}
ind := strings.LastIndex(s, " ")
if ind == -1 {
return false, false, ""
}
addr = s[ind+1:]
return
}
func isLoadingError(err error) bool {
return strings.HasPrefix(err.Error(), "LOADING ")
}
func isReadOnlyError(err error) bool {
return strings.HasPrefix(err.Error(), "READONLY ")
}
func isMovedSameConnAddr(err error, addr string) bool {
redisError := err.Error()
if !strings.HasPrefix(redisError, "MOVED ") {
return false
}
return strings.HasSuffix(redisError, " "+addr)
}
//------------------------------------------------------------------------------
type timeoutError interface {
Timeout() bool
}
util.go | package cmd
import (
"fmt"
"github.com/spf13/viper"
"github.com/jetstack/vault-unsealer/pkg/kv"
"github.com/jetstack/vault-unsealer/pkg/kv/aws_kms"
"github.com/jetstack/vault-unsealer/pkg/kv/aws_ssm"
"github.com/jetstack/vault-unsealer/pkg/kv/cloudkms"
"github.com/jetstack/vault-unsealer/pkg/kv/gcs"
"github.com/jetstack/vault-unsealer/pkg/vault"
)
func vaultConfigForConfig(cfg *viper.Viper) (vault.Config, error) {
return vault.Config{
KeyPrefix: "vault",
SecretShares: appConfig.GetInt(cfgSecretShares),
SecretThreshold: appConfig.GetInt(cfgSecretThreshold),
InitRootToken: appConfig.GetString(cfgInitRootToken),
StoreRootToken: appConfig.GetBool(cfgStoreRootToken),
OverwriteExisting: appConfig.GetBool(cfgOverwriteExisting),
}, nil
}
func kvStoreForConfig(cfg *viper.Viper) (kv.Service, error) {
if cfg.GetString(cfgMode) == cfgModeValueGoogleCloudKMSGCS {
g, err := gcs.New(
cfg.GetString(cfgGoogleCloudStorageBucket),
cfg.GetString(cfgGoogleCloudStoragePrefix),
)
if err != nil {
return nil, fmt.Errorf("error creating google cloud storage kv store: %s", err.Error())
}
kms, err := cloudkms.New(g,
cfg.GetString(cfgGoogleCloudKMSProject),
cfg.GetString(cfgGoogleCloudKMSLocation),
cfg.GetString(cfgGoogleCloudKMSKeyRing),
cfg.GetString(cfgGoogleCloudKMSCryptoKey),
)
if err != nil {
return nil, fmt.Errorf("error creating google cloud kms kv store: %s", err.Error())
}
return kms, nil
}
if cfg.GetString(cfgMode) == cfgModeValueAWSKMSSSM {
ssm, err := aws_ssm.New(cfg.GetString(cfgAWSSSMKeyPrefix))
if err != nil {
return nil, fmt.Errorf("error creating AWS SSM kv store: %s", err.Error())
}
kms, err := aws_kms.New(ssm, cfg.GetString(cfgAWSKMSKeyID))
if err != nil {
return nil, fmt.Errorf("error creating AWS KMS ID kv store: %s", err.Error())
}
return kms, nil
}
return nil, fmt.Errorf("Unsupported backend mode: '%s'", cfg.GetString(cfgMode))
}
rollup.config.js | // note: this will require an install of the rollup utility to process
// our plugin code
import babel from 'rollup-plugin-babel';
import commonjs from 'rollup-plugin-commonjs';
export default {
input: 'src/index.js',
plugins: [
babel({
externalHelpers: false,
exclude : 'node_modules/**'
}),
commonjs()
],
output: [
{ file: 'dist/vuex-i18n.es.js', format: 'es' },
{ file: 'dist/vuex-i18n.cjs.js', format: 'cjs' },
{ file: 'dist/vuex-i18n.umd.js', format: 'umd', name: 'vuexI18n' }
]
};
prod_test_driver.rs | use ic_fondue::prod_tests::cli::CliArgs;
use ic_fondue::prod_tests::driver_setup::create_driver_context_from_cli;
use ic_fondue::prod_tests::evaluation::{evaluate, TestResult};
use ic_fondue::prod_tests::pot_dsl::*;
use ic_tests::create_subnet::{self, create_subnet_test};
use ic_tests::nns_fault_tolerance_test;
use ic_tests::nns_follow_test::{self, test as follow_test};
use ic_tests::nns_voting_test::{self, test as voting_test};
use ic_tests::node_assign_test::{self, test as node_assign_test};
use ic_tests::node_restart_test::{self, test as node_restart_test};
use ic_tests::rosetta_test;
use ic_tests::security::nns_voting_fuzzing_poc_test;
use ic_tests::token_balance_test::{self, test as token_balance_test};
use ic_tests::upgrade_reject::{self, upgrade_reject};
use ic_tests::{
basic_health_test::{self, basic_health_test},
execution,
};
use ic_tests::{
cycles_minting_test, feature_flags, nns_canister_upgrade_test, registry_authentication_test,
ssh_access_to_nodes, subnet_creation, transaction_ledger_correctness_test, wasm_generator_test,
};
use regex::Regex;
use std::collections::HashMap;
use std::time::Duration;
use structopt::StructOpt;
fn main() -> anyhow::Result<()> {
let cli_args = CliArgs::from_args();
let validated_args = cli_args.validate()?;
let mut writer = None;
if let Some(ref p) = validated_args.result_file {
let f = std::fs::OpenOptions::new()
.create(true)
.write(true)
.open(p)?;
writer = Some(std::io::BufWriter::new(Box::new(f)));
}
let mut suite = match get_test_suites().remove(&validated_args.suite) {
Some(s) => s,
None => anyhow::bail!(format!("Test suite {} is undefined", &validated_args.suite)),
};
apply_filters(
&mut suite,
&validated_args.include_pattern,
&validated_args.ignore_pattern,
&validated_args.skip_pattern,
);
let context = create_driver_context_from_cli(validated_args, get_hostname());
let result = evaluate(&context, suite);
if let Some(mut w) = writer {
serde_json::to_writer_pretty(&mut w, &result)?;
}
if result.result == TestResult::Failed {
anyhow::bail!(format!("Test suite {} failed", result.name))
} else {
Ok(())
}
}
fn get_hostname() -> Option<String> {
std::env::var("HOSTNAME").ok()
}
fn apply_filters(
suite: &mut Suite,
include: &Option<Regex>,
ignore: &Option<Regex>,
skip: &Option<Regex>,
) {
for p in suite.pots.iter_mut() {
let tests = match &mut p.testset {
TestSet::Parallel(tests) => tests,
TestSet::Sequence(tests) => tests,
};
for t in tests.iter_mut() {
let path = TestPath::new()
.join(suite.name.clone())
.join(p.name.clone())
.join(t.name.clone());
t.execution_mode = resolve_execution_mode(&format!("{}", path), include, ignore, skip);
}
// At least one test is qualified for running. A corresponding pot needs to be
// set up.
if tests.iter().any(|t| t.execution_mode == ExecutionMode::Run) {
continue;
}
// At least one test is skipped. The pot needs to be included in a summary.
if tests
.iter()
.any(|t| t.execution_mode == ExecutionMode::Skip)
{
p.execution_mode = ExecutionMode::Skip;
continue;
}
p.execution_mode = ExecutionMode::Ignore;
}
}
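/// Precedence as implemented below: a present `include` pattern decides Run vs
/// Ignore on its own; `ignore` and then `skip` are consulted only when no
/// `include` pattern was given.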
fn resolve_execution_mode(
name: &str,
include: &Option<Regex>,
ignore: &Option<Regex>,
skip: &Option<Regex>,
) -> ExecutionMode {
if let Some(i) = include {
if i.is_match(name) {
return ExecutionMode::Run;
}
return ExecutionMode::Ignore;
}
if let Some(i) = ignore {
if i.is_match(name) {
return ExecutionMode::Ignore;
}
}
if let Some(s) = skip {
if s.is_match(name) {
return ExecutionMode::Skip;
}
}
ExecutionMode::Run
}
fn get_test_suites() -> HashMap<String, Suite> {
let mut m = HashMap::new();
m.insert(
"pre_master".to_string(),
suite(
"pre_master",
vec![
pot(
"create_subnet",
create_subnet::config(),
par(vec![
t("create_subnet", create_subnet_test),
]),
),
execution::upgraded_pots::general_execution_pot(),
execution::upgraded_pots::cycles_restrictions_pot(),
execution::upgraded_pots::inter_canister_queries(),
execution::upgraded_pots::compute_allocation_pot(),
pot(
"node_assign_pot",
node_assign_test::config(),
par(vec![t("node_assign_test", node_assign_test)]),
),
pot(
"nns_follow_pot",
nns_follow_test::config(),
par(vec![t("follow_test", follow_test)]),
),
pot(
"nns_voting_pot",
nns_voting_test::config(),
par(vec![t("voting_test", voting_test)]),
),
pot(
"nns_token_balance_pot",
token_balance_test::config(),
par(vec![t("token_balance_test", token_balance_test)]),
),
pot(
"node_restart_pot",
node_restart_test::config(),
par(vec![t("node_restart_test", node_restart_test)]),
),
pot(
"cycles_minting_pot",
cycles_minting_test::config(),
par(vec![t("cycles_minting_test", cycles_minting_test::test)]),
),
pot(
"nns_subnet_creation_pot",
subnet_creation::config(),
par(vec![t(
"create_subnet_with_assigned_nodes_fails",
subnet_creation::create_subnet_with_assigned_nodes_fails,
)]),
),
pot(
"nns_voting_fuzzing_poc_pot",
nns_voting_fuzzing_poc_test::config(),
par(vec![t(
"nns_voting_fuzzing_poc_test",
nns_voting_fuzzing_poc_test::test,
)]),
),
pot(
"nns_canister_upgrade_pot",
nns_canister_upgrade_test::config(),
par(vec![t(
"nns_canister_upgrade_test",
nns_canister_upgrade_test::test,
)]),
),
pot(
"certified_registry_pot",
registry_authentication_test::config(),
par(vec![t(
"registry_authentication_test",
registry_authentication_test::test,
)]),
),
pot(
"transaction_ledger_correctness_pot",
transaction_ledger_correctness_test::config(),
par(vec![t(
"transaction_ledger_correctness_test",
transaction_ledger_correctness_test::test,
)]),
),
pot(
"ssh_access_to_nodes_pot",
ssh_access_to_nodes::config(),
seq(vec![
t(
"root_cannot_authenticate",
ssh_access_to_nodes::root_cannot_authenticate,
),
t(
"readonly_cannot_authenticate_without_a_key",
ssh_access_to_nodes::readonly_cannot_authenticate_without_a_key,
),
t(
"readonly_cannot_authenticate_with_random_key",
ssh_access_to_nodes::readonly_cannot_authenticate_with_random_key,
),
t(
"keys_in_the_subnet_record_can_be_updated",
ssh_access_to_nodes::keys_in_the_subnet_record_can_be_updated,
),
t(
"keys_for_unassigned_nodes_can_be_updated",
ssh_access_to_nodes::keys_for_unassigned_nodes_can_be_updated,
),
t(
"multiple_keys_can_access_one_account",
ssh_access_to_nodes::multiple_keys_can_access_one_account,
),
t(
"multiple_keys_can_access_one_account_on_unassigned_nodes",
ssh_access_to_nodes::multiple_keys_can_access_one_account_on_unassigned_nodes,
),
t(
"updating_readonly_does_not_remove_backup_keys",
ssh_access_to_nodes::updating_readonly_does_not_remove_backup_keys,
),
t(
"can_add_50_readonly_and_backup_keys",
ssh_access_to_nodes::can_add_100_readonly_and_backup_keys,
),
t(
"cannot_add_51_readonly_or_backup_keys",
ssh_access_to_nodes::cannot_add_101_readonly_or_backup_keys,
),
]),
),
],
),
);
m.insert(
"hourly".to_string(),
suite(
"hourly",
vec![
pot(
"basic_health_pot",
basic_health_test::config(),
par(vec![
t("basic_health_test", basic_health_test),
t("basic_health_test2", basic_health_test),
]),
),
pot(
"nns_fault_tolerance_pot",
nns_fault_tolerance_test::config(),
par(vec![t(
"nns_fault_tolerance_test",
nns_fault_tolerance_test::test,
)]),
),
pot(
"create_subnet",
create_subnet::config(),
par(vec![t("create_subnet", create_subnet_test)]),
),
pot(
"upgrade_reject",
upgrade_reject::config(),
par(vec![t("upgrade_reject", upgrade_reject)]),
),
pot(
"basic_pot_with_all_features_enabled",
feature_flags::basic_config_with_all_features_enabled(),
par(vec![t(
"mock_ecdsa_signatures_are_supported",
feature_flags::mock_ecdsa_signatures_are_supported,
)]),
),
],
),
);
// The tests in this suite require canisters to be build prior to
// running the tests which is why we separate it out.
m.insert(
"wasm_generator".to_string(),
suite(
"wasm_generator",
vec![pot_with_time_limit(
"wasm_generator_pot",
wasm_generator_test::config(),
par(vec![t("wasm_generator_test", wasm_generator_test::test)]),
Duration::from_secs(7200),
)],
),
);
m.insert(
"rosetta".to_string(),
suite(
"rosetta",
vec![pot(
"rosetta_pot",
rosetta_test::config(),
par(vec![t(
"rosetta_test_everything",
rosetta_test::test_everything,
)]),
)],
),
);
m
}
clip_test.rs | use assert_cmd::Command;
use predicates::prelude::*;
use std::fs;
#[test]
fn no_args() -> Result<(), Box<dyn std::error::Error>> {
Command::cargo_bin("rust-echo")?
.assert()
.failure()
.stderr(predicate::str::contains("USAGE:"));
Ok(())
}
/// Compare contents of test file with `stdout`
fn run(args: &[&str], file_name: &str) -> Result<(), Box<dyn std::error::Error>> {
let expected = fs::read_to_string(file_name)?;
Command::cargo_bin("rust-echo")?
.args(args)
.assert()
.success()
.stdout(expected);
Ok(())
}
#[test]
fn test1() -> Result<(), Box<dyn std::error::Error>> {
run(&["hello world"], "tests/expected/test1.txt")
}
#[test]
fn test2() -> Result<(), Box<dyn std::error::Error>> {
run(&["hello", "world"], "tests/expected/test2.txt")
}
#[test]
fn test3() -> Result<(), Box<dyn std::error::Error>> {
run(&["-n", "hello", "world"], "tests/expected/test3.txt")
}
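// Note: each numbered case above goes through the shared `run` helper,
// comparing the binary's stdout against a checked-in fixture under
// tests/expected/.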
client.rs | //! Client for fetching container modules from OCI
use async_trait::async_trait;
use oci_distribution::client::ImageData;
use oci_distribution::manifest;
use oci_distribution::secrets::RegistryAuth;
use oci_distribution::Reference;
/// An image client capable of fetching images from a storage location
#[async_trait]
pub trait Client {
/// Fetch the image data and, if available, image digest for the given image
/// reference from a storage location.
///
/// # Example
/// ```rust
/// use async_trait::async_trait;
/// use kubelet::store::oci::Client;
/// use oci_distribution::Reference;
/// use oci_distribution::client::ImageData;
/// use oci_distribution::secrets::RegistryAuth;
///
/// struct InMemoryClient(std::collections::HashMap<Reference, ImageData>);
///
/// #[async_trait]
/// impl Client for InMemoryClient {
/// async fn pull(&mut self, image_ref: &Reference, _auth: &RegistryAuth) -> anyhow::Result<ImageData> {
/// let image_data = self
/// .0
/// .get(image_ref)
/// .ok_or(anyhow::anyhow!("Couldn't find image"))?;
/// Ok(image_data.clone())
/// }
/// }
/// ```
async fn pull(
&mut self,
image_ref: &Reference,
auth: &RegistryAuth,
) -> anyhow::Result<ImageData>;
/// Fetch the digest for the given image reference from a storage location.
///
/// The default implementation pulls the image data and digest, and returns
/// the digest. This is inefficient for most real-world clients, and so should
/// be overridden.
async fn fetch_digest(
&mut self,
image_ref: &Reference,
auth: &RegistryAuth,
) -> anyhow::Result<String> {
let image_data = self.pull(image_ref, auth).await?;
image_data
.digest
.ok_or_else(|| anyhow::anyhow!("image {} does not have a digest", image_ref))
}
}
#[async_trait]
impl Client for oci_distribution::Client {
async fn pull(&mut self, image: &Reference, auth: &RegistryAuth) -> anyhow::Result<ImageData> {
self.pull(image, auth, vec![manifest::WASM_LAYER_MEDIA_TYPE])
.await
}
async fn fetch_digest(
&mut self,
image: &Reference,
auth: &RegistryAuth,
) -> anyhow::Result<String> {
self.fetch_manifest_digest(image, auth).await
}
}
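// Usage sketch (illustrative only; assumes an async context, and the image
// reference string below is a hypothetical example):
//   let mut client = oci_distribution::Client::default();
//   let image: Reference = "ghcr.io/example/module:v1".parse()?;
//   let digest = client.fetch_digest(&image, &RegistryAuth::Anonymous).await?;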
0002_category_category_image.py | # Generated by Django 2.1.5 on 2019-02-01 08:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('categories', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='category',
name='category_image',
field=models.ImageField(blank=True, null=True, upload_to='categories'),
),
]
node_controller.py | from abc import ABC, abstractmethod
from typing import Any, Callable, List, Optional, SupportsAbs, Tuple, TypeVar
import libvirt
from assisted_test_infra.test_infra import BaseEntityConfig
from assisted_test_infra.test_infra.controllers.node_controllers.disk import Disk
from assisted_test_infra.test_infra.controllers.node_controllers.node import Node
from assisted_test_infra.test_infra.helper_classes.config.controller_config import BaseNodeConfig
from service_client import log
class NodeController(ABC):
T = TypeVar("T", bound=SupportsAbs[BaseNodeConfig])
def __init__(self, config: T, entity_config: BaseEntityConfig):
self._config = config
self._entity_config = entity_config
def log_configuration(self):
log.info(f"controller configuration={self._config}")
@property
def workers_count(self):
return self._config.workers_count
@property
def masters_count(self):
return self._config.masters_count
@property
def is_ipv4(self):
return self._config.is_ipv4
@property
def is_ipv6(self):
return self._config.is_ipv6
@abstractmethod
def list_nodes(self) -> List[Node]:
pass
@abstractmethod
def list_disks(self, node_name: str) -> List[Disk]:
pass
@abstractmethod
def list_networks(self) -> List[Any]:
pass
@abstractmethod
def list_leases(self, network_name: str) -> List[Any]:
pass
@abstractmethod
def shutdown_node(self, node_name: str) -> None:
pass
@abstractmethod
def shutdown_all_nodes(self) -> None:
pass
@abstractmethod
def start_node(self, node_name: str, check_ips: bool) -> None:
pass
@abstractmethod
def start_all_nodes(self) -> List[Node]:
pass
@abstractmethod
def restart_node(self, node_name: str) -> None:
pass
@abstractmethod
def format_node_disk(self, node_name: str, disk_index: int = 0) -> None:
pass
@abstractmethod
def format_all_node_disks(self) -> None:
pass
@abstractmethod
def attach_test_disk(self, node_name: str, disk_size: int, bootable=False, persistent=False, with_wwn=False):
"""
Attaches a test disk. That disk can later be detached with `detach_all_test_disks`
:param with_wwn: Whether the disk should have a WWN (World Wide Name); having a WWN creates a disk by-id link
:param node_name: Node to attach disk to
:param disk_size: Size of disk to attach
:param bootable: Whether to format an MBR sector at the beginning of the disk
:param persistent: Whether the disk should survive shutdowns
"""
pass
@abstractmethod
def detach_all_test_disks(self, node_name: str):
"""
Detaches all test disks created by `attach_test_disk`
:param node_name: Node to detach disk from
"""
pass
@abstractmethod
def get_ingress_and_api_vips(self) -> dict:
pass
@abstractmethod
def destroy_all_nodes(self) -> None:
pass
@abstractmethod
def get_cluster_network(self) -> str:
pass
@abstractmethod
def setup_time(self) -> str:
pass
@abstractmethod
def prepare_nodes(self):
pass
@abstractmethod
def is_active(self, node_name) -> bool:
pass
@abstractmethod
def set_boot_order(self, node_name, cd_first=False) -> None:
pass
@abstractmethod
def set_per_device_boot_order(self, node_name, key: Callable[[Disk], int]) -> None:
"""
Set the boot priority for every disk
It sorts the disk according to the key function result
:param node_name: The node to change its boot order
:param key: a key function that gets a Disk object and decide it's priority
"""
pass
@abstractmethod
def get_node_ips_and_macs(self, node_name) -> Tuple[List[str], List[str]]:
pass
@abstractmethod
def set_single_node_ip(self, ip) -> None:
pass
@abstractmethod
def get_host_id(self, node_name: str) -> str:
pass
@abstractmethod
def get_cpu_cores(self, node_name: str) -> int:
pass
@abstractmethod
def set_cpu_cores(self, node_name: str, core_count: int) -> None:
pass
@abstractmethod
def get_ram_kib(self, node_name: str) -> int:
pass
@abstractmethod
def set_ram_kib(self, node_name: str, ram_kib: int) -> None:
pass
def get_primary_machine_cidr(self) -> Optional[str]:
# Default to auto resolve by the cluster. see cluster.get_primary_machine_cidr
return None
def get_provisioning_cidr(self) -> Optional[str]:
return None
@abstractmethod
def attach_interface(self, node_name, network_xml: str) -> Tuple[libvirt.virNetwork, str]:
pass
@abstractmethod
def add_interface(self, node_name, network_name, target_interface: str) -> str:
pass
@abstractmethod
def undefine_interface(self, node_name: str, mac: str):
pass
@abstractmethod
def create_network(self, network_xml: str) -> libvirt.virNetwork:
pass
@abstractmethod
def get_network_by_name(self, network_name: str) -> libvirt.virNetwork:
pass
@abstractmethod
def destroy_network(self, network: libvirt.virNetwork):
pass
def notify_iso_ready(self) -> None:
pass
def set_dns(self, api_vip: str, ingress_vip: str) -> None:
pass
def set_dns_for_user_managed_network(self) -> None:
pass
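# Usage sketch (hypothetical concrete subclass; names are illustrative only):
#   controller: NodeController = SomeLibvirtController(config, entity_config)
#   controller.prepare_nodes()
#   controller.attach_test_disk("master-0", disk_size=10 * 2**30, bootable=False, with_wwn=True)
#   controller.detach_all_test_disks("master-0")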
MatchSocks.py | # Match socks to pant colour.
import numpy as np
from PIL import Image
import urllib.request
import os
directory = 'layers/layers_for_art_engine/Pant'
for filename in os.listdir(directory):
image = os.path.join(directory, filename)
pant = Image.open(image)
socks = Image.open('layers/socks.png') #change the file path with your own of course!
width, height = socks.size
pant_color = pant.getpixel((200, 350))
for x in range(width):
for y in range(height):
current_color = socks.getpixel((x, y))
r = pant_color[0]
g = pant_color[1]
b = pant_color[2]
a = current_color[-1]
if current_color != (255, 255, 255, 0):
socks.putpixel((x, y), (r, g, b, a))
pant.paste(socks, (0, 0), socks) #combine the new coloured socks with the pant layer.
pant.save(image)
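# Note: the pixel logic above assumes RGBA inputs; getpixel() on an RGB image
# returns 3-tuples, so current_color[-1] would read the blue channel rather
# than alpha.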
index.js | export { default } from './CheckoutContainer'; | ||
addMergeStepToFlow.spec.tsx | import "cypress-wait-until";
import {Application} from "../../../support/application.config";
import {
toolbar,
createEditStepDialog,
tiles
} from "../../../support/components/common/index";
import curatePage from "../../../support/pages/curate";
import loadPage from "../../../support/pages/load";
import runPage from "../../../support/pages/run";
import LoginPage from "../../../support/pages/login";
const mergeStep = "mergeCustomerTest";
const flowName1 = "mergeE2ETest";
const flowName2 = "mergeE2ETestRun";
describe("Add Merge step to a flow", () => {
before(() => {
cy.visit("/");
cy.contains(Application.title);
cy.loginAsDeveloper().withRequest();
LoginPage.postLogin();
cy.waitForAsyncRequest();
});
beforeEach(() => {
cy.loginAsDeveloper().withRequest();
cy.waitForAsyncRequest();
cy.intercept("/api/jobs/**").as("getJobs");
});
afterEach(() => {
cy.resetTestUser();
cy.waitForAsyncRequest();
});
after(() => {
cy.loginAsDeveloper().withRequest();
cy.deleteSteps("merging", "mergeCustomerTest");
cy.deleteFlows("mergeE2ETest", "mergeE2ETestRun");
cy.resetTestUser();
cy.waitForAsyncRequest();
});
it("Navigating to Customer Merge tab", () => {
cy.waitUntil(() => toolbar.getCurateToolbarIcon()).click();
cy.waitUntil(() => curatePage.getEntityTypePanel("Customer").should("be.visible"));
curatePage.toggleEntityTypeId("Customer");
curatePage.selectMergeTab("Customer");
});
it("Create a new merge step", () => {
curatePage.addNewStep().should("be.visible").click();
createEditStepDialog.stepNameInput().type(mergeStep, {timeout: 2000});
createEditStepDialog.stepDescriptionInput().type("merge order step example", {timeout: 2000});
createEditStepDialog.setSourceRadio("Query");
createEditStepDialog.setQueryInput(`cts.collectionQuery(['${mergeStep}'])`);
createEditStepDialog.setTimestampInput().type("/envelop/headers/createdOn", {timeout: 2000});
createEditStepDialog.saveButton("merging").click();
cy.waitForAsyncRequest();
createEditStepDialog.cancelButton("merging").click();
curatePage.verifyStepNameIsVisible(mergeStep);
});
it("Create merge step with duplicate name and verify duplicate name modal is displayed", () => {
cy.waitUntil(() => curatePage.addNewStep()).click();
createEditStepDialog.stepNameInput().type(mergeStep);
createEditStepDialog.stepDescriptionInput().type("merge order step example");
createEditStepDialog.setSourceRadio("Query");
createEditStepDialog.setQueryInput("test");
createEditStepDialog.saveButton("merging").click();
cy.waitForAsyncRequest();
loadPage.duplicateStepErrorMessage();
loadPage.confirmationOptions("OK").click();
loadPage.duplicateStepErrorMessageClosed();
});
it("Add the Merge step to new flow and Run the step(new)", {defaultCommandTimeout: 120000}, () => {
curatePage.addToNewFlow("Customer", mergeStep);
cy.waitForAsyncRequest();
cy.findByText("New Flow").should("be.visible");
runPage.setFlowName(flowName1);
runPage.setFlowDescription(`${flowName1} description`);
cy.wait(500);
loadPage.confirmationOptions("Save").click();
cy.wait(500);
cy.waitForAsyncRequest();
cy.verifyStepAddedToFlow("Merge", mergeStep, flowName1);
cy.waitForAsyncRequest();
runPage.runStep(mergeStep);
cy.verifyStepRunResult("success", "Merging", mergeStep);
tiles.closeRunMessage();
});
it("Delete the step and Navigate back to merge tab", () => {
runPage.deleteStep(mergeStep).click();
loadPage.confirmationOptions("Yes").click();
cy.waitForAsyncRequest();
cy.waitUntil(() => toolbar.getCurateToolbarIcon()).click();
cy.waitUntil(() => curatePage.getEntityTypePanel("Customer").should("be.visible"));
curatePage.toggleEntityTypeId("Customer");
curatePage.selectMergeTab("Customer");
});
it("Add the Merge step to an existing flow and Run the step(existing)", {defaultCommandTimeout: 120000}, () => {
curatePage.openExistingFlowDropdown("Customer", mergeStep);
curatePage.getExistingFlowFromDropdown(flowName1).click();
curatePage.addStepToFlowConfirmationMessage();
curatePage.confirmAddStepToFlow(mergeStep, flowName1);
cy.waitForAsyncRequest();
cy.verifyStepAddedToFlow("Merge", mergeStep, flowName1);
cy.waitForAsyncRequest();
runPage.runStep(mergeStep);
cy.verifyStepRunResult("success", "Merging", mergeStep);
tiles.closeRunMessage();
});
it("Delete the merge step", () => {
runPage.deleteStep(mergeStep).click();
loadPage.confirmationOptions("Yes").click();
cy.waitForAsyncRequest();
runPage.expandFlow(flowName1);
});
it("Add the Merge step to new flow from card run button", {defaultCommandTimeout: 120000}, () => {
runPage.createFlowButton().click();
runPage.newFlowModal().should("be.visible");
runPage.setFlowName(flowName2);
runPage.setFlowDescription(`${flowName2} description`);
cy.wait(500);
loadPage.confirmationOptions("Save").click();
cy.wait(500);
cy.waitForAsyncRequest();
cy.waitUntil(() => runPage.getFlowName(flowName2).should("be.visible"));
runPage.addStep(flowName2);
runPage.addStepToFlow(mergeStep);
cy.verifyStepAddedToFlow("Merge", mergeStep, flowName2);
runPage.runStep(mergeStep);
cy.verifyStepRunResult("success", "Merging", mergeStep);
tiles.closeRunMessage();
});
it("Delete the merge step and Navigating to merge tab", () => {
runPage.deleteStep(mergeStep).click();
loadPage.confirmationOptionsAll("Yes").eq(0).click();
cy.waitForAsyncRequest();
cy.waitUntil(() => toolbar.getCurateToolbarIcon()).click();
cy.waitUntil(() => curatePage.getEntityTypePanel("Customer").should("be.visible"));
curatePage.toggleEntityTypeId("Customer");
curatePage.selectMergeTab("Customer");
});
it("Add the Merge step to an existing flow from card run button and should automatically run", {defaultCommandTimeout: 120000}, () => {
curatePage.runStepInCardView(mergeStep).click();
curatePage.runStepSelectFlowConfirmation().should("be.visible");
curatePage.selectFlowToRunIn(flowName2);
cy.waitForAsyncRequest();
cy.waitUntil(() => runPage.getFlowName(flowName2).should("be.visible"));
cy.verifyStepRunResult("success", "Merging", mergeStep);
tiles.closeRunMessage();
cy.verifyStepAddedToFlow("Merge", mergeStep, flowName2);
});
it("Navigating to merge tab", () => {
cy.waitUntil(() => toolbar.getCurateToolbarIcon()).click();
cy.waitUntil(() => curatePage.getEntityTypePanel("Customer").should("be.visible"));
curatePage.toggleEntityTypeId("Customer");
curatePage.selectMergeTab("Customer");
});
it("Run the Merge step from card run button and should automatically run in the flow where step exists", {defaultCommandTimeout: 120000}, () => {
curatePage.runStepInCardView(mergeStep).click();
curatePage.runStepExistsOneFlowConfirmation().should("be.visible");
curatePage.confirmContinueRun();
cy.waitForAsyncRequest();
cy.waitUntil(() => runPage.getFlowName(flowName2).should("be.visible"));
cy.verifyStepRunResult("success", "Merging", mergeStep);
tiles.closeRunMessage();
cy.verifyStepAddedToFlow("Merge", mergeStep, flowName2);
});
it("Navigating to merge tab", () => {
cy.waitUntil(() => toolbar.getCurateToolbarIcon()).click();
cy.waitUntil(() => curatePage.getEntityTypePanel("Customer").should("be.visible"));
curatePage.toggleEntityTypeId("Customer");
curatePage.selectMergeTab("Customer");
});
it("Add the merge step to a second flow and verify it was added", () => {
curatePage.openExistingFlowDropdown("Customer", mergeStep);
curatePage.getExistingFlowFromDropdown(flowName1).click();
curatePage.addStepToFlowConfirmationMessage();
curatePage.confirmAddStepToFlow(mergeStep, flowName1);
cy.waitForAsyncRequest();
cy.verifyStepAddedToFlow("Merge", mergeStep, flowName1);
});
it("Navigating to merge tab", () => {
cy.waitUntil(() => toolbar.getCurateToolbarIcon()).click();
cy.waitUntil(() => curatePage.getEntityTypePanel("Customer").should("be.visible"));
curatePage.toggleEntityTypeId("Customer");
curatePage.selectMergeTab("Customer");
});
it("Run the Merge step from card run button and should display all flows where step exists, choose one to automatically run in", {defaultCommandTimeout: 120000}, () => {
curatePage.runStepInCardView(mergeStep).click();
curatePage.runStepExistsMultFlowsConfirmation().should("be.visible");
curatePage.selectFlowToRunIn(flowName1);
cy.waitForAsyncRequest();
cy.waitUntil(() => runPage.getFlowName(flowName1).should("be.visible"));
cy.verifyStepRunResult("success", "Merging", mergeStep);
tiles.closeRunMessage();
cy.verifyStepAddedToFlow("Merge", mergeStep, flowName1);
});
});
projection.js | // All material copyright ESRI, All Rights Reserved, unless otherwise specified.
// See https://js.arcgis.com/4.11/esri/copyright.txt for details.
//>>built
define("require exports ../core/promiseUtils ./pe ./SpatialReference ./support/GeographicTransformation".split(" "),function(q,b,r,e,n,h){function k(c,f,a,d,b){void 0===d&&(d=null);void 0===b&&(b=!1);if(null===d){var e=h.cacheKey(f,a);void 0!==l[e]?d=l[e]:(d=p(f,a,null),null===d&&(d=new h),l[e]=d)}return g._project(c,f,a instanceof n||!1===b?a:new n(a),d,b)}function p(c,f,a){void 0===a&&(a=null);c=g._getTransformation(c,f,a);return null!==c?h.fromGE(c):null}Object.defineProperty(b,"__esModule",{value:!0});
var g=null;b.isLoaded=function(){return!!g&&e.isLoaded()};b.isSupported=function(){return e.isSupported()};var m=null;b.load=function(){return m?m:m=r.create(function(c,f){var a=e.load();q(["./geometryEngine"],function(b){g=b;a.then(function(){g._enableProjection(e);c()},function(a){f(a)})})})};b.project=function(c,b,a){void 0===a&&(a=null);return c instanceof Array?0===c.length?[]:k(c,c[0].spatialReference,b,a):k([c],c.spatialReference,b,a)[0]};var l={};b.projectMany=k;b.getTransformation=p;b.getTransformations=
function(c,b,a){void 0===a&&(a=null);c=g._getTransformationBySuitability(c,b,a);if(null!==c){b=[];for(a=0;a<c.length;a++)b.push(h.fromGE(c[a]));return b}return[]}});
configmap.go | /*
Copyright 2020 VMware, Inc.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"encoding/json"
"strconv"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
akov1alpha1 "github.com/vmware/load-balancer-and-ingress-services-for-kubernetes/ako-operator/api/v1alpha1"
"github.com/vmware/load-balancer-and-ingress-services-for-kubernetes/pkg/utils"
)
func SetIfRebootRequired(oldCm corev1.ConfigMap, newCm corev1.ConfigMap) {
skipList := []string{DeleteConfig, LogLevel}
oldCksum := getChecksum(oldCm, skipList)
newCksum := getChecksum(newCm, skipList)
if oldCksum != newCksum {
// reboot is required
rebootRequired = true
}
}
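// Note: getChecksum (defined elsewhere in this package) hashes the ConfigMap
// data with the skipList keys excluded, so a change to DeleteConfig or
// LogLevel alone does not force an AKO reboot.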
func createOrUpdateConfigMap(ctx context.Context, ako akov1alpha1.AKOConfig, log logr.Logger, r *AKOConfigReconciler) error {
log.V(1).Info("building a new configmap for AKO")
var oldCM corev1.ConfigMap
if err := r.Get(ctx, getConfigMapName(), &oldCM); err != nil {
log.V(0).Info("error getting a configmap with name", "name", ConfigMapName, "err", err)
} else {
log.V(1).Info("old configmap", "old cm", oldCM)
if oldCM.GetName() != "" {
log.V(0).Info("a configmap with the same name already exists, it will be updated", "name",
oldCM.GetName())
}
}
cm, err := BuildConfigMap(ako)
if err != nil {
log.Error(err, "error in building configmap")
}
err = ctrl.SetControllerReference(&ako, &cm, r.Scheme)
if err != nil {
log.Error(err, "error in setting controller reference, configmap changes would be ignored")
}
if oldCM.GetName() != "" {
SetIfRebootRequired(oldCM, cm)
// "avi-k8s-config" configmap already exists, we just need to update that
// updating shouldn't change the existing finalizers
existingFinalizers := oldCM.GetFinalizers()
for _, f := range existingFinalizers {
if !utils.HasElem(cm.GetFinalizers(), f) {
cm.Finalizers = append(cm.Finalizers, f)
}
}
err := r.Update(ctx, &cm)
if err != nil {
log.Error(err, "unable to update configmap", "namespace", cm.GetNamespace(), "name",
cm.GetName())
return err
}
} else {
err := r.Create(ctx, &cm)
if err != nil {
log.Error(err, "unable to create configmap", "namespace", cm.GetNamespace(), "name",
cm.GetName())
return err
}
}
var newCM corev1.ConfigMap
err = r.Get(ctx, getConfigMapName(), &newCM)
if err != nil {
log.V(0).Info("error getting a configmap with name", "name", ConfigMapName, "err", err)
return err
}
// update this object in the global list
objList := getObjectList()
objList[types.NamespacedName{
Name: cm.GetName(),
Namespace: cm.GetNamespace(),
}] = &newCM
return nil
}
func BuildConfigMap(ako akov1alpha1.AKOConfig) (corev1.ConfigMap, error) {
cm := corev1.ConfigMap{ObjectMeta: v1.ObjectMeta{
Name: ConfigMapName,
Namespace: AviSystemNS,
}}
cm.Data = make(map[string]string)
cm.Data[ControllerIP] = ako.Spec.ControllerSettings.ControllerIP
cm.Data[ControllerVersion] = ako.Spec.ControllerSettings.ControllerVersion
cm.Data[CniPlugin] = ako.Spec.AKOSettings.CNIPlugin
enableEVH := "false"
if ako.Spec.AKOSettings.EnableEVH {
enableEVH = "true"
}
cm.Data[EnableEVH] = enableEVH
layer7Only := "false"
if ako.Spec.AKOSettings.Layer7Only {
layer7Only = "true"
}
cm.Data[Layer7Only] = layer7Only
servicesAPI := "false"
if ako.Spec.AKOSettings.ServicesAPI {
servicesAPI = "true"
}
cm.Data[ServicesAPI] = servicesAPI
vipPerNamespace := "false"
if ako.Spec.AKOSettings.VipPerNamespace {
vipPerNamespace = "true"
}
cm.Data[VipPerNamespace] = vipPerNamespace
cm.Data[ShardVSSize] = string(ako.Spec.L7Settings.ShardVSSize)
cm.Data[PassthroughShardSize] = string(ako.Spec.L7Settings.PassthroughShardSize)
fullSyncFreq := ako.Spec.AKOSettings.FullSyncFrequency
cm.Data[FullSyncFrequency] = fullSyncFreq
cm.Data[CloudName] = ako.Spec.ControllerSettings.CloudName
cm.Data[ClusterName] = ako.Spec.AKOSettings.ClusterName
cm.Data[DefaultDomain] = ako.Spec.L4Settings.DefaultDomain
disableStaticRouteSync := "false"
if ako.Spec.AKOSettings.DisableStaticRouteSync {
disableStaticRouteSync = "true"
}
cm.Data[DisableStaticRouteSync] = disableStaticRouteSync
defaultIngController := "false"
if ako.Spec.L7Settings.DefaultIngController {
defaultIngController = "true"
}
cm.Data[DefaultIngController] = defaultIngController
cm.Data[LogLevel] = string(ako.Spec.LogLevel)
deleteConfig := "false"
if ako.Spec.AKOSettings.DeleteConfig {
deleteConfig = "true"
}
cm.Data[DeleteConfig] = deleteConfig
enableRHI := "false"
if ako.Spec.NetworkSettings.EnableRHI {
enableRHI = "true"
}
cm.Data[EnableRHI] = enableRHI
cm.Data[NsxtT1LR] = ako.Spec.NetworkSettings.NsxtT1LR
var err error
type VipNetworkListRow struct {
Cidr string `json:"cidr"`
NetworkName string `json:"networkName"`
}
vipListRows := []VipNetworkListRow{}
vipListBytes := []byte{}
for _, row := range ako.Spec.NetworkSettings.VipNetworkList {
vipListRows = append(vipListRows, VipNetworkListRow{
Cidr: row.Cidr,
NetworkName: row.NetworkName,
})
}
if len(vipListRows) != 0 {
vipListBytes, err = json.Marshal(vipListRows)
if err != nil {
return cm, err
}
}
cm.Data[VipNetworkList] = string(vipListBytes)
bgpPeerLabelsBytes, err := json.Marshal(ako.Spec.NetworkSettings.BGPPeerLabels)
if err != nil {
return cm, err
}
cm.Data[BgpPeerLabels] = string(bgpPeerLabelsBytes)
cm.Data[ServiceType] = string(ako.Spec.L7Settings.ServiceType)
cm.Data[NodeKey] = ako.Spec.NodePortSelector.Key
cm.Data[NodeValue] = ako.Spec.NodePortSelector.Value
cm.Data[ServiceEngineGroupName] = ako.Spec.ControllerSettings.ServiceEngineGroupName
apiServerPort := ako.Spec.AKOSettings.APIServerPort
if apiServerPort > 0 {
cm.Data[APIServerPort] = strconv.Itoa(apiServerPort)
} else {
cm.Data[APIServerPort] = "8080"
}
type NodeNetworkListRow struct {
Cidrs []string `json:"cidrs"`
NetworkName string `json:"networkName"`
}
nwListRows := []NodeNetworkListRow{}
nwListBytes := []byte{}
for _, row := range ako.Spec.NetworkSettings.NodeNetworkList {
nwListRows = append(nwListRows, NodeNetworkListRow{
Cidrs: row.Cidrs,
NetworkName: row.NetworkName,
})
}
if len(nwListRows) != 0 {
nwListBytes, err = json.Marshal(nwListRows)
if err != nil {
return cm, err
}
}
cm.Data[NodeNetworkList] = string(nwListBytes)
noPGForSni := "false"
if ako.Spec.L7Settings.NoPGForSNI {
noPGForSni = "true"
}
cm.Data[NoPGForSni] = noPGForSni
cm.Data[NSSyncLabelKey] = ako.Spec.AKOSettings.NSSelector.LabelKey
cm.Data[NSSyncLabelValue] = ako.Spec.AKOSettings.NSSelector.LabelValue
cm.Data[TenantName] = ako.Spec.ControllerSettings.TenantName
cm.Data[AutoFQDN] = ako.Spec.L4Settings.AutoFQDN
return cm, nil
}
func checkDeprecatedFields(ako akov1alpha1.AKOConfig, log logr.Logger) {
if ako.Spec.L4Settings.AdvancedL4 {
log.V(0).Info("", "WARN: ", "akoconfig.Spec.L4Settings.AdvancedL4 will be deprecated")
}
if ako.Spec.L7Settings.SyncNamespace != "" {
log.V(0).Info("", "WARN: ", "akoconfig.Spec.L7Settings.SyncNamespace will be deprecated")
}
}
handler_test.go | package function
import (
"encoding/json"
"os"
"testing"
)
func TestGetEvent_ReadLabels(t *testing.T) {
want := map[string]string{
"com.openfaas.scale": "true",
}
val, _ := json.Marshal(want)
os.Setenv("Http_Labels", string(val))
eventInfo, err := getEventFromEnv()
if err != nil {
t.Errorf(err.Error())
t.Fail()
}
for k, v := range want {
if _, ok := eventInfo.Labels[k]; !ok {
t.Errorf("want %s to be present in event.Labels", k)
continue
}
if vv, _ := eventInfo.Labels[k]; vv != v {
t.Errorf("value of %s, want: %s, got %s", k, v, vv)
}
}
}
func TestGetEvent_ReadAnnotations(t *testing.T) {
want := map[string]string{
"topic": "function.deployed",
}
val, _ := json.Marshal(want)
os.Setenv("Http_Annotations", string(val))
eventInfo, err := getEventFromEnv()
if err != nil {
t.Errorf(err.Error())
t.Fail()
}
for k, v := range want {
if _, ok := eventInfo.Annotations[k]; !ok {
t.Errorf("want %s to be present in event.Labels", k)
continue
}
if vv, _ := eventInfo.Annotations[k]; vv != v {
t.Errorf("value of %s, want: %s, got %s", k, v, vv)
}
}
}
func TestGetEvent_ReadSecrets(t *testing.T) {
valSt := []string{"s1", "s2"}
val, _ := json.Marshal(valSt)
os.Setenv("Http_Secrets", string(val))
owner := "alexellis"
os.Setenv("Http_Owner", owner)
installationID := "123456"
os.Setenv("Http_Installation_id", installationID)
eventInfo, err := getEventFromEnv()
if err != nil {
t.Errorf(err.Error())
t.Fail()
}
expected := []string{owner + "-s1", owner + "-s2"}
for _, val := range eventInfo.Secrets {
found := false
for _, expectedVal := range expected {
if expectedVal == val {
found = true
}
}
if !found {
t.Errorf("Wanted secret: %s, didn't find it in list", val)
}
}
}
func TestGetEvent_EmptyEnvVars(t *testing.T) {
_, err := getEventFromEnv()
if err != nil {
t.Errorf(err.Error())
t.Fail()
}
}
func Test_GetImageName(t *testing.T) {
var imageNameTestcases = []struct {
Name string
PushRepositoryURL string
RepositoryURL string
ImageName string
Output string
}{
{
"Test Docker Hub with user-prefix",
"docker.io/of-community/",
"docker.io/of-community/",
"docker.io/of-community/function-name/",
"docker.io/of-community/function-name/",
},
{
"Testcase1",
"registry:5000",
"127.0.0.1:5000",
"registry:5000/username/function-name/",
"127.0.0.1:5000/username/function-name/",
},
{
"Testcase2",
"registry:31115",
"127.0.0.1:31115",
"registry:31115/username/function-name/",
"127.0.0.1:31115/username/function-name/",
},
{
"Testcase3",
"registry:31115",
"127.0.0.1",
"registry:31115/username/function-name/",
"127.0.0.1/username/function-name/",
},
}
for _, testcase := range imageNameTestcases {
t.Run(testcase.Name, func(t *testing.T) {
output := getImageName(testcase.RepositoryURL, testcase.PushRepositoryURL, testcase.ImageName)
if output != testcase.Output {
t.Errorf("%s failed!. got: %s, want: %s", testcase.Name, output, testcase.Output)
}
})
}
}
func Test_ValidImage(t *testing.T) {
imageNames := map[string]bool{
// error cases
"failed to solve: rpc error: code = Unknown desc = exit code 2": false,
"failed to solve: rpc error: code = Unknown desc = exit status 2": false,
"failed to solve:": false,
"error:": false,
"code =": false,
"-1": false,
"": false,
" ": false,
// "docker-registry:5000/admin/alexellis-sofia-test1-go-world:0.1-374448ba4d75bcf49611525a5b2448d9c3d0bf28": true,
// url (with/without tag)
"docker.io/ofcommunity/someuser/repo-name-function_name": true,
"docker.io/ofcommunity/someuser/repo-name-function_name:latest": true,
"docker.io/ofcommunity/someuser/repo-name-function_name:latest-7f7ec13d12b1397408e54b79686d43e41974bfa0": true,
// url with port (with/without tag)
"docker.io:80/ofcommunity/someuser/repo-name-function_name": true,
"docker.io:80/ofcommunity/someuser/repo-name-function_name:latest": true,
"docker.io:80/ofcommunity/someuser/repo-name-function_name:latest-7f7ec13d12b1397408e54b79686d43e41974bfa0": true,
// url with ip (with/without tag)
"127.0.0.1/someuser/repo-name-function_name": true,
"127.0.0.1/someuser/repo-name-function_name:latest": true,
"127.0.0.1/someuser/repo-name-function_name:latest-7f7ec13d12b1397408e54b79686d43e41974bfa0": true,
// url with ip and port (with/without tag)
"127.0.0.1:5000/someuser/repo-name-function_name": true,
"127.0.0.1:5000/someuser/repo-name-function_name:latest": true,
"127.0.0.1:5000/someuser/repo-name-function_name:latest-7f7ec13d12b1397408e54b79686d43e41974bfa0": true,
// docker user specific (with/without tag)
"someuser/repo-name-function_name": true,
"someuser/repo-name-function_name:latest": true,
"someuser/repo-name-function_name:latest-7f7ec13d12b1397408e54b79686d43e41974bfa0": true,
// open faas cloud function name (with/without tag)
"repo-name-function_name": true,
"repo-name-function_name:latest": true,
"repo-name-function_name:latest-7f7ec13d12b1397408e54b79686d43e41974bfa0": true,
// simple function name (with/without tag)
"function_name": true,
"function_name:latest": true,
"function_name:latest-7f7ec13d12b1397408e54b79686d43e41974bfa0": true,
}
for image, want := range imageNames {
got := validImage(image)
if got != want {
t.Errorf("Validating image %s - want: %v, got: %v", image, want, got)
}
}
}
func Test_getReadOnlyRootFS_default(t *testing.T) {
os.Setenv("readonly_root_filesystem", "1")
val := getReadOnlyRootFS()
want := true
if val != want {
t.Errorf("want %t, but got %t", want, val)
t.Fail()
}
}
func Test_getReadOnlyRootFS_override(t *testing.T) {
os.Setenv("readonly_root_filesystem", "false")
val := getReadOnlyRootFS()
want := false
if val != want {
t.Errorf("want %t, but got %t", want, val)
t.Fail()
}
}
func Test_getMemoryLimit_Swarm(t *testing.T) {
tests := []struct {
title string
memoryLimit string
expectedLimit string
}{
{
title: "Kubernetes environment variables missing and limit is set",
memoryLimit: "30",
expectedLimit: "30m",
},
{
title: "Kubernetes environment variables missing and limit is unset",
memoryLimit: "",
expectedLimit: "128m",
},
}
envVar := "function_memory_limit_mb"
for _, test := range tests {
t.Run(test.title, func(t *testing.T) {
os.Setenv(envVar, test.memoryLimit)
limit := getMemoryLimit()
if limit != test.expectedLimit {
t.Errorf("Test failed! Expected: `%v` got: `%v`.", test.expectedLimit, limit)
}
})
}
}
func Test_getMemoryLimit_Kubernetes(t *testing.T) {
tests := []struct {
title string
exampleVariable string
memoryLimit string
expectedLimit string
}{
{
title: "Kubernetes environment variables present and limit is set",
exampleVariable: "KUBERNETES_SERVICE_PORT",
memoryLimit: "30",
expectedLimit: "30Mi",
},
{
title: "Kubernetes environment variables present and limit is unset",
exampleVariable: "KUBERNETES_SERVICE_PORT",
memoryLimit: "",
expectedLimit: "128Mi",
},
}
exampleValue := "example_value"
envVar := "function_memory_limit_mb"
for _, test := range tests {
t.Run(test.title, func(t *testing.T) {
os.Setenv(test.exampleVariable, exampleValue)
os.Setenv(envVar, test.memoryLimit)
limit := getMemoryLimit()
if limit != test.expectedLimit {
t.Errorf("Test failed! Expected: `%v` got: `%v`.", test.expectedLimit, limit)
}
})
}
}
func Test_getCPULimit_Kubernetes(t *testing.T) {
tests := []struct {
title string
limitValue string
expectedLimit string
wantAvailable bool
}{
{
title: "Override test - Kubernetes environment variables present and limit is set",
limitValue: "250",
expectedLimit: "250m",
wantAvailable: true,
},
{
title: "Defaults test - Kubernetes environment variables present and limit is unset",
limitValue: "",
expectedLimit: "",
wantAvailable: false,
},
}
for _, test := range tests {
t.Run(test.title, func(t *testing.T) {
os.Setenv("KUBERNETES_SERVICE_PORT", "6443")
os.Setenv("function_cpu_limit_milli", test.limitValue)
limit := getCPULimit()
if limit.Available != test.wantAvailable {
t.Errorf("Limits not available, want: %v, got: %v", test.wantAvailable, limit.Available)
}
if limit.Limit != test.expectedLimit {
t.Errorf("Limits not correct, want: `%v` got: `%v`.", test.expectedLimit, limit.Limit)
}
})
}
}
func Test_existingVariable_Existent(t *testing.T) {
tests := []struct {
title string
value string
}{
{
title: "Variable exist and set",
value: "example",
},
{
title: "Variable exist but unset",
value: "",
},
}
key := "env_var"
expectedBool := true
for _, test := range tests {
t.Run(test.title, func(t *testing.T) {
os.Setenv(key, test.value)
_, exists := os.LookupEnv(key)
//exists := existingVariable(key)
if exists != expectedBool {
t.Errorf("Variable existance should be : `%v` got: `%v`", expectedBool, exists)
}
})
}
}
func Test_existingVariable_nonExistent(t *testing.T) {
t.Run("Variable does not exist", func(t *testing.T) {
expectedBool := false
key := "place_holder"
_, exists := os.LookupEnv(key)
if exists != expectedBool {
t.Errorf("Should be:`%v` got:`%v`", expectedBool, exists)
}
})
}
func Test_getConfig(t *testing.T) {
var configOpts = []struct {
name string
value string
defaultValue string
isConfigured bool
}{
{
name: "scaling_max_limit",
value: "",
defaultValue: "4",
isConfigured: true,
},
{
name: "scaling_max_limit",
value: "10",
defaultValue: "4",
isConfigured: true,
},
{
name: "random_config",
value: "",
defaultValue: "18",
isConfigured: false,
},
}
for _, config := range configOpts {
t.Run(config.name, func(t *testing.T) {
if config.isConfigured {
os.Setenv(config.name, config.value)
}
value := getConfig(config.name, config.defaultValue)
want := config.defaultValue
if len(config.value) > 0 {
want = config.value
}
if value != want {
t.Errorf("want %s, but got %s", want, value)
}
})
}
}
func Test_buildAnnotations_RemovesNonWhitelisted(t *testing.T) {
whitelist := []string{"topic"}
userValues := map[string]string{
"com.url": "value",
}
out := buildAnnotations(whitelist, userValues)
if _, ok := out["com.url"]; ok {
t.Fail()
}
}
func Test_buildAnnotations_AllowsWhitelisted(t *testing.T) {
whitelist := []string{
"topic",
"schedule",
}
userValues := map[string]string{
"topic": "function.deployed",
"schedule": "has schedule",
}
out := buildAnnotations(whitelist, userValues)
topicVal, ok := out["topic"]
if !ok {
t.Errorf("want user annotation: topic")
t.Fail()
}
if topicVal != userValues["topic"] {
t.Errorf("want user annotation: topic - got %s, want %s", topicVal, userValues["topic"])
t.Fail()
}
scheduleVal, ok := out["schedule"]
if !ok {
t.Errorf("want user annotation: schedule")
t.Fail()
}
if scheduleVal != userValues["schedule"] {
t.Errorf("want user annotation: schedule - got %s, want %s", scheduleVal, userValues["schedule"])
t.Fail()
}
}
schema.go | package schema
import (
"encoding/json"
"io"
"net/url"
"os"
"reflect"
"strconv"
"github.com/lestrrat-go/jsref"
"github.com/lestrrat-go/jsref/provider"
"github.com/lestrrat-go/pdebug"
"github.com/pkg/errors"
)
// This is used to check against result of reflect.MapIndex
var zeroval = reflect.Value{}
var _schema Schema
var _hyperSchema Schema
func init() {
buildJSSchema()
buildHyperSchema()
}
// New creates a new schema object
func New() *Schema {
s := Schema{}
s.initialize()
return &s
}
func (s *Schema) initialize() {
resolver := jsref.New()
mp := provider.NewMap()
mp.Set(SchemaURL, &_schema)
mp.Set(HyperSchemaURL, &_hyperSchema)
resolver.AddProvider(mp)
s.resolvedSchemas = make(map[string]interface{})
s.resolver = resolver
}
// ReadFile reads the file `f` and parses its content to create
// a new Schema object
func ReadFile(f string) (*Schema, error) {
in, err := os.Open(f)
if err != nil {
return nil, err
}
defer in.Close()
return Read(in)
}
// Read reads from `in` and parses its content to create
// a new Schema object
func Read(in io.Reader) (*Schema, error) {
s := New()
if err := s.Decode(in); err != nil {
return nil, err
}
return s, nil
}
// Decode reads from `in` and parses its content to
// initialize the schema object
func (s *Schema) Decode(in io.Reader) error {
dec := json.NewDecoder(in)
if err := dec.Decode(s); err != nil {
return err
}
s.applyParentSchema()
return nil
}
func (s *Schema) setParent(v *Schema) {
s.parent = v
}
func (s *Schema) applyParentSchema() {
// Find all components that may be a Schema
for _, v := range s.Definitions {
v.setParent(s)
v.applyParentSchema()
}
if props := s.AdditionalProperties; props != nil {
if sc := props.Schema; sc != nil {
sc.setParent(s)
sc.applyParentSchema()
}
}
if items := s.AdditionalItems; items != nil {
if sc := items.Schema; sc != nil {
sc.setParent(s)
sc.applyParentSchema()
}
}
if items := s.Items; items != nil {
for _, v := range items.Schemas {
v.setParent(s)
v.applyParentSchema()
}
}
for _, v := range s.Properties {
v.setParent(s)
v.applyParentSchema()
}
for _, v := range s.AllOf {
v.setParent(s)
v.applyParentSchema()
}
for _, v := range s.AnyOf {
v.setParent(s)
v.applyParentSchema()
}
for _, v := range s.OneOf {
v.setParent(s)
v.applyParentSchema()
}
if v := s.Not; v != nil {
v.setParent(s)
v.applyParentSchema()
}
}
// BaseURL returns the base URL registered for this schema
func (s *Schema) BaseURL() *url.URL {
scope := s.Scope()
u, err := url.Parse(scope)
if err != nil {
// XXX hmm, not sure what to do here
u = &url.URL{}
}
return u
}
// Root returns the upmost parent schema object within the
// hierarchy of schemas. For example, the `item` element
// in a schema for an array is also a schema, and you could
// reference elements in parent schemas.
func (s *Schema) Root() *Schema {
if s.parent == nil {
if pdebug.Enabled {
pdebug.Printf("Schema %p is root", s)
}
return s
}
return s.parent.Root()
}
func (s *Schema) findSchemaByID(id string) (*Schema, error) {
if s.ID == id {
return s, nil
}
// XXX Quite unimplemented
return nil, errors.Errorf("schema %s not found", strconv.Quote(id))
}
// ResolveURL takes a url string, and resolves it if it's
// a relative URL
func (s *Schema) ResolveURL(v string) (u *url.URL, err error) {
if pdebug.Enabled {
g := pdebug.IPrintf("START Schema.ResolveURL '%s'", v)
defer func() {
if err != nil {
g.IRelease("END Schema.ResolveURL '%s': error %s", v, err)
} else {
g.IRelease("END Schema.ResolveURL '%s' -> '%s'", v, u)
}
}()
}
base := s.BaseURL()
if pdebug.Enabled {
pdebug.Printf("Using base URL '%s'", base)
}
u, err = base.Parse(v)
if err != nil {
return nil, err
}
return u, nil
}
// IsResolved returns true if this schema has no Reference.
func (s *Schema) IsResolved() bool {
return s.Reference == ""
}
// Resolve returns the schema after it has been resolved.
// If s.Reference is the empty string, the current schema is returned.
//
// `ctx` is an optional context to resolve the reference with. If not
// specified, the root schema as returned by `Root` will be used.
func (s *Schema) Resolve(ctx interface{}) (ref *Schema, err error) {
if s.Reference == "" {
return s, nil
}
if pdebug.Enabled {
g := pdebug.IPrintf("START Schema.Resolve (%s)", s.Reference)
defer func() {
if err != nil {
g.IRelease("END Schema.Resolve (%s): %s", s.Reference, err)
} else {
g.IRelease("END Schema.Resolve (%s)", s.Reference)
}
}()
}
var thing interface{}
var ok bool
s.resolveLock.Lock()
thing, ok = s.resolvedSchemas[s.Reference]
s.resolveLock.Unlock()
if ok {
ref, ok = thing.(*Schema)
if ok {
if pdebug.Enabled {
pdebug.Printf("Cache HIT on '%s'", s.Reference)
}
} else {
if pdebug.Enabled {
pdebug.Printf("Negative Cache HIT on '%s'", s.Reference)
}
return nil, thing.(error)
}
} else {
if pdebug.Enabled {
pdebug.Printf("Cache MISS on '%s'", s.Reference)
}
var err error
if ctx == nil {
ctx = s.Root()
}
thing, err := s.resolver.Resolve(ctx, s.Reference)
if err != nil {
err = errors.Wrapf(err, "failed to resolve reference %s", strconv.Quote(s.Reference))
s.resolveLock.Lock()
s.resolvedSchemas[s.Reference] = err
s.resolveLock.Unlock()
return nil, err
}
ref, ok = thing.(*Schema)
if !ok {
err = errors.Wrapf(err, "resolved reference %s is not a schema", strconv.Quote(s.Reference))
s.resolveLock.Lock()
s.resolvedSchemas[s.Reference] = err
s.resolveLock.Unlock()
return nil, err
}
s.resolveLock.Lock()
s.resolvedSchemas[s.Reference] = ref
s.resolveLock.Unlock()
}
return ref, nil
}
// IsPropRequired can be used to query this schema if a
// given property name is required.
func (s *Schema) IsPropRequired(pname string) bool {
for _, name := range s.Required {
if name == pname {
return true
}
}
return false
}
// Scope returns the scope ID for this schema
func (s *Schema) Scope() string {
if pdebug.Enabled {
g := pdebug.IPrintf("START Schema.Scope")
defer g.IRelease("END Schema.Scope")
}
if s.ID != "" || s.parent == nil {
if pdebug.Enabled {
pdebug.Printf("Returning $id '%s'", s.ID)
}
return s.ID
}
return s.parent.Scope()
}
dojo-config.js | var dojoConfig = {
paths: {
plugins: location.pathname + 'scripts/lib/plugins' | }
}; |
|
test_view.py | import paste.fixture
import pylons.config as config
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.plugins as p
import ckan.lib.helpers as h
import ckanext.reclineview.plugin as plugin
import ckan.lib.create_test_data as create_test_data
import ckan.config.middleware as middleware
from ckan.tests import helpers, factories
class BaseTestReclineViewBase(tests.WsgiAppCase):
@classmethod
def setup_class(cls):
cls.config_templates = config['ckan.legacy_templates']
config['ckan.legacy_templates'] = 'false'
wsgiapp = middleware.make_app(config['global_conf'], **config)
p.load(cls.view_type)
cls.app = paste.fixture.TestApp(wsgiapp)
cls.p = cls.view_class()
create_test_data.CreateTestData.create()
cls.resource_view, cls.package, cls.resource_id = \
_create_test_view(cls.view_type)
@classmethod
def teardown_class(cls):
config['ckan.legacy_templates'] = cls.config_templates
p.unload(cls.view_type)
model.repo.rebuild_db()
def test_can_view(self):
data_dict = {'resource': {'datastore_active': True}}
assert self.p.can_view(data_dict)
data_dict = {'resource': {'datastore_active': False}}
assert not self.p.can_view(data_dict)
def test_title_description_iframe_shown(self):
url = h.url_for(controller='package', action='resource_read',
id=self.package.name, resource_id=self.resource_id)
result = self.app.get(url)
assert self.resource_view['title'] in result
assert self.resource_view['description'] in result
assert 'data-module="data-viewer"' in result.body
class TestReclineView(BaseTestReclineViewBase):
view_type = 'recline_view'
view_class = plugin.ReclineView
def test_it_has_no_schema(self):
schema = self.p.info().get('schema')
assert schema is None, schema
def test_can_view_format_no_datastore(self):
'''
Test can_view with acceptable formats when datastore_active is False
(DataProxy in use).
'''
formats = ['CSV', 'XLS', 'TSV', 'csv', 'xls', 'tsv']
for resource_format in formats:
data_dict = {'resource': {'datastore_active': False,
'format': resource_format}}
assert self.p.can_view(data_dict)
def test_can_view_bad_format_no_datastore(self):
'''
Test can_view with incorrect formats when datastore_active is False.
'''
formats = ['TXT', 'txt', 'doc', 'JSON']
for resource_format in formats:
data_dict = {'resource': {'datastore_active': False,
'format': resource_format}}
assert not self.p.can_view(data_dict)
class TestReclineViewDatastoreOnly(helpers.FunctionalTestBase):
@classmethod
def setup_class(cls):
if not p.plugin_loaded('recline_view'):
p.load('recline_view')
if not p.plugin_loaded('datastore'):
p.load('datastore')
app_config = config.copy()
app_config['ckan.legacy_templates'] = 'false'
app_config['ckan.plugins'] = 'recline_view datastore'
app_config['ckan.views.default_views'] = 'recline_view'
wsgiapp = middleware.make_app(config['global_conf'], **app_config)
cls.app = paste.fixture.TestApp(wsgiapp)
@classmethod
def teardown_class(cls):
if p.plugin_loaded('recline_view'):
p.unload('recline_view')
if p.plugin_loaded('datastore'):
p.unload('datastore')
def | (self):
dataset = factories.Dataset()
data = {
'resource': {'package_id': dataset['id']},
'fields': [{'id': 'a'}, {'id': 'b'}],
'records': [{'a': 1, 'b': 'xyz'}, {'a': 2, 'b': 'zzz'}]
}
result = helpers.call_action('datastore_create', **data)
resource_id = result['resource_id']
url = h.url_for(controller='package', action='resource_read',
id=dataset['id'], resource_id=resource_id)
result = self.app.get(url)
assert 'data-module="data-viewer"' in result.body
class TestReclineGridView(BaseTestReclineViewBase):
view_type = 'recline_grid_view'
view_class = plugin.ReclineGridView
def test_it_has_no_schema(self):
schema = self.p.info().get('schema')
assert schema is None, schema
class TestReclineGraphView(BaseTestReclineViewBase):
view_type = 'recline_graph_view'
view_class = plugin.ReclineGraphView
def test_it_has_the_correct_schema_keys(self):
schema = self.p.info().get('schema')
expected_keys = ['offset', 'limit', 'graph_type', 'group', 'series']
_assert_schema_exists_and_has_keys(schema, expected_keys)
class TestReclineMapView(BaseTestReclineViewBase):
view_type = 'recline_map_view'
view_class = plugin.ReclineMapView
def test_it_has_the_correct_schema_keys(self):
schema = self.p.info().get('schema')
expected_keys = ['offset', 'limit', 'map_field_type',
'latitude_field', 'longitude_field', 'geojson_field',
'auto_zoom', 'cluster_markers']
_assert_schema_exists_and_has_keys(schema, expected_keys)
def _create_test_view(view_type):
context = {'model': model,
'session': model.Session,
'user': model.User.get('testsysadmin').name}
package = model.Package.get('annakarenina')
resource_id = package.resources[1].id
resource_view = {'resource_id': resource_id,
'view_type': view_type,
'title': u'Test View',
'description': u'A nice test view'}
resource_view = p.toolkit.get_action('resource_view_create')(
context, resource_view)
return resource_view, package, resource_id
def _assert_schema_exists_and_has_keys(schema, expected_keys):
assert schema is not None, schema
keys = schema.keys()
keys.sort()
expected_keys.sort()
assert keys == expected_keys, '%s != %s' % (keys, expected_keys)
| test_create_datastore_only_view |
AndroidPushNotification.ts | import { getChildLogger } from "@aaa-backend-stack/logger";
const logger = getChildLogger("@aaa-backend-stack/pushes");
import * as serverdate from "@aaa-backend-stack/serverdate";
import * as _ from "lodash";
import { URL } from "url";
const fetch = require("node-fetch"); // use fetch instead of request api
import { apnProvider } from "./apnProvider";
import { getHooks } from "./hooks";
class | extends Error {
public response: any;
constructor(public message: string, public res: any) {
super(message);
// see https://github.com/Microsoft/TypeScript/wiki/Breaking-Changes#extending-built-ins-like-error-array-and-map-may-no-longer-work
        // no longer extend built-ins as this will always lead to problems!
(Object as any).setPrototypeOf(this, ApiError.prototype);
this.name = "ApiError";
this.message = message;
this.stack = (<any>new Error()).stack;
this.response = res;
}
}
export interface IAndroidResult {
error: any;
registration_id: string;
}
export interface IAndroidJSONResults {
results: IAndroidResult[];
}
export type ISendMessageResult = IAndroidJSONResults | boolean;
export class AndroidPushNotification {
private msg = {
data: null,
registration_ids: [] // client tokens
};
private currentTry: number = 0;
private isSent = false;
private _url = apnProvider.CONFIG.android.url;
private _apiKey = apnProvider.CONFIG.android.apiKey;
private _shouldSend = apnProvider.CONFIG.android.send;
constructor(tokens: string[], payload: any, url = apnProvider.CONFIG.android.url, apiKey = apnProvider.CONFIG.android.apiKey, shouldSend = apnProvider.CONFIG.android.send) {
this.msg.data = payload;
this.msg.registration_ids = tokens;
this._url = url;
this._apiKey = apiKey;
this._shouldSend = shouldSend;
}
send() {
if (this.isSent) {
return Promise.reject("GCM Error: Cannot send the same AndroidPushNotification twice!");
}
logger.info({ notification: this.msg }, "push (android): GCM sending push to " + this.msg.registration_ids.length + " devices");
if (this.msg.registration_ids.length === 0) {
logger.info("push (android): GCM: don't send because 0 devices");
return Promise.resolve(true);
}
if (this._shouldSend === false) {
logger.info("push (android): GCM: don't send because config.android.send === false");
return Promise.resolve(true);
}
return this.trySendMessage();
}
private async trySendMessage(): Promise<ISendMessageResult> {
let self = this;
this.currentTry += 1;
try {
const headers = {
"Authorization": "key=" + this._apiKey,
"Accept": "application/json",
"Content-Type": "application/json"
};
const body = this.msg;
logger.debug({
headers,
body,
currentTry: this.currentTry
}, "push (android): attempting to send push...");
const response = await fetch(this._url, {
method: "POST",
                timeout: 15000, // max amount of time to wait for the request
headers: headers,
body: JSON.stringify(body),
agent: apnProvider.CONFIG.android.proxy ? apnProvider.CONFIG.android.proxy.agent : null
});
this.isSent = true; // flag as sent!
if (response.status !== 200) {
throw new ApiError(response.statusText, response);
}
logger.debug({ response }, `push (android): GCM response received: ${response.status}`);
const json: IAndroidJSONResults = await response.json();
            await Promise.all(this.processResponse(json)); // await the hook promises themselves, not just the array containing them
return json;
} catch (reason) {
if (reason instanceof ApiError) {
// console.log(reason.response.status);
if (reason.response.status === 400) {
logger.error("push (android): ANDROID GCM PUSH ERROR - INVALID JSON", this.msg);
throw reason;
} else if (reason.response.status === 401) {
logger.error("push (android): ANDROID GCM PUSH ERROR - INVALID AUTH");
throw reason;
} else if (reason.response.status >= 500 && reason.response.status <= 599) {
return this.retry("GCM Received status " + reason.response.status, self.parseRetryAfter(reason.response.headers));
} else {
return this.retry("GCM Received unexpected status " + reason.response.status, null);
}
}
// unexpected error.
throw reason;
}
}
private processResponse(json: IAndroidJSONResults) {
// console.log("GCM processResponse", response.body);
let promises: Promise<any>[] = [];
let result: IAndroidResult;
let oldClientToken;
for (let i = 0; i < json.results.length; i++) {
result = json.results[i];
oldClientToken = this.msg.registration_ids[i];
if (result.error) {
logger.warn({
result, oldClientToken
}, "push (android): ANDROID GCM PUSH REMOVE (executing expiredTokenHooks hook)");
_.each(getHooks().expiredTokenHooks, (hookFn) => {
promises.push(hookFn("android", oldClientToken));
});
} else if (result.registration_id) {
// Remove old token and save new one
logger.warn({ result, oldClientToken, newClientToken: result.registration_id }, "push (android): ANDROID GCM PUSH UPDATE (executing updatedTokenHooks hook)");
_.each(getHooks().updatedTokenHooks, (hookFn) => {
promises.push(hookFn("android", oldClientToken, result.registration_id));
});
}
}
return promises;
}
private parseRetryAfter(headers) {
/* tslint:disable:no-string-literal */
let backOff = null;
if (headers["retry-after"]) {
let seconds = parseInt(headers["retry-after"], 10);
if (!isNaN(seconds)) {
backOff = seconds * 1000;
} else {
let date = serverdate.getMoment(headers["retry-after"]);
if (date.isValid()) {
let now = serverdate.getMoment();
if (date.isAfter(now)) { backOff = date.diff(now); }
}
}
}
return backOff;
/* tslint:enable:no-string-literal */
}
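    // Illustrative retry timing (values are assumptions, derived from the
    // logic in retry() below): with apnProvider.CONFIG.android.backOff = 1000
    // and no Retry-After header, delays grow as backOff * 2^currentTry ->
    // 2s, 4s, 8s, ... A "Retry-After: 120" header yields a fixed 120000ms
    // delay instead, and an HTTP-date value becomes the milliseconds remaining
    // until that date. Note: Promise.delay is a Bluebird-style API, not part
    // of native Promise -- this code assumes such an implementation is
    // installed globally.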
private retry(err, backOff): Promise<ISendMessageResult> | boolean {
if (this.currentTry < apnProvider.CONFIG.android.retryCount) {
if (!backOff) {
backOff = process.env.NODE_ENV === "test" ? 0 : apnProvider.CONFIG.android.backOff * Math.pow(2, this.currentTry);
}
logger.warn({ backOff, err }, "push (android): ANDROID GCM PUSH ERROR - RETRY IN");
return Promise.delay(backOff).then(() => {
return this.trySendMessage();
});
} else {
logger.error({ err }, "push (android): ANDROID GCM PUSH ERROR - FINAL");
return false;
}
}
}
| ApiError |
entropy_coder_train.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train an entropy coder model."""
import time
import tensorflow as tf
from research.compression.entropy_coder.core import code_loader
from research.compression.entropy_coder.core import config_helper
# pylint: enable=unused-import
from research.compression.entropy_coder.model import model_factory
FLAGS = tf.app.flags.FLAGS
# Hardware resources configuration.
tf.app.flags.DEFINE_string('master', '',
"""Name of the TensorFlow master to use.""")
tf.app.flags.DEFINE_string('train_dir', None,
"""Directory where to write event logs.""")
tf.app.flags.DEFINE_integer('task', None,
"""Task id of the replica running the training.""")
tf.app.flags.DEFINE_integer('ps_tasks', 0, """Number of tasks in the ps job.
If 0 no ps job is used.""")
# Model selection and configuration.
tf.app.flags.DEFINE_string('model', None, """Underlying encoder model.""")
tf.app.flags.DEFINE_string('model_config', None,
"""Model config protobuf given as text file.""")
# Training data and parameters configuration.
tf.app.flags.DEFINE_string('input_config', None,
"""Path to the training input config file.""")
tf.app.flags.DEFINE_string('train_config', None,
"""Path to the training experiment config file.""")
def train():
if FLAGS.train_dir is None:
raise ValueError('Parameter train_dir must be provided')
if FLAGS.task is None:
raise ValueError('Parameter task must be provided')
if FLAGS.model is None:
raise ValueError('Parameter model must be provided')
input_config_string = config_helper.GetConfigString(FLAGS.input_config)
input_config = config_helper.InputConfig(input_config_string)
# Training parameters.
train_config_string = config_helper.GetConfigString(FLAGS.train_config)
train_config = config_helper.TrainConfig(train_config_string)
batch_size = train_config.batch_size
initial_learning_rate = train_config.learning_rate
decay_rate = train_config.decay_rate
samples_per_decay = train_config.samples_per_decay
# Parameters for learning-rate decay.
# The formula is decay_rate ** floor(steps / decay_steps).
decay_steps = samples_per_decay / batch_size
decay_steps = max(decay_steps, 1)
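  # Worked example (illustrative numbers, not from any shipped config): with
  # initial_learning_rate=0.001, decay_rate=0.5, samples_per_decay=1000000 and
  # batch_size=100, decay_steps=10000, so with staircase=True the rate halves
  # every 10000 steps: 0.001 -> 0.0005 at step 10000 -> 0.00025 at step 20000.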
first_code = code_loader.ReadFirstCode(input_config.data)
first_code_height = (
first_code.features.feature['code_shape'].int64_list.value[0])
first_code_width = (
first_code.features.feature['code_shape'].int64_list.value[1])
max_bit_depth = (
first_code.features.feature['code_shape'].int64_list.value[2])
print('Maximum code depth: {}'.format(max_bit_depth))
with tf.Graph().as_default():
ps_ops = ["Variable", "VariableV2", "AutoReloadVariable", "VarHandleOp"]
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,
ps_ops=ps_ops)):
codes = code_loader.LoadBinaryCode(
input_config=input_config,
batch_size=batch_size)
if input_config.unique_code_size:
print('Input code size: {} x {}'.format(first_code_height,
first_code_width))
codes.set_shape(
[batch_size, first_code_height, first_code_width, max_bit_depth])
else:
codes.set_shape([batch_size, None, None, max_bit_depth])
codes_effective_shape = tf.shape(codes)
global_step = tf.contrib.framework.create_global_step()
# Apply learning-rate decay.
learning_rate = tf.train.exponential_decay(
learning_rate=initial_learning_rate,
global_step=global_step,
decay_steps=decay_steps,
decay_rate=decay_rate,
staircase=True)
tf.summary.scalar('Learning Rate', learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
epsilon=1.0)
# Create the entropy coder model.
model = model_factory.GetModelRegistry().CreateModel(FLAGS.model)
model_config_string = config_helper.GetConfigString(FLAGS.model_config)
model.Initialize(global_step, optimizer, model_config_string)
model.BuildGraph(codes)
summary_op = tf.summary.merge_all()
# Verify that the model can actually be trained.
if model.train_op is None:
raise ValueError('Input model {} is not trainable'.format(FLAGS.model))
# We disable the summary thread run by Supervisor class by passing
# summary_op=None. We still pass save_summaries_secs because it is used by
# the global step counter thread.
is_chief = (FLAGS.task == 0)
sv = tf.train.Supervisor(logdir=FLAGS.train_dir,
is_chief=is_chief,
global_step=global_step,
# saver=model.saver,
summary_op=None,
save_summaries_secs=120,
save_model_secs=600,
recovery_wait_secs=30)
sess = sv.PrepareSession(FLAGS.master)
sv.StartQueueRunners(sess)
| # Once everything has been setup properly, save the configs.
if is_chief:
config_helper.SaveConfig(FLAGS.train_dir, 'input_config.json',
input_config_string)
config_helper.SaveConfig(FLAGS.train_dir, 'model_config.json',
model_config_string)
config_helper.SaveConfig(FLAGS.train_dir, 'train_config.json',
train_config_string)
# Train the model.
next_summary_time = time.time()
while not sv.ShouldStop():
feed_dict = None
# Once in a while, update the summaries on the chief worker.
if is_chief and next_summary_time < time.time():
summary_str = sess.run(summary_op, feed_dict=feed_dict)
sv.SummaryComputed(sess, summary_str)
next_summary_time = time.time() + sv.save_summaries_secs
else:
tf_tensors = {
'train': model.train_op,
'code_length': model.average_code_length
}
np_tensors = sess.run(tf_tensors, feed_dict=feed_dict)
print(np_tensors['code_length'])
sv.Stop()
def main(argv=None): # pylint: disable=unused-argument
train()
if __name__ == '__main__':
tf.app.run() | step = sess.run(global_step)
print('Trainer initial step: {}.'.format(step))
|
http_server.d.ts | /// <reference path="./throwable.d.ts" />
/// <reference path="./http_server_request.d.ts" />
/// <reference path="./server_web_socket.d.ts" />
/// <reference path="./measured.d.ts" />
/// <reference path="./http_server_request_stream.d.ts" />
/// <reference path="./server_web_socket_stream.d.ts" />
declare module "vertx-js/http_server" {
export = HttpServer;
}
/**
* An HTTP and WebSockets server.
* <p>
* You receive HTTP requests by providing a requestHandler. As requests arrive on the server the handler
* will be called with the requests.
* <p>
* You receive WebSockets by providing a websocketHandler. As WebSocket connections arrive on the server, the
* WebSocket is passed to the handler.
*/
interface HttpServer
extends
Measured
{
/**
* Whether the metrics are enabled for this measured object
*/
isMetricsEnabled(): boolean;
/**
* Return the request stream for the server. As HTTP requests are received by the server,
 * instances of HttpServerRequest will be created and passed to the stream.
*/
requestStream(): HttpServerRequestStream;
/**
* Set the request handler for the server to <code>requestHandler</code>. As HTTP requests are received by the server,
* instances of HttpServerRequest will be created and passed to this handler.
*/
requestHandler(handler: (e: HttpServerRequest) => void): HttpServer;
/**
* Return the websocket stream for the server. If a websocket connect handshake is successful a
 * new ServerWebSocket instance will be created and passed to the stream.
*/
websocketStream(): ServerWebSocketStream;
| * Set the websocket handler for the server to <code>wsHandler</code>. If a websocket connect handshake is successful a
* new ServerWebSocket instance will be created and passed to the handler.
*/
websocketHandler(handler: (e: ServerWebSocket) => void): HttpServer;
/**
* Tell the server to start listening. The server will listen on the port and host specified in the
* HttpServerOptions that was used when creating the server.
* <p>
* The listen happens asynchronously and the server may not be listening until some time after the call has returned.
*/
listen(): HttpServer;
/**
* Tell the server to start listening. The server will listen on the port and host specified here,
* ignoring any value set in the HttpServerOptions that was used when creating the server.
* <p>
* The listen happens asynchronously and the server may not be listening until some time after the call has returned.
*/
listen(port: number, host: string): HttpServer;
/**
* Like listen but supplying a handler that will be called when the server is actually
* listening (or has failed).
*/
listen(port: number, host: string, listenHandler: (res: HttpServer, err?: Throwable) => void): HttpServer;
/**
* Like listen but the server will listen on host "0.0.0.0" and port specified here ignoring
* any value in the HttpServerOptions that was used when creating the server.
*/
listen(port: number): HttpServer;
/**
* Like listen but supplying a handler that will be called when the server is actually listening (or has failed).
*/
listen(port: number, listenHandler: (res: HttpServer, err?: Throwable) => void): HttpServer;
/**
* Like listen but supplying a handler that will be called when the server is actually listening (or has failed).
*/
listen(listenHandler: (res: HttpServer, err?: Throwable) => void): HttpServer;
/**
* Close the server. Any open HTTP connections will be closed.
* <p>
* The close happens asynchronously and the server may not be closed until some time after the call has returned.
*/
close(): void;
/**
* Like close but supplying a handler that will be called when the server is actually closed (or has failed).
*/
close(completionHandler: (res: void, err?: Throwable) => void): void;
}
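/**
 * Usage sketch (assumed Vert.x JS boilerplate, not part of the generated
 * typings; `vertx.createHttpServer` and `req.response()` are taken from the
 * Vert.x API):
 *
 *   var server: HttpServer = vertx.createHttpServer();
 *   server.requestHandler(function (req) {
 *     req.response().end("hello");
 *   }).listen(8080, function (srv, err) {
 *     if (err) { console.error("listen failed", err); }
 *   });
 */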
declare var HttpServer: {
} | /** |
criteria.service.ts | import { Injectable } from '@angular/core';
import { HttpClient, HttpParams} from '@angular/common/http';
import { BASE_URL } from '../utils/constants';
import { retry, map, catchError } from 'rxjs/operators';
import { PLACMError } from 'models/error';
import { throwError } from 'rxjs';
const criteriaUrl = BASE_URL + 'sc/';
@Injectable({
providedIn: 'root'
})
export class CriteriaService {
constructor(private http: HttpClient) { }
getData(serverName: string, filters?: any, comparing?: boolean): Promise<any> {
let opts = new HttpParams();
opts = opts.append('name', serverName);
let compare = comparing ? 'Compare' : '';
if(filters)
opts = opts.append('filters', filters);
return this.http.get(criteriaUrl + 'scData' + compare, {params: opts})
.pipe(
retry(3),
map(res => {
if (res['success'] !== 1 || res['errors'] !== null) {
throw new PLACMError(res['success'], res['message']);
}
return res;
}),
catchError(err => {
return throwError(err);
})
)
.toPromise();
}
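  // Usage sketch (hypothetical caller; `handleError` is illustrative). Note
  // that `filters` is appended to HttpParams as-is, so callers are expected
  // to pass a pre-serialized string:
  //
  //   this.criteriaService.getData('serverA', JSON.stringify({type: 'web'}), true)
  //     .then(res => console.log(res))
  //     .catch(err => this.handleError(err));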
| getNames(serverName: string, filters?: any): Promise<any> {
let opts = new HttpParams();
opts = opts.append('name', serverName);
if(filters)
opts = opts.append('filters', filters);
return this.http.get(criteriaUrl + 'scNames', {params: opts})
.pipe(
retry(3),
map(res => {
if (res['success'] !== 1 || res['errors'] !== null) {
throw new PLACMError(res['success'], res['message']);
}
return res;
}),
catchError(err => {
return throwError(err);
})
)
.toPromise();
}
} | |
facebook-content-view.js | define([
'streamhub-sdk/content/views/content-view',
'hgn!streamhub-sdk/content/templates/facebook',
'inherits',
'streamhub-sdk/jquery'],
function (ContentView, FacebookContentTemplate, inherits, $) {
'use strict';
/**
* A view for rendering facebook content into an element.
* @param opts {Object} The set of options to configure this view with (See ContentView).
* @exports streamhub-sdk/content/views/facebook-content-view
* @constructor
*/
var FacebookContentView = function FacebookContentView (opts) {
ContentView.call(this, opts);
};
inherits(FacebookContentView, ContentView);
FacebookContentView.prototype.elClass += ' content-facebook ';
FacebookContentView.prototype.template = FacebookContentTemplate;
/**
* Gets the template rendering context. By default, returns "this.content".
* @return {Content} The content object this view was instantiated with.
*/
FacebookContentView.prototype.getTemplateContext = function () {
var context = ContentView.prototype.getTemplateContext.call(this);
if (context.attachments.length) {
context.permalink = context.attachments[0].url;
}
return context;
}; | return FacebookContentView;
}); | |
route_filter.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class RouteFilter(Resource):
| """Route Filter Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict
:param rules: Collection of RouteFilterRules contained within a route
filter.
:type rules: list of :class:`RouteFilterRule
<azure.mgmt.network.v2017_08_01.models.RouteFilterRule>`
:param peerings: A collection of references to express route circuit
peerings.
:type peerings: list of :class:`ExpressRouteCircuitPeering
<azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitPeering>`
:ivar provisioning_state: The provisioning state of the resource. Possible
values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
:vartype provisioning_state: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, rules=None, peerings=None):
super(RouteFilter, self).__init__(id=id, location=location, tags=tags)
self.rules = rules
self.peerings = peerings
self.provisioning_state = None
self.etag = None |
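# Usage sketch (illustrative, field values are assumptions):
#
#   rf = RouteFilter(
#       location='westus',
#       tags={'env': 'test'},
#       rules=[],  # RouteFilterRule instances
#   )
#   # name, type, provisioning_state and etag are read-only and populated
#   # by the server, as declared in _validation above.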
|
generate_yolo_box.py | #
# yolo_box paddle model generator
#
import numpy as np
from save_model import saveModel
import sys
def yolo_box(name : str, x, img_size, attrs : dict):
import paddle as pdpd
pdpd.enable_static()
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype)
node_img_size = pdpd.static.data(name='img_size', shape=img_size.shape, dtype=img_size.dtype)
boxes, scores = pdpd.vision.ops.yolo_box(node_x,
node_img_size,
anchors=attrs['anchors'],
class_num=attrs['class_num'],
conf_thresh=attrs['conf_thresh'],
downsample_ratio=attrs['downsample_ratio'],
clip_bbox=attrs['clip_bbox'],
name=None,
scale_x_y=attrs['scale_x_y'])
cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
outs = exe.run(
feed={'x': x, 'img_size': img_size},
fetch_list=[boxes, scores])
        # Save inputs in order of ngraph function, to facilitate the Fuzzy test,
# which accepts inputs and outputs in this order as well.
saveModel(name, exe, feedkeys=['x', 'img_size'], fetchlist=[boxes, scores],
inputs=[x, img_size], outputs=outs, target_dir=sys.argv[1])
return outs
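# Shape note (derived from the attribute dicts below): the input tensor carries
# num_anchors * (5 + class_num) channels -- 5 box terms (x, y, w, h, objectness)
# plus one score per class. With anchors [10,13, 16,30, 33,23] (3 pairs) and
# class_num=2, that is 3 * (5 + 2) = 21 channels.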
def | ():
# yolo_box
pdpd_attrs = {
'name': "yolo_box_default",
'anchors': [10, 13, 16, 30, 33, 23],
'class_num': 2,
'conf_thresh': 0.5,
'downsample_ratio': 32,
'clip_bbox': False,
'scale_x_y': 1.0
}
pdpd_attrs_clip_box = {
'name': "yolo_box_clip_box",
'anchors': [10, 13, 16, 30, 33, 23],
'class_num': 2,
'conf_thresh': 0.5,
'downsample_ratio': 32,
'clip_bbox': True,
'scale_x_y': 1.0
}
pdpd_attrs_scale_xy = {
'name': "yolo_box_scale_xy",
'anchors': [10, 13, 16, 30, 33, 23],
'class_num': 2,
'conf_thresh': 0.5,
'downsample_ratio': 32,
'clip_bbox': True,
'scale_x_y': 1.2
}
pdpd_attrs_list = [pdpd_attrs, pdpd_attrs_clip_box, pdpd_attrs_scale_xy]
N = 32
num_anchors = int(len(pdpd_attrs['anchors'])//2)
x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), 13, 13)
imgsize_shape = (N, 2)
data = np.random.random(x_shape).astype('float32')
data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32')
for item in pdpd_attrs_list:
pred_pdpd = yolo_box(item['name'], data, data_ImSize, item)
def TEST2():
# yolo_box uneven spatial width and height
pdpd_attrs = {
'name': "yolo_box_uneven_wh",
'anchors': [10, 13, 16, 30, 33, 23],
'class_num': 2,
'conf_thresh': 0.5,
'downsample_ratio': 32,
'clip_bbox': False,
'scale_x_y': 1.0
}
N = 16
SPATIAL_WIDTH = 13
SPATIAL_HEIGHT = 9
num_anchors = int(len(pdpd_attrs['anchors'])//2)
x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), SPATIAL_HEIGHT, SPATIAL_WIDTH)
imgsize_shape = (N, 2)
data = np.random.random(x_shape).astype('float32')
data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32')
pred_pdpd = yolo_box(pdpd_attrs['name'], data, data_ImSize, pdpd_attrs)
if __name__ == "__main__":
TEST1()
TEST2() | TEST1 |
container_manager_test.go | // +build linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_node
import (
"fmt"
"os/exec"
"path"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func getOOMScoreForPid(pid int) (int, error) {
procfsPath := path.Join("/proc", strconv.Itoa(pid), "oom_score_adj")
out, err := exec.Command("sudo", "cat", procfsPath).CombinedOutput()
if err != nil {
return 0, err
}
return strconv.Atoi(strings.TrimSpace(string(out)))
}
func validateOOMScoreAdjSetting(pid int, expectedOOMScoreAdj int) error {
oomScore, err := getOOMScoreForPid(pid)
if err != nil {
return fmt.Errorf("failed to get oom_score_adj for %d: %v", pid, err)
}
if expectedOOMScoreAdj != oomScore {
return fmt.Errorf("expected pid %d's oom_score_adj to be %d; found %d", pid, expectedOOMScoreAdj, oomScore)
}
return nil
}
func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expectedMaxOOMScoreAdj int) error {
oomScore, err := getOOMScoreForPid(pid)
if err != nil {
return fmt.Errorf("failed to get oom_score_adj for %d", pid)
}
if oomScore < expectedMinOOMScoreAdj {
return fmt.Errorf("expected pid %d's oom_score_adj to be >= %d; found %d", pid, expectedMinOOMScoreAdj, oomScore)
}
	if oomScore >= expectedMaxOOMScoreAdj {
return fmt.Errorf("expected pid %d's oom_score_adj to be < %d; found %d", pid, expectedMaxOOMScoreAdj, oomScore)
}
return nil
}
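// Summary of the oom_score_adj expectations asserted by the specs below:
// the docker daemon and kubelet run at -999, pod infra ("pause") and
// guaranteed containers at -998, best-effort containers at 1000, and
// burstable containers fall somewhere in [2, 1000).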
var _ = framework.KubeDescribe("Kubelet Container Manager [Serial]", func() {
f := framework.NewDefaultFramework("kubelet-container-manager")
Describe("Validate OOM score adjustments", func() {
Context("once the node is setup", func() {
It("docker daemon's oom-score-adj should be -999", func() {
dockerPids, err := getPidsForProcess(dockerProcessName, dockerPidFile)
Expect(err).To(BeNil(), "failed to get list of docker daemon pids")
for _, pid := range dockerPids {
Eventually(func() error {
return validateOOMScoreAdjSetting(pid, -999)
}, 5*time.Minute, 30*time.Second).Should(BeNil())
}
})
It("Kubelet's oom-score-adj should be -999", func() {
kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
Expect(err).To(BeNil(), "failed to get list of kubelet pids")
Expect(len(kubeletPids)).To(Equal(1), "expected only one kubelet process; found %d", len(kubeletPids))
Eventually(func() error {
return validateOOMScoreAdjSetting(kubeletPids[0], -999)
}, 5*time.Minute, 30*time.Second).Should(BeNil())
})
Context("", func() {
It("pod infra containers oom-score-adj should be -998 and best effort container's should be 1000", func() {
var err error
podClient := f.PodClient()
podName := "besteffort" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Name: podName,
},
},
},
})
var pausePids []int
By("checking infra container's oom-score-adj")
Eventually(func() error {
pausePids, err = getPidsForProcess("pause", "")
if err != nil {
return fmt.Errorf("failed to get list of pause pids: %v", err)
}
for _, pid := range pausePids {
if err := validateOOMScoreAdjSetting(pid, -998); err != nil {
return err
}
}
return nil
}, 2*time.Minute, time.Second*4).Should(BeNil())
var shPids []int
By("checking besteffort container's oom-score-adj")
Eventually(func() error {
shPids, err = getPidsForProcess("serve_hostname", "")
if err != nil {
return fmt.Errorf("failed to get list of serve hostname process pids: %v", err)
}
if len(shPids) != 1 {
return fmt.Errorf("expected only one serve_hostname process; found %d", len(shPids))
}
return validateOOMScoreAdjSetting(shPids[0], 1000)
}, 2*time.Minute, time.Second*4).Should(BeNil())
})
// Log the running containers here to help debugging. Use `docker ps`
// directly for now because the test is already docker specific.
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
By("Dump all running docker containers")
output, err := exec.Command("docker", "ps").CombinedOutput()
Expect(err).NotTo(HaveOccurred())
framework.Logf("Running docker containers:\n%s", string(output))
}
})
})
It("guaranteed container's oom-score-adj should be -998", func() {
podClient := f.PodClient()
podName := "guaranteed" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/nginx-slim:0.7",
Name: podName,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("50Mi"),
},
},
},
},
},
})
var (
ngPids []int
err error
)
Eventually(func() error {
ngPids, err = getPidsForProcess("nginx", "")
if err != nil {
return fmt.Errorf("failed to get list of nginx process pids: %v", err)
}
for _, pid := range ngPids {
if err := validateOOMScoreAdjSetting(pid, -998); err != nil |
}
return nil
}, 2*time.Minute, time.Second*4).Should(BeNil())
})
It("burstable container's oom-score-adj should be between [2, 1000)", func() {
podClient := f.PodClient()
podName := "burstable" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/test-webserver:e2e",
Name: podName,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("50Mi"),
},
},
},
},
},
})
var (
wsPids []int
err error
)
Eventually(func() error {
wsPids, err = getPidsForProcess("test-webserver", "")
if err != nil {
return fmt.Errorf("failed to get list of test-webserver process pids: %v", err)
}
for _, pid := range wsPids {
if err := validateOOMScoreAdjSettingIsInRange(pid, 2, 1000); err != nil {
return err
}
}
return nil
}, 2*time.Minute, time.Second*4).Should(BeNil())
// TODO: Test the oom-score-adj logic for burstable more accurately.
})
})
})
})
| {
return err
} |
module.ts | import {CustomView} from "../custom_views/custom_view";
import {Argument} from "./argument";
import {RelatedListProperties} from "./related_list_properties";
import {Territory} from "./territory";
import {Profile} from "../profiles/profile";
import {User} from "../users/user";
import {Choice} from "../../../../../../utils/util/choice";
import {Model} from "../../../../../../utils/util/model";
class Module implements Model {
private name: string;
private globalSearchSupported: boolean;
private kanbanView: boolean;
private deletable: boolean;
private description: string;
private creatable: boolean;
private filterStatus: boolean;
private inventoryTemplateSupported: boolean;
private modifiedTime: Date;
private pluralLabel: string;
private presenceSubMenu: boolean;
private isblueprintsupported: boolean;
private triggersSupported: boolean;
private id: bigint;
private relatedListProperties: RelatedListProperties;
private properties: Array<string>;
private onDemandProperties: Array<string>;
private perPage: number;
private visibility: number;
private visible: boolean;
private convertable: boolean;
private editable: boolean;
private emailtemplateSupport: boolean;
private profiles: Array<Profile>;
private filterSupported: boolean;
private displayField: string;
private searchLayoutFields: Array<string>;
private kanbanViewSupported: boolean;
private showAsTab: boolean;
private webLink: string;
private sequenceNumber: number;
private singularLabel: string;
private viewable: boolean;
private apiSupported: boolean;
private apiName: string;
private quickCreate: boolean;
private modifiedBy: User;
private generatedType: Choice<string>;
private feedsRequired: boolean;
private scoringSupported: boolean;
private webformSupported: boolean;
private arguments1: Array<Argument>;
private moduleName: string;
private businessCardFieldLimit: number;
private customView: CustomView;
private parentModule: Module;
private territory: Territory;
private keyModified: Map<string, number> = new Map<string, number>();
/**
* The method to get the name
* @returns A string representing the name
*/
public getName(): string {
return this.name;
}
/**
* The method to set the value to name
* @param name A string representing the name
*/
public setName(name: string): void {
this.name = name;
this.keyModified.set("name", 1);
}
/**
* The method to get the globalSearchSupported
* @returns A boolean representing the globalSearchSupported
*/
public getGlobalSearchSupported(): boolean {
return this.globalSearchSupported;
}
/**
* The method to set the value to globalSearchSupported
* @param globalSearchSupported A boolean representing the globalSearchSupported
*/
public setGlobalSearchSupported(globalSearchSupported: boolean): void {
this.globalSearchSupported = globalSearchSupported;
this.keyModified.set("global_search_supported", 1);
}
/**
* The method to get the kanbanView
* @returns A boolean representing the kanbanView
*/
public getKanbanView(): boolean {
return this.kanbanView;
}
/**
* The method to set the value to kanbanView
* @param kanbanView A boolean representing the kanbanView
*/
public setKanbanView(kanbanView: boolean): void {
this.kanbanView = kanbanView;
this.keyModified.set("kanban_view", 1);
}
/**
* The method to get the deletable
* @returns A boolean representing the deletable
*/
public getDeletable(): boolean {
return this.deletable;
}
/**
* The method to set the value to deletable
* @param deletable A boolean representing the deletable
*/
public setDeletable(deletable: boolean): void {
this.deletable = deletable;
this.keyModified.set("deletable", 1);
}
/**
* The method to get the description
* @returns A string representing the description
*/
public getDescription(): string {
return this.description;
}
/**
* The method to set the value to description
* @param description A string representing the description
*/
public setDescription(description: string): void {
this.description = description;
this.keyModified.set("description", 1);
}
/**
* The method to get the creatable
* @returns A boolean representing the creatable
*/
public getCreatable(): boolean {
return this.creatable;
}
/**
* The method to set the value to creatable
* @param creatable A boolean representing the creatable
*/
public setCreatable(creatable: boolean): void {
this.creatable = creatable;
this.keyModified.set("creatable", 1);
}
/**
* The method to get the filterStatus
* @returns A boolean representing the filterStatus
*/
public getFilterStatus(): boolean {
return this.filterStatus;
}
/**
* The method to set the value to filterStatus
* @param filterStatus A boolean representing the filterStatus
*/
public setFilterStatus(filterStatus: boolean): void {
this.filterStatus = filterStatus;
this.keyModified.set("filter_status", 1);
}
/**
* The method to get the inventoryTemplateSupported
* @returns A boolean representing the inventoryTemplateSupported
*/
public getInventoryTemplateSupported(): boolean {
return this.inventoryTemplateSupported;
}
/**
* The method to set the value to inventoryTemplateSupported
* @param inventoryTemplateSupported A boolean representing the inventoryTemplateSupported
*/
public setInventoryTemplateSupported(inventoryTemplateSupported: boolean): void {
this.inventoryTemplateSupported = inventoryTemplateSupported;
this.keyModified.set("inventory_template_supported", 1);
}
/**
* The method to get the modifiedTime
* @returns An instance of Date
*/
public getModifiedTime(): Date {
return this.modifiedTime;
}
/**
* The method to set the value to modifiedTime
* @param modifiedTime An instance of Date
*/
public setModifiedTime(modifiedTime: Date): void {
this.modifiedTime = modifiedTime;
this.keyModified.set("modified_time", 1);
}
/**
* The method to get the pluralLabel
* @returns A string representing the pluralLabel
*/
public getPluralLabel(): string {
return this.pluralLabel;
}
/**
* The method to set the value to pluralLabel
* @param pluralLabel A string representing the pluralLabel
*/
public setPluralLabel(pluralLabel: string): void {
this.pluralLabel = pluralLabel;
this.keyModified.set("plural_label", 1);
}
/**
* The method to get the presenceSubMenu
* @returns A boolean representing the presenceSubMenu
*/
public getPresenceSubMenu(): boolean {
return this.presenceSubMenu;
}
/**
* The method to set the value to presenceSubMenu
* @param presenceSubMenu A boolean representing the presenceSubMenu
*/
public setPresenceSubMenu(presenceSubMenu: boolean): void {
this.presenceSubMenu = presenceSubMenu;
this.keyModified.set("presence_sub_menu", 1);
}
/**
* The method to get the isblueprintsupported
* @returns A boolean representing the isblueprintsupported
*/
public getIsblueprintsupported(): boolean {
return this.isblueprintsupported;
}
/**
* The method to set the value to isblueprintsupported
* @param isblueprintsupported A boolean representing the isblueprintsupported
*/
public setIsblueprintsupported(isblueprintsupported: boolean): void {
this.isblueprintsupported = isblueprintsupported;
this.keyModified.set("isBlueprintSupported", 1);
}
/**
* The method to get the triggersSupported
* @returns A boolean representing the triggersSupported
*/
public getTriggersSupported(): boolean {
return this.triggersSupported;
}
/**
* The method to set the value to triggersSupported
* @param triggersSupported A boolean representing the triggersSupported
*/
public setTriggersSupported(triggersSupported: boolean): void {
this.triggersSupported = triggersSupported;
this.keyModified.set("triggers_supported", 1);
}
/**
* The method to get the id
* @returns A bigint representing the id
*/
public getId(): bigint {
return this.id;
}
/**
* The method to set the value to id
* @param id A bigint representing the id
*/
public setId(id: bigint): void {
this.id = id;
this.keyModified.set("id", 1);
}
/**
* The method to get the relatedListProperties
* @returns An instance of RelatedListProperties
*/
public getRelatedListProperties(): RelatedListProperties {
return this.relatedListProperties;
}
/**
* The method to set the value to relatedListProperties
* @param relatedListProperties An instance of RelatedListProperties
*/
public setRelatedListProperties(relatedListProperties: RelatedListProperties): void {
this.relatedListProperties = relatedListProperties;
this.keyModified.set("related_list_properties", 1);
}
/**
* The method to get the properties
* @returns An Array representing the properties
*/
public getProperties(): Array<string> {
return this.properties;
}
/**
* The method to set the value to properties
* @param properties An Array representing the properties
*/
public setProperties(properties: Array<string>): void {
this.properties = properties;
this.keyModified.set("$properties", 1);
}
/**
* The method to get the onDemandProperties
* @returns An Array representing the onDemandProperties
*/
public getOnDemandProperties(): Array<string> {
return this.onDemandProperties;
}
/**
* The method to set the value to onDemandProperties
* @param onDemandProperties An Array representing the onDemandProperties
*/
public setOnDemandProperties(onDemandProperties: Array<string>): void {
this.onDemandProperties = onDemandProperties;
this.keyModified.set("$on_demand_properties", 1);
}
/**
* The method to get the perPage
* @returns A number representing the perPage
*/
public getPerPage(): number {
return this.perPage;
}
/**
* The method to set the value to perPage
* @param perPage A number representing the perPage
*/
public setPerPage(perPage: number): void {
this.perPage = perPage;
this.keyModified.set("per_page", 1);
}
/**
* The method to get the visibility
* @returns A number representing the visibility
*/
public getVisibility(): number {
return this.visibility;
}
/**
* The method to set the value to visibility
* @param visibility A number representing the visibility
*/
public setVisibility(visibility: number): void {
this.visibility = visibility;
this.keyModified.set("visibility", 1);
| /**
* The method to get the visible
* @returns A boolean representing the visible
*/
public getVisible(): boolean {
return this.visible;
}
/**
* The method to set the value to visible
* @param visible A boolean representing the visible
*/
public setVisible(visible: boolean): void {
this.visible = visible;
this.keyModified.set("visible", 1);
}
/**
* The method to get the convertable
* @returns A boolean representing the convertable
*/
public getConvertable(): boolean {
return this.convertable;
}
/**
* The method to set the value to convertable
* @param convertable A boolean representing the convertable
*/
public setConvertable(convertable: boolean): void {
this.convertable = convertable;
this.keyModified.set("convertable", 1);
}
/**
* The method to get the editable
* @returns A boolean representing the editable
*/
public getEditable(): boolean {
return this.editable;
}
/**
* The method to set the value to editable
* @param editable A boolean representing the editable
*/
public setEditable(editable: boolean): void {
this.editable = editable;
this.keyModified.set("editable", 1);
}
/**
* The method to get the emailtemplateSupport
* @returns A boolean representing the emailtemplateSupport
*/
public getEmailtemplateSupport(): boolean {
return this.emailtemplateSupport;
}
/**
* The method to set the value to emailtemplateSupport
* @param emailtemplateSupport A boolean representing the emailtemplateSupport
*/
public setEmailtemplateSupport(emailtemplateSupport: boolean): void {
this.emailtemplateSupport = emailtemplateSupport;
this.keyModified.set("emailTemplate_support", 1);
}
/**
* The method to get the profiles
* @returns An Array representing the profiles
*/
public getProfiles(): Array<Profile> {
return this.profiles;
}
/**
* The method to set the value to profiles
* @param profiles An Array representing the profiles
*/
public setProfiles(profiles: Array<Profile>): void {
this.profiles = profiles;
this.keyModified.set("profiles", 1);
}
/**
* The method to get the filterSupported
* @returns A boolean representing the filterSupported
*/
public getFilterSupported(): boolean {
return this.filterSupported;
}
/**
* The method to set the value to filterSupported
* @param filterSupported A boolean representing the filterSupported
*/
public setFilterSupported(filterSupported: boolean): void {
this.filterSupported = filterSupported;
this.keyModified.set("filter_supported", 1);
}
/**
* The method to get the displayField
* @returns A string representing the displayField
*/
public getDisplayField(): string {
return this.displayField;
}
/**
* The method to set the value to displayField
* @param displayField A string representing the displayField
*/
public setDisplayField(displayField: string): void {
this.displayField = displayField;
this.keyModified.set("display_field", 1);
}
/**
* The method to get the searchLayoutFields
* @returns An Array representing the searchLayoutFields
*/
public getSearchLayoutFields(): Array<string> {
return this.searchLayoutFields;
}
/**
* The method to set the value to searchLayoutFields
* @param searchLayoutFields An Array representing the searchLayoutFields
*/
public setSearchLayoutFields(searchLayoutFields: Array<string>): void {
this.searchLayoutFields = searchLayoutFields;
this.keyModified.set("search_layout_fields", 1);
}
/**
* The method to get the kanbanViewSupported
* @returns A boolean representing the kanbanViewSupported
*/
public getKanbanViewSupported(): boolean {
return this.kanbanViewSupported;
}
/**
* The method to set the value to kanbanViewSupported
* @param kanbanViewSupported A boolean representing the kanbanViewSupported
*/
public setKanbanViewSupported(kanbanViewSupported: boolean): void {
this.kanbanViewSupported = kanbanViewSupported;
this.keyModified.set("kanban_view_supported", 1);
}
/**
* The method to get the showAsTab
* @returns A boolean representing the showAsTab
*/
public getShowAsTab(): boolean {
return this.showAsTab;
}
/**
* The method to set the value to showAsTab
* @param showAsTab A boolean representing the showAsTab
*/
public setShowAsTab(showAsTab: boolean): void {
this.showAsTab = showAsTab;
this.keyModified.set("show_as_tab", 1);
}
/**
* The method to get the webLink
* @returns A string representing the webLink
*/
public getWebLink(): string {
return this.webLink;
}
/**
* The method to set the value to webLink
* @param webLink A string representing the webLink
*/
public setWebLink(webLink: string): void {
this.webLink = webLink;
this.keyModified.set("web_link", 1);
}
/**
* The method to get the sequenceNumber
* @returns A number representing the sequenceNumber
*/
public getSequenceNumber(): number {
return this.sequenceNumber;
}
/**
* The method to set the value to sequenceNumber
* @param sequenceNumber A number representing the sequenceNumber
*/
public setSequenceNumber(sequenceNumber: number): void {
this.sequenceNumber = sequenceNumber;
this.keyModified.set("sequence_number", 1);
}
/**
* The method to get the singularLabel
* @returns A string representing the singularLabel
*/
public getSingularLabel(): string {
return this.singularLabel;
}
/**
* The method to set the value to singularLabel
* @param singularLabel A string representing the singularLabel
*/
public setSingularLabel(singularLabel: string): void {
this.singularLabel = singularLabel;
this.keyModified.set("singular_label", 1);
}
/**
* The method to get the viewable
* @returns A boolean representing the viewable
*/
public getViewable(): boolean {
return this.viewable;
}
/**
* The method to set the value to viewable
* @param viewable A boolean representing the viewable
*/
public setViewable(viewable: boolean): void {
this.viewable = viewable;
this.keyModified.set("viewable", 1);
}
/**
* The method to get the apiSupported
* @returns A boolean representing the apiSupported
*/
public getAPISupported(): boolean {
return this.apiSupported;
}
/**
* The method to set the value to apiSupported
* @param apiSupported A boolean representing the apiSupported
*/
public setAPISupported(apiSupported: boolean): void {
this.apiSupported = apiSupported;
this.keyModified.set("api_supported", 1);
}
/**
* The method to get the apiName
* @returns A string representing the apiName
*/
public getAPIName(): string {
return this.apiName;
}
/**
* The method to set the value to apiName
* @param apiName A string representing the apiName
*/
public setAPIName(apiName: string): void {
this.apiName = apiName;
this.keyModified.set("api_name", 1);
}
/**
* The method to get the quickCreate
* @returns A boolean representing the quickCreate
*/
public getQuickCreate(): boolean {
return this.quickCreate;
}
/**
* The method to set the value to quickCreate
* @param quickCreate A boolean representing the quickCreate
*/
public setQuickCreate(quickCreate: boolean): void {
this.quickCreate = quickCreate;
this.keyModified.set("quick_create", 1);
}
/**
* The method to get the modifiedBy
* @returns An instance of User
*/
public getModifiedBy(): User {
return this.modifiedBy;
}
/**
* The method to set the value to modifiedBy
* @param modifiedBy An instance of User
*/
public setModifiedBy(modifiedBy: User): void {
this.modifiedBy = modifiedBy;
this.keyModified.set("modified_by", 1);
}
/**
* The method to get the generatedType
* @returns An instance of Choice<string>
*/
public getGeneratedType(): Choice<string> {
return this.generatedType;
}
/**
* The method to set the value to generatedType
* @param generatedType An instance of Choice<string>
*/
public setGeneratedType(generatedType: Choice<string>): void {
this.generatedType = generatedType;
this.keyModified.set("generated_type", 1);
}
/**
* The method to get the feedsRequired
* @returns A boolean representing the feedsRequired
*/
public getFeedsRequired(): boolean {
return this.feedsRequired;
}
/**
* The method to set the value to feedsRequired
* @param feedsRequired A boolean representing the feedsRequired
*/
public setFeedsRequired(feedsRequired: boolean): void {
this.feedsRequired = feedsRequired;
this.keyModified.set("feeds_required", 1);
}
/**
* The method to get the scoringSupported
* @returns A boolean representing the scoringSupported
*/
public getScoringSupported(): boolean {
return this.scoringSupported;
}
/**
* The method to set the value to scoringSupported
* @param scoringSupported A boolean representing the scoringSupported
*/
public setScoringSupported(scoringSupported: boolean): void {
this.scoringSupported = scoringSupported;
this.keyModified.set("scoring_supported", 1);
}
/**
* The method to get the webformSupported
* @returns A boolean representing the webformSupported
*/
public getWebformSupported(): boolean {
return this.webformSupported;
}
/**
* The method to set the value to webformSupported
* @param webformSupported A boolean representing the webformSupported
*/
public setWebformSupported(webformSupported: boolean): void {
this.webformSupported = webformSupported;
this.keyModified.set("webform_supported", 1);
}
/**
* The method to get the arguments
* @returns An Array representing the arguments1
*/
public getArguments(): Array<Argument> {
return this.arguments1;
}
/**
* The method to set the value to arguments
* @param arguments1 An Array representing the arguments1
*/
public setArguments(arguments1: Array<Argument>): void {
this.arguments1 = arguments1;
this.keyModified.set("arguments", 1);
}
/**
* The method to get the moduleName
* @returns A string representing the moduleName
*/
public getModuleName(): string {
return this.moduleName;
}
/**
* The method to set the value to moduleName
* @param moduleName A string representing the moduleName
*/
public setModuleName(moduleName: string): void {
this.moduleName = moduleName;
this.keyModified.set("module_name", 1);
}
/**
* The method to get the businessCardFieldLimit
* @returns A number representing the businessCardFieldLimit
*/
public getBusinessCardFieldLimit(): number {
return this.businessCardFieldLimit;
}
/**
* The method to set the value to businessCardFieldLimit
* @param businessCardFieldLimit A number representing the businessCardFieldLimit
*/
public setBusinessCardFieldLimit(businessCardFieldLimit: number): void {
this.businessCardFieldLimit = businessCardFieldLimit;
this.keyModified.set("business_card_field_limit", 1);
}
/**
* The method to get the customView
* @returns An instance of CustomView
*/
public getCustomView(): CustomView {
return this.customView;
}
/**
* The method to set the value to customView
* @param customView An instance of CustomView
*/
public setCustomView(customView: CustomView): void {
this.customView = customView;
this.keyModified.set("custom_view", 1);
}
/**
* The method to get the parentModule
* @returns An instance of Module
*/
public getParentModule(): Module {
return this.parentModule;
}
/**
* The method to set the value to parentModule
* @param parentModule An instance of Module
*/
public setParentModule(parentModule: Module): void {
this.parentModule = parentModule;
this.keyModified.set("parent_module", 1);
}
/**
* The method to get the territory
* @returns An instance of Territory
*/
public getTerritory(): Territory {
return this.territory;
}
/**
* The method to set the value to territory
* @param territory An instance of Territory
*/
public setTerritory(territory: Territory): void {
this.territory = territory;
this.keyModified.set("territory", 1);
}
/**
* The method to check if the user has modified the given key
* @param key A string representing the key
* @returns A number representing the modification
*/
public isKeyModified(key: string): number | null | undefined {
if(this.keyModified.has(key)) {
return this.keyModified.get(key);
}
return null;
}
/**
* The method to mark the given key as modified
* @param key A string representing the key
* @param modification A number representing the modification
*/
public setKeyModified(key: string, modification: number): void {
this.keyModified.set(key, modification);
}
}
export {
Module as MasterModel,
Module as Module
} | }
|
list_video_tasks.go | package imm
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// ListVideoTasks invokes the imm.ListVideoTasks API synchronously
// api document: https://help.aliyun.com/api/imm/listvideotasks.html
func (client *Client) ListVideoTasks(request *ListVideoTasksRequest) (response *ListVideoTasksResponse, err error) {
response = CreateListVideoTasksResponse()
err = client.DoAction(request, response)
return
}
// ListVideoTasksWithChan invokes the imm.ListVideoTasks API asynchronously
// api document: https://help.aliyun.com/api/imm/listvideotasks.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) ListVideoTasksWithChan(request *ListVideoTasksRequest) (<-chan *ListVideoTasksResponse, <-chan error) {
responseChan := make(chan *ListVideoTasksResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.ListVideoTasks(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
}) | if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
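// A minimal usage sketch (hypothetical values; `client`, `fmt` and `log` are assumed
// to be in scope) showing how the two channels returned above are consumed:
//
//	request := CreateListVideoTasksRequest()
//	request.Project = "example-project"
//	respChan, errChan := client.ListVideoTasksWithChan(request)
//	select {
//	case resp := <-respChan:
//		fmt.Println(resp.RequestId)
//	case err := <-errChan:
//		log.Println(err)
//	}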
// ListVideoTasksWithCallback invokes the imm.ListVideoTasks API asynchronously
// api document: https://help.aliyun.com/api/imm/listvideotasks.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) ListVideoTasksWithCallback(request *ListVideoTasksRequest, callback func(response *ListVideoTasksResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *ListVideoTasksResponse
var err error
defer close(result)
response, err = client.ListVideoTasks(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// ListVideoTasksRequest is the request struct for api ListVideoTasks
type ListVideoTasksRequest struct {
*requests.RpcRequest
MaxKeys requests.Integer `position:"Query" name:"MaxKeys"`
Project string `position:"Query" name:"Project"`
TaskType string `position:"Query" name:"TaskType"`
Marker string `position:"Query" name:"Marker"`
}
// ListVideoTasksResponse is the response struct for api ListVideoTasks
type ListVideoTasksResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
NextMarker string `json:"NextMarker" xml:"NextMarker"`
Tasks []TasksItem `json:"Tasks" xml:"Tasks"`
}
// CreateListVideoTasksRequest creates a request to invoke ListVideoTasks API
func CreateListVideoTasksRequest() (request *ListVideoTasksRequest) {
request = &ListVideoTasksRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("imm", "2017-09-06", "ListVideoTasks", "imm", "openAPI")
return
}
// CreateListVideoTasksResponse creates a response to parse from ListVideoTasks response
func CreateListVideoTasksResponse() (response *ListVideoTasksResponse) {
response = &ListVideoTasksResponse{
BaseResponse: &responses.BaseResponse{},
}
return
} | |
home-currency-usd.js | let data = {
"body": "<path d=\"M12 3l10 9h-3v8H5v-8H2l10-9M9.22 8.93c-.47.47-.72 1.1-.72 1.82c0 1.68 2.04 2.32 3.26 2.71c1.5.47 1.71.75 1.74.79c0 .75-1.35.75-1.5.75c-.63 0-.97-.12-1.14-.22c-.19-.11-.36-.28-.36-.78h-2c0 1.43.74 2.16 1.35 2.5c.33.2.72.34 1.15.42V18h2v-1.09c1.53-.3 2.5-1.29 2.5-2.66c0-1.58-1.62-2.22-3.14-2.7c-1.56-.49-1.83-.78-1.86-.8c0-.25.07-.34.14-.41c.21-.21.72-.34 1.36-.34c.68 0 1.5.13 1.5.75h2c0-1.41-.94-2.38-2.5-2.66V7h-2v1.08c-.74.13-1.35.42-1.78.85z\" fill=\"currentColor\"/>",
"width": 24,
"height": 24
};
export default data; | ||
index.ts | /*
// Copyright 2021 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
*/
import { MetaverseMgr } from "@Modules/metaverse";
import { DomainMgr } from "@Modules/domain";
import { Slot, ConnectionState } from "@vircadia/web-sdk";
import { Store, Actions as StoreActions } from "@Store/index";
import { Metaverse } from "@Modules/metaverse/metaverse";
import { Domain } from "@Modules/domain/domain";
import {
Config, TrueValue, FalseValue, RECONNECT_ON_STARTUP, LAST_DOMAIN_SERVER,
LOG_LEVEL, DEFAULT_METAVERSE_URL
} from "@Base/config";
/* eslint-disable require-atomic-updates */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
import Log from "@Modules/debugging/log";
export const Utility = {
/**
* Default processing for domain-server state change.
*
* This routine is the default Signal processor added to a domain-server's changed state Signal.
* It updates the domain-server's state in Vue's Store which will update the UI.
*/
defaultDomainOps(pDomain: Domain, pConnState: ConnectionState, pInfo: string): void {
Log.debug(Log.types.OTHER, `UTILITY: new domain state: ${pConnState}/${pInfo}`);
// eslint-disable-next-line no-void
void Store.dispatch(StoreActions.UPDATE_DOMAIN, {
domain: pDomain,
newState: pDomain.DomainStateAsString,
info: pInfo
});
},
/**
* Default processing for metaverse-server state change.
*
* This routine is the default Signal processor added to a metaverse-server's changed state Signal.
* It updates the metaverse-server's state in Vue's Store which will update the UI.
*/
defaultMetaverseOps(pMV: Metaverse, pNewState: string): void {
Log.debug(Log.types.OTHER, `UTILITY: new metaverse state: ${pNewState}`);
// eslint-disable-next-line no-void
void Store.dispatch(StoreActions.UPDATE_METAVERSE, {
metaverse: pMV,
newState: pNewState
});
},
/**
* Configuration information is persisted, so restore what we can.
*/
initializeConfig(): void {
Config.initialize();
// Copy the configured log level into the logging subroutines
Log.setLogLevel(Config.getItem(LOG_LEVEL, "debug"));
},
/**
* Connect to the domain on startup.
*
* If we are supposed to connect at startup, do all the connection
* setup stuff so the user is online.
*/
async initialConnectionSetup(pDomainOps?: Slot, pMetaverseOps?: Slot): Promise<void> {
if (Config.getItem(RECONNECT_ON_STARTUP, FalseValue) === TrueValue) {
Log.info(Log.types.METAVERSE, `Doing Reconnect on Startup`);
const lastDomainServer = Config.getItem(LAST_DOMAIN_SERVER, undefined);
if (lastDomainServer) { | }
// if we haven't connected to a metaverse already from a domain reconnect at startup
if (!MetaverseMgr.ActiveMetaverse) {
const metaverseUrl = Config.getItem(DEFAULT_METAVERSE_URL, "");
await Utility.metaverseConnectionSetup(metaverseUrl, pMetaverseOps);
}
},
/**
* Start a connection to a domain-server.
*
* The connection to the domain is started and, if successful, a connection to the back-end
* metaverse-server is setup and initialized.
*
* The state change routines are usually used to start interaction operations.
*
* @param pDomainUrl either just the hostname (default protocol and ports are added) or a fully qualified URL
* @param {OnDomainStateChangeCallback} pDomainOps routine to be called when domain connection state changes
* @param {OnMetaverseStateChangeCallback} pMetaverseOps routine to be called when metaverse connection state changes
*/
async connectionSetup(pDomainUrl: string, pDomainOps?: Slot, pMetaverseOps?: Slot): Promise<void> {
if (pDomainUrl) {
try {
// First ensure we disconnect from any currently active domain.
await this.disconnectActiveDomain();
Log.debug(Log.types.COMM, `connectionSetup: connecting to domain ${pDomainUrl}`);
const domain = await DomainMgr.domainFactory(pDomainUrl, pDomainOps);
DomainMgr.ActiveDomain = domain;
const metaverseUrl = await domain.getMetaverseUrl();
await Utility.metaverseConnectionSetup(metaverseUrl, pMetaverseOps);
} catch (err) {
const errr = <Error>err;
Log.error(Log.types.COMM, `Exception connecting: ${errr.message}`);
}
}
},
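// A hedged usage sketch -- the host below is a placeholder, not a real domain,
// and passing the default ops as the Slot callbacks is an assumption:
//   await Utility.connectionSetup("domain.example.com", Utility.defaultDomainOps, Utility.defaultMetaverseOps);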
/**
* Start a connection to a metaverse-server.
*
* The state change routines are usually used to start interaction operations.
*
* @param pMetaverseUrl either just the hostname (default protocol and ports are added) or a fully qualified URL
* @param {OnMetaverseStateChangeCallback} pMetaverseOps routine to be called when metaverse connection state changes
*/
async metaverseConnectionSetup(pMetaverseUrl: string, pMetaverseOps?: Slot): Promise<void> {
try {
if (pMetaverseUrl) {
Log.debug(Log.types.COMM, `metaverseConnectionSetup: connecting to metaverse ${pMetaverseUrl}`);
const metaverse = await MetaverseMgr.metaverseFactory(pMetaverseUrl, pMetaverseOps);
MetaverseMgr.ActiveMetaverse = metaverse;
}
} catch (err) {
const errr = <Error>err;
Log.error(Log.types.COMM, `Exception connecting to metaverse: ${errr.message}`);
}
},
/**
* End a connection to a domain-server.
*
* If there is currently an active domain setup, this fires the disconnect method on that domain.
*/
async disconnectActiveDomain(): Promise<void> {
if (DomainMgr.ActiveDomain) {
await DomainMgr.ActiveDomain.disconnect();
}
}
}; | await Utility.connectionSetup(lastDomainServer, pDomainOps, pMetaverseOps);
}
} else {
Log.info(Log.types.COMM, `Not performing Reconnect on Startup. See "config"`); |
search_doc.go | // LianDi - LianDi Note, connecting the dots
// Copyright (c) 2020-present, b3log.org
//
// LianDi is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
// See the Mulan PSL v2 for more details.
package model
import (
"strings"
"unicode/utf8"
)
func InitIndex() {
for _, dir := range Conf.Dirs {
go dir.Index()
}
}
var (
// docs maintains all currently mounted documents.
docs []*Doc
)
type Doc struct {
URL string
Path string
Content string
}
type Snippet struct {
Dir *Dir `json:"dir"`
Path string `json:"path"`
Ln int `json:"ln"`
Col int `json:"col"`
Index int `json:"index"`
Content string `json:"content"`
}
func (dir *Dir) RemoveIndexDocDir(dirPath string) {
for i := 0; i < len(docs); i++ {
if dir.URL == docs[i].URL && strings.HasPrefix(docs[i].Path, dirPath) {
docs = append(docs[:i], docs[i+1:]...)
| ath, newDirPath string) {
for _, d := range docs {
if dir.URL == d.URL && strings.HasPrefix(d.Path, dirPath) {
d.Path = strings.Replace(d.Path, dirPath, newDirPath, -1)
}
}
}
func (dir *Dir) MoveIndexDoc(path, newPath string) {
for _, d := range docs {
if dir.URL == d.URL && path == d.Path {
d.Path = newPath
break
}
}
}
func (dir *Dir) RemoveIndexDoc(path string) {
for i, doc := range docs {
if doc.URL == dir.URL && doc.Path == path {
docs = docs[:i+copy(docs[i:], docs[i+1:])]
break
}
}
}
func (dir *Dir) IndexDoc(path, content string) {
doc := &Doc{URL: dir.URL, Path: path, Content: content}
for i, d := range docs {
if doc.URL == d.URL && doc.Path == d.Path {
docs = docs[:i+copy(docs[i:], docs[i+1:])]
break
}
}
docs = append(docs, doc)
}
func Search(keyword string) (ret []*Snippet) {
ret = []*Snippet{}
for _, doc := range docs {
snippets := searchDoc(keyword, doc)
ret = append(ret, snippets...)
}
return
}
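// searchDoc scans doc line by line for a case-insensitive match of keyword and wraps
// the first hit on each matching line in <mark> tags. For example (hypothetical input),
// searching "note" in the line "my note" yields Content "my <mark>note</mark>" with
// 1-based Ln/Col coordinates.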
func searchDoc(keyword string, doc *Doc) (ret []*Snippet) {
lines := strings.Split(doc.Content, "\n")
index := 0
maxPartLen := 32
for idx, line := range lines {
if pos := strings.Index(strings.ToLower(line), strings.ToLower(keyword)); -1 != pos {
var before []rune
var count int
for i := pos; 0 < i; { // trim overly long context before the keyword
r, size := utf8.DecodeLastRuneInString(line[:i])
i -= size
before = append([]rune{r}, before...)
count++
if maxPartLen < count {
break
}
}
highlight := string(before) + "<mark>" + line[pos:pos+len(keyword)] + "</mark>" + line[pos+len(keyword):]
dir := Conf.Dir(doc.URL)
snippet := &Snippet{Dir: dir,
Path: doc.Path,
Ln: idx + 1, Col: pos + 1, Index: index,
Content: highlight}
ret = append(ret, snippet)
index++
}
}
return ret
}
| i--
}
}
}
func (dir *Dir) MoveIndexDocsDir(dirP |
home.component.ts | import { Component, OnInit } from '@angular/core';
import { BreakpointObserver, Breakpoints } from '@angular/cdk/layout';
import { Observable } from 'rxjs';
import { map, shareReplay } from 'rxjs/operators';
import {
NgbCarousel,
NgbSlideEvent,
NgbSlideEventSource,
NgbCarouselConfig,
} from '@ng-bootstrap/ng-bootstrap';
@Component({
selector: 'app-home',
templateUrl: './home.component.html',
styleUrls: ['./home.component.scss'],
})
export class HomeComponent implements OnInit {
isHandset$: Observable<boolean> = this.breakpointObserver
.observe(Breakpoints.Handset)
.pipe(
map((result) => result.matches),
shareReplay()
);
showNavigationArrows = false;
showNavigationIndicators = true;
piabout = `It is my pleasure to introduce one of the most esteemed clubs of NIT
Patna, the Robotics Club. Formed in the year 2015 by a bunch of not
more than 8 students with the vision of augmenting innovation and
technical culture in the college. In this short span of time,
Robotics Club managed to achieve an identity among college clubs at
State-level. With its members ranging from coders to designers to
hardware enthusiasts, they have been conducting an array of events
around the year, thus enhancing the students' technical skills and
making them aware of the fascinating world of automation, robotics,
and electronics surrounding them. They are also promoting robotics
among students of other nearby schools and colleges so that those
students who lack resources also get equal opportunity to explore
this fascinating domain. Members of this club have represented our
college in several inter-college technical competitions, ideathons,
and hackathons. The club has done a range of projects on IoT,
drones, 3D designing, Bot fabrication, etc. Presently, members are
actively engaged in working on a number of advanced projects like an
AI-based attendance system and an automated bot for restaurants.
Events like Robowars and Tiger's Nest draw huge participation and
audiences. The club has been pretty successful in creating an
atmosphere in the campus where people with different ideas could
collaborate together to give shape to their ideas. I hope that the
club continues doing so and keep making our college proud.`;
aboutus = `Robotics club has been the embodiment of innovation at NIT Patna. Formed in the year 2015-16 by Nripendra Saroj Sir, our club boasts a glorious legacy. With members ranging from seasoned coders to brilliant designers to hardware scientists, our members have also represented our college at the Smart India Hackathon, headed by our current Captain Dheeraj Kumar Sir. We have been constantly working to enhance the students' technical skills and to make them aware of the fascinating world of automation, robotics and electronics which surrounds us. Boasting a huge roster of exciting and engaging events, this year's techno-cultural fest was nothing less than a rollercoaster ride for us.`;
whatwedo = `Do robots fascinate you, and have you ever dreamt of building one
for yourself? The Robotics club NIT Patna is the place! The Robotics
club at NIT Patna strives to stimulate interest in robotics among
the students of the institute. Today robotics is a rapidly growing
field; as technological advances continue, researching, designing, and
building new robots serves various practical purposes. Robotics is a
field which is not limited to a particular branch and is way more
diverse than our own imagination, and so is our student club. We are a
diverse group of robotics nerds who find roots across all academic
departments of the Institute. We at Robotics club believe in working
and growing together. You can learn more about us here. #Hail
Robotics.`;
events = [
{
title: `Arduino workshop`,
url: `assets/img/arduino.JPG`,
description: `Robotics club NIT-Patna conducted a two days Arduino workshop
where all about Arduino, Bluetooth interfacing, and Bot
interfacing were discussed. The major highlight of this
workshop was that free kits were provided to the
participants, using which they learned how to control bots
using Arduino. A demonstration of how to assemble the bot
(using the components provided in the kit ) was given.
Participants had hands-on experience on tasks like LED
Blinking, buzzers, DC motor interfacing with Arduino,
Bluetooth modules and pairing, autonomous bot assembling,
wireless LED, building an obstacle detecting bot using IR
sensors, a line follower bot, and wirelessly controlled bot
using Android phone. After completion of the workshop,
certificates were provided to all the participants.`,
},
{
title: `3D printing Workshop`,
url: `assets/img/3dprinting.jpg`,
description: `Robotics club, NIT Patna conducted a two days Workshop on 3D
printing. In the workshop basics of 3D designing were
discussed. The participants gained a sound knowledge about the
parts, principles, and working of 3D printing. They were given
access to use the 3D printer of our club to gather some ideas
about how to operate it, what are its specific features. It
was a great hands-on experience of creating stuff like
Robotics arm, gear, etc for all those who participated.`,
},
{
title: `ROBOWARS`,
url: `assets/img/robowars.jpg`,
description: `We conducted Robowars, which was a big hit. Students of different schools and
colleges from Patna participated. Robowars is probably among the most awaited events because
of the thrill and excitement it provides. We have all grown up watching Transformers, and it gives quite a
similar feel. Robowars has a history of attracting great audiences from different colleges over the years, and its prize amount has always been the highest. Last year, it was won by a team from NIT Silchar.`,
},
{
title: `CHAKRAVYUH`,
url: `assets/img/chakravyuh.jpg`,
description: `We had a maze-solving event based on the backdrop of Abhimanyu's chakravyuh from the Mahabharata. The bot played Abhimanyu and had to reach the centre of the maze (or chakravyuh) using its weapons, i.e. the codes in this case. It attracted a large audience, especially from the programming background, thanks in part to its interesting backdrop.`,
},
{
title: `THE LION’S KINGDOM `,
url: `assets/img/lionkingdom.jpg`,
description: `In Lion's Kingdom, participants had to traverse their bots through a zig-zag path, overcoming the obstacles. The path had holes which were meant to be filled using cubes placed along the path. This event saw maximum participation because of its straightforward demands and facile nature.`,
},
{
title: 'PCB designing',
url: 'assets/img/pcb.jpg',
description: `Printed Circuit Boards are an essential part of electronic assemblies, providing mechanical support as well as electrical connections. We can't rely on a breadboard for a substantially increased number of components or for the smaller package sizes of integrated circuits.
Robotics club, NIT Patna organized a workshop on PCB designing. This workshop focused on designing various kinds of electronic circuits and their use in PCB design. Design software, reference materials, PPTs, example sets and study materials were the tools provided for the session. Concepts like circuit designing, simulation, PCB design structure, component placement and design, testing, board analysis, and fabrication output generation were discussed.`,
},
];
constructor(
private breakpointObserver: BreakpointObserver,
config: NgbCarouselConfig
) {
config.interval = 3000;
config.wrap = true;
config.keyboard = true;
config.pauseOnHover = false;
config.showNavigationArrows = false; | } | config.showNavigationIndicators = true;
}
ngOnInit() {} |
profil.service.ts | import { FilierEntity } from "./../model/filiere.entity";
import { getRepository, getConnection } from "typeorm";
import { ProfilEntity } from "./../model/profil.entity";
import { ProfilDto } from "./../dto/profil.dto";
import { ProfilDao } from "./../dao/profil.dao";
import { HttpException, HttpStatus, Injectable } from "@nestjs/common";
import { FiliereService } from "./filiere.service";
import { ProfesseurService } from "./professeur.service";
import { ProfesseurEntity } from "../model/professeur.entity";
import { EtudaintService } from "./etudiant.service";
import { AddFiliereProfesseurDto } from "../dto/AddFiliereProfesseurDto";
@Injectable()
export class ProfilService {
constructor(
private profilDao: ProfilDao,
private filiereService: FiliereService,
private profService: ProfesseurService,
private etudiantService: EtudaintService
) {
}
async saveProfile(profilDto: ProfilDto): Promise<ProfilEntity> {
const profileFound = await this.loadByLib(profilDto.libelle);
if (profileFound) {
throw new HttpException(`this branch '${profilDto.libelle}' already exists`, HttpStatus.NOT_FOUND);
}
const filieres = await Promise.all(
profilDto.filieres.map(nom => this.filiereService.findByNom(nom, true))
);
const professeurs = await Promise.all(
profilDto.professeurs.map(email => this.profService.getByEmail(email))
);
console.log(professeurs);
console.log(filieres);
if (!filieres.includes(undefined) && !professeurs.includes(undefined)) {
const profil = new ProfilEntity();
profil.libelle = profilDto.libelle;
profil.open = profilDto.open;
profil.filieres = filieres;
profil.professeurs = professeurs;
console.log(profil);
return await getRepository(ProfilEntity).save(profil);
} else {
throw new HttpException(`Element not found `, HttpStatus.NOT_FOUND);
}
}
async updateProfile(id: number, profilDto: ProfilDto): Promise<ProfilEntity> {
const profileFound = await this.findById(id);
if (!profileFound) {
throw new HttpException(`this branch '${profilDto.libelle}' not found`, HttpStatus.NOT_FOUND);
}
// find all fil by nom
// const filieres = await Promise.all(
// profilDto.filieres.map(nom => this.filiereService.findByNom(nom, true))
// );
// const professeurs = await Promise.all(
// profilDto.professeurs.map(email => this.profService.getByEmail(email))
// );
const {
found_filieres,
found_professeurs
} = await this.getFilAndProf(profilDto.filieres, profilDto.professeurs);
if (!found_filieres.includes(undefined) && !found_professeurs.includes(undefined)) {
profileFound.libelle = profilDto.libelle;
profileFound.open = profilDto.open;
profileFound.filieres = found_filieres;
profileFound.professeurs = found_professeurs;
console.log(profileFound);
return await getRepository(ProfilEntity).save(profileFound);
} else {
throw new HttpException(`Element not found `, HttpStatus.NOT_FOUND);
}
}
async findAll(): Promise<ProfilEntity[]> {
return await this.profilDao.find({
relations: ["filieres", "professeurs"]
});
}
async findById(id: number): Promise<ProfilEntity> {
return await this.profilDao.findOne(id, { relations: ["filieres", "professeurs"] });
}
async deleteById(id: number): Promise<Partial<ProfilEntity>> {
const profile = await this.profilDao.findOne(id, { relations: ["filieres", "professeurs"] });
if (!profile) {
throw new HttpException(`profile not found ${id}`, HttpStatus.NOT_FOUND);
}
await getConnection()
.createQueryBuilder()
.delete()
.from(ProfilEntity)
.where("id = :id", { id: id })
.execute();
return profile;
}
async loadByLib(libelle: string, throwException: boolean = false): Promise<ProfilEntity> {
const foundProfil = await getRepository(ProfilEntity).findOne({ libelle }, { relations: ["filieres", "professeurs"] });
if (throwException) {
if (!foundProfil) {
throw new HttpException(`this branch '${libelle}' not found`, HttpStatus.BAD_REQUEST);
} | async isUserInProfile(userId: number, profileId: number): Promise<boolean> {
const profile = await this.findById(profileId);
// console.log("Profs", profile.professeurs);
let found = false;
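// Note: `return` inside forEach only exits the current callback, not the loop,
// so the `found` flag is what actually carries the result out.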
profile.professeurs.forEach(p => {
if (p.id == userId) {
found = true;
return;
}
});
if (found) return found;
profile.autres.forEach(a => {
if (a.id == userId) {
found = true;
return;
}
});
if (found) return found;
const etudiant = await this.etudiantService.getById(userId);
if (etudiant) {
return await this.isFiliereInProfile(etudiant.filiere, profileId);
}
return found;
}
async isProfesseurInProfile(prof: ProfesseurEntity, profileId: number): Promise<boolean> {
const profile = await this.findById(profileId);
// console.log("Profs", profile.professeurs);
let found = false;
profile.professeurs.forEach(p => {
if (p.id == prof.id) {
found = true;
return;
}
});
return found;
}
async isFiliereInProfile(filiere: FilierEntity, profileId: number): Promise<boolean> {
const profile = await this.findById(profileId);
// console.log("Profs", profile.professeurs);
let found = false;
profile.filieres.forEach(f => {
if (f.id == filiere.id) {
found = true;
return;
}
});
return found;
}
async addFiliereAndProfToProfile(id: number, addFilProflDto: AddFiliereProfesseurDto) {
const profileFound = await this.findById(id);
if (!profileFound) {
throw new HttpException(`this branch not found`, HttpStatus.NOT_FOUND);
}
const {
found_filieres,
found_professeurs
} = await this.getFilAndProf(addFilProflDto.filieres, addFilProflDto.professeurs);
console.log(found_filieres);
console.log(found_professeurs);
return null;
}
async getFilAndProf(filieres: string[], professeurs: string[]) {
const found_filieres = await Promise.all(
filieres.map(nom => this.filiereService.findByNom(nom, true))
);
//find all prof by email
const found_professeurs = await Promise.all(
professeurs.map(email => this.profService.getByEmail(email))
);
return { found_filieres, found_professeurs };
}
} | }
return foundProfil;
}
|
app.py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
from ckuser import client,server
import os
def print_client_menu():
print("用户菜单:")
print("-"*25)
print("0"+"-"*10+"显示用户菜单"+"-"*10)
print("1"+"-"*10+"显示服务菜单"+"-"*10)
print("2"+"-"*10+"用户登录系统"+"-"*10)
print("3"+"-"*10+"用户修改信息"+"-"*10)
print("4"+"-"*10+"用户注册信息"+"-"*10)
print("6"+"-"*10+"退出系统")
def print_server_menu():
print("服务菜单:")
print("-"*25)
print("0"+"-"*10+"显示用户菜 | ("1"+"-"*10+"显示服务菜单"+"-"*10)
print("2"+"-"*10+"添加用户帐号"+"-"*10)
print("3"+"-"*10+"删除用户帐号"+"-"*10)
print("4"+"-"*10+"修改用户帐号"+"-"*10)
print("5"+"-"*10+"查找用户帐号"+"-"*10)
print("6"+"-"*10+"退出系统")
def server_oper():
print_server_menu()
while True:
try:
i = int(input("请输入操作符:"))
if i == 0:
os.system("clear")
break
elif i == 1:
os.system("clear")
print_server_menu()
elif i == 2:
server.user_add()
elif i == 3:
server.user_del()
elif i == 4:
server.user_update()
elif i == 5:
server.user_find()
elif i == 6:
os.system("clear")
exit()
except Exception as msg:
os.system("clear")
print_server_menu()
print("输入错误!")
client_oper()
def client_oper():
print_client_menu()
while True:
try:
i = int(input("请输入操作符:"))
if i == 0:
os.system("clear")
print_client_menu()
elif i == 1:
os.system("clear")
break
elif i == 2:
client.login()
elif i == 3:
client.update()
elif i == 4:
client.register()
elif i == 6:
os.system("clear")
exit()
else:
os.system("clear")
print_client_menu()
print("输入错误!")
except Exception:
os.system("clear")
print_client_menu()
print("输入错误!")
server_oper()
def main():
# server.user_update()
client_oper()
if __name__ == '__main__':
main()
| 单"+"-"*10)
print |
SHMExample.py | # -------------------------------------------------------------------- #
# This example was designed to show the project-level optimization
# option in GIAMS. This example was used in the original paper as well
# -------------------------------------------------------------------- #
import time
import ast
from Network import IndianaNetwork
from LifeCycleAnalyzer.Simulators import MainSimulator
from LifeCycleAnalyzer import LCA
from Optimizer import HillClimbing
from Optimizer import BruteForce
from Optimizer import GA
from Optimizer import IUC
from Optimizer import PSO
from utils.PredictiveModels.Linear import Linear
from utils.AwesomeTimeIt import timeit
from utils.GeneralSettings import *
class GeneralSettings:
n_elements = 1
n_states = 8
dt = 2
horizon = 20
discount_rate = 0.03
init_year = 0
n_steps = int(horizon/dt)
def lca_instance():
# Creating the settings instance
settings = GeneralSettings()
# Creating the network
session_name = 'IndianaSHM'
mynetwork = IndianaNetwork(file_name = "INDIANA2019",
settings = settings,
n_assets = 1,
is_deck = False,
is_superstructure = True,
is_substructure = False)
mynetwork.load_network()
mynetwork.set_current_budget_limit(100000)
mynetwork.set_budget_limit_model(Linear(X0 = 100000, drift = 0, settings = settings))
mynetwork.set_npv_budget_limit(10000)
# Creating the simulator
simulator = MainSimulator(settings = settings)
# shaping the main LCA
lca = LCA(lca_name = session_name,
settings = settings,
network = mynetwork,
simulator = simulator,
random = True,
is_hazard = True,
n_simulations = 10,
should_report = True)
return lca
def obj_func(**kwargs):
return kwargs['Utility'] / kwargs['UserCost'] ** 0.2
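# The exponent 0.2 sketches a sub-linear penalty on user cost: doubling the cost
# divides the objective by only 2 ** 0.2 (about 1.15) rather than by 2.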
def GA_test():
optimizer = GA(lca_instance)
optimizer.set_hyperparameters(crossver_prob = 0.75, | population_size = 200,
n_generations = 200,
n_elites = 5,
optimzition_type = 'max',
n_jobs = 1)
# optimizer.optimize(rounds = 3)
optimizer.validate()
if __name__ == "__main__":
GA_test() | mutation_prob = 0.03,
extension_feed_item_service.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v4.resources.types import extension_feed_item
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.rpc import status_pb2 as status # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.services",
marshal="google.ads.googleads.v4",
manifest={
"GetExtensionFeedItemRequest",
"MutateExtensionFeedItemsRequest",
"ExtensionFeedItemOperation",
"MutateExtensionFeedItemsResponse",
"MutateExtensionFeedItemResult",
},
)
class GetExtensionFeedItemRequest(proto.Message):
r"""Request message for
[ExtensionFeedItemService.GetExtensionFeedItem][google.ads.googleads.v4.services.ExtensionFeedItemService.GetExtensionFeedItem].
Attributes:
resource_name (str):
Required. The resource name of the extension
feed item to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1)
class MutateExtensionFeedItemsRequest(proto.Message):
r"""Request message for
[ExtensionFeedItemService.MutateExtensionFeedItems][google.ads.googleads.v4.services.ExtensionFeedItemService.MutateExtensionFeedItems].
Attributes:
customer_id (str):
Required. The ID of the customer whose
extension feed items are being modified.
operations (Sequence[google.ads.googleads.v4.services.types.ExtensionFeedItemOperation]):
Required. The list of operations to perform
on individual extension feed items.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
"""
customer_id = proto.Field(proto.STRING, number=1)
operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="ExtensionFeedItemOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3)
validate_only = proto.Field(proto.BOOL, number=4)
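# A hedged construction sketch (field values are placeholders); proto-plus
# messages accept keyword arguments:
#   request = MutateExtensionFeedItemsRequest(
#       customer_id="1234567890",
#       operations=[ExtensionFeedItemOperation(remove="customers/1234567890/extensionFeedItems/1")],
#       validate_only=True,
#   )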
class | (proto.Message):
r"""A single operation (create, update, remove) on an extension
feed item.
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v4.resources.types.ExtensionFeedItem):
Create operation: No resource name is
expected for the new extension feed item.
update (google.ads.googleads.v4.resources.types.ExtensionFeedItem):
Update operation: The extension feed item is
expected to have a valid resource name.
remove (str):
Remove operation: A resource name for the removed extension
feed item is expected, in this format:
``customers/{customer_id}/extensionFeedItems/{feed_item_id}``
"""
update_mask = proto.Field(
proto.MESSAGE, number=4, message=field_mask.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=extension_feed_item.ExtensionFeedItem,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof="operation",
message=extension_feed_item.ExtensionFeedItem,
)
remove = proto.Field(proto.STRING, number=3, oneof="operation")
class MutateExtensionFeedItemsResponse(proto.Message):
r"""Response message for an extension feed item mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v4.services.types.MutateExtensionFeedItemResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateExtensionFeedItemResult",
)
class MutateExtensionFeedItemResult(proto.Message):
r"""The result for the extension feed item mutate.
Attributes:
resource_name (str):
Returned for successful operations.
"""
resource_name = proto.Field(proto.STRING, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
| ExtensionFeedItemOperation |
mod.rs | mod encoding;
pub use self::encoding::Encode;
mod features;
pub use self::features::{
Ext,
FeatureSet | }; | |
config.py | from typing import List, Tuple, Optional
from pydantic import Field, HttpUrl, BaseModel
class Intents(BaseModel):
guilds: bool = True
guild_members: bool = True
guild_messages: bool = False
guild_message_reactions: bool = True
direct_message: bool = False
message_audit: bool = False
forum_event: bool = False
audio_action: bool = False
at_messages: bool = True
def to_int(self):
return (
self.guilds << 0
| self.guild_members << 1
| self.guild_messages << 9
| self.guild_message_reactions << 10
| self.direct_message << 12
| self.message_audit << 27
| self.forum_event << 28
| self.audio_action << 29
| self.at_messages << 30
)
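# For example, the defaults above (guilds, guild_members, guild_message_reactions,
# at_messages) yield (1 << 0) | (1 << 1) | (1 << 10) | (1 << 30).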
class BotInfo(BaseModel):
id: str = Field(alias="id") | shard: Optional[Tuple[int, int]] = None
intent: Intents = Field(default_factory=Intents)
class Config(BaseModel):
qqguild_is_sandbox: bool = False
qqguild_api_base: HttpUrl = Field("https://api.sgroup.qq.com/")
qqguild_sandbox_api_base: HttpUrl = Field("https://sandbox.api.sgroup.qq.com")
qqguild_bots: List[BotInfo] = Field(default_factory=list)
class Config:
extra = "ignore" | token: str = Field(alias="token")
secret: str = Field(alias="secret") |
server.rs | use std::fmt;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::{Duration, Instant};
use futures::Future;
use http;
use hyper;
use indexmap::IndexSet;
use tokio_core::reactor::Handle;
use tower::NewService;
use tower_h2;
use conduit_proxy_controller_grpc::common;
use connection::Connection;
use ctx::Proxy as ProxyCtx;
use ctx::transport::{Server as ServerCtx};
use drain;
use telemetry::Sensors;
use transport::GetOriginalDst;
use super::glue::{HttpBody, HttpBodyNewSvc, HyperServerSvc};
use super::protocol::Protocol;
use super::tcp;
/// A protocol-transparent Server!
///
/// This type can `serve` new connections, determine what protocol
/// the connection is speaking, and route it to the corresponding
/// service.
pub struct Server<S, B, G>
where
S: NewService<Request=http::Request<HttpBody>>,
S::Future: 'static,
B: tower_h2::Body,
{
disable_protocol_detection_ports: IndexSet<u16>,
drain_signal: drain::Watch,
executor: Handle,
get_orig_dst: G,
h1: hyper::server::Http,
h2: tower_h2::Server<HttpBodyNewSvc<S>, Handle, B>,
listen_addr: SocketAddr,
new_service: S,
proxy_ctx: Arc<ProxyCtx>,
sensors: Sensors,
tcp: tcp::Proxy,
}
impl<S, B, G> Server<S, B, G>
where
S: NewService<
Request = http::Request<HttpBody>,
Response = http::Response<B>
> + Clone + 'static,
S::Future: 'static,
S::Error: fmt::Debug,
S::InitError: fmt::Debug,
B: tower_h2::Body + 'static,
G: GetOriginalDst,
{
/// Creates a new `Server`.
pub fn new(
listen_addr: SocketAddr,
proxy_ctx: Arc<ProxyCtx>,
sensors: Sensors,
get_orig_dst: G,
stack: S,
tcp_connect_timeout: Duration,
disable_protocol_detection_ports: IndexSet<u16>,
drain_signal: drain::Watch,
executor: Handle,
) -> Self {
let recv_body_svc = HttpBodyNewSvc::new(stack.clone());
let tcp = tcp::Proxy::new(tcp_connect_timeout, sensors.clone(), &executor);
Server {
disable_protocol_detection_ports,
drain_signal,
executor: executor.clone(),
get_orig_dst,
h1: hyper::server::Http::new(),
h2: tower_h2::Server::new(recv_body_svc, Default::default(), executor),
listen_addr,
new_service: stack,
proxy_ctx,
sensors,
tcp,
}
}
/// Handle a new connection.
///
/// This will peek on the connection for the first bytes to determine
/// what protocol the connection is speaking. From there, the connection
/// will be mapped into respective services, and spawned into an
/// executor.
pub fn serve(&self, connection: Connection, remote_addr: SocketAddr) |
}
// These newtypes act as a form of keyword arguments.
//
// It should be easier to notice when wrapping `LocalAddr(remote_addr)` at
// the call site, then simply passing multiple socket addr arguments.
struct LocalAddr<'a>(&'a SocketAddr);
struct RemoteAddr<'a>(&'a SocketAddr);
struct OrigDst<'a>(&'a Option<SocketAddr>);
fn tcp_serve(
tcp: &tcp::Proxy,
connection: Connection,
drain_signal: drain::Watch,
sensors: &Sensors,
opened_at: Instant,
proxy_ctx: &Arc<ProxyCtx>,
local_addr: LocalAddr,
remote_addr: RemoteAddr,
orig_dst: OrigDst,
) -> Box<Future<Item=(), Error=()>> {
let srv_ctx = ServerCtx::new(
proxy_ctx,
local_addr.0,
remote_addr.0,
orig_dst.0,
common::Protocol::Tcp,
);
// record telemetry
let tcp_in = sensors.accept(connection, opened_at, &srv_ctx);
let fut = tcp.serve(tcp_in, srv_ctx);
// There's nothing to do when drain is signaled, we just have to hope
// the sockets finish soon. However, the drain signal still needs to
// 'watch' the TCP future so that the process doesn't close early.
Box::new(drain_signal.watch(fut, |_| ()))
}
| {
let opened_at = Instant::now();
// create Server context
let orig_dst = connection.original_dst_addr(&self.get_orig_dst);
let local_addr = connection.local_addr().unwrap_or(self.listen_addr);
// We are using the port from the connection's SO_ORIGINAL_DST to
// determine whether to skip protocol detection, not any port that
// would be found after doing discovery.
let disable_protocol_detection = orig_dst
.map(|addr| {
self.disable_protocol_detection_ports.contains(&addr.port())
})
.unwrap_or(false);
if disable_protocol_detection {
trace!("protocol detection disabled for {:?}", orig_dst);
let fut = tcp_serve(
&self.tcp,
connection,
self.drain_signal.clone(),
&self.sensors,
opened_at,
&self.proxy_ctx,
LocalAddr(&local_addr),
RemoteAddr(&remote_addr),
OrigDst(&orig_dst),
);
self.executor.spawn(fut);
return;
}
// try to sniff protocol
let proxy_ctx = self.proxy_ctx.clone();
let sniff = [0u8; 32];
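// Peek at up to 32 bytes without consuming them; `Protocol::detect` below
// inspects this prefix (presumably e.g. an HTTP/1 request line or the HTTP/2
// connection preface) to decide which server should handle the connection.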
let sensors = self.sensors.clone();
let h1 = self.h1.clone();
let h2 = self.h2.clone();
let tcp = self.tcp.clone();
let new_service = self.new_service.clone();
let drain_signal = self.drain_signal.clone();
let fut = connection
.peek_future(sniff)
.map_err(|_| ())
.and_then(move |(connection, sniff, n)| -> Box<Future<Item=(), Error=()>> {
if let Some(proto) = Protocol::detect(&sniff[..n]) {
let srv_ctx = ServerCtx::new(
&proxy_ctx,
&local_addr,
&remote_addr,
&orig_dst,
common::Protocol::Http,
);
// record telemetry
let io = sensors.accept(connection, opened_at, &srv_ctx);
match proto {
Protocol::Http1 => {
trace!("transparency detected HTTP/1");
Box::new(new_service.new_service()
.map_err(|_| ())
.and_then(move |s| {
let svc = HyperServerSvc::new(s, srv_ctx);
drain_signal
.watch(h1.serve_connection(io, svc), |conn| {
conn.disable_keep_alive();
})
.map(|_| ())
.map_err(|e| trace!("http1 server error: {:?}", e))
}))
},
Protocol::Http2 => {
trace!("transparency detected HTTP/2");
let set_ctx = move |request: &mut http::Request<()>| {
request.extensions_mut().insert(srv_ctx.clone());
};
let fut = drain_signal
.watch(h2.serve_modified(io, set_ctx), |conn| {
conn.graceful_shutdown();
})
.map_err(|e| trace!("h2 server error: {:?}", e));
Box::new(fut)
}
}
} else {
trace!("transparency did not detect protocol, treating as TCP");
tcp_serve(
&tcp,
connection,
drain_signal,
&sensors,
opened_at,
&proxy_ctx,
LocalAddr(&local_addr),
RemoteAddr(&remote_addr),
OrigDst(&orig_dst),
)
}
});
self.executor.spawn(fut);
} |
mock_iam.go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/keikoproj/iam-manager/pkg/awsapi (interfaces: IAMIface)
// Package mock_awsapi is a generated GoMock package.
package mock_awsapi
import (
context "context" |
// MockIAMIface is a mock of IAMIface interface
type MockIAMIface struct {
ctrl *gomock.Controller
recorder *MockIAMIfaceMockRecorder
}
// MockIAMIfaceMockRecorder is the mock recorder for MockIAMIface
type MockIAMIfaceMockRecorder struct {
mock *MockIAMIface
}
// NewMockIAMIface creates a new mock instance
func NewMockIAMIface(ctrl *gomock.Controller) *MockIAMIface {
mock := &MockIAMIface{ctrl: ctrl}
mock.recorder = &MockIAMIfaceMockRecorder{mock}
return mock
}
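// A hedged test sketch (the *testing.T value and the expectation are hypothetical):
//
//	ctrl := gomock.NewController(t)
//	defer ctrl.Finish()
//	mock := NewMockIAMIface(ctrl)
//	mock.EXPECT().CreateRole(gomock.Any(), gomock.Any())
//	mock.CreateRole(context.Background(), awsapi.IAMRoleRequest{})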
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockIAMIface) EXPECT() *MockIAMIfaceMockRecorder {
return m.recorder
}
// AddPermissionBoundary mocks base method
func (m *MockIAMIface) AddPermissionBoundary(arg0 context.Context, arg1 awsapi.IAMRoleRequest) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "AddPermissionBoundary", arg0, arg1)
}
// AddPermissionBoundary indicates an expected call of AddPermissionBoundary
func (mr *MockIAMIfaceMockRecorder) AddPermissionBoundary(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPermissionBoundary", reflect.TypeOf((*MockIAMIface)(nil).AddPermissionBoundary), arg0, arg1)
}
// AttachInlineRolePolicy mocks base method
func (m *MockIAMIface) AttachInlineRolePolicy(arg0 context.Context, arg1 awsapi.IAMRoleRequest) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "AttachInlineRolePolicy", arg0, arg1)
}
// AttachInlineRolePolicy indicates an expected call of AttachInlineRolePolicy
func (mr *MockIAMIfaceMockRecorder) AttachInlineRolePolicy(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachInlineRolePolicy", reflect.TypeOf((*MockIAMIface)(nil).AttachInlineRolePolicy), arg0, arg1)
}
// CreateRole mocks base method
func (m *MockIAMIface) CreateRole(arg0 context.Context, arg1 awsapi.IAMRoleRequest) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "CreateRole", arg0, arg1)
}
// CreateRole indicates an expected call of CreateRole
func (mr *MockIAMIfaceMockRecorder) CreateRole(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRole", reflect.TypeOf((*MockIAMIface)(nil).CreateRole), arg0, arg1)
}
// DeleteRole mocks base method
func (m *MockIAMIface) DeleteRole(arg0 context.Context, arg1 string) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "DeleteRole", arg0, arg1)
}
// DeleteRole indicates an expected call of DeleteRole
func (mr *MockIAMIfaceMockRecorder) DeleteRole(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRole", reflect.TypeOf((*MockIAMIface)(nil).DeleteRole), arg0, arg1)
}
// GetRolePolicy mocks base method
func (m *MockIAMIface) GetRolePolicy(arg0 context.Context, arg1 awsapi.IAMRoleRequest) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetRolePolicy", arg0, arg1)
ret0, _ := ret[0].(bool)
return ret0
}
// GetRolePolicy indicates an expected call of GetRolePolicy
func (mr *MockIAMIfaceMockRecorder) GetRolePolicy(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRolePolicy", reflect.TypeOf((*MockIAMIface)(nil).GetRolePolicy), arg0, arg1)
}
// UpdateRole mocks base method
func (m *MockIAMIface) UpdateRole(arg0 context.Context, arg1 awsapi.IAMRoleRequest) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "UpdateRole", arg0, arg1)
}
// UpdateRole indicates an expected call of UpdateRole
func (mr *MockIAMIfaceMockRecorder) UpdateRole(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRole", reflect.TypeOf((*MockIAMIface)(nil).UpdateRole), arg0, arg1)
} | gomock "github.com/golang/mock/gomock"
awsapi "github.com/keikoproj/iam-manager/pkg/awsapi"
reflect "reflect"
) |
fuse_grouped_conv.py | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
from mo.graph.graph import Node, Graph
from mo.middle.passes.fusing.helpers import get_next_operation
# TODO: unit tests
def concat_convolutions(graph: Graph, start_node: Node, last_node: Node):
"""
This function converts group of convolutions into one
"""
# Check that concatenation makes in the same order
conv_nodes = get_next_operation(start_node)
assert len(conv_nodes) == len(last_node.in_nodes())
gconv = conv_nodes[0]
for id in range(len(conv_nodes)):
conv = conv_nodes[id]
if conv.out_node().id != last_node.in_node(id).id:
return False
# Check that all convolutions have same weights shapes
if not np.array_equal(conv.in_node(1).shape, gconv.in_node(1).shape):
log.debug('Grouped convolutions fusion : convolutions have different weights shape')
return False
# Check that split and concat dims are valid
channel_dim = gconv.channel_dims[0]
split_axis = start_node.in_port(1).data.get_value()
if channel_dim != split_axis or channel_dim != last_node.axis:
log.debug('Grouped convolutions fusion : split or concat has weird axis!')
return False
# Check that all convolutions has the same parameters
conv_attrs = ['pad', 'stride']
for attr in conv_attrs:
for id in range(len(conv_nodes)):
conv = conv_nodes[id]
if not np.array_equal(gconv[attr], conv[attr]):
log.debug('Grouped convolutions fusion : attrs {} doesn\'t match'.format(attr))
return False
# Check that all Convolutions has biases (if exists)
has_biases = False
for id in range(len(conv_nodes)):
conv = conv_nodes[id]
if len(conv.in_nodes()) == 3:
if not has_biases:
has_biases = True
elif has_biases:
return False  # All convolutions must have biases
# Check that all biases have same shape
if has_biases:
for id in range(len(conv_nodes)):
conv = conv_nodes[id]
if not np.array_equal(conv.in_node(2).shape, gconv.in_node(2).shape):
log.debug('Group convolutions fusion : convolutions have different biases shape {} and {}'.format(
conv.in_node(2).shape, gconv.in_node(2).shape))
return False
graph.remove_edge(gconv.in_node(0).id, gconv.id)
graph.remove_edge(gconv.id, gconv.out_node().id)
input = start_node.in_node(0)
output = last_node.out_node()
# Removing edges from data nodes to Split and Concat
graph.remove_edge(input.id, start_node.id)
graph.remove_edge(last_node.id, output.id)
# Add edges to grouped convolution
graph.add_edges_from([
(input.id, gconv.id, {'in': 0}),
(gconv.id, output.id, {'out': 0})
])
# Concatenation of convolutions
weights_node = gconv.in_node(1)
bias_node = gconv.in_node(2) if has_biases else None
weights_value = np.array(weights_node.value)
bias_value = np.array(bias_node.value) if has_biases else None
feature_dim = 3 if graph.graph['layout'] == 'NHWC' else 0
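# Assumption based on the layout check above: NHWC stores weights with output
# channels on axis 3, while the NCHW-style layout keeps them on axis 0, so
# concatenating along feature_dim stacks the per-group output filters.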
for conv in conv_nodes[1:]:
weights_value = np.concatenate((weights_value, conv.in_node(1).value), axis=feature_dim)
if has_biases:
bias_value = np.concatenate((bias_value, conv.in_node(2).value), axis=-1) # Not validated
weights_node.value = np.array(weights_value)
weights_node.shape = np.array(weights_value.shape)
if has_biases:
bias_node.value = np.array(bias_value)
bias_node.shape = np.array(bias_value.shape)
log.debug('Start node : {} Last node : {} Nodes inside : {}'.format(start_node.id, last_node.id,
len(start_node.out_nodes())))
log.debug('Output shape : {}'.format(weights_value.shape))
gconv.group = len(conv_nodes)
gconv.output = weights_node.shape[feature_dim]
gconv.output_shape[feature_dim] = weights_node.shape[feature_dim]
return True
# TODO: unit tests
def | (graph: Graph):
while True:
is_fused = False
graph.clean_up()
for node in graph.pseudo_topological_sort():
if node.kind == 'op' and len(node.out_nodes()) > 1:
if node.soft_get('can_be_fused') == False:
continue
is_valid_convolutions = True
last_layer = None
next_nodes = get_next_operation(node)
# Check that all operation after this one are Convolutions
# and all convolutions has same output
if len(next_nodes) > 1 and all(_node.soft_get('type') in ['Convolution', 'Deconvolution'] for _node in next_nodes):
for conv in next_nodes:
conv_outputs = get_next_operation(conv)
if conv.soft_get('can_be_fused') == False:
is_valid_convolutions = False
if len(conv_outputs) != 1:
is_valid_convolutions = False
if last_layer is None:
last_layer = conv_outputs[0].id
# TODO: this check is not working for V10 where Biases appears as separate operations
elif conv_outputs[0].id != last_layer:
is_valid_convolutions = False
if is_valid_convolutions:
is_fused = concat_convolutions(graph, node, Node(graph, last_layer))
if is_fused:
break
if not is_fused:
break
| grouped_convolutions_fusing |
irfft.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorFFTMixin, validate_fft, TensorRealFFT
class TensorIRFFT(TensorRealFFT, TensorFFTMixin):
_op_type_ = OperandDef.IRFFT
def __init__(self, n=None, axis=-1, norm=None, **kw):
super().__init__(_n=n, _axis=axis, _norm=norm, **kw)
@classmethod
def _get_shape(cls, op, shape):
new_shape = list(shape)
if op.n is not None:
|
else:
new_shape[op.axis] = 2 * (new_shape[op.axis] - 1)
return tuple(new_shape)
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input tensor.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
Normalization mode (see `mt.fft`). Default is None.
Returns
-------
out : Tensor
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
mt.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> import mars.tenosr as mt
>>> mt.fft.ifft([1, -1j, -1, 1j]).execute()
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> mt.fft.irfft([1, -1j, -1]).execute()
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = astensor(a)
validate_fft(a, axis=axis, norm=norm)
op = TensorIRFFT(n=n, axis=axis, norm=norm, dtype=np.dtype(np.float_))
return op(a)
| new_shape[op.axis] = op.n |
helpers.py | import numpy as np
def compute_anomaly_corrs(out_true, out_pred):
|
def split_train_data(train_months, test_months, train_years, test_years):
def make_idx(months, years): # based on simple broadcasting
return np.asarray(months).reshape(-1,1)+(12*np.asarray(years).flatten())
idx_source_train = make_idx(train_months, train_years)
idx_target_train = make_idx(test_months, train_years)
idx_source_test = make_idx(train_months, test_years)
idx_target_test = make_idx(test_months, test_years)
return idx_source_train, idx_target_train, idx_source_test, idx_target_test | anomaly_corrs = np.zeros(out_pred.shape[1])
for i in range(anomaly_corrs.size):
anomaly_corrs[i] = np.corrcoef(out_pred[:,i], out_true[:,i])[0,1]
return anomaly_corrs |
main.go | package main
import (
"context"
"io/ioutil"
"log"
"net"
"net/http"
"time"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/s7techlab/cckit/examples/cpaper_asservice"
cpaperservice "github.com/s7techlab/cckit/examples/cpaper_asservice/service"
"github.com/s7techlab/cckit/gateway"
gwmock "github.com/s7techlab/cckit/gateway/mock"
"github.com/s7techlab/cckit/testing"
"google.golang.org/grpc"
)
const (
chaincodeName = `cpaper`
channelName = `cpaper`
grpcAddress = `:8080`
restAddress = `:8081`
)
func main() {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Create mock for commercial paper chaincode invocation
// Commercial paper chaincode instance
cc, err := cpaper_asservice.NewCC()
if err != nil {
log.Fatalln(err)
}
// Mockstub for commercial paper
cpaperMock := testing.NewMockStub(chaincodeName, cc)
// Chaincode invocation service mock. For real network you can use example with hlf-sdk-go
cpaperMockService := gwmock.New(testing.NewPeer().WithChannel(channelName, cpaperMock))
// Default identity for signing requests to the peer (mocked)
apiIdentity, err := testing.IdentityFromFile(`MSP`, `../../../testdata/admin.pem`, ioutil.ReadFile)
if err != nil {
log.Fatalln(err)
}
// Generated gateway for access to chaincode from external application
cpaperGateway := cpaperservice.NewCPaperGateway(
cpaperMockService, // gateway use mocked chaincode access service
channelName,
chaincodeName,
gateway.WithDefaultSigner(apiIdentity))
grpcListener, err := net.Listen("tcp", grpcAddress)
if err != nil {
log.Fatalf("failed to listen grpc: %v", err)
}
// Create gRPC server
s := grpc.NewServer()
cpaperservice.RegisterCPaperServer(s, cpaperGateway)
// Runs gRPC server in goroutine
go func() {
log.Printf(`listen gRPC at %s`, grpcAddress)
if err := s.Serve(grpcListener); err != nil {
log.Fatalf("failed to serve gRPC: %v", err)
}
}()
// Wait for the gRPC service to start
time.Sleep(3 * time.Second)
// Register gRPC server endpoint
mux := runtime.NewServeMux()
opts := []grpc.DialOption{grpc.WithInsecure()}
err = cpaperservice.RegisterCPaperHandlerFromEndpoint(ctx, mux, grpcAddress, opts)
if err != nil {
log.Fatalf("failed to register handler from endpoint %v", err)
}
log.Printf(`listen REST at %s`, restAddress)
// Start HTTP server (and proxy calls to gRPC server endpoint)
if err = http.ListenAndServe(restAddress, mux); err != nil {
log.Fatalf("failed to serve REST: %v", err)
}
}
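// Illustrative check (hypothetical request path; the actual routes depend on
// the generated gateway handlers): with both servers up, REST calls such as
// `curl http://localhost:8081/...` are translated by the mux into gRPC calls
// against the in-process gateway.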
eventHubSender.ts | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
import * as debugModule from "debug";
import * as uuid from "uuid/v4";
import {
messageProperties, Sender, EventContext, OnAmqpEvent, SenderOptions, Delivery, SenderEvents,
message, AmqpError, SessionEvents
} from "./rhea-promise";
import {
defaultLock, Func, retry, translate, AmqpMessage, ErrorNameConditionMapper,
randomNumberFromInterval, RetryConfig, RetryOperationType, Constants
} from "./amqp-common";
import { EventData } from "./eventData";
import { ConnectionContext } from "./connectionContext";
import { LinkEntity } from "./linkEntity";
const debug = debugModule("azure:event-hubs:sender");
interface CreateSenderOptions {
newName?: boolean;
}
/**
* Describes the EventHubSender that will send event data to EventHub.
* @class EventHubSender
*/
export class EventHubSender extends LinkEntity {
/**
* @property {string} senderLock The unique lock name per connection that is used to acquire the
* lock for establishing a sender link by an entity on that connection.
* @readonly
*/
readonly senderLock: string = `sender-${uuid()}`;
/**
* @property {OnAmqpEvent} _onAmqpError The handler function to handle errors that happen on the
* underlying sender.
* @readonly
*/
private readonly _onAmqpError: OnAmqpEvent;
/**
* @property {OnAmqpEvent} _onAmqpClose The handler function to handle close events that happen
* on the underlying sender.
* @readonly
*/
private readonly _onAmqpClose: OnAmqpEvent;
/**
* @property {Sender} [_sender] The AMQP sender link.
* @private
*/
private _sender?: Sender;
/**
* Creates a new EventHubSender instance.
* @constructor
* @param {ConnectionContext} context The connection context.
* @param {string|number} [partitionId] The EventHub partition id to which the sender
* wants to send the event data.
*/
constructor(context: ConnectionContext, partitionId?: string | number, name?: string) {
super(context, { name: name, partitionId: partitionId });
this.address = this._context.config.entityPath as string;
if (this.partitionId != undefined) {
this.address += `/Partitions/${this.partitionId}`;
}
this.audience = `${this._context.config.endpoint}${this.address}`;
this._onAmqpError = (context: EventContext) => {
const senderError = context.sender && context.sender.error;
const sessionError = context.session && context.session.error;
if (senderError) {
const err = translate(senderError);
debug("[%s] An error occurred for sender '%s': %O.",
this._context.connectionId, this.name, err);
} else if (sessionError) {
const err = translate(sessionError);
debug("[%s] An error occurred on the session of sender '%s': %O.",
this._context.connectionId, this.name, err);
}
};
this._onAmqpClose = async (context: EventContext) => {
const senderError = context.sender && context.sender.error;
const sessionError = context.session && context.session.error;
if (senderError) {
debug("[%s] 'sender_close' event occurred for sender '%s'. The associated error is: %O",
this._context.connectionId, this.address, senderError);
} else if (sessionError) {
debug("[%s] 'session_close' event occurred for sender '%s'. The associated error is: %O",
this._context.connectionId, this.address, sessionError);
}
await this.detached(senderError || sessionError);
};
}
/**
* Sends the given message, with the given options on this link
*
* @param {any} data Message to send. Will be sent as UTF8-encoded JSON string.
* @returns {Promise<Delivery>} Promise<Delivery>
*/
async send(data: EventData): Promise<Delivery> {
try {
if (!data || (data && typeof data !== "object")) {
throw new Error("data is required and it must be of type object.");
}
if (!this.isOpen()) {
debug("Acquiring lock %s for initializing the session, sender and " +
"possibly the connection.", this.senderLock);
await defaultLock.acquire(this.senderLock, () => { return this._init(); });
}
const message = EventData.toAmqpMessage(data);
message.body = this._context.dataTransformer.encode(data.body);
return await this._trySend(message, message.message_id);
} catch (err) {
debug("An error occurred while sending the message %O", err);
throw err;
}
}
/**
* Send a batch of EventData to the EventHub. The "message_annotations",
* "application_properties" and "properties" of the first message will be set as that
* of the envelope (batch message).
* @param {Array<EventData>} datas An array of EventData objects to be sent in a Batch message.
* @return {Promise<Delivery>} Promise<Delivery>
*/
async sendBatch(datas: EventData[]): Promise<Delivery> {
try {
if (!datas || (datas && !Array.isArray(datas))) {
throw new Error("data is required and it must be an Array.");
}
if (!this.isOpen()) {
debug("Acquiring lock %s for initializing the session, sender and " +
"possibly the connection.", this.senderLock);
await defaultLock.acquire(this.senderLock, () => { return this._init(); });
}
debug("[%s] Sender '%s', trying to send EventData[].",
this._context.connectionId, this.name);
const messages: AmqpMessage[] = [];
// Convert EventData to AmqpMessage.
for (let i = 0; i < datas.length; i++) {
const message = EventData.toAmqpMessage(datas[i]);
message.body = this._context.dataTransformer.encode(datas[i].body);
messages[i] = message;
}
// Encode every amqp message and then convert every encoded message to amqp data section
const batchMessage: AmqpMessage = {
body: message.data_sections(messages.map(message.encode))
};
// Set message_annotations, application_properties and properties of the first message as
// that of the envelope (batch message).
if (messages[0].message_annotations) {
batchMessage.message_annotations = messages[0].message_annotations;
}
if (messages[0].application_properties) {
batchMessage.application_properties = messages[0].application_properties;
}
for (const prop of messageProperties) {
if ((messages[0] as any)[prop]) {
(batchMessage as any)[prop] = (messages[0] as any)[prop];
}
}
if (!batchMessage.message_id) {
batchMessage.message_id = uuid();
}
// Finally encode the envelope (batch message).
const encodedBatchMessage = message.encode(batchMessage);
debug("[%s] Sender '%s', sending encoded batch message.",
this._context.connectionId, this.name, encodedBatchMessage);
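// 0x80013700 is the message format code conventionally used for AMQP batch
// messages (assumption based on common Event Hubs client usage).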
return await this._trySend(encodedBatchMessage, batchMessage.message_id, 0x80013700);
} catch (err) {
debug("An error occurred while sending the batch message %O", err);
throw err;
}
}
/**
* Will reconnect the sender link if necessary.
* @param {AmqpError | Error} [senderError] The sender error if any.
* @returns {Promise<void>} Promise<void>.
*/
async detached(senderError?: AmqpError | Error): Promise<void> {
await this._closeLink(this._sender); // clear the token renewal timer.
let shouldReopen = false;
if (senderError && this._context.senders[this.address]) {
const translatedError = translate(senderError);
if (translatedError.retryable) {
shouldReopen = true;
}
} else if (this._context.senders[this.address]) {
shouldReopen = true;
debug("[%s] close() method of Sender '%s' with address '%s' was not called. There " +
"was no accompanying error as well. This is a candidate for re-establishing the sender link.",
this._context.connectionId, this.name, this.address);
}
if (shouldReopen) {
await defaultLock.acquire(this.senderLock, () => {
const options: SenderOptions = this._createSenderOptions({
newName: true
});
// shall retry 3 times at an interval of 15 seconds and bail out.
const config: RetryConfig<void> = {
operation: () => this._init(options),
connectionId: this._context.connectionId,
operationType: RetryOperationType.senderLink
};
return retry<void>(config);
});
}
}
/**
* Deletes the sender from the context. Clears the token renewal timer. Closes the sender link.
* @return {Promise<void>} Promise<void>
*/
async close(): Promise<void> {
if (this._sender) {
const senderLink = this._sender;
this._deleteFromCache();
await this._closeLink(senderLink);
}
}
/**
* Determines whether the AMQP sender link is open. If open then returns true else returns false.
* @return {boolean} boolean
*/
isOpen(): boolean {
const result: boolean = this._sender! && this._sender!.isOpen();
debug("[%s] Sender '%s' with address '%s' is open? -> %s", this._context.connectionId,
this.name, this.address, result);
return result;
}
private _deleteFromCache(): void {
this._sender = undefined;
delete this._context.senders[this.address];
debug("[%s] Deleted the sender '%s' with address '%s' from the client cache.",
this._context.connectionId, this.name, this.address);
}
private _createSenderOptions(options: CreateSenderOptions): SenderOptions {
if (options.newName) this.name = `${uuid()}`;
const srOptions: SenderOptions = {
name: this.name,
target: {
address: this.address
},
onError: this._onAmqpError,
onClose: this._onAmqpClose
};
debug("Creating sender with options: %O", srOptions);
return srOptions;
}
/**
* Tries to send the message to EventHub if there is enough credit to send them
* and the circular buffer has available space to settle the message after sending them.
*
* We have implemented a synchronous send over here in the sense that we shall be waiting
* for the message to be accepted or rejected and accordingly resolve or reject the promise.
*
* @param message The message to be sent to EventHub.
* @return {Promise<Delivery>} Promise<Delivery>
*/
private _trySend(message: AmqpMessage, tag?: any, format?: number): Promise<Delivery> {
const sendEventPromise = () => new Promise<Delivery>((resolve, reject) => {
debug("[%s] Sender '%s', credit: %d available: %d", this._context.connectionId, this.name,
this._sender!.credit, this._sender!.session.outgoing.available());
if (this._sender!.sendable()) {
debug("[%s] Sender '%s', sending message with id '%s'.", this._context.connectionId,
this.name, message.message_id);
let onRejected: Func<EventContext, void>;
let onReleased: Func<EventContext, void>;
let onModified: Func<EventContext, void>;
let onAccepted: Func<EventContext, void>;
const removeListeners = (): void => {
this._sender!.removeHandler(SenderEvents.rejected, onRejected);
this._sender!.removeHandler(SenderEvents.accepted, onAccepted);
this._sender!.removeHandler(SenderEvents.released, onReleased);
this._sender!.removeHandler(SenderEvents.modified, onModified);
};
onAccepted = (context: EventContext) => {
// Since we will be adding listener for accepted and rejected event every time
// we send a message, we need to remove listener for both the events.
// This will ensure duplicate listeners are not added for the same event.
removeListeners();
debug("[%s] Sender '%s', got event accepted.", this._context.connectionId, this.name);
resolve(context.delivery);
};
onRejected = (context: EventContext) => {
removeListeners();
debug("[%s] Sender '%s', got event rejected.", this._context.connectionId, this.name);
reject(translate(context!.delivery!.remote_state!.error));
};
onReleased = (context: EventContext) => {
removeListeners();
debug("[%s] Sender '%s', got event released.", this._context.connectionId, this.name);
let err: Error;
if (context!.delivery!.remote_state!.error) {
err = translate(context!.delivery!.remote_state!.error);
} else {
err = new Error(`[${this._context.connectionId}] Sender '${this.name}', ` +
`received a release disposition. Hence we are rejecting the promise.`);
}
reject(err);
};
onModified = (context: EventContext) => {
removeListeners();
debug("[%s] Sender '%s', got event modified.", this._context.connectionId, this.name);
let err: Error;
if (context!.delivery!.remote_state!.error) {
err = translate(context!.delivery!.remote_state!.error);
} else {
err = new Error(`[${this._context.connectionId}] Sender "${this.name}", ` +
`received a modified disposition. Hence we are rejecting the promise.`);
}
reject(err);
};
this._sender!.registerHandler(SenderEvents.accepted, onAccepted);
this._sender!.registerHandler(SenderEvents.rejected, onRejected);
this._sender!.registerHandler(SenderEvents.modified, onModified);
this._sender!.registerHandler(SenderEvents.released, onReleased);
const delivery = this._sender!.send(message, tag, format);
debug("[%s] Sender '%s', sent message with delivery id: %d and tag: %s",
this._context.connectionId, this.name, delivery.id, delivery.tag.toString());
} else {
// let us retry to send the message after some time.
const msg = `[${this._context.connectionId}] Sender "${this.name}", ` +
`cannot send the message right now. Please try later.`;
debug(msg);
const amqpError: AmqpError = {
condition: ErrorNameConditionMapper.SenderBusyError,
description: msg
};
reject(translate(amqpError));
}
});
const jitterInSeconds = randomNumberFromInterval(1, 4);
const config: RetryConfig<Delivery> = {
operation: sendEventPromise,
connectionId: this._context.connectionId,
operationType: RetryOperationType.sendMessage,
times: Constants.defaultRetryAttempts,
delayInSeconds: Constants.defaultDelayBetweenRetriesInSeconds + jitterInSeconds
};
return retry<Delivery>(config);
}
/**
* Initializes the sender session on the connection.
* @returns {Promise<void>}
*/
private async _init(options?: SenderOptions): Promise<void> {
try {
if (!this.isOpen()) {
await this._negotiateClaim();
debug("[%s] Trying to create sender '%s'...", this._context.connectionId, this.name);
if (!options) {
options = this._createSenderOptions({});
}
this._sender = await this._context.connection.createSender(options);
this._sender.setMaxListeners(1000);
this._sender.registerSessionHandler(SessionEvents.sessionError, this._onAmqpError);
this._sender.registerSessionHandler(SessionEvents.sessionClose, this._onAmqpClose);
debug("[%s] Promise to create the sender resolved. Created sender with name: %s",
this._context.connectionId, this.name);
debug("[%s] Sender '%s' created with sender options: %O",
this._context.connectionId, this.name, options);
// It is possible for someone to close the sender and then start it again.
// Thus make sure that the sender is present in the client cache.
if (!this._context.senders[this.address]) this._context.senders[this.address] = this;
await this._ensureTokenRenewal();
}
} catch (err) {
err = translate(err);
debug("[%s] An error occurred while creating the sender %s",
this._context.connectionId, this.name, err);
throw err;
}
}
/**
* Creates a new sender to the given event hub, and optionally to a given partition if it is
* not present in the context or returns the one present in the context.
* @static
* @param {(string|number)} [partitionId] Partition ID to which it will send event data.
* @returns {Promise<EventHubSender>}
*/
static create(context: ConnectionContext, partitionId?: string | number): EventHubSender {
if (partitionId && typeof partitionId !== "string" && typeof partitionId !== "number") {
throw new Error("'partitionId' must be of type: 'string' | 'number'.");
}
const ehSender: EventHubSender = new EventHubSender(context, partitionId);
if (!context.senders[ehSender.address]) {
context.senders[ehSender.address] = ehSender;
}
return context.senders[ehSender.address];
}
}
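// Illustrative usage sketch (assumes an already initialized ConnectionContext
// `ctx`; the names used are from this file, the payload is made up):
// const sender = EventHubSender.create(ctx, "0"); // cached per partition address
// await sender.send({ body: "hello" });           // single event, returns a Delivery
// await sender.close();                           // removes from cache and closes the link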
test_stanza_element.py | from sleekxmpp.test import *
from sleekxmpp.xmlstream.stanzabase import ElementBase
class TestElementBase(SleekTest):
def testFixNs(self):
"""Test fixing namespaces in an XPath expression."""
e = ElementBase()
ns = "http://jabber.org/protocol/disco#items"
result = e._fix_ns("{%s}foo/bar/{abc}baz/{%s}more" % (ns, ns))
expected = "/".join(["{%s}foo" % ns,
"{%s}bar" % ns,
"{abc}baz",
"{%s}more" % ns])
self.failUnless(expected == result,
"Incorrect namespace fixing result: %s" % str(result))
def testExtendedName(self):
"""Test element names of the form tag1/tag2/tag3."""
class TestStanza(ElementBase):
name = "foo/bar/baz"
namespace = "test"
stanza = TestStanza()
self.check(stanza, """
<foo xmlns="test">
<bar>
<baz />
</bar>
</foo>
""")
def testGetStanzaValues(self):
"""Test getStanzaValues using plugins and substanzas."""
class TestStanzaPlugin(ElementBase):
name = "foo2"
namespace = "foo"
interfaces = set(('bar', 'baz'))
plugin_attrib = "foo2"
class TestSubStanza(ElementBase):
name = "subfoo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
subitem = set((TestSubStanza,))
register_stanza_plugin(TestStanza, TestStanzaPlugin)
stanza = TestStanza()
stanza['bar'] = 'a'
stanza['foo2']['baz'] = 'b'
substanza = TestSubStanza()
substanza['bar'] = 'c'
stanza.append(substanza)
values = stanza.getStanzaValues()
expected = {'bar': 'a',
'baz': '',
'foo2': {'bar': '',
'baz': 'b'},
'substanzas': [{'__childtag__': '{foo}subfoo',
'bar': 'c',
'baz': ''}]}
self.failUnless(values == expected,
"Unexpected stanza values:\n%s\n%s" % (str(expected), str(values)))
def testSetStanzaValues(self):
"""Test using setStanzaValues with substanzas and plugins."""
class TestStanzaPlugin(ElementBase):
name = "pluginfoo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
plugin_attrib = "plugin_foo"
class TestStanzaPlugin2(ElementBase):
name = "pluginfoo2"
namespace = "foo"
interfaces = set(('bar', 'baz'))
plugin_attrib = "plugin_foo2"
class TestSubStanza(ElementBase):
name = "subfoo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
subitem = set((TestSubStanza,))
register_stanza_plugin(TestStanza, TestStanzaPlugin)
register_stanza_plugin(TestStanza, TestStanzaPlugin2)
stanza = TestStanza()
values = {'bar': 'a',
'baz': '',
'plugin_foo': {'bar': '',
'baz': 'b'},
'plugin_foo2': {'bar': 'd',
'baz': 'e'},
'substanzas': [{'__childtag__': '{foo}subfoo',
'bar': 'c',
'baz': ''}]}
stanza.setStanzaValues(values)
self.check(stanza, """
<foo xmlns="foo" bar="a">
<pluginfoo baz="b" />
<pluginfoo2 bar="d" baz="e" />
<subfoo bar="c" />
</foo>
""")
def testGetItem(self):
"""Test accessing stanza interfaces."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz', 'qux'))
sub_interfaces = set(('baz',))
def getQux(self):
return 'qux'
class TestStanzaPlugin(ElementBase):
name = "foobar"
namespace = "foo"
plugin_attrib = "foobar"
interfaces = set(('fizz',))
TestStanza.subitem = (TestStanza,)
register_stanza_plugin(TestStanza, TestStanzaPlugin)
stanza = TestStanza()
substanza = TestStanza()
stanza.append(substanza)
stanza.setStanzaValues({'bar': 'a',
'baz': 'b',
'qux': 42,
'foobar': {'fizz': 'c'}})
# Test non-plugin interfaces
expected = {'substanzas': [substanza],
'bar': 'a',
'baz': 'b',
'qux': 'qux',
'meh': ''}
for interface, value in expected.items():
result = stanza[interface]
self.failUnless(result == value,
"Incorrect stanza interface access result: %s" % result)
# Test plugin interfaces
self.failUnless(isinstance(stanza['foobar'], TestStanzaPlugin),
"Incorrect plugin object result.")
self.failUnless(stanza['foobar']['fizz'] == 'c',
"Incorrect plugin subvalue result.")
def testSetItem(self):
"""Test assigning to stanza interfaces."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz', 'qux'))
sub_interfaces = set(('baz',))
def setQux(self, value):
pass
class TestStanzaPlugin(ElementBase):
name = "foobar"
namespace = "foo"
plugin_attrib = "foobar"
interfaces = set(('foobar',))
register_stanza_plugin(TestStanza, TestStanzaPlugin)
stanza = TestStanza()
stanza['bar'] = 'attribute!'
stanza['baz'] = 'element!'
stanza['qux'] = 'overridden'
stanza['foobar'] = 'plugin'
self.check(stanza, """
<foo xmlns="foo" bar="attribute!">
<baz>element!</baz>
<foobar foobar="plugin" />
</foo>
""")
def testDelItem(self):
"""Test deleting stanza interface values."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz', 'qux'))
sub_interfaces = set(('bar',))
def delQux(self):
pass
class TestStanzaPlugin(ElementBase):
name = "foobar"
namespace = "foo"
plugin_attrib = "foobar"
interfaces = set(('foobar',))
register_stanza_plugin(TestStanza, TestStanzaPlugin)
stanza = TestStanza()
stanza['bar'] = 'a'
stanza['baz'] = 'b'
stanza['qux'] = 'c'
stanza['foobar']['foobar'] = 'd'
self.check(stanza, """
<foo xmlns="foo" baz="b" qux="c">
<bar>a</bar>
<foobar foobar="d" />
</foo>
""")
del stanza['bar']
del stanza['baz']
del stanza['qux']
del stanza['foobar']
self.check(stanza, """
<foo xmlns="foo" qux="c" />
""")
def testModifyingAttributes(self):
"""Test modifying top level attributes of a stanza's XML object."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
stanza = TestStanza()
self.check(stanza, """
<foo xmlns="foo" />
""")
self.failUnless(stanza._get_attr('bar') == '',
"Incorrect value returned for an unset XML attribute.")
stanza._set_attr('bar', 'a')
stanza._set_attr('baz', 'b')
self.check(stanza, """
<foo xmlns="foo" bar="a" baz="b" />
""")
self.failUnless(stanza._get_attr('bar') == 'a',
"Retrieved XML attribute value is incorrect.")
stanza._set_attr('bar', None)
stanza._del_attr('baz')
self.check(stanza, """
<foo xmlns="foo" />
""")
self.failUnless(stanza._get_attr('bar', 'c') == 'c',
"Incorrect default value returned for an unset XML attribute.")
def testGetSubText(self):
"""Test retrieving the contents of a sub element."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar',))
def setBar(self, value):
wrapper = ET.Element("{foo}wrapper")
bar = ET.Element("{foo}bar")
bar.text = value
wrapper.append(bar)
self.xml.append(wrapper)
def getBar(self):
return self._get_sub_text("wrapper/bar", default="not found")
stanza = TestStanza()
self.failUnless(stanza['bar'] == 'not found',
"Default _get_sub_text value incorrect.")
stanza['bar'] = 'found'
self.check(stanza, """
<foo xmlns="foo">
<wrapper>
<bar>found</bar>
</wrapper>
</foo>
""")
self.failUnless(stanza['bar'] == 'found',
"_get_sub_text value incorrect: %s." % stanza['bar'])
def testSubElement(self):
"""Test setting the contents of a sub element."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
def setBaz(self, value):
self._set_sub_text("wrapper/baz", text=value)
def getBaz(self):
return self._get_sub_text("wrapper/baz")
def setBar(self, value):
self._set_sub_text("wrapper/bar", text=value)
def getBar(self):
return self._get_sub_text("wrapper/bar")
stanza = TestStanza()
stanza['bar'] = 'a'
stanza['baz'] = 'b'
self.check(stanza, """
<foo xmlns="foo">
<wrapper>
<bar>a</bar>
<baz>b</baz>
</wrapper>
</foo>
""")
stanza._set_sub_text('wrapper/bar', text='', keep=True)
self.check(stanza, """
<foo xmlns="foo">
<wrapper>
<bar />
<baz>b</baz>
</wrapper>
</foo>
""", use_values=False)
stanza['bar'] = 'a'
stanza._set_sub_text('wrapper/bar', text='')
self.check(stanza, """
<foo xmlns="foo">
<wrapper>
<baz>b</baz>
</wrapper>
</foo>
""")
def testDelSub(self):
"""Test removing sub elements."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
def setBar(self, value):
self._set_sub_text("path/to/only/bar", value);
def getBar(self):
return self._get_sub_text("path/to/only/bar")
def delBar(self):
self._del_sub("path/to/only/bar")
def setBaz(self, value):
self._set_sub_text("path/to/just/baz", value);
def getBaz(self):
return self._get_sub_text("path/to/just/baz")
def delBaz(self):
self._del_sub("path/to/just/baz")
stanza = TestStanza()
stanza['bar'] = 'a'
stanza['baz'] = 'b'
self.check(stanza, """
<foo xmlns="foo">
<path>
<to>
<only>
<bar>a</bar>
</only>
<just>
<baz>b</baz>
</just>
</to>
</path>
</foo>
""")
del stanza['bar']
del stanza['baz']
self.check(stanza, """
<foo xmlns="foo">
<path>
<to>
<only />
<just />
</to>
</path>
</foo>
""", use_values=False)
stanza['bar'] = 'a'
stanza['baz'] = 'b'
stanza._del_sub('path/to/only/bar', all=True)
self.check(stanza, """
<foo xmlns="foo">
<path>
<to>
<just>
<baz>b</baz>
</just>
</to>
</path>
</foo>
""")
def testMatch(self):
"""Test matching a stanza against an XPath expression."""
class TestSubStanza(ElementBase):
name = "sub"
namespace = "baz"
interfaces = set(('attrib',))
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar','baz', 'qux'))
sub_interfaces = set(('qux',))
subitem = (TestSubStanza,)
def setQux(self, value):
self._set_sub_text('qux', text=value)
def getQux(self):
return self._get_sub_text('qux')
class TestStanzaPlugin(ElementBase):
name = "plugin"
namespace = "http://test/slash/bar"
interfaces = set(('attrib',))
register_stanza_plugin(TestStanza, TestStanzaPlugin)
stanza = TestStanza()
self.failUnless(stanza.match("foo"),
"Stanza did not match its own tag name.")
self.failUnless(stanza.match("{foo}foo"),
"Stanza did not match its own namespaced name.")
stanza['bar'] = 'a'
self.failUnless(stanza.match("foo@bar=a"),
"Stanza did not match its own name with attribute value check.")
stanza['baz'] = 'b'
self.failUnless(stanza.match("foo@bar=a@baz=b"),
"Stanza did not match its own name with multiple attributes.")
stanza['qux'] = 'c'
self.failUnless(stanza.match("foo/qux"),
"Stanza did not match with subelements.")
stanza['qux'] = ''
self.failUnless(stanza.match("foo/qux") == False,
"Stanza matched missing subinterface element.")
self.failUnless(stanza.match("foo/bar") == False,
"Stanza matched nonexistent element.")
stanza['plugin']['attrib'] = 'c'
self.failUnless(stanza.match("foo/plugin@attrib=c"),
"Stanza did not match with plugin and attribute.")
self.failUnless(stanza.match("foo/{http://test/slash/bar}plugin"),
"Stanza did not match with namespaced plugin.")
substanza = TestSubStanza()
substanza['attrib'] = 'd'
stanza.append(substanza)
self.failUnless(stanza.match("foo/sub@attrib=d"),
"Stanza did not match with substanzas and attribute.")
self.failUnless(stanza.match("foo/{baz}sub"),
"Stanza did not match with namespaced substanza.")
def testComparisons(self):
"""Test comparing ElementBase objects."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
stanza1 = TestStanza()
stanza1['bar'] = 'a'
self.failUnless(stanza1,
"Stanza object does not evaluate to True")
stanza2 = TestStanza()
stanza2['baz'] = 'b'
self.failUnless(stanza1 != stanza2,
"Different stanza objects incorrectly compared equal.")
stanza1['baz'] = 'b'
stanza2['bar'] = 'a'
self.failUnless(stanza1 == stanza2,
"Equal stanzas incorrectly compared inequal.")
def testKeys(self):
"""Test extracting interface names from a stanza object."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
plugin_attrib = 'qux'
register_stanza_plugin(TestStanza, TestStanza)
stanza = TestStanza()
self.failUnless(set(stanza.keys()) == set(('bar', 'baz')),
"Returned set of interface keys does not match expected.")
stanza.enable('qux')
self.failUnless(set(stanza.keys()) == set(('bar', 'baz', 'qux')),
"Incorrect set of interface and plugin keys.")
def testGet(self):
"""Test accessing stanza interfaces using get()."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
stanza = TestStanza()
stanza['bar'] = 'a'
self.failUnless(stanza.get('bar') == 'a',
"Incorrect value returned by stanza.get")
self.failUnless(stanza.get('baz', 'b') == 'b',
"Incorrect default value returned by stanza.get")
def testSubStanzas(self):
"""Test manipulating substanzas of a stanza object."""
class TestSubStanza(ElementBase):
name = "foobar"
namespace = "foo"
interfaces = set(('qux',))
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
subitem = (TestSubStanza,)
stanza = TestStanza()
substanza1 = TestSubStanza()
substanza2 = TestSubStanza()
substanza1['qux'] = 'a'
substanza2['qux'] = 'b'
# Test appending substanzas
self.failUnless(len(stanza) == 0,
"Incorrect empty stanza size.")
stanza.append(substanza1)
self.check(stanza, """
<foo xmlns="foo">
<foobar qux="a" />
</foo>
""", use_values=False)
self.failUnless(len(stanza) == 1,
"Incorrect stanza size with 1 substanza.")
stanza.append(substanza2)
self.check(stanza, """
<foo xmlns="foo">
<foobar qux="a" />
<foobar qux="b" />
</foo>
""", use_values=False)
self.failUnless(len(stanza) == 2,
"Incorrect stanza size with 2 substanzas.")
# Test popping substanzas
stanza.pop(0)
self.check(stanza, """
<foo xmlns="foo">
<foobar qux="b" />
</foo>
""", use_values=False)
# Test iterating over substanzas
stanza.append(substanza1)
results = []
for substanza in stanza:
results.append(substanza['qux'])
self.failUnless(results == ['b', 'a'],
"Iteration over substanzas failed: %s." % str(results))
def testCopy(self):
"""Test copying stanza objects."""
class TestStanza(ElementBase):
name = "foo"
namespace = "foo"
interfaces = set(('bar', 'baz'))
stanza1 = TestStanza()
stanza1['bar'] = 'a'
stanza2 = stanza1.__copy__()
self.failUnless(stanza1 == stanza2,
"Copied stanzas are not equal to each other.")
stanza1['baz'] = 'b'
self.failUnless(stanza1 != stanza2,
"Divergent stanza copies incorrectly compared equal.")
suite = unittest.TestLoader().loadTestsFromTestCase(TestElementBase)
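# Minimal direct-run sketch (assumption: these tests are normally collected by
# sleekxmpp's own runner; `unittest` is available via the wildcard import above):
# if __name__ == '__main__':
#     unittest.TextTestRunner(verbosity=2).run(suite)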