file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---
min.rs
|
//! Tests auto-converted from "sass-spec/spec/core_functions/math/min.hrx"
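//! Each case feeds SCSS source to the shared `runner()` and asserts on either
//! the exact CSS output (`ok`) or the full compiler error message (`err`).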
#[allow(unused)]
fn runner() -> crate::TestRunner {
super::runner()
}
mod error {
#[allow(unused)]
use super::runner;
#[test]
fn incompatible_units() {
assert_eq!(
runner().err(
"@use \"sass:math\";\
\na {b: math.min(1px, 2s)}\n"
),
"Error: 1px and 2s have incompatible units.\
\n ,\
\n2 | a {b: math.min(1px, 2s)}\
\n | ^^^^^^^^^^^^^^^^^\
\n \'\
\n input.scss 2:7 root stylesheet",
);
}
#[test]
fn too_few_args() {
assert_eq!(
runner().err(
"@use \"sass:math\";\
\na {b: math.min()}\n"
),
"Error: At least one argument must be passed.\
\n ,\
\n2 | a {b: math.min()}\
\n | ^^^^^^^^^^\
\n \'\
\n input.scss 2:7 root stylesheet",
);
}
mod test_type {
#[allow(unused)]
use super::runner;
#[test]
fn arg_1() {
assert_eq!(
runner().err(
"@use \"sass:math\";\
\na {b: math.min(c)}\n"
),
"Error: c is not a number.\
\n ,\
\n2 | a {b: math.min(c)}\
\n | ^^^^^^^^^^^\
\n \'\
\n input.scss 2:7 root stylesheet",
);
}
#[test]
fn arg_2() {
assert_eq!(
runner().err(
"@use \"sass:math\";\
\na {b: math.min(1, c)}\n"
),
"Error: c is not a number.\
\n ,\
\n2 | a {b: math.min(1, c)}\
\n | ^^^^^^^^^^^^^^\
\n \'\
\n input.scss 2:7 root stylesheet",
);
}
#[test]
fn arg_3() {
assert_eq!(
runner().err(
"@use \"sass:math\";\
\na {b: math.min(1, 2, c)}\n"
),
"Error: c is not a number.\
\n ,\
\n2 | a {b: math.min(1, 2, c)}\
\n | ^^^^^^^^^^^^^^^^^\
\n \'\
\n input.scss 2:7 root stylesheet",
);
}
}
}
mod global {
#[allow(unused)]
use super::runner;
#[test]
fn modulo() {
assert_eq!(
runner().ok("a {b: min(1px, 7px % 4)}\n"),
"a {\
\n b: 1px;\
\n}\n"
);
}
#[test]
fn surrounding_whitespace() {
assert_eq!(
runner().ok(
"// The extra whitespace doesn\'t cause this to be parsed as a Sass function, but\
\n// we want to verify that it also doesn\'t interfere.\
\nb {c: min( 1px, 2px, )}\n"
),
"b {\
\n c: 1px;\
\n}\n"
);
}
#[test]
fn trailing_comma() {
assert_eq!(
runner().ok("a {b: min(1px, 2px,)}\n"),
"a {\
\n b: 1px;\
\n}\n"
);
}
}
#[test]
fn one_arg() {
assert_eq!(
runner().ok("@use \"sass:math\";\
\na {b: math.min(1)}\n"),
"a {\
\n b: 1;\
\n}\n"
);
}
#[test]
fn three_args() {
assert_eq!(
runner().ok("@use \"sass:math\";\
\na {b: math.min(3, 1, 2)}\n"),
"a {\
\n b: 1;\
\n}\n"
);
}
#[test]
fn two_args()
|
mod units {
#[allow(unused)]
use super::runner;
#[test]
fn and_unitless() {
assert_eq!(
runner().ok("@use \"sass:math\";\
\na {b: math.min(2px, 1)}\n"),
"a {\
\n b: 1;\
\n}\n"
);
}
#[test]
fn compatible() {
assert_eq!(
runner().ok("@use \"sass:math\";\
\na {b: math.min(1px, 1in, 1cm)}\n"),
"a {\
\n b: 1px;\
\n}\n"
);
}
#[test]
fn same() {
assert_eq!(
runner().ok("@use \"sass:math\";\
\na {b: math.min(6px, 2px, 10px)}\n"),
"a {\
\n b: 2px;\
\n}\n"
);
}
}
|
{
assert_eq!(
runner().ok("@use \"sass:math\";\
\na {b: math.min(1, 2)}\n"),
"a {\
\n b: 1;\
\n}\n"
);
}
|
max_length.rs
|
use crate::{
compilation::context::CompilationContext,
error::{error, no_error, ErrorIterator, ValidationError},
keywords::{helpers::fail_on_non_positive_integer, CompilationResult},
paths::{InstancePath, JSONPointer},
validator::Validate,
};
use serde_json::{Map, Value};
pub(crate) struct MaxLengthValidator {
limit: u64,
schema_path: JSONPointer,
}
impl MaxLengthValidator {
#[inline]
pub(crate) fn compile(schema: &Value, schema_path: JSONPointer) -> CompilationResult {
if let Some(limit) = schema.as_u64() {
Ok(Box::new(MaxLengthValidator { limit, schema_path }))
} else {
Err(fail_on_non_positive_integer(schema, schema_path))
}
}
}
impl Validate for MaxLengthValidator {
fn is_valid(&self, instance: &Value) -> bool {
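// JSON Schema's maxLength counts Unicode code points, not bytes, hence
// bytecount::num_chars over the UTF-8 bytes rather than item.len().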
if let Value::String(item) = instance {
if (bytecount::num_chars(item.as_bytes()) as u64) > self.limit {
return false;
}
}
true
}
fn validate<'instance>(
&self,
instance: &'instance Value,
instance_path: &InstancePath,
) -> ErrorIterator<'instance> {
if let Value::String(item) = instance {
if (bytecount::num_chars(item.as_bytes()) as u64) > self.limit {
return error(ValidationError::max_length(
self.schema_path.clone(),
instance_path.into(),
instance,
self.limit,
));
}
|
}
impl core::fmt::Display for MaxLengthValidator {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "maxLength: {}", self.limit)
}
}
#[inline]
pub(crate) fn compile<'a>(
_: &'a Map<String, Value>,
schema: &'a Value,
context: &CompilationContext,
) -> Option<CompilationResult<'a>> {
let schema_path = context.as_pointer_with("maxLength");
Some(MaxLengthValidator::compile(schema, schema_path))
}
#[cfg(test)]
mod tests {
use crate::tests_util;
use serde_json::json;
#[test]
fn schema_path() {
tests_util::assert_schema_path(&json!({"maxLength": 1}), &json!("ab"), "/maxLength")
}
}
|
}
no_error()
}
|
lib.rs
|
//! The `netutil` module assists with networking
use nix::sys::socket::setsockopt;
use nix::sys::socket::sockopt::{ReuseAddr, ReusePort};
use rand::{thread_rng, Rng};
use socket2::{Domain, SockAddr, Socket, Type};
use std::io;
use std::io::Read;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, TcpStream, ToSocketAddrs, UdpSocket};
use std::os::unix::io::AsRawFd;
use std::time::Duration;
mod ip_echo_server;
pub use ip_echo_server::*;
/// A data type representing a public UDP socket
pub struct UdpSocketPair {
pub addr: SocketAddr, // Public address of the socket
pub receiver: UdpSocket, // Locally bound socket that can receive from the public address
pub sender: UdpSocket, // Locally bound socket to send via public address
}
pub type PortRange = (u16, u16);
/// Determine the public IP address of this machine by asking an ip_echo_server at the given
/// address
pub fn get_public_ip_addr(ip_echo_server_addr: &SocketAddr) -> Result<IpAddr, String> {
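// The echo server is expected to reply with a bincode-serialized IpAddr;
// read the whole stream, then decode it below.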
let mut data = Vec::new();
let timeout = Duration::new(5, 0);
TcpStream::connect_timeout(ip_echo_server_addr, timeout)
.and_then(|mut stream| {
stream
.set_read_timeout(Some(Duration::new(10, 0)))
.expect("set_read_timeout");
stream.read_to_end(&mut data)
})
.and_then(|_| {
bincode::deserialize(&data).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("Failed to deserialize: {:?}", err),
)
})
})
.map_err(|err| err.to_string())
}
pub fn parse_port_or_addr(optstr: Option<&str>, default_addr: SocketAddr) -> SocketAddr {
if let Some(addrstr) = optstr {
if let Ok(port) = addrstr.parse() {
let mut addr = default_addr;
addr.set_port(port);
addr
} else if let Ok(addr) = addrstr.parse() {
addr
} else {
default_addr
}
} else {
|
}
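/// Parses a "start-end" string into a PortRange; for example "8000-8002"
/// yields Some((8000, 8002)), while malformed input or end < start yields None.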
pub fn parse_port_range(port_range: &str) -> Option<PortRange> {
let ports: Vec<&str> = port_range.split('-').collect();
if ports.len() != 2 {
return None;
}
let start_port = ports[0].parse();
let end_port = ports[1].parse();
if start_port.is_err() || end_port.is_err() {
return None;
}
let start_port = start_port.unwrap();
let end_port = end_port.unwrap();
if end_port < start_port {
return None;
}
Some((start_port, end_port))
}
pub fn parse_host(host: &str) -> Result<IpAddr, String> {
let ips: Vec<_> = (host, 0)
.to_socket_addrs()
.map_err(|err| err.to_string())?
.map(|socket_address| socket_address.ip())
.collect();
if ips.is_empty() {
Err(format!("Unable to resolve host: {}", host))
} else {
Ok(ips[0])
}
}
pub fn parse_host_port(host_port: &str) -> Result<SocketAddr, String> {
let addrs: Vec<_> = host_port
.to_socket_addrs()
.map_err(|err| err.to_string())?
.collect();
if addrs.is_empty() {
Err(format!("Unable to resolve host: {}", host_port))
} else {
Ok(addrs[0])
}
}
fn udp_socket(reuseaddr: bool) -> io::Result<Socket> {
let sock = Socket::new(Domain::ipv4(), Type::dgram(), None)?;
let sock_fd = sock.as_raw_fd();
if reuseaddr {
// best effort, i.e. ignore errors here, we'll get the failure in caller
setsockopt(sock_fd, ReusePort, &true).ok();
setsockopt(sock_fd, ReuseAddr, &true).ok();
}
Ok(sock)
}
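/// Binds a UDP socket to some port in `range`, starting at a random port and
/// probing sequentially (wrapping around) until a bind succeeds or the range
/// is exhausted.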
pub fn bind_in_range(range: PortRange) -> io::Result<(u16, UdpSocket)> {
let sock = udp_socket(false)?;
let (start, end) = range;
let mut tries_left = end - start;
let mut rand_port = thread_rng().gen_range(start, end);
loop {
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rand_port);
match sock.bind(&SockAddr::from(addr)) {
Ok(_) => {
let sock = sock.into_udp_socket();
break Result::Ok((sock.local_addr().unwrap().port(), sock));
}
Err(err) => {
if tries_left == 0 {
return Err(err);
}
}
}
rand_port += 1;
if rand_port == end {
rand_port = start;
}
tries_left -= 1;
}
}
// binds many sockets to the same port in a range
pub fn multi_bind_in_range(range: PortRange, num: usize) -> io::Result<(u16, Vec<UdpSocket>)> {
let mut sockets = Vec::with_capacity(num);
let port = {
let (port, _) = bind_in_range(range)?;
port
}; // drop the probe, port should be available... briefly.
for _ in 0..num {
sockets.push(bind_to(port, true)?);
}
Ok((port, sockets))
}
pub fn bind_to(port: u16, reuseaddr: bool) -> io::Result<UdpSocket> {
let sock = udp_socket(reuseaddr)?;
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port);
match sock.bind(&SockAddr::from(addr)) {
Ok(_) => Result::Ok(sock.into_udp_socket()),
Err(err) => Err(err),
}
}
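/// Finds a free TCP port in `range` using the same random-start,
/// sequential-probe strategy as `bind_in_range`.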
pub fn find_available_port_in_range(range: PortRange) -> io::Result<u16> {
let (start, end) = range;
let mut tries_left = end - start;
let mut rand_port = thread_rng().gen_range(start, end);
loop {
match TcpListener::bind(SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
rand_port,
)) {
Ok(_) => {
break Ok(rand_port);
}
Err(err) => {
if tries_left == 0 {
return Err(err);
}
}
}
rand_port += 1;
if rand_port == end {
rand_port = start;
}
tries_left -= 1;
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_port_or_addr() {
let p1 = parse_port_or_addr(Some("9000"), SocketAddr::from(([1, 2, 3, 4], 1)));
assert_eq!(p1.port(), 9000);
let p2 = parse_port_or_addr(Some("127.0.0.1:7000"), SocketAddr::from(([1, 2, 3, 4], 1)));
assert_eq!(p2.port(), 7000);
let p2 = parse_port_or_addr(Some("hi there"), SocketAddr::from(([1, 2, 3, 4], 1)));
assert_eq!(p2.port(), 1);
let p3 = parse_port_or_addr(None, SocketAddr::from(([1, 2, 3, 4], 1)));
assert_eq!(p3.port(), 1);
}
#[test]
fn test_parse_port_range() {
assert_eq!(parse_port_range("garbage"), None);
assert_eq!(parse_port_range("1-"), None);
assert_eq!(parse_port_range("1-2"), Some((1, 2)));
assert_eq!(parse_port_range("1-2-3"), None);
assert_eq!(parse_port_range("2-1"), None);
}
#[test]
fn test_parse_host() {
parse_host("localhost:1234").unwrap_err();
parse_host("localhost").unwrap();
parse_host("127.0.0.0:1234").unwrap_err();
parse_host("127.0.0.0").unwrap();
}
#[test]
fn test_parse_host_port() {
parse_host_port("localhost:1234").unwrap();
parse_host_port("localhost").unwrap_err();
parse_host_port("127.0.0.0:1234").unwrap();
parse_host_port("127.0.0.0").unwrap_err();
}
#[test]
fn test_bind() {
assert_eq!(bind_in_range((2000, 2001)).unwrap().0, 2000);
let x = bind_to(2002, true).unwrap();
let y = bind_to(2002, true).unwrap();
assert_eq!(
x.local_addr().unwrap().port(),
y.local_addr().unwrap().port()
);
let (port, v) = multi_bind_in_range((2010, 2110), 10).unwrap();
for sock in &v {
assert_eq!(port, sock.local_addr().unwrap().port());
}
}
#[test]
#[should_panic]
fn test_bind_in_range_nil() {
let _ = bind_in_range((2000, 2000));
}
#[test]
fn test_find_available_port_in_range() {
assert_eq!(find_available_port_in_range((3000, 3001)).unwrap(), 3000);
let port = find_available_port_in_range((3000, 3050)).unwrap();
assert!(3000 <= port && port < 3050);
}
}
|
default_addr
}
|
main_p1.go
|
package main
import (
"fmt"
"math"
)
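// This looks like the elves' "memory game" (a Van Eck-style sequence, as in
// Advent of Code 2020 day 15): each turn, speak 0 if the previous number was
// new, otherwise the gap between its two most recent turns; print the number
// spoken on turn 2020.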
func main() {
n := []int{0, 14, 1, 3, 7, 9}
spoken := make(map[int][]int)
for i, el := range n {
spoken[el] = []int{i + 1}
}
lastSpoken := n[len(n)-1]
for i := len(n); i < 2020; i++ {
curr, total := spoken[lastSpoken], 0
if len(curr) <= 1
|
else {
total = int(math.Abs(float64(curr[len(curr)-1]) - float64(curr[len(curr)-2])))
spoken[total] = append(spoken[total], i+1)
}
lastSpoken = total
}
fmt.Println(lastSpoken)
}
|
{
spoken[0] = append(spoken[0], i+1)
}
|
get_data.py
|
import json
import requests
import pandas as pd
import os
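# Fetches the list of API categories, then the APIs in each category, and
# persists the combined list under data/ as both JSON and CSV.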
baseurl = "http://exploreapiswith.tech/api/"
categories = json.loads(requests.get(
baseurl + "category").text)
def
|
(category_name=None):
category_apis = json.loads(requests.get(
baseurl + "category/" + category_name).text)
return category_apis
api_list = []
for category in categories:
api = get_category_api(category)
api_list += api
if os.path.exists("data/apis.json"):
os.remove("data/apis.json")
if os.path.exists("data/apis.csv"):
os.remove("data/apis.csv")
with open(r"data/apis.json", "x") as f:
json.dump(api_list, f)
json_file = pd.read_json(r"data/apis.json")
json_file.to_csv(r"data/apis.csv", index=False)
|
get_category_api
|
test_url.py
|
from __future__ import print_function
import pytest
from functools import partial
import codecs
import os
from odo import odo, resource, URL, discover, CSV, TextFile, convert
from odo.backends.url import sample
from odo.temp import _Temp, Temp
from odo.utils import tmpfile, raises
import datashape
try:
from urllib2 import urlopen
from urllib2 import HTTPError, URLError
except ImportError:
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
pytestmark = pytest.mark.skipif(raises(URLError,
partial(urlopen, "http://google.com")),
reason='unable to connect to google.com')
iris_url = ('https://raw.githubusercontent.com/'
'blaze/blaze/master/blaze/examples/data/iris.csv')
ftp_url = "ftp://athena-dist.mit.edu/pub/XNeXT/README.txt"
|
def test_url_resource():
csv = resource(iris_url)
assert isinstance(csv, URL(CSV))
def test_small_chunk_size():
normal = convert(Temp(CSV), resource(iris_url))
small_chunk = convert(Temp(CSV), resource(iris_url, chunk_size=1))
with open(normal.path, 'rb') as fn:
normal_data = fn.read()
with open(small_chunk.path, 'rb') as fn:
small_chunk_data = fn.read()
assert normal_data == small_chunk_data
def test_sample_different_line_counts():
with sample(resource(iris_url), lines=10) as fn:
with open(fn, 'r') as f:
assert len(list(f)) == 10
with sample(resource(iris_url), lines=5) as fn:
with open(fn, 'r') as f:
assert len(list(f)) == 5
def test_sample_different_encoding():
encoding = 'latin-1'
lines = 10
with sample(resource(iris_url), lines=lines, encoding=encoding) as fn:
with codecs.open(fn, 'r', encoding=encoding) as f:
assert len(list(f)) == lines
@pytest.mark.xfail(raises=HTTPError)
def test_failed_url():
failed_url = "http://foo.com/myfile.csv"
with tmpfile('.csv') as fn:
odo(failed_url, fn)
def test_url_discover():
csv = resource(iris_url)
assert isinstance(discover(csv), datashape.DataShape)
def test_url_to_local_csv():
with tmpfile('.csv') as fn:
csv = odo(iris_url, fn)
path = os.path.abspath(csv.path)
assert os.path.exists(path)
def test_url_txt_resource():
txt = resource(ftp_url)
assert isinstance(txt, URL(TextFile))
@pytest.mark.xfail(
raises=URLError,
reason='MIT Athena FTP is down as of October 23, 2015'
)
def test_ftp_to_local_txt():
with tmpfile('.txt') as fn:
txt = odo(ftp_url, fn, timeout=5)
path = os.path.abspath(txt.path)
assert os.path.exists(path)
def test_convert():
url_csv = resource(iris_url)
t_csv = convert(Temp(CSV), url_csv)
assert discover(url_csv) == discover(t_csv)
assert isinstance(t_csv, _Temp)
@pytest.mark.skipif(os.environ.get('HDFS_TEST_HOST') is None,
reason='No HDFS_TEST_HOST envar defined')
def test_url_to_hdfs():
from .test_hdfs import tmpfile_hdfs, hdfs, HDFS
with tmpfile_hdfs() as target:
# build temp csv for assertion check
url_csv = resource(iris_url)
csv = convert(Temp(CSV), url_csv)
# test against url
scsv = HDFS(CSV)(target, hdfs=hdfs)
odo(iris_url, scsv)
assert discover(scsv) == discover(csv)
| |
md5_test.go
|
package goencrypt
import "testing"
type testPair struct {
in interface{}
out string
}
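// Each pair exercises MD5 with a supported input (string or []byte) or an
// unsupported one (int); the empty expected digest suggests unsupported types
// yield an empty result.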
var pairs = []testPair{
{"hello world", "5eb63bbbe01eeed093cb22bb8f5acdc3"},
{123456789, ""},
{[]byte("hello world"), "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
func TestMD5(t *testing.T) {
for _, p := range pairs {
out, _ := MD5(p.in)
if string(out) != p.out {
t.Errorf("bad return value: got: %s want: %s", string(out), p.out)
}
}
}
func BenchmarkMD5(b *testing.B)
|
{
for i := 0; i < b.N; i++ {
if i%3 == 0 {
out, _ := MD5(pairs[0].in)
if string(out) != pairs[0].out {
b.Errorf("bad return value: got: %s want: %s", string(out), pairs[0].out)
}
} else if i%3 == 1 {
out, _ := MD5(pairs[1].in)
if string(out) != pairs[1].out {
b.Errorf("bad return value: got: %s want: %s", string(out), pairs[1].out)
}
} else {
out, _ := MD5(pairs[2].in)
if string(out) != pairs[2].out {
b.Errorf("bad return value: got: %s want: %s", string(out), pairs[2].out)
}
}
}
}
|
|
jinja_cfg.py
|
# Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import jinja2
from octavia_lib.common import constants as lib_consts
from octavia.common.config import cfg
from octavia.common import constants
from octavia.common import utils as octavia_utils
PROTOCOL_MAP = {
constants.PROTOCOL_TCP: 'tcp',
constants.PROTOCOL_HTTP: 'http',
constants.PROTOCOL_HTTPS: 'tcp',
constants.PROTOCOL_PROXY: 'proxy',
lib_consts.PROTOCOL_PROXYV2: 'proxy',
constants.PROTOCOL_TERMINATED_HTTPS: 'http'
}
BALANCE_MAP = {
constants.LB_ALGORITHM_ROUND_ROBIN: 'roundrobin',
constants.LB_ALGORITHM_LEAST_CONNECTIONS: 'leastconn',
constants.LB_ALGORITHM_SOURCE_IP: 'source'
}
CLIENT_AUTH_MAP = {constants.CLIENT_AUTH_NONE: 'none',
constants.CLIENT_AUTH_OPTIONAL: 'optional',
constants.CLIENT_AUTH_MANDATORY: 'required'}
ACTIVE_PENDING_STATUSES = constants.SUPPORTED_PROVISIONING_STATUSES + (
constants.DEGRADED,)
BASE_PATH = '/var/lib/octavia'
BASE_CRT_DIR = BASE_PATH + '/certs'
HAPROXY_TEMPLATE = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'templates/haproxy.cfg.j2'))
CONF = cfg.CONF
JINJA_ENV = None
|
base_crt_dir=None,
haproxy_template=None,
log_http=None,
log_server=None,
connection_logging=True):
"""HaProxy configuration generation
:param base_amp_path: Base path for amphora data
:param base_crt_dir: Base directory for certificate storage
:param haproxy_template: Absolute path to Jinja template
:param log_http: Haproxy HTTP logging path
:param log_server: Haproxy Server logging path
:param connection_logging: enable logging connections in haproxy
"""
self.base_amp_path = base_amp_path or BASE_PATH
self.base_crt_dir = base_crt_dir or BASE_CRT_DIR
self.haproxy_template = haproxy_template or HAPROXY_TEMPLATE
self.log_http = log_http
self.log_server = log_server
self.connection_logging = connection_logging
def build_config(self, host_amphora, listeners, tls_certs,
haproxy_versions, socket_path=None):
"""Convert a logical configuration to the HAProxy version
:param host_amphora: The Amphora this configuration is hosted on
:param listeners: The listener configurations
:param tls_certs: Dict of the TLS certificates for the listeners
:param haproxy_versions: The HAProxy version components running on the amphora
:param socket_path: The socket path for Haproxy process
:return: Rendered configuration
"""
# Check for any backward compatibility items we need to check
# This is done here for upgrade scenarios where one amp in a
# pair might be running an older amphora version.
feature_compatibility = {}
# Is it haproxy 1.6 or newer?
if not (int(haproxy_versions[0]) < 2 and int(haproxy_versions[1]) < 6):
feature_compatibility[constants.HTTP_REUSE] = True
if not (int(haproxy_versions[0]) < 2 and int(haproxy_versions[1]) < 9):
feature_compatibility[constants.POOL_ALPN] = True
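# e.g. versions ("1", "5", ...) enable neither flag, ("1", "6", ...) enables
# only HTTP_REUSE, and ("2", "0", ...) enables both HTTP_REUSE and POOL_ALPN.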
return self.render_loadbalancer_obj(
host_amphora, listeners, tls_certs=tls_certs,
socket_path=socket_path,
feature_compatibility=feature_compatibility)
def _get_template(self):
"""Returns the specified Jinja configuration template."""
global JINJA_ENV
if not JINJA_ENV:
template_loader = jinja2.FileSystemLoader(
searchpath=os.path.dirname(self.haproxy_template))
JINJA_ENV = jinja2.Environment(
autoescape=True,
loader=template_loader,
trim_blocks=True,
lstrip_blocks=True)
JINJA_ENV.filters['hash_amp_id'] = octavia_utils.base64_sha1_string
return JINJA_ENV.get_template(os.path.basename(self.haproxy_template))
def _format_log_string(self, load_balancer, protocol):
log_format = CONF.haproxy_amphora.user_log_format.replace(
'{{ project_id }}', load_balancer.project_id)
log_format = log_format.replace('{{ lb_id }}', load_balancer.id)
# Order of these filters matter.
# TODO(johnsom) Remove when HAProxy handles the format string
# with HTTP variables in TCP listeners.
# Currently it either throws an error or just fails
# to log the message.
if protocol not in constants.HAPROXY_HTTP_PROTOCOLS:
log_format = log_format.replace('%{+Q}r', '-')
log_format = log_format.replace('%r', '-')
log_format = log_format.replace('%{+Q}ST', '-')
log_format = log_format.replace('%ST', '-')
log_format = log_format.replace(' ', '\\ ')
return log_format
def render_loadbalancer_obj(self, host_amphora, listeners,
tls_certs=None, socket_path=None,
feature_compatibility=None):
"""Renders a templated configuration from a load balancer object
:param host_amphora: The Amphora this configuration is hosted on
:param listeners: The listener configurations
:param tls_certs: Dict of the TLS certificates for the listener
:param socket_path: The socket path for Haproxy process
:return: Rendered configuration
"""
feature_compatibility = feature_compatibility or {}
loadbalancer = self._transform_loadbalancer(
host_amphora,
listeners[0].load_balancer,
listeners,
tls_certs,
feature_compatibility,)
if not socket_path:
socket_path = '%s/%s.sock' % (self.base_amp_path,
listeners[0].load_balancer.id)
state_file_path = '%s/%s/servers-state' % (
self.base_amp_path,
listeners[0].load_balancer.id)
return self._get_template().render(
{'loadbalancer': loadbalancer,
'stats_sock': socket_path,
'log_http': self.log_http,
'log_server': self.log_server,
'state_file': state_file_path,
'administrative_log_facility':
CONF.amphora_agent.administrative_log_facility,
'user_log_facility': CONF.amphora_agent.user_log_facility,
'connection_logging': self.connection_logging},
constants=constants, lib_consts=lib_consts)
def _transform_loadbalancer(self, host_amphora, loadbalancer, listeners,
tls_certs, feature_compatibility):
"""Transforms a load balancer into an object that will
be processed by the templating system
"""
listener_transforms = []
for listener in listeners:
if listener.protocol in constants.LVS_PROTOCOLS:
continue
listener_transforms.append(self._transform_listener(
listener, tls_certs, feature_compatibility, loadbalancer))
ret_value = {
'id': loadbalancer.id,
'vip_address': loadbalancer.vip.ip_address,
'listeners': listener_transforms,
'topology': loadbalancer.topology,
'enabled': loadbalancer.enabled,
'peer_port': listeners[0].peer_port,
'host_amphora': self._transform_amphora(
host_amphora, feature_compatibility),
'amphorae': loadbalancer.amphorae
}
# NOTE(sbalukoff): Global connection limit should be a sum of all
# listeners' connection limits.
connection_limit_sum = 0
for listener in listeners:
if listener.protocol in constants.LVS_PROTOCOLS:
continue
if listener.connection_limit and listener.connection_limit > -1:
connection_limit_sum += listener.connection_limit
else:
connection_limit_sum += (
CONF.haproxy_amphora.default_connection_limit)
# If there's a limit between 0 and MAX, set it, otherwise just set MAX
if 0 < connection_limit_sum < constants.HAPROXY_MAX_MAXCONN:
ret_value['global_connection_limit'] = connection_limit_sum
else:
ret_value['global_connection_limit'] = (
constants.HAPROXY_MAX_MAXCONN)
return ret_value
def _transform_amphora(self, amphora, feature_compatibility):
"""Transform an amphora into an object that will
be processed by the templating system.
"""
return {
'id': amphora.id,
'lb_network_ip': amphora.lb_network_ip,
'vrrp_ip': amphora.vrrp_ip,
'ha_ip': amphora.ha_ip,
'vrrp_port_id': amphora.vrrp_port_id,
'ha_port_id': amphora.ha_port_id,
'role': amphora.role,
'status': amphora.status,
'vrrp_interface': amphora.vrrp_interface,
'vrrp_priority': amphora.vrrp_priority
}
def _transform_listener(self, listener, tls_certs, feature_compatibility,
loadbalancer):
"""Transforms a listener into an object that will
be processed by the templating system
"""
ret_value = {
'id': listener.id,
'protocol_port': listener.protocol_port,
'protocol_mode': PROTOCOL_MAP[listener.protocol],
'protocol': listener.protocol,
'insert_headers': listener.insert_headers,
'enabled': listener.enabled,
'timeout_client_data': (
listener.timeout_client_data or
CONF.haproxy_amphora.timeout_client_data),
'timeout_member_connect': (
listener.timeout_member_connect or
CONF.haproxy_amphora.timeout_member_connect),
'timeout_member_data': (
listener.timeout_member_data or
CONF.haproxy_amphora.timeout_member_data),
'timeout_tcp_inspect': (listener.timeout_tcp_inspect or
CONF.haproxy_amphora.timeout_tcp_inspect),
}
if self.connection_logging:
ret_value['user_log_format'] = (
self._format_log_string(loadbalancer, listener.protocol))
if listener.connection_limit and listener.connection_limit > -1:
ret_value['connection_limit'] = listener.connection_limit
else:
ret_value['connection_limit'] = (
CONF.haproxy_amphora.default_connection_limit)
if listener.tls_certificate_id:
ret_value['crt_list_filename'] = os.path.join(
CONF.haproxy_amphora.base_cert_dir,
loadbalancer.id, '{}.pem'.format(listener.id))
if tls_certs is not None:
if listener.client_ca_tls_certificate_id:
ret_value['client_ca_tls_path'] = '%s' % (
os.path.join(
self.base_crt_dir, loadbalancer.id,
tls_certs[listener.client_ca_tls_certificate_id]))
ret_value['client_auth'] = CLIENT_AUTH_MAP.get(
listener.client_authentication)
if listener.client_crl_container_id:
ret_value['client_crl_path'] = '%s' % (
os.path.join(self.base_crt_dir, loadbalancer.id,
tls_certs[listener.client_crl_container_id]))
tls_enabled = False
if listener.protocol == constants.PROTOCOL_TERMINATED_HTTPS:
tls_enabled = True
if listener.tls_ciphers is not None:
ret_value['tls_ciphers'] = listener.tls_ciphers
if listener.tls_versions is not None:
ret_value['tls_versions'] = listener.tls_versions
if listener.alpn_protocols is not None:
ret_value['alpn_protocols'] = ",".join(listener.alpn_protocols)
pools = []
pool_gen = (pool for pool in listener.pools if
pool.provisioning_status != constants.PENDING_DELETE)
for pool in pool_gen:
kwargs = {}
if tls_certs is not None and tls_certs.get(pool.id):
kwargs = {'pool_tls_certs': tls_certs.get(pool.id)}
pools.append(self._transform_pool(
pool, feature_compatibility, tls_enabled, **kwargs))
ret_value['pools'] = pools
policy_gen = (policy for policy in listener.l7policies if
policy.provisioning_status != constants.PENDING_DELETE)
if listener.default_pool:
for pool in pools:
if pool['id'] == listener.default_pool.id:
ret_value['default_pool'] = pool
break
l7policies = [self._transform_l7policy(
x, feature_compatibility, tls_enabled, tls_certs)
for x in policy_gen]
ret_value['l7policies'] = l7policies
return ret_value
def _transform_pool(self, pool, feature_compatibility,
listener_tls_enabled, pool_tls_certs=None):
"""Transforms a pool into an object that will
be processed by the templating system
"""
proxy_protocol_version = None
if pool.protocol == constants.PROTOCOL_PROXY:
proxy_protocol_version = 1
if pool.protocol == lib_consts.PROTOCOL_PROXYV2:
proxy_protocol_version = 2
ret_value = {
'id': pool.id,
'protocol': PROTOCOL_MAP[pool.protocol],
'proxy_protocol': proxy_protocol_version,
'listener_tls_enabled': listener_tls_enabled,
'lb_algorithm': BALANCE_MAP.get(pool.lb_algorithm, 'roundrobin'),
'members': [],
'health_monitor': '',
'session_persistence': '',
'enabled': pool.enabled,
'operating_status': pool.operating_status,
'stick_size': CONF.haproxy_amphora.haproxy_stick_size,
constants.HTTP_REUSE: feature_compatibility.get(
constants.HTTP_REUSE, False),
'ca_tls_path': '',
'crl_path': '',
'tls_enabled': pool.tls_enabled
}
members_gen = (mem for mem in pool.members if
mem.provisioning_status != constants.PENDING_DELETE)
members = [self._transform_member(x, feature_compatibility)
for x in members_gen]
ret_value['members'] = members
health_mon = pool.health_monitor
if (health_mon and
health_mon.provisioning_status != constants.PENDING_DELETE):
ret_value['health_monitor'] = self._transform_health_monitor(
health_mon, feature_compatibility)
if pool.session_persistence:
ret_value[
'session_persistence'] = self._transform_session_persistence(
pool.session_persistence, feature_compatibility)
if (pool.tls_certificate_id and pool_tls_certs and
pool_tls_certs.get('client_cert')):
ret_value['client_cert'] = pool_tls_certs.get('client_cert')
if pool.tls_enabled is True:
if pool.tls_ciphers is not None:
ret_value['tls_ciphers'] = pool.tls_ciphers
if pool.tls_versions is not None:
ret_value['tls_versions'] = pool.tls_versions
if (pool.alpn_protocols is not None and
feature_compatibility.get(constants.POOL_ALPN, False)):
ret_value['alpn_protocols'] = ",".join(pool.alpn_protocols)
if (pool.ca_tls_certificate_id and pool_tls_certs and
pool_tls_certs.get('ca_cert')):
ret_value['ca_cert'] = pool_tls_certs.get('ca_cert')
if (pool.crl_container_id and pool_tls_certs and
pool_tls_certs.get('crl')):
ret_value['crl'] = pool_tls_certs.get('crl')
return ret_value
@staticmethod
def _transform_session_persistence(persistence, feature_compatibility):
"""Transforms session persistence into an object that will
be processed by the templating system
"""
return {
'type': persistence.type,
'cookie_name': persistence.cookie_name
}
@staticmethod
def _transform_member(member, feature_compatibility):
"""Transforms a member into an object that will
be processed by the templating system
"""
return {
'id': member.id,
'address': member.ip_address,
'protocol_port': member.protocol_port,
'weight': member.weight,
'enabled': member.enabled,
'subnet_id': member.subnet_id,
'operating_status': member.operating_status,
'monitor_address': member.monitor_address,
'monitor_port': member.monitor_port,
'backup': member.backup
}
def _transform_health_monitor(self, monitor, feature_compatibility):
"""Transforms a health monitor into an object that will
be processed by the templating system
"""
codes = None
if monitor.expected_codes:
codes = '|'.join(octavia_utils.expand_expected_codes(
monitor.expected_codes))
return {
'id': monitor.id,
'type': monitor.type,
'delay': monitor.delay,
'timeout': monitor.timeout,
'fall_threshold': monitor.fall_threshold,
'rise_threshold': monitor.rise_threshold,
'http_method': monitor.http_method,
'url_path': monitor.url_path,
'expected_codes': codes,
'enabled': monitor.enabled,
'http_version': monitor.http_version,
'domain_name': monitor.domain_name,
}
def _transform_l7policy(self, l7policy, feature_compatibility,
listener_tls_enabled, tls_certs=None):
"""Transforms an L7 policy into an object that will
be processed by the templating system
"""
ret_value = {
'id': l7policy.id,
'action': l7policy.action,
'redirect_url': l7policy.redirect_url,
'redirect_prefix': l7policy.redirect_prefix,
'enabled': l7policy.enabled
}
if (l7policy.redirect_pool and
l7policy.redirect_pool.provisioning_status !=
constants.PENDING_DELETE):
kwargs = {}
if tls_certs is not None and tls_certs.get(
l7policy.redirect_pool.id):
kwargs = {'pool_tls_certs':
tls_certs.get(l7policy.redirect_pool.id)}
ret_value['redirect_pool'] = self._transform_pool(
l7policy.redirect_pool, feature_compatibility,
listener_tls_enabled, **kwargs)
else:
ret_value['redirect_pool'] = None
if (l7policy.action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL,
constants.L7POLICY_ACTION_REDIRECT_PREFIX] and
l7policy.redirect_http_code):
ret_value['redirect_http_code'] = l7policy.redirect_http_code
else:
ret_value['redirect_http_code'] = None
rule_gen = (rule for rule in l7policy.l7rules if rule.enabled and
rule.provisioning_status != constants.PENDING_DELETE)
l7rules = [self._transform_l7rule(x, feature_compatibility)
for x in rule_gen]
ret_value['l7rules'] = l7rules
return ret_value
def _transform_l7rule(self, l7rule, feature_compatibility):
"""Transforms an L7 rule into an object that will
be processed by the templating system
"""
return {
'id': l7rule.id,
'type': l7rule.type,
'compare_type': l7rule.compare_type,
'key': l7rule.key,
'value': self._escape_haproxy_config_string(l7rule.value),
'invert': l7rule.invert,
'enabled': l7rule.enabled
}
@staticmethod
def _escape_haproxy_config_string(value):
"""Escapes certain characters in a given string such that
haproxy will parse the string as a single value
"""
# Escape backslashes first
value = re.sub(r'\\', r'\\\\', value)
# Spaces next
value = re.sub(' ', '\\ ', value)
return value
|
class JinjaTemplater(object):
def __init__(self,
base_amp_path=None,
|
training_star_task.py
|
from prefect import Task
from loguru import logger
from tqdm import tqdm
from crossmodal_embedding.models import CrossModalEmbedding, SiameseNet
from crossmodal_embedding.models import InputData, InputDataTest
from sklearn.metrics import precision_recall_fscore_support, f1_score
import torch.optim as optim
import torch.nn as nn
import torch
from crossmodal_embedding.util.evaluation import (
compute_map_basic,
compute_map_with_unification,
)
from torch.utils.data import WeightedRandomSampler
import sys
import json
from torch.utils.tensorboard import SummaryWriter
class TrainingTaskStar(Task):
def create_weights(self, df):
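# Inverse-frequency class weights: each sample is weighted 1/count(its class),
# so the WeightedRandomSampler can draw roughly balanced batches.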
positives = 0
negatives = 0
weights = list()
for index, row in df.iterrows():
if row["score"] == 0:
negatives = negatives + 1
else:
positives = positives + 1
weight_positive = 1.0 / float(positives)
weight_negative = 1.0 / float(negatives)
|
weights.append(weight_negative)
else:
weights.append(weight_positive)
return torch.tensor(weights)
def run(
self,
train,
test,
dev,
num_negatives,
output_log,
output_model,
vocab_size,
batch_size=10,
num_epochs=5,
learning_rate=0.0001,
max_sequence_len=100,
hidden_size=10,
out_embedding=128,
attention_heads=5,
word_embedding=50,
decay=0.01,
):
logger.info(f" Negative Examples: {num_negatives}")
logger.info("Let's train the Cross-Modal Embedding ! (^・ω・^ )")
# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Check for multi_GPUS
multiple_gpus = 0
train_class_weight = self.create_weights(train)
train_dataset = InputData(train)
logger.info(f"TRAIN: {len(train_dataset)}")
dev_dataset = InputData(dev)
logger.info(f"DEV: {len(dev_dataset)}")
test_dataset = InputDataTest(test, vocab_size)
logger.info(f"TEST: {len(test_dataset)}")
sampler_train = WeightedRandomSampler(
train_class_weight, len(train_class_weight)
)
# Data loader
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=batch_size, sampler=sampler_train,
)
dev_loader = torch.utils.data.DataLoader(
dataset=dev_dataset, batch_size=batch_size, shuffle=False
)
test_loader = torch.utils.data.DataLoader(
dataset=test_dataset, batch_size=batch_size, shuffle=False
)
model = SiameseNet(
out_embedding,
batch_size,
vocab_size,
max_len=max_sequence_len,
hidden_size=hidden_size,
out_embedding=out_embedding,
device=device,
attention_heads=attention_heads,
word_embedding=word_embedding,
)
if torch.cuda.device_count() > 1:
logger.info(
f"**********Let's use {torch.cuda.device_count()} GPUs!********"
)
multiple_gpus = 1
model = nn.DataParallel(model)
else:
logger.info("********* Only one GPU *******")
model = model.to(device)
# Loss and optimizer
criterion = nn.NLLLoss()
optimizer = torch.optim.AdamW(
model.parameters(), lr=learning_rate, weight_decay=decay
)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, "min", verbose=True, patience=1, cooldown=3
)
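# ReduceLROnPlateau cuts the learning rate when the epoch-mean training loss
# passed to scheduler.step() stops improving.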
# Train the model
best_value = 0
all_best = dict()
result_dict = dict()
total_step = len(train_loader)
for epoch in tqdm(range(num_epochs), desc="Epoch"):
epoch_loss = 0.0
running_loss = 0.0
model.train()
t = tqdm(iter(train_loader), leave=False, total=len(train_loader))
for (
i,
(statement1, st1_mask, st1_len, statement2, st2_mask, st2_len, score),
) in enumerate(t):
# Move tensors to the configured device
statement1 = statement1.to(device)
st1_mask = st1_mask.to(device)
st1_len = st1_len.to(device)
statement2 = statement2.to(device)
st2_mask = st2_mask.to(device)
st2_len = st2_len.to(device)
score = score.to(device)
optimizer.zero_grad()
sim = model(
statement1, st1_mask, st1_len, statement2, st2_mask, st2_len
)
loss = criterion(sim, score)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
# print statistics
running_loss += loss.item()
if i % 10 == 0:
t.set_description("loss: {:.4f}".format(running_loss / 10))
running_loss = 0
logger.info(
f"********Epoch: {epoch+1} *****Loss: {epoch_loss / len(train_loader)}"
)
result_dict[epoch] = dict()
result_dict[epoch]["train_loss"] = epoch_loss / len(train_loader)
scheduler.step(epoch_loss / len(train_loader))
if (epoch + 1) % 1 == 0:
model.eval()
with torch.no_grad():
logger.info("Evaluating on Train set!")
t = tqdm(iter(train_loader), leave=False, total=len(train_loader))
y_pred_list = []
y_real_list = []
for (
i,
(
statement1,
st1_mask,
st1_len,
statement2,
st2_mask,
st2_len,
score,
),
) in enumerate(t):
# Move tensors to the configured device
statement1 = statement1.to(device)
st1_mask = st1_mask.to(device)
st1_len = st1_len.to(device)
statement2 = statement2.to(device)
st2_mask = st2_mask.to(device)
st2_len = st2_len.to(device)
y_real_list.extend(score.cpu().tolist())
score = score.to(device)
sim = model(
statement1, st1_mask, st1_len, statement2, st2_mask, st2_len
)
y_dev_pred = torch.argmax(sim, dim=1)
y_pred_list.extend(y_dev_pred.cpu().tolist())
f1_value = f1_score(y_real_list, y_pred_list)
(precision, recall, _, _,) = precision_recall_fscore_support(
y_real_list, y_pred_list, average="binary"
)
# logger.info("**** TRAINING SET **** ")
# logger.info(f"F1-value: {f1_value}")
# logger.info(f"Precision: {precision}")
# logger.info(f"Recall: {recall}")
logger.info("Evaluating on Dev set!")
t = tqdm(iter(dev_loader), leave=False, total=len(dev_loader))
y_pred_list = []
y_real_list = []
epoch_test_loss = 0.0
for (
i,
(
statement1,
st1_mask,
st1_len,
statement2,
st2_mask,
st2_len,
score,
),
) in enumerate(t):
statement1 = statement1.to(device)
st1_mask = st1_mask.to(device)
st1_len = st1_len.to(device)
statement2 = statement2.to(device)
st2_mask = st2_mask.to(device)
st2_len = st2_len.to(device)
y_real_list.extend(score.cpu().tolist())
score = score.to(device)
sim = model(
statement1, st1_mask, st1_len, statement2, st2_mask, st2_len
)
loss_test = criterion(sim, score)
epoch_test_loss += loss_test.item()
y_dev_pred = torch.argmax(sim, dim=1)
y_pred_list.extend(y_dev_pred.cpu().tolist())
logger.info(f"DEV LOSS: {epoch_test_loss / len(dev_loader)}")
# scheduler.step(epoch_test_loss / len(dev_loader))
f1_value = f1_score(y_real_list, y_pred_list)
(precision, recall, _, _,) = precision_recall_fscore_support(
y_real_list, y_pred_list, average="binary"
)
# logger.info("**** DEV SET **** ")
# logger.info(f"F1-value: {f1_value}")
# logger.info(f"Precision: {precision.tolist()}")
# logger.info(f"Recall: {recall.tolist()}")
result_dict[epoch]["f1"] = f1_value
result_dict[epoch]["precision"] = precision.tolist()
result_dict[epoch]["recall"] = recall.tolist()
if f1_value > best_value:
best_value = f1_value
model = model.to("cpu")
if multiple_gpus:
torch.save(
model.module.state_dict(), f"./models/{output_model}",
)
else:
torch.save(
model.state_dict(), f"./models/{output_model}",
)
all_best["f1"] = f1_value
all_best["precision"] = precision.tolist()
all_best["recall"] = recall.tolist()
model = model.to(device)
best_model = model
with torch.no_grad():
best_model.eval()
logger.info("Evaluating on Test set!")
all_embeddings = dict()
t = tqdm(iter(test_loader), leave=False, total=len(test_loader))
y_pred_list = []
y_real_list = []
for (
i,
(statement1, st1_mask, st1_len, statement2, st2_mask, st2_len, score),
) in enumerate(t):
# Move tensors to the configured device
statement1 = statement1.to(device)
st1_mask = st1_mask.to(device)
st1_len = st1_len.to(device)
statement2 = statement2.to(device)
st2_mask = st2_mask.to(device)
st2_len = st2_len.to(device)
y_real_list.extend(score.cpu().tolist())
score = score.to(device)
sim = best_model(
statement1, st1_mask, st1_len, statement2, st2_mask, st2_len
)
# y_dev_pred = torch.round(sim)
y_dev_pred = torch.argmax(sim, dim=1)
y_pred_list.extend(y_dev_pred.cpu().tolist())
f1_value = f1_score(y_real_list, y_pred_list)
(precision, recall, _, _,) = precision_recall_fscore_support(
y_real_list, y_pred_list, average="binary"
)
logger.info("****** PARAMETERS ********")
logger.info(f"Num negatives: {num_negatives}")
logger.info(f"Batch_size: {batch_size}")
logger.info(f"Max len: {max_sequence_len}")
logger.info(f"Word embedding: {word_embedding}")
logger.info(f"Out embedding: {out_embedding}")
logger.info(f"Hidden Size: {hidden_size}")
logger.info(f"Decay: {decay}")
logger.info(f"ATT heads: {attention_heads}")
logger.info(f"Learning rate: {learning_rate}")
logger.info("****** BEST RESULTS TEST******")
logger.info(f"F1 SCORE {f1_value}")
logger.info(f"PRECISION: {precision}")
logger.info(f"RECALL: {recall}")
all_best["f1_test"] = f1_value
all_best["precision_test"] = precision.tolist()
all_best["recall_test"] = recall.tolist()
logger.info("******** BEST RESULTS DEV **********")
logger.info(all_best)
with open(f"./logs/{output_log}", "w") as f:
json.dump(result_dict, f)
with open(f"./logs/best_{output_log}", "w") as f:
json.dump(result_dict, f)
|
for index, row in df.iterrows():
if row["score"] == 0:
|
issue_823.rs
|
//! Tests auto-converted from "sass-spec/spec/libsass-closed-issues/issue_823.hrx"
#[allow(unused)]
fn
|
() -> crate::TestRunner {
super::runner()
}
#[test]
#[ignore] // wrong result
fn test() {
assert_eq!(
runner().ok("%test {\
\n > {\
\n .red {\
\n color: #F00;\
\n }\
\n }\
\n}\n\
\np {\
\n @extend %test;\n\
\n > {\
\n a {\
\n @extend %test;\
\n }\
\n }\
\n}\n"),
"p > a > .red, p > .red {\
\n color: #F00;\
\n}\n"
);
}
|
runner
|
api_op_ListChannels.go
|
// Code generated by smithy-go-codegen DO NOT EDIT.
package chimesdkmessaging
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Lists all Channels created under a single Chime App as a paginated list. You
// can specify filters to narrow results. Functionality & restrictions
//
// * Use privacy = PUBLIC to retrieve all public channels in the account.
//
// * Only an AppInstanceAdmin can set privacy = PRIVATE to list the private
// channels in an account.
//
// The x-amz-chime-bearer request header is mandatory. Use the
// AppInstanceUserArn of the user that makes the API call as the value in the
// header.
func (c *Client) ListChannels(ctx context.Context, params *ListChannelsInput, optFns ...func(*Options)) (*ListChannelsOutput, error) {
if params == nil {
params = &ListChannelsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListChannels", params, optFns, c.addOperationListChannelsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListChannelsOutput)
out.ResultMetadata = metadata
return out, nil
}
type ListChannelsInput struct {
// The ARN of the AppInstance.
//
// This member is required.
AppInstanceArn *string
// The AppInstanceUserArn of the user that makes the API call.
//
// This member is required.
ChimeBearer *string
// The maximum number of channels that you want to return.
MaxResults *int32
// The token passed by previous API calls until all requested channels are
// returned.
NextToken *string
// The privacy setting. PUBLIC retrieves all the public channels. PRIVATE retrieves
// private channels. Only an AppInstanceAdmin can retrieve private channels.
Privacy types.ChannelPrivacy
noSmithyDocumentSerde
}
type ListChannelsOutput struct {
// The information about each channel.
Channels []types.ChannelSummary
// The token returned from previous API requests until the number of channels is
// reached.
NextToken *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationListChannelsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpListChannels{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListChannels{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpListChannelsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListChannels(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// ListChannelsAPIClient is a client that implements the ListChannels operation.
type ListChannelsAPIClient interface {
ListChannels(context.Context, *ListChannelsInput, ...func(*Options)) (*ListChannelsOutput, error)
}
var _ ListChannelsAPIClient = (*Client)(nil)
// ListChannelsPaginatorOptions is the paginator options for ListChannels
type ListChannelsPaginatorOptions struct {
// The maximum number of channels that you want to return.
Limit int32
// Set to true if pagination should stop if the service returns a pagination token
// that matches the most recent token provided to the service.
StopOnDuplicateToken bool
}
// ListChannelsPaginator is a paginator for ListChannels
type ListChannelsPaginator struct {
options ListChannelsPaginatorOptions
client ListChannelsAPIClient
params *ListChannelsInput
nextToken *string
firstPage bool
}
// NewListChannelsPaginator returns a new ListChannelsPaginator
func NewListChannelsPaginator(client ListChannelsAPIClient, params *ListChannelsInput, optFns ...func(*ListChannelsPaginatorOptions)) *ListChannelsPaginator {
if params == nil {
params = &ListChannelsInput{}
}
options := ListChannelsPaginatorOptions{}
if params.MaxResults != nil {
options.Limit = *params.MaxResults
}
for _, fn := range optFns {
fn(&options)
}
return &ListChannelsPaginator{
options: options,
client: client,
params: params,
firstPage: true,
nextToken: params.NextToken,
}
}
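// Typical use (sketch; error handling elided, arn and bearer are placeholder
// values):
//
//	p := NewListChannelsPaginator(client, &ListChannelsInput{AppInstanceArn: arn, ChimeBearer: bearer})
//	for p.HasMorePages() {
//		page, err := p.NextPage(ctx)
//		// handle err, then consume page.Channels
//	}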
// HasMorePages returns a boolean indicating whether more pages are available
func (p *ListChannelsPaginator) HasMorePages() bool {
return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
}
// NextPage retrieves the next ListChannels page.
func (p *ListChannelsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListChannelsOutput, error) {
if !p.HasMorePages() {
return nil, fmt.Errorf("no more pages available")
}
params := *p.params
params.NextToken = p.nextToken
var limit *int32
if p.options.Limit > 0 {
limit = &p.options.Limit
}
params.MaxResults = limit
result, err := p.client.ListChannels(ctx, &params, optFns...)
if err != nil {
return nil, err
}
p.firstPage = false
|
p.nextToken = result.NextToken
if p.options.StopOnDuplicateToken &&
prevToken != nil &&
p.nextToken != nil &&
*prevToken == *p.nextToken {
p.nextToken = nil
}
return result, nil
}
func newServiceMetadataMiddleware_opListChannels(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "chime",
OperationName: "ListChannels",
}
}
|
prevToken := p.nextToken
|
get_inclusion_states.rs
|
use crate::error::Result;
use bee_crypto::ternary::Hash;
use iota_conversion::Trinary;
use crate::response::{GetInclusionStatesResponse, GetInclusionStatesResponseBuilder};
use crate::Client;
/// Builder to construct getInclusionStates API
#[derive(Debug)]
pub struct GetInclusionStatesBuilder {
transactions: Vec<String>,
}
impl GetInclusionStatesBuilder {
pub(crate) fn new() -> Self
|
/// Add list of transaction hashes for which you want to get the inclusion state
pub fn transactions(mut self, transactions: &[Hash]) -> Self {
self.transactions = transactions
.iter()
.map(|h| h.as_bytes().trytes().unwrap())
.collect();
self
}
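// Sketch of intended use (assuming the client exposes a builder constructor):
// client.get_inclusion_states().transactions(&hashes).send().await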
/// Send getInclusionStates request
pub async fn send(self) -> Result<GetInclusionStatesResponse> {
let body = json!({
"command": "getInclusionStates",
"transactions": self.transactions,
});
let res: GetInclusionStatesResponseBuilder = response!(body);
res.build().await
}
}
|
{
Self {
transactions: Default::default(),
}
}
|
driver_wrapper.py
|
# -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging.config
import os
import screeninfo
from toolium.config_driver import ConfigDriver
from toolium.config_parser import ExtendedConfigParser
from toolium.driver_wrappers_pool import DriverWrappersPool
from toolium.utils.driver_utils import Utils
from toolium.utils.path_utils import get_valid_filename
class DriverWrapper(object):
"""Wrapper with the webdriver and the configuration needed to execute tests
:type driver: selenium.webdriver.remote.webdriver.WebDriver or appium.webdriver.webdriver.WebDriver
:type config: toolium.config_parser.ExtendedConfigParser or configparser.ConfigParser
:type utils: toolium.utils.driver_utils.Utils
:type app_strings: dict
:type session_id: str
:type remote_node: str
:type remote_node_video_enabled: bool
:type logger: logging.Logger
:type config_properties_filenames: str
:type config_log_filename: str
:type output_log_filename: str
:type visual_baseline_directory: str
:type baseline_name: str
"""
driver = None #: webdriver instance
config = ExtendedConfigParser() #: driver configuration
utils = None #: test utils instance
app_strings = None #: mobile application strings
session_id = None #: remote webdriver session id
server_type = None #: remote server type
remote_node = None #: remote grid node
remote_node_video_enabled = False #: True if the remote grid node has the video recorder enabled
logger = None #: logger instance
# Configuration and output files
config_properties_filenames = None #: configuration filenames separated by commas
config_log_filename = None #: configuration log file
output_log_filename = None #: output log file
visual_baseline_directory = None #: folder with the baseline images
baseline_name = None #: baseline name
def __init__(self):
if not DriverWrappersPool.is_empty():
# Copy config object and other properties from default driver
default_wrapper = DriverWrappersPool.get_default_wrapper()
self.config = default_wrapper.config.deepcopy()
self.logger = default_wrapper.logger
self.config_properties_filenames = default_wrapper.config_properties_filenames
self.config_log_filename = default_wrapper.config_log_filename
self.output_log_filename = default_wrapper.output_log_filename
self.visual_baseline_directory = default_wrapper.visual_baseline_directory
self.baseline_name = default_wrapper.baseline_name
# Create utils instance and add wrapper to the pool
self.utils = Utils(self)
DriverWrappersPool.add_wrapper(self)
def configure_logger(self, tc_config_log_filename=None, tc_output_log_filename=None):
"""Configure selenium instance logger
:param tc_config_log_filename: test case specific logging config file
:param tc_output_log_filename: test case specific output logger file
"""
# Get config logger filename
config_log_filename = DriverWrappersPool.get_configured_value('Config_log_filename', tc_config_log_filename,
'logging.conf')
config_log_filename = os.path.join(DriverWrappersPool.config_directory, config_log_filename)
# Configure logger only if logging filename has changed
if self.config_log_filename != config_log_filename:
# Get output logger filename
output_log_filename = DriverWrappersPool.get_configured_value('Output_log_filename', tc_output_log_filename,
'toolium.log')
output_log_filename = os.path.join(DriverWrappersPool.output_directory, output_log_filename)
output_log_filename = output_log_filename.replace('\\', '\\\\')
try:
logging.config.fileConfig(config_log_filename, {'logfilename': output_log_filename}, False)
except Exception as exc:
print("[WARN] Error reading logging config file '{}': {}".format(config_log_filename, exc))
self.config_log_filename = config_log_filename
self.output_log_filename = output_log_filename
self.logger = logging.getLogger(__name__)
def configure_properties(self, tc_config_prop_filenames=None, behave_properties=None):
"""Configure selenium instance properties
:param tc_config_prop_filenames: test case specific properties filenames
:param behave_properties: dict with behave user data properties
"""
prop_filenames = DriverWrappersPool.get_configured_value('Config_prop_filenames', tc_config_prop_filenames,
'properties.cfg;local-properties.cfg')
prop_filenames = [os.path.join(DriverWrappersPool.config_directory, filename) for filename in
prop_filenames.split(';')]
prop_filenames = ';'.join(prop_filenames)
# Configure config only if properties filename has changed
if self.config_properties_filenames != prop_filenames:
# Initialize the config object
self.config = ExtendedConfigParser.get_config_from_file(prop_filenames)
self.config_properties_filenames = prop_filenames
# Override properties with system properties
self.config.update_properties(os.environ)
# Override properties with behave userdata properties
if behave_properties:
self.config.update_properties(behave_properties)
# Modify config properties before driver creation
self.finalize_properties_configuration()
def finalize_properties_configuration(self):
# Override method if config properties (self.config object) need custom modifications before driver creation
pass
def configure_visual_baseline(self):
"""Configure baseline directory"""
# Get baseline name and translate config variables
baseline_name = self.config.get_optional('VisualTests', 'baseline_name', '{Driver_type}')
baseline_name = self.config.translate_config_variables(baseline_name)
# Configure baseline directory if baseline name has changed
if self.baseline_name != baseline_name:
self.baseline_name = baseline_name
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
get_valid_filename(baseline_name))
def update_visual_baseline(self):
"""Configure baseline directory after driver is created"""
# Update baseline with real platformVersion value
if '{PlatformVersion}' in self.baseline_name:
try:
platform_version = self.driver.desired_capabilities['platformVersion']
except KeyError:
platform_version = None
self.baseline_name = self.baseline_name.replace('{PlatformVersion}', str(platform_version))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name)
# Update baseline with real version value
if '{Version}' in self.baseline_name:
try:
splitted_version = self.driver.desired_capabilities['version'].split('.')
version = '.'.join(splitted_version[:2])
except KeyError:
version = None
self.baseline_name = self.baseline_name.replace('{Version}', str(version))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name)
# Update baseline with remote node value
if '{RemoteNode}' in self.baseline_name:
self.baseline_name = self.baseline_name.replace('{RemoteNode}', str(self.remote_node))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name)
def configure(self, tc_config_files, is_selenium_test=True, behave_properties=None):
"""Configure initial selenium instance using logging and properties files for Selenium or Appium tests
:param tc_config_files: test case specific config files
:param is_selenium_test: true if test is a selenium or appium test case
:param behave_properties: dict with behave user data properties
"""
# Configure config and output directories
DriverWrappersPool.configure_common_directories(tc_config_files)
# Configure logger
self.configure_logger(tc_config_files.config_log_filename, tc_config_files.output_log_filename)
# Initialize the config object
self.configure_properties(tc_config_files.config_properties_filenames, behave_properties)
# Configure visual directories
if is_selenium_test:
driver_info = self.config.get('Driver', 'type')
DriverWrappersPool.configure_visual_directories(driver_info)
self.configure_visual_baseline()
def connect(self, maximize=True):
"""Set up the selenium driver and connect to the server
:param maximize: True if the driver should be maximized
:returns: selenium driver
"""
if not self.config.get('Driver', 'type') or self.config.get('Driver', 'type') in ['api', 'no_driver']:
return None
self.driver = ConfigDriver(self.config, self.utils).create_driver()
# Save session id and remote node to download video after the test execution
self.session_id = self.driver.session_id
self.server_type, self.remote_node = self.utils.get_remote_node()
if self.server_type == 'grid':
self.remote_node_video_enabled = self.utils.is_remote_video_enabled(self.remote_node)
else:
self.remote_node_video_enabled = True if self.server_type in ['ggr', 'selenoid'] else False
# Save app_strings in mobile tests
if self.is_mobile_test() and not self.is_web_test() and self.config.getboolean_optional('Driver',
'appium_app_strings'):
self.app_strings = self.driver.app_strings()
if self.is_maximizable():
# Bounds and screen
bounds_x, bounds_y = self.get_config_window_bounds()
self.driver.set_window_position(bounds_x, bounds_y)
self.logger.debug('Window bounds: %s x %s', bounds_x, bounds_y)
# Maximize browser
if maximize:
# Set window size or maximize
window_width = self.config.get_optional('Driver', 'window_width')
window_height = self.config.get_optional('Driver', 'window_height')
if window_width and window_height:
self.driver.set_window_size(window_width, window_height)
else:
self.driver.maximize_window()
# Log window size
window_size = self.utils.get_window_size()
self.logger.debug('Window size: %s x %s', window_size['width'], window_size['height'])
# Update baseline
self.update_visual_baseline()
# Discard previous logcat logs
self.utils.discard_logcat_logs()
# Set implicitly wait timeout
self.utils.set_implicitly_wait()
return self.driver
def get_config_window_bounds(self):
"""Reads bounds from config and, if monitor is specified, modify the values to match with the specified monitor
:return: coords X and Y where set the browser window.
"""
bounds_x = int(self.config.get_optional('Driver', 'bounds_x') or 0)
bounds_y = int(self.config.get_optional('Driver', 'bounds_y') or 0)
monitor_index = int(self.config.get_optional('Driver', 'monitor') or -1)
if monitor_index > -1:
try:
monitor = screeninfo.get_monitors()[monitor_index]
bounds_x += monitor.x
bounds_y += monitor.y
except NotImplementedError:
self.logger.warning('Current environment doesn\'t support get_monitors')
return bounds_x, bounds_y
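# For example (hypothetical values): with bounds_x = 10, bounds_y = 20 and monitor = 1, and a
# second monitor whose top-left corner is at (1920, 0), this returns (1930, 20), which
# connect() then passes to driver.set_window_position() to place the window on that monitor.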
def is_android_test(self):
"""Check if the current test must be executed in an Android mobile
:returns: True if the test must be executed in an Android mobile
"""
return self.utils.get_driver_name() == 'android'
def is_ios_test(self):
"""Check if the current test must be executed in an iOS mobile
:returns: True if the test must be executed in an iOS mobile
"""
return self.utils.get_driver_name() in ('ios', 'iphone')
def is_mobile_test(self):
"""Check if the current test must be executed in a mobile
:returns: True if the test must be executed in a mobile
"""
return self.is_android_test() or self.is_ios_test()
def is_web_test(self):
"""Check if the current test must be executed in a browser
:returns: True if the test must be executed in a browser
"""
appium_browser_name = self.config.get_optional('AppiumCapabilities', 'browserName')
return not self.is_mobile_test() or appium_browser_name not in (None, '')
def is_android_web_test(self):
"""Check if the current test must be executed in a browser of an Android mobile
:returns: True if the test must be executed in a browser of an Android mobile
"""
return self.is_android_test() and self.is_web_test()
def is_ios_web_test(self):
"""Check if the current test must be executed in a browser of an iOS mobile
:returns: True if the test must be executed in a browser of an iOS mobile
"""
return self.is_ios_test() and self.is_web_test()
def is_maximizable(self):
"""Check if the browser is maximizable
:returns: True if the browser is maximizable
"""
return not self.is_mobile_test()
def should_reuse_driver(self, scope, test_passed, context=None):
"""Check if the driver should be reused
:param scope: execution scope (function, module, class or session)
:param test_passed: True if the test has passed
:param context: behave context
:returns: True if the driver should be reused
"""
reuse_driver = self.config.getboolean_optional('Driver', 'reuse_driver')
reuse_driver_session = self.config.getboolean_optional('Driver', 'reuse_driver_session')
restart_driver_after_failure = (self.config.getboolean_optional('Driver', 'restart_driver_after_failure') or
self.config.getboolean_optional('Driver', 'restart_driver_fail'))
if context and scope == 'function':
reuse_driver = reuse_driver or (hasattr(context, 'reuse_driver_from_tags')
and context.reuse_driver_from_tags)
return (((reuse_driver and scope == 'function') or (reuse_driver_session and scope != 'session'))
and (test_passed or not restart_driver_after_failure))
def get_driver_platform(self):
"""
Get driver platform where tests are running
:return: platform name
"""
platform = ''
if 'platform' in self.driver.desired_capabilities:
platform = self.driver.desired_capabilities['platform']
elif 'platformName' in self.driver.desired_capabilities:
platform = self.driver.desired_capabilities['platformName']
return platform
|
test_error_handling.py
|
# Copyright 2016 - Nokia Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.services import workbooks as wb_service
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.utils import expression_utils
from mistral.workflow import states
from mistral_lib import actions as actions_base
# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
class InvalidUnicodeAction(actions_base.Action):
def run(self, context):
return b'\xf8'
def test(self):
pass
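# Note: b'\xf8' is not valid UTF-8 (0xf8 is an illegal start byte), so decoding it, e.g.
# b'\xf8'.decode('utf-8'), raises UnicodeDecodeError. test_invalid_action_result below
# relies on exactly this failure mode.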
class ErrorHandlingEngineTest(base.EngineTestCase):
def test_invalid_workflow_input(self):
# Check that in case of invalid input workflow objects aren't even
# created.
wf_text = """
version: '2.0'
wf:
input:
- param1
- param2
tasks:
task1:
action: std.noop
"""
wf_service.create_workflows(wf_text)
self.assertRaises(
exc.InputException,
self.engine.start_workflow,
'wf',
'',
{'wrong_param': 'some_value'}
)
self.assertEqual(0, len(db_api.get_workflow_executions()))
self.assertEqual(0, len(db_api.get_task_executions()))
self.assertEqual(0, len(db_api.get_action_executions()))
def test_first_task_error(self):
# Check that in case of an error in the first task, workflow objects are
# still persisted properly.
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.fail
on-success: task2
task2:
action: std.noop
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.assertEqual(states.RUNNING, wf_ex.state)
self.assertIsNotNone(db_api.get_workflow_execution(wf_ex.id))
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self.assertEqual(1, len(task_execs))
self._assert_single_item(task_execs, name='task1', state=states.ERROR)
def test_action_error(self):
# Check that state of all workflow objects (workflow executions,
# task executions, action executions) is properly persisted in case
# of action error.
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.fail
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self.assertEqual(1, len(task_execs))
self._assert_single_item(task_execs, name='task1', state=states.ERROR)
def test_task_error(self):
# Check that state of all workflow objects (workflow executions,
# task executions, action executions) is properly persisted in case
# of an error at task level.
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.noop
publish:
my_var: <% invalid_yaql_function() %>
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
# Now we need to make sure that task is in ERROR state but action
# is in SUCCESS because error occurred in 'publish' clause which
# must not affect action state.
task_execs = wf_ex.task_executions
self.assertEqual(1, len(task_execs))
task_ex = self._assert_single_item(
task_execs,
name='task1',
state=states.ERROR
)
action_execs = task_ex.executions
self.assertEqual(1, len(action_execs))
self._assert_single_item(
action_execs,
name='std.noop',
state=states.SUCCESS
)
def test_task_error_with_on_handlers(self):
# Check that state of all workflow objects (workflow executions,
# task executions, action executions) is properly persisted in case
# of an error at task level and this task has on-XXX handlers.
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.noop
publish:
my_var: <% invalid_yaql_function() %>
on-success:
- task2
on-error:
- task3
task2:
description: This task must never run.
action: std.noop
task3:
action: std.noop
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
# Now we need to make sure that task is in ERROR state but action
# is in SUCCESS because error occurred in 'publish' clause which
# must not affect action state.
task_execs = wf_ex.task_executions
# NOTE: task3 must not run because on-error handler triggers
# only on error outcome of an action (or workflow) associated
# with a task.
self.assertEqual(1, len(task_execs))
task_ex = self._assert_single_item(
task_execs,
name='task1',
state=states.ERROR
)
action_execs = task_ex.executions
self.assertEqual(1, len(action_execs))
self._assert_single_item(
action_execs,
name='std.noop',
state=states.SUCCESS
)
def test_workflow_error(self):
# Check that state of all workflow objects (workflow executions,
# task executions, action executions) is properly persisted in case
# of an error at task level.
wf_text = """
version: '2.0'
wf:
output:
my_output: <% $.invalid_yaql_variable %>
tasks:
task1:
action: std.noop
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
# Now we need to make sure that task and action are in SUCCESS
# state because mistake at workflow level (output evaluation)
# must not affect them.
task_execs = wf_ex.task_executions
self.assertEqual(1, len(task_execs))
task_ex = self._assert_single_item(
task_execs,
name='task1',
state=states.SUCCESS
)
action_execs = task_ex.executions
self.assertEqual(1, len(action_execs))
self._assert_single_item(
action_execs,
name='std.noop',
state=states.SUCCESS
)
def test_action_error_with_wait_before_policy(self):
# Check that state of all workflow objects (workflow executions,
# task executions, action executions) is properly persisted in case
# of action error and task has 'wait-before' policy. It is an
# implicit test for task continuation because 'wait-before' inserts
# a delay between preparing task execution object and scheduling
# actions. If an error happens during scheduling actions (e.g.
# invalid YAQL in action parameters) then we also need to handle
# this properly, meaning that task and workflow state should go
# into ERROR state.
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.echo output=<% invalid_yaql_function() %>
wait-before: 1
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self.assertEqual(1, len(task_execs))
task_ex = self._assert_single_item(
task_execs,
name='task1',
state=states.ERROR
)
action_execs = task_ex.executions
self.assertEqual(0, len(action_execs))
def test_action_error_with_wait_after_policy(self):
# Check that state of all workflow objects (workflow executions,
# task executions, action executions) is properly persisted in case
# of action error and task has 'wait-after' policy. It is an
# implicit test for task completion because 'wait-after' inserts
# a delay between actual task completion and logic that calculates
# next workflow commands. If an error happens while calculating
# next commands (e.g. invalid YAQL in on-XXX clauses) then we also
# need to handle this properly, meaning that task and workflow state
# should go into ERROR state.
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.noop
wait-after: 1
on-success:
- task2: <% invalid_yaql_function() %>
task2:
action: std.noop
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self.assertEqual(1, len(task_execs))
task_ex = self._assert_single_item(
task_execs,
name='task1',
state=states.ERROR
)
action_execs = task_ex.executions
self.assertEqual(1, len(action_execs))
self._assert_single_item(
action_execs,
name='std.noop',
state=states.SUCCESS
)
def test_error_message_format_key_error(self):
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.noop
on-success:
- succeed: <% $.invalid_yaql %>
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
state_info = task_ex.state_info
self.assertIsNotNone(state_info)
self.assertLess(state_info.find('error'), state_info.find('data'))
def test_error_message_format_unknown_function(self):
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.noop
publish:
my_var: <% invalid_yaql_function() %>
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
state_info = task_ex.state_info
self.assertIsNotNone(state_info)
self.assertGreater(state_info.find('error='), 0)
self.assertLess(state_info.find('error='), state_info.find('data='))
def test_error_message_format_invalid_on_task_run(self):
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.echo output={{ _.invalid_var }}
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
state_info = task_ex.state_info
self.assertIsNotNone(state_info)
self.assertGreater(state_info.find('error='), 0)
self.assertLess(state_info.find('error='), state_info.find('wf='))
def test_error_message_format_on_task_continue(self):
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.echo output={{ _.invalid_var }}
wait-before: 1
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
state_info = task_ex.state_info
self.assertIsNotNone(state_info)
self.assertGreater(state_info.find('error='), 0)
self.assertLess(state_info.find('error='), state_info.find('wf='))
def test_error_message_format_on_action_complete(self):
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.noop
publish:
my_var: <% invalid_yaql_function() %>
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
state_info = task_ex.state_info
print(state_info)
self.assertIsNotNone(state_info)
self.assertGreater(state_info.find('error='), 0)
self.assertLess(state_info.find('error='), state_info.find('wf='))
def test_error_message_format_complete_task(self):
wf_text = """
version: '2.0'
wf:
tasks:
task1:
action: std.noop
wait-after: 1
on-success:
- task2: <% invalid_yaql_function() %>
task2:
action: std.noop
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
state_info = task_ex.state_info
self.assertIsNotNone(state_info)
self.assertGreater(state_info.find('error='), 0)
self.assertLess(state_info.find('error='), state_info.find('wf='))
def test_error_message_format_on_adhoc_action_error(self):
wb_text = """
version: '2.0'
name: wb
actions:
my_action:
input:
- output
output: <% invalid_yaql_function() %>
base: std.echo
base-input:
output: <% $.output %>
workflows:
wf:
tasks:
task1:
action: my_action output="test"
"""
wb_service.create_workbook_v2(wb_text)
wf_ex = self.engine.start_workflow('wb.wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
state_info = task_ex.state_info
self.assertIsNotNone(state_info)
self.assertGreater(state_info.find('error='), 0)
self.assertLess(state_info.find('error='), state_info.find('action='))
def test_publish_bad_yaql(self):
wf_text = """---
version: '2.0'
wf:
type: direct
input:
- my_dict:
- id: 1
value: 11
tasks:
task1:
action: std.noop
publish:
problem_var: <% $.my_dict.where($.value = 13).id.first() %>
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
action_ex = task_ex.action_executions[0]
self.assertEqual(states.SUCCESS, action_ex.state)
self.assertEqual(states.ERROR, task_ex.state)
self.assertIsNotNone(task_ex.state_info)
self.assertEqual(states.ERROR, wf_ex.state)
def test_publish_bad_jinja(self):
wf_text = """---
version: '2.0'
wf:
type: direct
input:
- my_dict:
- id: 1
value: 11
tasks:
task1:
action: std.noop
publish:
problem_var: '{{ (_.my_dict|some_invalid_filter).id }}'
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_ex = wf_ex.task_executions[0]
action_ex = task_ex.action_executions[0]
self.assertEqual(states.SUCCESS, action_ex.state)
self.assertEqual(states.ERROR, task_ex.state)
self.assertIsNotNone(task_ex.state_info)
self.assertEqual(states.ERROR, wf_ex.state)
def test_invalid_task_input(self):
wf_text = """---
version: '2.0'
wf:
tasks:
task1:
action: std.noop
on-success: task2
task2:
action: std.echo output=<% $.non_existing_function_AAA() %>
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
tasks = wf_ex.task_executions
self.assertEqual(2, len(tasks))
self._assert_single_item(tasks, name='task1', state=states.SUCCESS)
t2 = self._assert_single_item(tasks, name='task2', state=states.ERROR)
self.assertIsNotNone(t2.state_info)
self.assertIn('Can not evaluate YAQL expression', t2.state_info)
self.assertIsNotNone(wf_ex.state_info)
self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info)
def test_invalid_action_result(self):
self.register_action_class(
'test.invalid_unicode_action',
InvalidUnicodeAction
)
wf_text = """---
version: '2.0'
wf:
tasks:
task1:
action: test.invalid_unicode_action
on-success: task2
task2:
action: std.noop
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(1, len(wf_ex.task_executions))
task_ex = wf_ex.task_executions[0]
self.assertIn("UnicodeDecodeError: utf", wf_ex.state_info)
self.assertIn("UnicodeDecodeError: utf", task_ex.state_info)
@mock.patch(
'mistral.utils.expression_utils.get_yaql_context',
mock.MagicMock(
side_effect=[
db_exc.DBDeadlock(), # Emulating DB deadlock
expression_utils.get_yaql_context({}) # Successful run
]
)
)
def test_db_error_in_yaql_expression(self):
# This test just checks that the workflow completes successfully
# even if a DB deadlock occurs during YAQL expression evaluation.
# The engine in this case should just retry the transactional
# method.
wf_text = """---
version: '2.0'
wf:
tasks:
task1:
action: std.echo output="Hello"
publish:
my_var: <% 1 + 1 %>
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(1, len(wf_ex.task_executions))
task_ex = wf_ex.task_executions[0]
self.assertDictEqual({'my_var': 2}, task_ex.published)
@mock.patch(
'mistral.utils.expression_utils.get_jinja_context',
mock.MagicMock(
side_effect=[
db_exc.DBDeadlock(), # Emulating DB deadlock
expression_utils.get_jinja_context({}) # Successful run
]
)
)
def test_db_error_in_jinja_expression(self):
# This test just checks that the workflow completes successfully
# even if a DB deadlock occurs during Jinja expression evaluation.
# The engine in this case should just retry the transactional
# method.
wf_text = """---
version: '2.0'
wf:
tasks:
task1:
action: std.echo output="Hello"
publish:
my_var: "{{ 1 + 1 }}"
"""
wf_service.create_workflows(wf_text)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(1, len(wf_ex.task_executions))
task_ex = wf_ex.task_executions[0]
self.assertDictEqual({'my_var': 2}, task_ex.published)
|
main.py
|
import rstools
from tkinter import *
from functools import partial
class mainWindow:
def __init__(self,master) -> None:
self.master = master
self.constraint = IntVar()
self.constring = []
Label(self.master , text="Revised Simplex Method", font=("Arial",25)).pack()
Label(self.master,text="select number of constraint").pack()
Scale(self.master,variable=self.constraint,from_=2,to=4,orient=HORIZONTAL).pack()
Button(self.master,text="Submit",command=self.next).pack()
def next(self):
level1 = Toplevel()
level1.geometry("400x300")
a = self.constraint.get()
yy1 = 5
for i in range(a+1):
if i==0:
l1 = Label(level1,text="Z")
l1.place(x=120,y=yy1)
else:
l2 = Label(level1,text="Constraint"+str(i))
l2.place(x=70,y=yy1)
yy1+=20
yy = 5
for i in range(a+1):
va = StringVar()
e = Entry(level1,textvariable=va)
e.place(x=135,y=yy)
self.constring.append(va)
yy+=20
finalanswer = partial(self.finalanswer,level1)
Button(level1,text="calculate",command=finalanswer).place(x=225,y=20*(a+2))
level1.mainloop()
def finalanswer(self,level1):
Decodedstring = []
for i in self.constring:
Decodedstring.append(i.get())
a = len(Decodedstring)
cj = rstools.optimizationFunction(Decodedstring[0])
A = []
b = []
for i in range(1,a):
A.append(rstools.constraintsFunction(Decodedstring[i])[:-1])
b.append([rstools.constraintsFunction(Decodedstring[i])[-1]])
cb = [[0]*(a-1)]
cb = rstools.transpose(cb)
B = rstools.B(a-1)
print(A,B,b,cb,cj)
fans = rstools.answer(A,B,b,cb,cj)
fans0 = fans[0]
fans1 = fans[1]
yy = 150
a = rstools.variables(Decodedstring[0])
for i in range(len(fans0)):
Label(level1,text=a[fans1[i]]+" ="+str(fans0[i][0])).place(x=200,y=yy)
yy+=20
if __name__ == "__main__":
app = Tk()
app.title("Linear Programming Problem")
app.geometry("500x400")
win = mainWindow(app)
app.mainloop()
| |
Signal.go
|
package jetsongpio
//Signal is used to set signals and for returned signals. Flags can be set using its methods
type Signal int
//CreateSignal creates an unidentified signal. It is just a convenience function.
//To create a signal flag, use flg := CreateSignal().
//These flags are not thread safe, so don't share pointers to them.
func CreateSignal() Signal {
return Signal(0)
}
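//A minimal usage sketch (the pin-reading function is hypothetical, not part of this package):
//
//	flg := CreateSignal()
//	if readPin(7) == flg.HIGH() {
//		// pin 7 reads HIGH
//	}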
func sig() Signal {
return Signal(0)
}
//LOW sets signal to LOW and returns LOW.
//
//LOW is used for general GPIO uses like checking if a pin is LOW or setting a pin LOW
func (s *Signal) LOW() Signal {
*s = 1
return *s
}
//HIGH sets signal to HIGH and returns HIGH.
//
//HIGH is used for general GPIO uses like checking if a pin is HIGH or setting a pin HIGH
func (s *Signal) HIGH() Signal {
*s = 2
return *s
}
//RISING sets signal to RISING and returns RISING.
//
//RISING is used for interrupts or polling
func (s *Signal) RISING() Signal {
*s = 3
return *s
}
//FALLING sets signal to FALLING and returns FALLING.
//
//FALLING is used for interrupts or polling
func (s *Signal) FALLING() Signal {
*s = 4
return *s
}
//NONE sets signal to NONE and returns NONE.
//
//NONE is used for interrupts or polling
func (s *Signal) NONE() Signal {
*s = 5
return *s
}
//BOTH sets signal to BOTH and returns BOTH.
//
//BOTH is used for interrupts or polling
func (s *Signal) BOTH() Signal {
*s = 6
return *s
}
func (s Signal) String() string {
flg := s
switch s {
case flg.HIGH():
return "HIGH"
case flg.LOW():
return "LOW"
case flg.RISING():
return "RISING"
case flg.FALLING():
return "FALLING"
case flg.NONE():
return "NONE"
case flg.BOTH():
return "BOTH"
default:
return "Undefined"
}
}
|
interface.ts
|
import { BaseObjectLikeTypeImpl, BaseTypeImpl, createType, TypedPropertyInformation } from '../base-type.js';
import type {
LiteralValue,
MergeIntersection,
MessageDetails,
Properties,
PropertiesInfo,
Result,
Type,
TypeImpl,
TypeOfProperties,
ValidationOptions,
Visitor,
Writable,
} from '../interfaces.js';
import { decodeOptionalName, defaultObjectRep, define, extensionName, hasOwnProperty, prependPathToDetails } from '../utils/index.js';
import { LiteralType } from './literal.js';
import { unknownRecord } from './unknown.js';
/**
* Options for {@link object}.
*/
export interface InterfaceTypeOptions {
/** The optional name for the type, uses a default TypeScript-like name if no name is given. */
name?: string;
/** Mark all properties as optional in this type. */
partial?: boolean;
/** Discriminate between missing keys and undefined values. Is off by default because that is what TypeScript does. */
strictMissingKeys?: boolean;
/**
* Force this type (including all nested property-types recursively) to be validated in 'check' mode.
*
* @remarks
* The result of the validation (when successful) will be the original input.
*
* Note: Any autoCast or parser on nested types will have no effect in 'check' mode.
*/
checkOnly?: boolean;
// /** When constructing values, allow unknown properties to pass unvalidated into the constructed value. */
// TODO: allowUnknownProperties?: boolean; // default: false
}
/**
* The implementation behind types created with {@link object} and {@link partial}.
*/
export class InterfaceType<Props extends Properties, ResultType>
extends BaseObjectLikeTypeImpl<ResultType>
implements TypedPropertyInformation<Props>
{
readonly name: string;
readonly basicType!: 'object';
readonly isDefaultName: boolean;
readonly typeConfig: undefined;
constructor(readonly props: Props, readonly options: InterfaceTypeOptions) {
super();
this.isDefaultName = !options.name;
this.name = options.name || defaultObjectRep(this.propsInfo);
}
/** The keys (property-names) for this object-like type. */
readonly keys = Object.keys(this.props) as Array<keyof Props>;
readonly propsInfo = toPropsInfo(this.props, this.options.partial);
readonly possibleDiscriminators = this.options.partial ? [] : getPossibleDiscriminators(this.props);
protected typeValidator(input: unknown, options: ValidationOptions): Result<ResultType> {
if (this.options.checkOnly) {
// can copy here, because this is done after adding the 'visitedMap'
options = { ...options, mode: 'check' };
}
if (!unknownRecord.is(input)) {
return this.createResult(input, undefined, { kind: 'invalid basic type', expected: 'object' });
}
const { strictMissingKeys, partial } = this.options;
const constructResult = {} as Record<string, unknown>;
const details: MessageDetails[] = [];
for (const [key, innerType] of Object.entries(this.props)) {
const missingKey = !hasOwnProperty(input, key);
if (partial) {
if (missingKey || (!strictMissingKeys && input[key] === undefined)) {
continue;
}
} else if (missingKey && strictMissingKeys) {
details.push(missingProperty(key, innerType));
continue;
}
const innerResult = innerType.validate(input[key], options);
if (innerResult.ok) {
constructResult[key] = innerResult.value;
} else if (missingKey) {
details.push(missingProperty(key, innerType));
} else {
details.push(...prependPathToDetails(innerResult, key));
}
}
return this.createResult(input, options.mode === 'construct' ? constructResult : input, details);
}
/** Clone this type with all properties marked optional. */
toPartial(name = `Partial<${this.name}>`): PartialType<Props> {
return createType(new InterfaceType(this.props, { ...this.options, partial: true, name }));
}
/** Create a type with all properties of the current type, plus the given optional properties. */
withOptional<PartialProps extends Properties>(
...args: [props: PartialProps] | [name: string, props: PartialProps]
): TypeImpl<BaseObjectLikeTypeImpl<MergeIntersection<ResultType & Partial<TypeOfProperties<Writable<PartialProps>>>>>> &
TypedPropertyInformation<Props & PartialProps> {
const [name = this.isDefaultName ? undefined : this.name, props] = decodeOptionalName<[PartialProps]>(args);
const newType = this.and(partial(props));
return name ? newType.withName(name) : newType;
}
accept<R>(visitor: Visitor<R>): R {
return visitor.visitObjectLikeType(this);
}
}
define(InterfaceType, 'basicType', 'object');
// Defined outside class definition, because TypeScript somehow ends up in a wild-typings-goose-chase that takes
// up to a minute or more. We have to make sure consuming libs don't have to pay this penalty ever.
define(InterfaceType, 'createAutoCastAllType', function (this: InterfaceType<Properties, any>) {
const name = extensionName(this, 'autoCastAll');
const props: Properties = {};
for (const [key, value] of Object.entries(this.props)) {
props[key] = value.autoCastAll;
}
return createType(new InterfaceType(props, { ...this.options, name }).autoCast);
});
function missingProperty(property: string, type: BaseTypeImpl<unknown>): MessageDetails {
return { kind: 'missing property', property, type };
}
export type FullType<Props extends Properties> = TypeImpl<InterfaceType<Props, TypeOfProperties<Writable<Props>>>>;
/**
* Create a type-validator that validates (or parses) an object structure.
*
* @remarks
* This is a basic building block for more complex structured types, can be nested.
*
* @param args - the options and properties of the new type
*/
export function object<Props extends Properties>(
...args: [props: Props] | [name: string, props: Props] | [options: InterfaceTypeOptions, props: Props]
): FullType<Props> {
const [options, props] = getOptions(args);
return createType(new InterfaceType(props, options));
}
export type PartialType<Props extends Properties> = TypeImpl<InterfaceType<Props, Partial<TypeOfProperties<Writable<Props>>>>>;
/**
* Create a type-validator that validates (or parses) an object structure with only optional properties.
*
* @remarks
* This is a basic building block for more complex structured types, can be nested.
*
* @param args - the optional name and (required) properties of the new type
*/
export function partial<Props extends Properties>(...args: [props: Props] | [name: string, props: Props]): PartialType<Props> {
const [options, props] = getOptions(args);
return createType(new InterfaceType(props, { ...options, partial: true }));
}
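// A minimal usage sketch, assuming `string` and `number` are this library's basic type
// implementations (they are not defined in this file):
//
//   const User = object('User', { name: string, age: number }); // both props required
//   const UserPatch = partial('UserPatch', { name: string });   // name is optional
//   const Extended = User.withOptional({ nickname: string });   // adds an optional prop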
function getOptions<Props extends Properties>(
args: [props: Props] | [name: string, props: Props] | [options: InterfaceTypeOptions, props: Props],
): [InterfaceTypeOptions, Props] {
if (args.length === 1) {
return [{}, args[0]];
}
const [options, props] = args;
return typeof options === 'string' ? [{ name: options }, props] : [options, props];
}
function toPropsInfo<Props extends Properties>(props: Props, partial = false): PropertiesInfo<Props> {
const result = {} as PropertiesInfo;
for (const [key, type] of Object.entries(props)) {
result[key] = { partial, type };
}
return result as PropertiesInfo<Props>;
}
function getPossibleDiscriminators(props: Record<string, Type<unknown> | BaseObjectLikeTypeImpl<unknown> | LiteralType<LiteralValue>>) {
const result: BaseObjectLikeTypeImpl<unknown>['possibleDiscriminators'] = [];
for (const [key, prop] of Object.entries(props)) {
if ('possibleDiscriminators' in prop) {
result.push(...prop.possibleDiscriminators.map(({ path, values }) => ({ path: [key, ...path], values })));
} else if (prop instanceof LiteralType) {
result.push({ path: [key], values: [prop.value] });
}
}
return result;
}
|
gp_mpc_hexa.py
|
"""Model Predictive Control with a Gaussian Process model.
Based on:
* L. Hewing, J. Kabzan and M. N. Zeilinger, "Cautious Model Predictive Control Using Gaussian Process Regression,"
in IEEE Transactions on Control Systems Technology, vol. 28, no. 6, pp. 2736-2743, Nov. 2020, doi: 10.1109/TCST.2019.2949757.
Implementation details:
1. The previous time step MPC solution is used to compute the set constraints and GP dynamics rollout.
Here, the dynamics are rolled out using the Mean Equivalence method, the fastest but least accurate option.
2. The GP is approximated using the Fully Independent Training Conditional (FITC) outlined in
* J. Quinonero-Candela, C. E. Rasmussen, and R. Herbrich, “A unifying view of sparse approximate Gaussian process regression,”
Journal of Machine Learning Research, vol. 6, pp. 1935–1959, 2005.
https://www.jmlr.org/papers/volume6/quinonero-candela05a/quinonero-candela05a.pdf
* E. Snelson and Z. Ghahramani, “Sparse gaussian processes using pseudo-inputs,” in Advances in Neural Information Processing
Systems, Y. Weiss, B. Scholkopf, and J. C. Platt, Eds., 2006, pp. 1257–1264.
and the inducing points are the previous MPC solution.
3. Each dimension of the learned error dynamics is an independent Zero Mean SE Kernel GP.
"""
import scipy
import numpy as np
import casadi as cs
import time
import torch
import gpytorch
from copy import deepcopy
from skopt.sampler import Lhs
from functools import partial
from sklearn.model_selection import train_test_split
from safe_control_gym.controllers.mpc.linear_mpc import LinearMPC, MPC
from safe_control_gym.controllers.mpc.mpc_utils import discretize_linear_system
from safe_control_gym.controllers.mpc.gp_utils import GaussianProcessCollection, ZeroMeanIndependentGPModel, covSEard
from safe_control_gym.envs.benchmark_env import Task
class GPMPC(MPC):
"""MPC with Gaussian Process as dynamics residual.
"""
def __init__(
self,
env_func,
seed: int = 1337,
horizon: int = 5,
q_mpc: list = [1],
r_mpc: list = [1],
additional_constraints: list = None,
use_prev_start: bool = True,
train_iterations: int = 800,
validation_iterations: int = 200,
optimization_iterations: list = None,
learning_rate: list = None,
normalize_training_data: bool = False,
use_gpu: bool = False,
gp_model_path: str = None,
prob: float = 0.955,
initial_rollout_std: float = 0.005,
input_mask: list = None,
target_mask: list = None,
gp_approx: str = 'mean_eq',
sparse_gp: bool = False,
online_learning: bool = False,
inertial_prop: list = [1.0],
prior_param_coeff: float = 1.0,
output_dir: str = "results/temp",
**kwargs
):
"""Initialize GP-MPC.
Args:
env_func (gym.Env): functionalized initialization of the environment.
seed (int): random seed.
horizon (int): MPC planning horizon.
Q, R (np.array): cost weight matrix.
use_prev_start (bool): Warmstart mpc with the previous solution.
train_iterations (int): the number of training examples to use for each dimension of the GP.
validation_iterations (int): the number of points to use for the test set during training.
optimization_iterations (list): the number of optimization iterations for each dimension of the GP.
learning_rate (list): the learning rate for training each dimension of the GP.
normalize_training_data (bool): Normalize the training data.
use_gpu (bool): use GPU while training the gp.
gp_model_path (str): path to a pretrained GP model. If None, will train a new one.
output_dir (str): directory to store model and results.
prob (float): desired probabilistic safety level.
initial_rollout_std (float): the initial std (across all states) for the mean_eq rollout.
inertial_prop (list): to initialize the inertial properties of the prior model.
prior_param_coeff (float): constant multiplying factor to adjust the prior model inertial properties.
input_mask (list): list of which input dimensions to use in GP model. If None, all are used.
target_mask (list): list of which output dimensions to use in the GP model. If None, all are used.
gp_approx (str): 'mean_eq' uses mean equivalence rollout for the GP dynamics; currently the only one that works.
online_learning (bool): if true, GP kernel values will be updated using past trajectory values.
additional_constraints (list): list of Constraint objects defining additional constraints to be used.
"""
print("############################################### GP-MPC hexa ###########################################")
self.prior_env_func = partial(env_func,
inertial_prop=np.array(inertial_prop)*prior_param_coeff)
self.prior_param_coeff = prior_param_coeff
# Initialize the method using linear MPC.
self.prior_ctrl = LinearMPC(
self.prior_env_func,
horizon=horizon,
q_mpc=q_mpc,
r_mpc=r_mpc,
use_prev_start=use_prev_start,
output_dir=output_dir,
additional_constraints=additional_constraints,
)
self.prior_ctrl.reset()
super().__init__(
self.prior_env_func,
horizon=horizon,
q_mpc=q_mpc,
r_mpc=r_mpc,
use_prev_start=use_prev_start,
output_dir=output_dir,
additional_constraints=additional_constraints,
**kwargs)
# Setup environments.
self.env_func = env_func
self.env = env_func(randomized_init=False)
self.env_training = env_func(randomized_init=True)
# No training data accumulated yet so keep the dynamics function as linear prior.
self.train_data = None
self.prior_dynamics_func = self.prior_ctrl.linear_dynamics_func
# GP and training parameters.
self.gaussian_process = None
self.train_iterations = train_iterations
self.validation_iterations = validation_iterations
self.optimization_iterations = optimization_iterations
self.learning_rate = learning_rate
self.gp_model_path = gp_model_path
self.normalize_training_data = normalize_training_data
self.use_gpu = use_gpu
self.seed = seed
self.prob = prob
self.sparse_gp = sparse_gp
if input_mask is None:
self.input_mask = np.arange(self.model.nx + self.model.nu).tolist()
else:
self.input_mask = input_mask
if target_mask is None:
self.target_mask = np.arange(self.model.nx).tolist()
else:
self.target_mask = target_mask
Bd = np.eye(self.model.nx)
self.Bd = Bd[:, self.target_mask]
self.gp_approx = gp_approx
self.online_learning = online_learning
self.last_obs = None
self.last_action = None
self.initial_rollout_std = initial_rollout_std
def setup_prior_dynamics(self):
"""Computes
|
t_gp_dynamics_func(self):
"""Updates symbolic dynamics.
With actual control frequency, initialize GP model and add to the combined dynamics.
"""
self.setup_prior_dynamics()
# Compute the probabilistic constraint inverse CDF according to section III.D.b in Hewing 2019.
self.inverse_cdf = scipy.stats.norm.ppf(1 - (1/self.model.nx - (self.prob + 1)/(2*self.model.nx)))
self.create_sparse_GP_machinery()
def create_sparse_GP_machinery(self):
"""This setups the gaussian process approximations for FITC formulation.
"""
lengthscales, signal_var, noise_var, gp_K_plus_noise = self.gaussian_process.get_hyperparameters(as_numpy=True)
self.length_scales = lengthscales.squeeze()
self.signal_var = signal_var.squeeze()
self.noise_var = noise_var.squeeze()
self.gp_K_plus_noise = gp_K_plus_noise
Nx = len(self.input_mask)
Ny = len(self.target_mask)
N = self.gaussian_process.n_training_samples
# Create CasADI function for computing the kernel K_z_zind with parameters for z, z_ind, length scales and signal variance.
# We need the CasADi version of this so that it can be symbolically differentiated in the MPC optimization.
z1 = cs.SX.sym('z1', Nx)
z2 = cs.SX.sym('z2', Nx)
ell_s = cs.SX.sym('ell', Nx)
sf2_s = cs.SX.sym('sf2')
z_ind = cs.SX.sym('z_ind', self.T, Nx)
covSE = cs.Function('covSE', [z1, z2, ell_s, sf2_s],
[covSEard(z1, z2, ell_s, sf2_s)])
ks = cs.SX.zeros(1, self.T)
for i in range(self.T):
ks[i] = covSE(z1, z_ind[i, :], ell_s, sf2_s)
ks_func = cs.Function('K_s', [z1, z_ind, ell_s, sf2_s], [ks])
K_z_zind = cs.SX.zeros(Ny, self.T)
for i in range(Ny):
K_z_zind[i,:] = ks_func(z1, z_ind, self.length_scales[i,:], self.signal_var[i])
# This will be multiplied by the mean_post_factor computed at every time step to compute the approximate mean.
self.K_z_zind_func = cs.Function('K_z_zind', [z1, z_ind],[K_z_zind],['z1', 'z2'],['K'])
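# Usage sketch (see setup_gp_optimizer and select_action_with_gp below): for a query point z1
# and the T inducing points z_ind, self.K_z_zind_func(z1=z1, z2=z_ind)['K'] returns the
# (Ny x T) kernel block that, multiplied elementwise by mean_post_factor and summed over T,
# gives the sparse posterior mean for each GP output dimension.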
def preprocess_training_data(self,
x_seq,
u_seq,
x_next_seq
):
"""Converts trajectory data for GP trianing.
Args:
x_seq (list): state sequence of np.array (nx,).
u_seq (list): action sequence of np.array (nu,).
x_next_seq (list): next state sequence of np.array (nx,).
Returns:
np.array: inputs for GP training, (N, nx+nu).
np.array: targets for GP training, (N, nx).
"""
# Get the predicted dynamics. This is a linear prior, thus we need to account for the fact that
# it is linearized about an equilibrium point using self.X_GOAL and self.U_GOAL.
x_pred_seq = self.prior_dynamics_func(x0=x_seq.T - self.prior_ctrl.X_LIN[:, None],
p=u_seq.T - self.prior_ctrl.U_LIN[:,None])['xf'].toarray()
targets = (x_next_seq.T - (x_pred_seq+self.prior_ctrl.X_LIN[:,None])).transpose() # (N, nx).
inputs = np.hstack([x_seq, u_seq]) # (N, nx+nu).
return inputs, targets
def precompute_probabilistic_limits(self,
print_sets=True
):
"""This updates the constraint value limits to account for the uncertainty in the dynamics rollout.
Args:
print_sets (bool): True to print out the sets for debugging purposes.
"""
nx, nu = self.model.nx, self.model.nu
T = self.T
state_covariances = np.zeros((self.T+1, nx, nx))
input_covariances = np.zeros((self.T, nu, nu))
# Initialize lists for the tightening of each constraint.
state_constraint_set = []
for state_constraint in self.constraints.state_constraints:
state_constraint_set.append(np.zeros((state_constraint.num_constraints, T+1)))
input_constraint_set = []
for input_constraint in self.constraints.input_constraints:
input_constraint_set.append(np.zeros((input_constraint.num_constraints, T)))
if self.x_prev is not None and self.u_prev is not None:
cov_x = np.diag([self.initial_rollout_std**2]*nx)
for i in range(T):
state_covariances[i] = cov_x
cov_u = self.lqr_gain @ cov_x @ self.lqr_gain.T
input_covariances[i] = cov_u
cov_xu = cov_x @ self.lqr_gain.T
z = np.hstack((self.x_prev[:,i], self.u_prev[:,i]))
if self.gp_approx == 'taylor':
raise NotImplementedError("Taylor GP approximation is currently not working.")
elif self.gp_approx == 'mean_eq':
_, cov_d_tensor = self.gaussian_process.predict(z[None,:], return_pred=False)
cov_d = cov_d_tensor.detach().numpy()
else:
raise NotImplementedError('gp_approx method is incorrect or not implemented')
# Loop through input constraints and tighten by the required amount.
for ui, input_constraint in enumerate(self.constraints.input_constraints):
input_constraint_set[ui][:, i] = -1*self.inverse_cdf * \
np.absolute(input_constraint.A) @ np.sqrt(np.diag(cov_u))
for si, state_constraint in enumerate(self.constraints.state_constraints):
state_constraint_set[si][:, i] = -1*self.inverse_cdf * \
np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x))
if self.gp_approx == 'taylor':
raise NotImplementedError("Taylor GP rollout not implemented.")
elif self.gp_approx == 'mean_eq':
# Compute the next step propagated state covariance using mean equivalence.
cov_x = self.discrete_dfdx @ cov_x @ self.discrete_dfdx.T + \
self.discrete_dfdx @ cov_xu @ self.discrete_dfdu.T + \
self.discrete_dfdu @ cov_xu.T @ self.discrete_dfdx.T + \
self.discrete_dfdu @ cov_u @ self.discrete_dfdu.T + \
self.Bd @ cov_d @ self.Bd.T
else:
raise NotImplementedError('gp_approx method is incorrect or not implemented')
# Update final covariance.
for si, state_constraint in enumerate(self.constraints.state_constraints):
state_constraint_set[si][:,-1] = -1 * self.inverse_cdf * \
np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x))
state_covariances[-1] = cov_x
if print_sets:
print("Probabilistic State Constraint values along Horizon:")
print(state_constraint_set)
print("Probabilistic Input Constraint values along Horizon:")
print(input_constraint_set)
self.results_dict['input_constraint_set'].append(input_constraint_set)
self.results_dict['state_constraint_set'].append(state_constraint_set)
self.results_dict['state_horizon_cov'].append(state_covariances)
self.results_dict['input_horizon_cov'].append(input_covariances)
return state_constraint_set, input_constraint_set
def precompute_sparse_gp_values(self):
"""Uses the last MPC solution to precomupte values associated with the FITC GP approximation.
"""
n_data_points = self.gaussian_process.n_training_samples
dim_gp_inputs = len(self.input_mask)
dim_gp_outputs = len(self.target_mask)
inputs = self.train_data['train_inputs']
targets = self.train_data['train_targets']
# Get the inducing points.
if self.x_prev is not None and self.u_prev is not None:
# Use the previous MPC solution as in Hewing 2019.
z_ind = np.hstack((self.x_prev[:,:-1].T, self.u_prev.T))
z_ind = z_ind[:,self.input_mask]
else:
# If there is no previous solution. Choose T random training set points.
inds = self.env.np_random.choice(range(n_data_points), size=self.T)
#z_ind = self.data_inputs[inds][:, self.input_mask]
z_ind = inputs[inds][:, self.input_mask]
K_zind_zind = self.gaussian_process.kernel(torch.Tensor(z_ind).double())
K_zind_zind_inv = self.gaussian_process.kernel_inv(torch.Tensor(z_ind).double())
K_x_zind = self.gaussian_process.kernel(torch.from_numpy(inputs[:, self.input_mask]).double(),
torch.Tensor(z_ind).double())
Q_X_X = K_x_zind @ K_zind_zind_inv @ K_x_zind.transpose(1,2)
Gamma = torch.diagonal(self.gaussian_process.K_plus_noise + Q_X_X, 0, 1, 2)
Gamma_inv = torch.diag_embed(1/Gamma)
Sigma = torch.pinverse(K_zind_zind + K_x_zind.transpose(1,2) @ Gamma_inv @ K_x_zind)
mean_post_factor = torch.zeros((dim_gp_outputs, self.T))
for i in range(dim_gp_outputs):
mean_post_factor[i] = Sigma[i] @ K_x_zind[i].T @ Gamma_inv[i] @ \
torch.from_numpy(targets[:,self.target_mask[i]]).double()
return mean_post_factor.detach().numpy(), Sigma.detach().numpy(), K_zind_zind_inv.detach().numpy(), z_ind
def setup_gp_optimizer(self):
"""Sets up nonlinear optimization problem including cost objective, variable bounds and dynamics constraints.
"""
nx, nu = self.model.nx, self.model.nu
T = self.T
# Define optimizer and variables.
opti = cs.Opti()
# States.
x_var = opti.variable(nx, T + 1)
# Inputs.
u_var = opti.variable(nu, T)
# Initial state.
x_init = opti.parameter(nx, 1)
# Reference (equilibrium point or trajectory, last step for terminal cost).
x_ref = opti.parameter(nx, T + 1)
# Chance constraint limits.
state_constraint_set = []
for state_constraint in self.constraints.state_constraints:
state_constraint_set.append(opti.parameter(state_constraint.num_constraints, T+1))
input_constraint_set = []
for input_constraint in self.constraints.input_constraints:
input_constraint_set.append(opti.parameter(input_constraint.num_constraints, T))
# Sparse GP mean postfactor matrix.
mean_post_factor = opti.parameter(len(self.target_mask), T)
# Sparse GP inducing points.
z_ind = opti.parameter(T, len(self.input_mask))
# Cost (cumulative).
cost = 0
cost_func = self.model.loss
for i in range(T):
cost += cost_func(x=x_var[:, i],
u=u_var[:, i],
Xr=x_ref[:, i],
Ur=np.zeros((nu, 1)),
Q=self.Q,
R=self.R)["l"]
# Terminal cost.
cost += cost_func(x=x_var[:, -1],
u=np.zeros((nu, 1)),
Xr=x_ref[:, -1],
Ur=np.zeros((nu, 1)),
Q=self.Q,
R=self.R)["l"]
opti.minimize(cost)
z = cs.vertcat(x_var[:,:-1], u_var)
z = z[self.input_mask,:]
for i in range(self.T):
# Dynamics constraints using the dynamics of the prior and the mean of the GP.
# This follows the tractable dynamics formulation in Section III.B in Hewing 2019.
# Note that for the GP approximation, we are purposely using elementwise multiplication *.
if self.sparse_gp:
next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None],
p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \
self.prior_ctrl.X_LIN[:,None]+ self.Bd @ cs.sum2(self.K_z_zind_func(z1=z[:,i].T, z2=z_ind)['K'] * mean_post_factor)
else:
# The sparse GP approximation doesn't always work well, thus we use exact GP regression here.
# This is much slower, but for unstable systems it makes performance much better.
next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None],
p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \
self.prior_ctrl.X_LIN[:,None]+ self.Bd @ self.gaussian_process.casadi_predict(z=z[:,i])['mean']
opti.subject_to(x_var[:, i + 1] == next_state)
# Probabilistic state and input constraints according to Hewing 2019 constraint tightening.
for s_i, state_constraint in enumerate(self.state_constraints_sym):
opti.subject_to(state_constraint(x_var[:, i]) <= state_constraint_set[s_i][:,i])
for u_i, input_constraint in enumerate(self.input_constraints_sym):
opti.subject_to(input_constraint(u_var[:, i]) <= input_constraint_set[u_i][:,i])
# Final state constraints.
for s_i, state_constraint in enumerate(self.state_constraints_sym):
opti.subject_to(state_constraint(x_var[:, -1]) <= state_constraint_set[s_i][:,-1])
# Initial condition constraints.
opti.subject_to(x_var[:, 0] == x_init)
# Create solver (IPOPT solver in this version).
opts = {"ipopt.print_level": 4,
"ipopt.sb": "yes",
"ipopt.max_iter": 100, #100,
"print_time": 1}
opti.solver('ipopt', opts)
self.opti_dict = {
"opti": opti,
"x_var": x_var,
"u_var": u_var,
"x_init": x_init,
"x_ref": x_ref,
"state_constraint_set": state_constraint_set,
"input_constraint_set": input_constraint_set,
"mean_post_factor": mean_post_factor,
"z_ind": z_ind,
"cost": cost
}
def select_action_with_gp(self,
obs
):
"""Solves nonlinear MPC problem to get next action.
Args:
obs (np.array): current state/observation.
Returns:
np.array: input/action to the task/env.
"""
opti_dict = self.opti_dict
opti = opti_dict["opti"]
x_var = opti_dict["x_var"]
u_var = opti_dict["u_var"]
x_init = opti_dict["x_init"]
x_ref = opti_dict["x_ref"]
state_constraint_set = opti_dict["state_constraint_set"]
input_constraint_set = opti_dict["input_constraint_set"]
mean_post_factor = opti_dict["mean_post_factor"]
z_ind = opti_dict["z_ind"]
cost = opti_dict["cost"]
# Assign the initial state.
opti.set_value(x_init, obs)
# Assign reference trajectory within horizon.
goal_states = self.get_references()
opti.set_value(x_ref, goal_states)
if self.mode == "tracking":
self.traj_step += 1
# Set the probabilistic state and input constraint set limits.
state_constraint_set_prev, input_constraint_set_prev = self.precompute_probabilistic_limits()
for si in range(len(self.constraints.state_constraints)):
opti.set_value(state_constraint_set[si], state_constraint_set_prev[si])
for ui in range(len(self.constraints.input_constraints)):
opti.set_value(input_constraint_set[ui], input_constraint_set_prev[ui])
mean_post_factor_val, Sigma, K_zind_zind_inv, z_ind_val = self.precompute_sparse_gp_values()
opti.set_value(mean_post_factor, mean_post_factor_val)
opti.set_value(z_ind, z_ind_val)
# Initial guess for the optimization problem.
if self.warmstart and self.x_prev is not None and self.u_prev is not None:
# shift previous solutions by 1 step
x_guess = deepcopy(self.x_prev)
u_guess = deepcopy(self.u_prev)
x_guess[:, :-1] = x_guess[:, 1:]
u_guess[:-1] = u_guess[1:]
opti.set_initial(x_var, x_guess)
opti.set_initial(u_var, u_guess)
# Solve the optimization problem.
try:
sol = opti.solve()
x_val, u_val = sol.value(x_var), sol.value(u_var)
except RuntimeError:
x_val, u_val = opti.debug.value(x_var), opti.debug.value(u_var)
u_val = np.atleast_2d(u_val)
self.x_prev = x_val
self.u_prev = u_val
self.results_dict['horizon_states'].append(deepcopy(self.x_prev))
self.results_dict['horizon_inputs'].append(deepcopy(self.u_prev))
zi = np.hstack((x_val[:,0], u_val[:,0]))
zi = zi[self.input_mask]
gp_contribution = np.sum(self.K_z_zind_func(z1=zi, z2=z_ind_val)['K'].toarray() * mean_post_factor_val,axis=1)
print("GP Mean eq Contribution: %s" % gp_contribution)
zi = np.hstack((x_val[:,0], u_val[:,0]))
pred, _, _ = self.gaussian_process.predict(zi[None,:])
print("True GP value: %s" % pred.numpy())
lin_pred = self.prior_dynamics_func(x0=x_val[:,0]-self.prior_ctrl.X_LIN,
p=u_val[:, 0]-self.prior_ctrl.U_LIN)['xf'].toarray() + \
self.prior_ctrl.X_LIN[:,None]
self.results_dict['linear_pred'].append(lin_pred)
self.results_dict['gp_mean_eq_pred'].append(gp_contribution)
self.results_dict['gp_pred'].append(pred.numpy())
# Take the first one from solved action sequence.
if u_val.ndim > 1:
action = u_val[:, 0]
else:
action = np.array([u_val[0]])
self.prev_action = action
return action
def learn(self,
input_data=None,
target_data=None,
gp_model=None,
plot=False
):
"""Performs GP training.
Args:
input_data, target_data (optional, np.array): data to use for training
gp_model (str): if not None, this is the path to pretrained models to use instead of training new ones.
plot (bool): to plot validation trajectories or not.
Returns:
training_results (dict): Dictionary of the training results.
"""
if gp_model is None:
gp_model = self.gp_model_path
self.prior_ctrl.remove_constraints(self.prior_ctrl.additional_constraints)
self.reset()
if self.online_learning:
input_data = np.zeros((self.train_iterations, len(self.input_mask)))
target_data = np.zeros((self.train_iterations, len(self.target_mask)))
if input_data is None and target_data is None:
train_inputs = []
train_targets = []
train_info = []
############
# Use Latin Hypercube Sampling to generate states within environment bounds.
lhs_sampler = Lhs(lhs_type='classic', criterion='maximin')
limits = [(self.env.INIT_STATE_RAND_INFO[key].low, self.env.INIT_STATE_RAND_INFO[key].high) for key in
self.env.INIT_STATE_RAND_INFO]
# todo: parameterize this if we actually want it.
num_eq_samples = 0
samples = lhs_sampler.generate(limits,
self.train_iterations + self.validation_iterations - num_eq_samples,
random_state=self.seed)
# todo: choose if we want eq samples or not.
delta = 0.01
eq_limits = [(self.prior_ctrl.X_LIN[eq]-delta, self.prior_ctrl.X_LIN[eq]+delta) for eq in range(self.model.nx)]
if num_eq_samples > 0:
eq_samples = lhs_sampler.generate(eq_limits, num_eq_samples, random_state=self.seed)
#samples = samples.append(eq_samples)
init_state_samples = np.array(samples + eq_samples)
else:
init_state_samples = np.array(samples)
input_limits = np.vstack((self.constraints.input_constraints[0].lower_bounds,
self.constraints.input_constraints[0].upper_bounds)).T
input_samples = lhs_sampler.generate(input_limits,
self.train_iterations + self.validation_iterations,
random_state=self.seed)
input_samples = np.array(input_samples) # not being used currently
seeds = self.env.np_random.randint(0,99999, size=self.train_iterations + self.validation_iterations)
load_from_file = False
if load_from_file:
gpmpc_data = np.load("/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand_good1.npz")
x_seq_all = gpmpc_data["x_seq_all"]
x_next_seq_all = gpmpc_data["x_next_seq_all"]
u_seq_all = gpmpc_data["u_seq_all"]
else:
x_seq_all = []
u_seq_all = []
x_next_seq_all = []
for i in range(self.train_iterations + self.validation_iterations):
if load_from_file:
x_seq = x_seq_all[i]
x_next_seq = x_next_seq_all[i]
u_seq = u_seq_all[i]
else:
# For random initial state training.
init_state = init_state_samples[i,:]
# Collect data with prior controller.
run_env = self.env_func(init_state=init_state, randomized_init=False, seed=int(seeds[i]))
episode_results = self.prior_ctrl.run(env=run_env, max_steps=1, gp_training=True)
run_env.close()
x_obs = episode_results['obs'][-3:,:]
u_seq = episode_results['action'][-1:,:]
x_seq = x_obs[:-1,:]
x_next_seq = x_obs[1:,:]
x_seq_all.append(x_seq)
x_next_seq_all.append(x_next_seq)
u_seq_all.append(u_seq)
train_inputs_i, train_targets_i = self.preprocess_training_data(x_seq, u_seq, x_next_seq)
train_inputs.append(train_inputs_i)
train_targets.append(train_targets_i)
np.savez("/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand.npz", x_seq_all = x_seq_all, x_next_seq_all = x_next_seq_all, u_seq_all = u_seq_all)
###########
else:
train_inputs = input_data
train_targets = target_data
# assign all data
train_inputs = np.vstack(train_inputs)
train_targets = np.vstack(train_targets)
self.data_inputs = train_inputs
self.data_targets = train_targets
train_idx, test_idx = train_test_split(
#list(range(self.train_iterations + self.validation_iterations)),
list(range(train_inputs.shape[0])),
test_size=self.validation_iterations/(self.train_iterations+self.validation_iterations),
random_state=self.seed
)
train_inputs = self.data_inputs[train_idx, :]
train_targets = self.data_targets[train_idx, :]
self.train_data = {'train_inputs': train_inputs, 'train_targets': train_targets}
test_inputs = self.data_inputs[test_idx, :]
test_targets = self.data_targets[test_idx, :]
self.test_data = {'test_inputs': test_inputs, 'test_targets': test_targets}
train_inputs_tensor = torch.Tensor(train_inputs).double()
train_targets_tensor = torch.Tensor(train_targets).double()
test_inputs_tensor = torch.Tensor(test_inputs).double()
test_targets_tensor = torch.Tensor(test_targets).double()
if plot:
init_state = np.array([-1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
valid_env = self.env_func(init_state=init_state,
randomized_init=False)
validation_results = self.prior_ctrl.run(env=valid_env,
max_steps=40)
valid_env.close()
x_obs = validation_results['obs']
u_seq = validation_results['action']
x_seq = x_obs[:-1, :]
x_next_seq = x_obs[1:, :]
# Define likelihood.
likelihood = gpytorch.likelihoods.GaussianLikelihood(
noise_constraint=gpytorch.constraints.GreaterThan(1e-6),
).double()
self.gaussian_process = GaussianProcessCollection(ZeroMeanIndependentGPModel,
likelihood,
len(self.target_mask),
input_mask=self.input_mask,
target_mask=self.target_mask,
normalize=self.normalize_training_data
)
if gp_model:
self.gaussian_process.init_with_hyperparam(train_inputs_tensor,
train_targets_tensor,
gp_model)
else:
# Train the GP.
self.gaussian_process.train(train_inputs_tensor,
train_targets_tensor,
test_inputs_tensor,
test_targets_tensor,
n_train=self.optimization_iterations,
learning_rate=self.learning_rate,
gpu=self.use_gpu,
dir=self.output_dir)
# Plot validation.
if plot:
validation_inputs, validation_targets = self.preprocess_training_data(x_seq, u_seq, x_next_seq)
fig_count = 0
fig_count = self.gaussian_process.plot_trained_gp(torch.Tensor(validation_inputs).double(),
torch.Tensor(validation_targets).double(),
fig_count=fig_count)
self.set_gp_dynamics_func()
self.setup_gp_optimizer()
self.prior_ctrl.add_constraints(self.prior_ctrl.additional_constraints)
self.prior_ctrl.reset()
# Collect training results.
training_results = {}
training_results['train_targets'] = train_targets
training_results['train_inputs'] = train_inputs
try:
training_results['info'] = train_info
except UnboundLocalError:
training_results['info'] = None
return training_results
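# Illustrative usage sketch (the `ctrl` instance below is an assumption,
# not defined in this file):
#   ctrl.learn()                                   # sample data, then train fresh GPs
#   ctrl.learn(gp_model='path/to/pretrained')      # or load saved hyperparameters
#   ctrl.learn(input_data=X, target_data=Y)        # or train on external data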
def select_action(self,
obs
):
"""Select the action based on the given observation.
Args:
obs (np.array): current observed state.
Returns:
action (np.array): desired policy action.
"""
if self.gaussian_process is None:
action = self.prior_ctrl.select_action(obs)
else:
if self.last_obs is not None and self.last_action is not None and self.online_learning:
print("[ERROR]: Not yet supported.")
exit()
t1 = time.perf_counter()
action = self.select_action_with_gp(obs)
t2 = time.perf_counter()
print("GP SELECT ACTION TIME: %s" %(t2 - t1))
self.last_obs = obs
self.last_action = action
return action
def close(self):
"""Clean up.
"""
self.env_training.close()
self.env.close()
def reset_results_dict(self):
"""
"""
"Result the results_dict before running."
super().reset_results_dict()
self.results_dict['input_constraint_set'] = []
self.results_dict['state_constraint_set'] = []
self.results_dict['state_horizon_cov'] = []
self.results_dict['input_horizon_cov'] = []
self.results_dict['gp_mean_eq_pred'] = []
self.results_dict['gp_pred'] = []
self.results_dict['linear_pred'] = []
def reset(self):
"""Reset the controller before running.
"""
# Setup reference input.
if self.env.TASK == Task.STABILIZATION:
self.mode = "stabilization"
self.x_goal = self.env.X_GOAL
elif self.env.TASK == Task.TRAJ_TRACKING:
self.mode = "tracking"
self.traj = self.env.X_GOAL.T
self.traj_step = 0
# Dynamics model.
if self.gaussian_process is not None:
self.set_gp_dynamics_func()
# CasADi optimizer.
self.setup_gp_optimizer()
self.prior_ctrl.reset()
# Previously solved states & inputs, useful for warm start.
self.x_prev = None
self.u_prev = None
|
the LQR gain used for propagating GP uncertainty from the prior model dynamics.
"""
# Determine the LQR gain K to propagate the input uncertainty (doing this at each timestep will increase complexity).
A, B = discretize_linear_system(self.prior_ctrl.dfdx, self.prior_ctrl.dfdu, self.dt)
Q_lqr = self.Q
R_lqr = self.R
P = scipy.linalg.solve_discrete_are(A, B, Q_lqr, R_lqr)
btp = np.dot(B.T, P)
self.lqr_gain = -np.dot(np.linalg.inv(self.R + np.dot(btp, B)), np.dot(btp, A))
self.discrete_dfdx = A
self.discrete_dfdu = B
def se
|
plugin-proposal-throw-expressions_vx.x.x.js
|
// flow-typed signature: 49dd8de5240c00acb572bb6edb681042
// flow-typed version: <<STUB>>/@babel/plugin-proposal-throw-expressions_v^7.8.3/flow_v0.118.0
/**
|
* Fill this stub out by replacing all the `any` types.
*
* Once filled out, we encourage you to share your work with the
* community by sending a pull request to:
* https://github.com/flowtype/flow-typed
*/
declare module '@babel/plugin-proposal-throw-expressions' {
declare module.exports: any;
}
/**
* We include stubs for each file inside this npm package in case you need to
* require those files directly. Feel free to delete any files that aren't
* needed.
*/
declare module '@babel/plugin-proposal-throw-expressions/lib' {
declare module.exports: any;
}
// Filename aliases
declare module '@babel/plugin-proposal-throw-expressions/lib/index' {
declare module.exports: $Exports<'@babel/plugin-proposal-throw-expressions/lib'>;
}
declare module '@babel/plugin-proposal-throw-expressions/lib/index.js' {
declare module.exports: $Exports<'@babel/plugin-proposal-throw-expressions/lib'>;
}
|
* This is an autogenerated libdef stub for:
*
* '@babel/plugin-proposal-throw-expressions'
*
|
dummy_interpreter.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
from typing import Tuple, Dict, Any, Optional
from droidlet.dialog.dialogue_objects import DialogueObject
from ..interpreter import ReferenceObjectInterpreter, FilterInterpreter, interpret_reference_object
from ..condition_helper import ConditionInterpreter
from .attribute_helper import MCAttributeInterpreter
class DummyInterpreter(DialogueObject):
def __init__(self, speaker: str, **kwargs):
super().__init__(**kwargs)
self.speaker = speaker
self.provisional: Dict = {}
self.action_dict_frozen = False
self.loop_data = None
self.subinterpret = {
"attribute": MCAttributeInterpreter(),
"filters": FilterInterpreter(),
"reference_objects": ReferenceObjectInterpreter(interpret_reference_object),
"condition": ConditionInterpreter(),
}
self.action_handlers = {} # noqa
def step(self) -> Tuple[Optional[str], Any]:
|
return None, None
|
|
MeetingsList.js
|
// @flow
import React, { Component } from 'react';
import {
getLocalizedDateFormatter,
getLocalizedDurationFormatter,
translate
} from '../../../i18n';
import { Icon, IconTrash } from '../../../icons';
import Container from './Container';
import Text from './Text';
type Props = {
/**
* Indicates if the list is disabled or not.
*/
disabled: boolean,
/**
* Indicates if the URL should be hidden or not.
*/
hideURL: boolean,
/**
* Function to be invoked when an item is pressed. The item's URL is passed.
*/
onPress: Function,
/**
* Rendered when the list is empty. Should be a rendered element.
*/
listEmptyComponent: Object,
/**
* An array of meetings.
*/
meetings: Array<Object>,
/**
* Handler for deleting an item.
*/
onItemDelete?: Function,
/**
* Invoked to obtain translated strings.
*/
t: Function
};
/**
* Generates a date string for a given date.
*
* @param {Object} date - The date.
* @private
* @returns {string}
*/
function _toDateString(date) {
return getLocalizedDateFormatter(date).format('ll');
}
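// Illustrative example ('ll' is a locale-aware moment-style format):
//   _toDateString(date) -> 'Jan 1, 2021'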
/**
* Generates a time (interval) string for the given times.
*
* @param {Array<Date>} times - Array of times.
* @private
* @returns {string}
*/
function _toTimeString(times) {
if (times && times.length > 0) {
return (
times
.map(time => getLocalizedDateFormatter(time).format('LT'))
.join(' - '));
}
return undefined;
}
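// Illustrative example ('LT' is a locale-aware time format):
//   _toTimeString([ start, end ]) -> '10:00 AM - 11:00 AM'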
/**
* Implements a React/Web {@link Component} for displaying a list with
|
*/
class MeetingsList extends Component<Props> {
/**
* Constructor of the MeetingsList component.
*
* @inheritdoc
*/
constructor(props: Props) {
super(props);
this._onPress = this._onPress.bind(this);
this._renderItem = this._renderItem.bind(this);
}
/**
* Renders the content of this component.
*
* @returns {React.ReactNode}
*/
render() {
const { listEmptyComponent, meetings, t } = this.props;
/**
* If there are no recent meetings we don't want to display anything
*/
if (meetings) {
return (
<Container
aria-label = { t('welcomepage.recentList') }
className = 'meetings-list'
role = 'menu'
tabIndex = '-1'>
{
meetings.length === 0
? listEmptyComponent
: meetings.map(this._renderItem)
}
</Container>
);
}
return null;
}
_onPress: string => Function;
/**
* Returns a function that is used in the onPress callback of the items.
*
* @param {string} url - The URL of the item to navigate to.
* @private
* @returns {Function}
*/
_onPress(url) {
const { disabled, onPress } = this.props;
if (!disabled && url && typeof onPress === 'function') {
return () => onPress(url);
}
return null;
}
_onKeyPress: string => Function;
/**
* Returns a function that is used in the onKeyPress callback of the items.
*
* @param {string} url - The URL of the item to navigate to.
* @private
* @returns {Function}
*/
_onKeyPress(url) {
const { disabled, onPress } = this.props;
if (!disabled && url && typeof onPress === 'function') {
return e => {
if (e.key === ' ' || e.key === 'Enter') {
onPress(url);
}
};
}
return null;
}
_onDelete: Object => Function;
/**
* Returns a function that is used on the onDelete callback.
*
* @param {Object} item - The item to be deleted.
* @private
* @returns {Function}
*/
_onDelete(item) {
const { onItemDelete } = this.props;
return evt => {
evt.stopPropagation();
onItemDelete && onItemDelete(item);
};
}
_onDeleteKeyPress: Object => Function;
/**
* Returns a function that is used on the onDelete keypress callback.
*
* @param {Object} item - The item to be deleted.
* @private
* @returns {Function}
*/
_onDeleteKeyPress(item) {
const { onItemDelete } = this.props;
return e => {
if (onItemDelete && (e.key === ' ' || e.key === 'Enter')) {
e.preventDefault();
e.stopPropagation();
onItemDelete(item);
}
};
}
_renderItem: (Object, number) => React$Node;
/**
* Renders an item for the list.
*
* @param {Object} meeting - Information about the meeting.
* @param {number} index - The index of the item.
* @returns {Node}
*/
_renderItem(meeting, index) {
const {
date,
duration,
elementAfter,
time,
title,
url
} = meeting;
const { hideURL = false, onItemDelete, t } = this.props;
const onPress = this._onPress(url);
const onKeyPress = this._onKeyPress(url);
const rootClassName
= `item ${onPress ? 'with-click-handler' : 'without-click-handler'}`;
return (
<Container
aria-label = { title }
className = { rootClassName }
key = { index }
onClick = { onPress }
onKeyPress = { onKeyPress }
role = 'menuitem'
tabIndex = { 0 }>
<Container className = 'left-column'>
<Text className = 'title'>
{_toDateString(date)}
</Text>
<Text className = 'subtitle'>
{_toTimeString(time)}
</Text>
</Container>
<Container className = 'right-column'>
<Text className = 'title'>
{title}
</Text>
{
hideURL || !url ? null : (
<Text>
{ url }
</Text>)
}
{
typeof duration === 'number' ? (
<Text className = 'subtitle'>
{ getLocalizedDurationFormatter(duration) }
</Text>) : null
}
</Container>
<Container className = 'actions'>
{elementAfter || null}
{ onItemDelete && <Icon
ariaLabel = { t('welcomepage.recentListDelete') }
className = 'delete-meeting'
onClick = { this._onDelete(meeting) }
onKeyPress = { this._onDeleteKeyPress(meeting) }
role = 'button'
src = { IconTrash }
tabIndex = { 0 } />}
</Container>
</Container>
);
}
}
export default translate(MeetingsList);
|
* meetings.
*
* @extends Component
|
functional_test.rs
|
use crate::schema;
use crate::Index;
use crate::IndexSettings;
use crate::IndexSortByField;
use crate::Order;
use crate::Searcher;
|
use rand::Rng;
use std::collections::HashSet;
fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {
assert!(searcher.segment_readers().len() < 20);
assert_eq!(searcher.num_docs() as usize, vals.len());
for segment_reader in searcher.segment_readers() {
let store_reader = segment_reader.get_store_reader()?;
for doc_id in 0..segment_reader.max_doc() {
let _doc = store_reader.get(doc_id)?;
}
}
Ok(())
}
#[test]
#[ignore]
fn test_functional_store() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let id_field = schema_builder.add_u64_field("id", INDEXED | STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let reader = index.reader()?;
let mut rng = thread_rng();
let mut index_writer = index.writer_with_num_threads(3, 12_000_000)?;
let mut doc_set: Vec<u64> = Vec::new();
let mut doc_id = 0u64;
for iteration in 0..get_num_iterations() {
dbg!(iteration);
let num_docs: usize = rng.gen_range(0..4);
if !doc_set.is_empty() {
let doc_to_remove_id = rng.gen_range(0..doc_set.len());
let removed_doc_id = doc_set.swap_remove(doc_to_remove_id);
index_writer.delete_term(Term::from_field_u64(id_field, removed_doc_id));
}
for _ in 0..num_docs {
doc_set.push(doc_id);
index_writer.add_document(doc!(id_field=>doc_id));
doc_id += 1;
}
index_writer.commit()?;
reader.reload()?;
let searcher = reader.searcher();
check_index_content(&searcher, &doc_set)?;
}
Ok(())
}
fn get_num_iterations() -> usize {
std::env::var("NUM_FUNCTIONAL_TEST_ITERATIONS")
.map(|str| str.parse().unwrap())
.unwrap_or(2000)
}
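// Illustrative invocation (example value; the tests are #[ignore]d, so they must
// be requested explicitly):
//   NUM_FUNCTIONAL_TEST_ITERATIONS=100 cargo test test_functional_store -- --ignored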
#[test]
#[ignore]
fn test_functional_indexing_sorted() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let id_field = schema_builder.add_u64_field("id", INDEXED | FAST);
let multiples_field = schema_builder.add_u64_field("multiples", INDEXED);
let text_field_options = TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default()
.set_index_option(schema::IndexRecordOption::WithFreqsAndPositions),
)
.set_stored();
let text_field = schema_builder.add_text_field("text_field", text_field_options);
let schema = schema_builder.build();
let mut index_builder = Index::builder().schema(schema);
index_builder = index_builder.settings(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Desc,
}),
..Default::default()
});
let index = index_builder.create_from_tempdir().unwrap();
let reader = index.reader()?;
let mut rng = thread_rng();
let mut index_writer = index.writer_with_num_threads(3, 120_000_000)?;
let mut committed_docs: HashSet<u64> = HashSet::new();
let mut uncommitted_docs: HashSet<u64> = HashSet::new();
for _ in 0..get_num_iterations() {
let random_val = rng.gen_range(0..20);
if random_val == 0 {
index_writer.commit()?;
committed_docs.extend(&uncommitted_docs);
uncommitted_docs.clear();
reader.reload()?;
let searcher = reader.searcher();
// check that everything is correct.
check_index_content(
&searcher,
&committed_docs.iter().cloned().collect::<Vec<u64>>(),
)?;
} else if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
let doc_id_term = Term::from_field_u64(id_field, random_val);
index_writer.delete_term(doc_id_term);
} else {
uncommitted_docs.insert(random_val);
let mut doc = Document::new();
doc.add_u64(id_field, random_val);
for i in 1u64..10u64 {
doc.add_u64(multiples_field, random_val * i);
}
doc.add_text(text_field, get_text());
index_writer.add_document(doc);
}
}
Ok(())
}
const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed \
do eiusmod tempor incididunt ut labore et dolore magna aliqua. \
Ut enim ad minim veniam, quis nostrud exercitation ullamco \
laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \
dolor in reprehenderit in voluptate velit esse cillum dolore eu \
fugiat nulla pariatur. Excepteur sint occaecat cupidatat non \
proident, sunt in culpa qui officia deserunt mollit anim id est \
laborum.";
fn get_text() -> String {
use rand::seq::SliceRandom;
let mut rng = thread_rng();
let tokens: Vec<_> = LOREM.split(" ").collect();
let random_val = rng.gen_range(0..20);
(0..random_val)
.map(|_| tokens.choose(&mut rng).unwrap())
.cloned()
.collect::<Vec<_>>()
.join(" ")
}
#[test]
#[ignore]
fn test_functional_indexing_unsorted() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let id_field = schema_builder.add_u64_field("id", INDEXED);
let multiples_field = schema_builder.add_u64_field("multiples", INDEXED);
let text_field_options = TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default()
.set_index_option(schema::IndexRecordOption::WithFreqsAndPositions),
)
.set_stored();
let text_field = schema_builder.add_text_field("text_field", text_field_options);
let schema = schema_builder.build();
let index = Index::create_from_tempdir(schema)?;
let reader = index.reader()?;
let mut rng = thread_rng();
let mut index_writer = index.writer_with_num_threads(3, 120_000_000)?;
let mut committed_docs: HashSet<u64> = HashSet::new();
let mut uncommitted_docs: HashSet<u64> = HashSet::new();
for _ in 0..get_num_iterations() {
let random_val = rng.gen_range(0..20);
if random_val == 0 {
index_writer.commit()?;
committed_docs.extend(&uncommitted_docs);
uncommitted_docs.clear();
reader.reload()?;
let searcher = reader.searcher();
// check that everything is correct.
check_index_content(
&searcher,
&committed_docs.iter().cloned().collect::<Vec<u64>>(),
)?;
} else if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
let doc_id_term = Term::from_field_u64(id_field, random_val);
index_writer.delete_term(doc_id_term);
} else {
uncommitted_docs.insert(random_val);
let mut doc = Document::new();
doc.add_u64(id_field, random_val);
for i in 1u64..10u64 {
doc.add_u64(multiples_field, random_val * i);
}
doc.add_text(text_field, get_text());
index_writer.add_document(doc);
}
}
Ok(())
}
|
use crate::{doc, schema::*};
use rand::thread_rng;
|
CompanyRoutes.js
|
import CompanyCreateWarehouse from "views/Companies/CompanyCreateWarehouse.jsx";
import CompanyIndex from "views/Companies/CompanyIndex.jsx";
import CompanyWarehouse from "views/Companies/CompanyWarehouse.jsx";
import companyAddProductWarehouse from "views/Companies/companyAddProductWarehouse.jsx";
import CompanyProfile from "views/Companies/CompanyProfile.jsx";
import companyProvideFund from "views/Companies/companyProvideFund.jsx";
import CompanyDashboard from "views/Companies/CompanyDashboard.jsx";
var routes = [
{
path: "/index",
name: "Dashboard",
icon: "ni ni-tv-2 text-green",
component: CompanyIndex,
layout: "/company"
},
{
path: "/profile",
name: "Profile",
icon: "ni ni-tv-2 text-blue",
component: CompanyProfile,
layout: "/company"
},
{
path: "/payment",
name: "Transfer Fund",
icon: "ni ni-tv-2 text-green",
component: companyProvideFund,
layout: "/company"
},
{
path: "/addproduct",
name: "Product Warehouse",
icon: "ni ni-tv-2 text-green",
|
{
path: "/warehouse",
name: "Warehouse",
icon: "ni ni-tv-2 text-green",
component: CompanyWarehouse,
layout: "/company"
},
{
path: "/createwarehouse",
name: "Create Warehouse",
icon: "ni ni-tv-2 text-green",
component: CompanyCreateWarehouse,
layout: "/company"
}
];
export default routes;
|
component: companyAddProductWarehouse,
layout: "/company"
},
|
reinstall_request_v2.py
|
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary sizes on different cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers' APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, OpenStack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ReinstallRequestV2(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'instance_groups': 'list[InstanceGroupsV2]',
'ambari_stack_details': 'AmbariStackDetails',
'blueprint_name': 'str',
'kerberos_password': 'str',
'kerberos_principal': 'str'
}
attribute_map = {
'instance_groups': 'instanceGroups',
'ambari_stack_details': 'ambariStackDetails',
'blueprint_name': 'blueprintName',
'kerberos_password': 'kerberosPassword',
'kerberos_principal': 'kerberosPrincipal'
}
def
|
(self, instance_groups=None, ambari_stack_details=None, blueprint_name=None, kerberos_password=None, kerberos_principal=None):
"""
ReinstallRequestV2 - a model defined in Swagger
"""
self._instance_groups = None
self._ambari_stack_details = None
self._blueprint_name = None
self._kerberos_password = None
self._kerberos_principal = None
if instance_groups is not None:
self.instance_groups = instance_groups
if ambari_stack_details is not None:
self.ambari_stack_details = ambari_stack_details
self.blueprint_name = blueprint_name
if kerberos_password is not None:
self.kerberos_password = kerberos_password
if kerberos_principal is not None:
self.kerberos_principal = kerberos_principal
@property
def instance_groups(self):
"""
Gets the instance_groups of this ReinstallRequestV2.
collection of instance groups
:return: The instance_groups of this ReinstallRequestV2.
:rtype: list[InstanceGroupsV2]
"""
return self._instance_groups
@instance_groups.setter
def instance_groups(self, instance_groups):
"""
Sets the instance_groups of this ReinstallRequestV2.
collection of instance groups
:param instance_groups: The instance_groups of this ReinstallRequestV2.
:type: list[InstanceGroupsV2]
"""
self._instance_groups = instance_groups
@property
def ambari_stack_details(self):
"""
Gets the ambari_stack_details of this ReinstallRequestV2.
details of the Ambari stack
:return: The ambari_stack_details of this ReinstallRequestV2.
:rtype: AmbariStackDetails
"""
return self._ambari_stack_details
@ambari_stack_details.setter
def ambari_stack_details(self, ambari_stack_details):
"""
Sets the ambari_stack_details of this ReinstallRequestV2.
details of the Ambari stack
:param ambari_stack_details: The ambari_stack_details of this ReinstallRequestV2.
:type: AmbariStackDetails
"""
self._ambari_stack_details = ambari_stack_details
@property
def blueprint_name(self):
"""
Gets the blueprint_name of this ReinstallRequestV2.
blueprint name for the cluster
:return: The blueprint_name of this ReinstallRequestV2.
:rtype: str
"""
return self._blueprint_name
@blueprint_name.setter
def blueprint_name(self, blueprint_name):
"""
Sets the blueprint_name of this ReinstallRequestV2.
blueprint name for the cluster
:param blueprint_name: The blueprint_name of this ReinstallRequestV2.
:type: str
"""
if blueprint_name is None:
raise ValueError("Invalid value for `blueprint_name`, must not be `None`")
self._blueprint_name = blueprint_name
@property
def kerberos_password(self):
"""
Gets the kerberos_password of this ReinstallRequestV2.
kerberos admin password
:return: The kerberos_password of this ReinstallRequestV2.
:rtype: str
"""
return self._kerberos_password
@kerberos_password.setter
def kerberos_password(self, kerberos_password):
"""
Sets the kerberos_password of this ReinstallRequestV2.
kerberos admin password
:param kerberos_password: The kerberos_password of this ReinstallRequestV2.
:type: str
"""
if kerberos_password is not None and len(kerberos_password) > 50:
raise ValueError("Invalid value for `kerberos_password`, length must be less than or equal to `50`")
if kerberos_password is not None and len(kerberos_password) < 5:
raise ValueError("Invalid value for `kerberos_password`, length must be greater than or equal to `5`")
self._kerberos_password = kerberos_password
@property
def kerberos_principal(self):
"""
Gets the kerberos_principal of this ReinstallRequestV2.
kerberos principal
:return: The kerberos_principal of this ReinstallRequestV2.
:rtype: str
"""
return self._kerberos_principal
@kerberos_principal.setter
def kerberos_principal(self, kerberos_principal):
"""
Sets the kerberos_principal of this ReinstallRequestV2.
kerberos principal
:param kerberos_principal: The kerberos_principal of this ReinstallRequestV2.
:type: str
"""
self._kerberos_principal = kerberos_principal
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ReinstallRequestV2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
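# Illustrative usage sketch (field values are made up):
#   req = ReinstallRequestV2(blueprint_name='my-blueprint')
#   req.kerberos_password = 'secret1'   # setter validates length (5..50 chars)
#   payload = req.to_dict()             # nested models are converted recursively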
|
__init__
|
conf.py
|
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Settings for the OpenTelemetry tracer are all namespaced in the OPENTELEMETRY_TRACE setting.
For example your project's `settings.py` file might look like this:
OPENTELEMETRY_TRACE = {
'TRACER': 'myapp.tracer',
}
This module provides the `settings` object, which is used to access
OpenTelemetry settings, checking for user settings first, then falling
back to the defaults.
"""
from __future__ import unicode_literals
import os
import importlib
from django.conf import settings as django_settings
from ...internal.logger import get_logger
log = get_logger(__name__)
# List of available settings with their defaults
DEFAULTS = {
'AGENT_HOSTNAME': 'localhost',
'AGENT_PORT': 8126,
'AUTO_INSTRUMENT': True,
'INSTRUMENT_CACHE': True,
'INSTRUMENT_DATABASE': True,
'INSTRUMENT_TEMPLATE': True,
'DEFAULT_DATABASE_PREFIX': '',
'DEFAULT_SERVICE': 'django',
'DEFAULT_CACHE_SERVICE': '',
'ENABLED': True,
'DISTRIBUTED_TRACING': True,
'ANALYTICS_ENABLED': None,
'ANALYTICS_SAMPLE_RATE': True,
'TRACE_QUERY_STRING': None,
'TAGS': {},
'TRACER': 'oteltrace.tracer',
}
# List of settings that may be in string import notation.
IMPORT_STRINGS = (
'TRACER',
)
# List of settings that have been removed
REMOVED_SETTINGS = ()
def import_from_string(val, setting_name):
"""
Attempt to import a class from a string representation.
"""
try:
# Nod to tastypie's use of importlib.
parts = val.split('.')
module_path, class_name = '.'.join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)
except (ImportError, AttributeError) as e:
msg = 'Could not import "{}" for setting "{}". {}: {}.'.format(
val,
setting_name,
e.__class__.__name__,
e,
)
raise ImportError(msg)
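# Illustrative example: the dotted path resolves to the named attribute of the
# imported module, e.g.
#   import_from_string('importlib.import_module', 'TRACER')  # -> importlib.import_module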
class OpenTelemetrySettings(object):
"""
A settings object that allows OpenTelemetry settings to be accessed as properties.
For example:
from oteltrace.contrib.django.conf import settings
tracer = settings.TRACER
Any setting with string import paths will be automatically resolved
and return the class, rather than the string literal.
"""
def __init__(self, user_settings=None, defaults=None, import_strings=None):
if user_settings:
self._user_settings = self.__check_user_settings(user_settings)
self.defaults = defaults or DEFAULTS
if os.environ.get('OPENTELEMETRY_ENV'):
self.defaults['TAGS'].update({'env': os.environ.get('OPENTELEMETRY_ENV')})
if os.environ.get('OPENTELEMETRY_SERVICE_NAME'):
self.defaults['DEFAULT_SERVICE'] = os.environ.get('OPENTELEMETRY_SERVICE_NAME')
host = os.environ.get('OTEL_AGENT_HOST', os.environ.get('OPENTELEMETRY_TRACE_AGENT_HOSTNAME'))
if host:
self.defaults['AGENT_HOSTNAME'] = host
port = os.environ.get('OTEL_TRACE_AGENT_PORT', os.environ.get('OPENTELEMETRY_TRACE_AGENT_PORT'))
if port:
# if the agent port is a string, the underlying library that creates the socket
# stops working
try:
port = int(port)
except ValueError:
log.warning('OTEL_TRACE_AGENT_PORT is not an integer value; defaulting to 8126')
else:
self.defaults['AGENT_PORT'] = port
self.import_strings = import_strings or IMPORT_STRINGS
@property
def user_settings(self):
if not hasattr(self, '_user_settings'):
self._user_settings = getattr(django_settings, 'OPENTELEMETRY_TRACE', {})
# TODO[manu]: prevents docs import errors; provide a better implementation
if 'ENABLED' not in self._user_settings:
self._user_settings['ENABLED'] = not django_settings.DEBUG
return self._user_settings
def
|
(self, attr):
if attr not in self.defaults:
raise AttributeError('Invalid setting: "{}"'.format(attr))
try:
# Check if present in user settings
val = self.user_settings[attr]
except KeyError:
# Otherwise, fall back to defaults
val = self.defaults[attr]
# Coerce import strings into classes
if attr in self.import_strings:
val = import_from_string(val, attr)
# Cache the result
setattr(self, attr, val)
return val
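# Note: the setattr above caches the resolved value on the instance, so this
# __getattr__ only runs on first access; picking up changed user settings
# requires a new OpenTelemetrySettings instance (see reload_settings below).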
def __check_user_settings(self, user_settings):
SETTINGS_DOC = 'http://pypi.datadoghq.com/trace/docs/#module-oteltrace.contrib.django'
for setting in REMOVED_SETTINGS:
if setting in user_settings:
raise RuntimeError(
'The "{}" setting has been removed, check "{}".'.format(setting, SETTINGS_DOC)
)
return user_settings
settings = OpenTelemetrySettings(None, DEFAULTS, IMPORT_STRINGS)
def reload_settings(*args, **kwargs):
"""
Triggers a reload when Django emits the reloading signal
"""
global settings
setting, value = kwargs['setting'], kwargs['value']
if setting == 'OPENTELEMETRY_TRACE':
settings = OpenTelemetrySettings(value, DEFAULTS, IMPORT_STRINGS)
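# Wiring sketch (an assumption about how reload_settings is hooked up, not code
# taken from this package):
#   from django.test.signals import setting_changed
#   setting_changed.connect(reload_settings)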
|
__getattr__
|
docker_cli_run_test.go
|
package main
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/docker/pkg/integration/checker"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/stringutils"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/nat"
"github.com/docker/libnetwork/resolvconf"
"github.com/docker/libnetwork/types"
"github.com/go-check/check"
libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
)
// "test123" should be printed by docker run
func (s *DockerSuite) TestRunEchoStdout(c *check.C) {
out, _ := dockerCmd(c, "run", "busybox", "echo", "test123")
if out != "test123\n" {
c.Fatalf("container should've printed 'test123', got '%s'", out)
}
}
// "test" should be printed
func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) {
out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test")
if out != "test\n" {
c.Errorf("container should've printed 'test'")
}
}
// docker run should not leak file descriptors. This test relies on Unix
// specific functionality and cannot run on Windows.
func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd")
// normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory
if out != "0 1 2 3\n" {
c.Errorf("container should've printed '0 1 2 3', not: %s", out)
}
}
// it should be possible to look up Google DNS
// this will fail when Internet access is unavailable
func (s *DockerSuite) TestRunLookupGoogleDNS(c *check.C) {
testRequires(c, Network, NotArm)
image := DefaultImage
if daemonPlatform == "windows" {
// nslookup isn't present in the Windows busybox image; it's built into the base image.
image = WindowsBaseImage
}
dockerCmd(c, "run", image, "nslookup", "google.com")
}
// the exit code should be 0
func (s *DockerSuite) TestRunExitCodeZero(c *check.C) {
dockerCmd(c, "run", "busybox", "true")
}
// the exit code should be 1
func (s *DockerSuite) TestRunExitCodeOne(c *check.C) {
_, exitCode, err := dockerCmdWithError("run", "busybox", "false")
c.Assert(err, checker.NotNil)
c.Assert(exitCode, checker.Equals, 1)
}
// it should be possible to pipe in data via stdin to a process running in a container
func (s *DockerSuite) TestRunStdinPipe(c *check.C) {
// TODO Windows: This needs some work to make compatible.
testRequires(c, DaemonIsLinux)
runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat")
runCmd.Stdin = strings.NewReader("blahblah")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
out = strings.TrimSpace(out)
dockerCmd(c, "wait", out)
logsOut, _ := dockerCmd(c, "logs", out)
containerLogs := strings.TrimSpace(logsOut)
if containerLogs != "blahblah" {
c.Errorf("logs didn't print the container's logs %s", containerLogs)
}
dockerCmd(c, "rm", out)
}
// the container's ID should be printed when starting a container in detached mode
func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) {
out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
out = strings.TrimSpace(out)
dockerCmd(c, "wait", out)
rmOut, _ := dockerCmd(c, "rm", out)
rmOut = strings.TrimSpace(rmOut)
if rmOut != out {
c.Errorf("rm didn't print the container ID %s %s", out, rmOut)
}
}
// the working directory should be set correctly
func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) {
dir := "/root"
image := "busybox"
if daemonPlatform == "windows" {
dir = `C:/Windows`
}
// First with -w
out, _ := dockerCmd(c, "run", "-w", dir, image, "pwd")
out = strings.TrimSpace(out)
if out != dir {
c.Errorf("-w failed to set working directory")
}
// Then with --workdir
out, _ = dockerCmd(c, "run", "--workdir", dir, image, "pwd")
out = strings.TrimSpace(out)
if out != dir {
c.Errorf("--workdir failed to set working directory")
}
}
// pinging Google's DNS resolver should fail when we disable the networking
func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) {
count := "-c"
image := "busybox"
if daemonPlatform == "windows" {
count = "-n"
image = WindowsBaseImage
}
// First using the long form --net
out, exitCode, err := dockerCmdWithError("run", "--net=none", image, "ping", count, "1", "8.8.8.8")
if err != nil && exitCode != 1 {
c.Fatal(out, err)
}
if exitCode != 1 {
c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8")
}
}
// test that --link can use a container name as the link target
func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) {
// TODO Windows: This test cannot run on a Windows daemon as the networking
// settings are not populated back yet on inspect.
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox")
ip := inspectField(c, "parent", "NetworkSettings.Networks.bridge.IPAddress")
out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts")
if !strings.Contains(out, ip+" test") {
c.Fatalf("use a container name to link target failed")
}
}
// test that --link can use a container id as the link target
func (s *DockerSuite) TestRunLinksContainerWithContainerId(c *check.C) {
// TODO Windows: This test cannot run on a Windows daemon as the networking
// settings are not populated back yet on inspect.
testRequires(c, DaemonIsLinux)
cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox")
cID = strings.TrimSpace(cID)
ip := inspectField(c, cID, "NetworkSettings.Networks.bridge.IPAddress")
out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts")
if !strings.Contains(out, ip+" test") {
c.Fatalf("use a container id to link target failed")
}
}
func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet")
dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
// run a container in user-defined network udlinkNet with a link for an existing container
// and a link for a container that doesn't exist
dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo",
"--link=third:bar", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// ping to first and its alias foo must succeed
_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
c.Assert(err, check.IsNil)
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
c.Assert(err, check.IsNil)
// ping to third and its alias must fail
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third")
c.Assert(err, check.NotNil)
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar")
c.Assert(err, check.NotNil)
// start third container now
dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top")
c.Assert(waitRun("third"), check.IsNil)
// ping to third and its alias must succeed now
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third")
c.Assert(err, check.IsNil)
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar")
c.Assert(err, check.IsNil)
}
func (s *DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet")
dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo",
"busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// ping to first and its alias foo must succeed
_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
c.Assert(err, check.IsNil)
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
c.Assert(err, check.IsNil)
// Restart first container
dockerCmd(c, "restart", "first")
c.Assert(waitRun("first"), check.IsNil)
// ping to first and its alias foo must still succeed
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
c.Assert(err, check.IsNil)
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
c.Assert(err, check.IsNil)
// Restart second container
dockerCmd(c, "restart", "second")
c.Assert(waitRun("second"), check.IsNil)
// ping to first and its alias foo must still succeed
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
c.Assert(err, check.IsNil)
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
c.Assert(err, check.IsNil)
}
func (s *DockerSuite) TestRunWithNetAliasOnDefaultNetworks(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
defaults := []string{"bridge", "host", "none"}
for _, net := range defaults {
out, _, err := dockerCmdWithError("run", "-d", "--net", net, "--net-alias", "alias_"+net, "busybox", "top")
c.Assert(err, checker.NotNil)
c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error())
}
}
func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "-d", "bridge", "net1")
cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
// Check if default short-id alias is added automatically
id := strings.TrimSpace(cid1)
aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases")
c.Assert(aliases, checker.Contains, stringid.TruncateID(id))
cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Check if default short-id alias is added automatically
id = strings.TrimSpace(cid2)
aliases = inspectField(c, id, "NetworkSettings.Networks.net1.Aliases")
c.Assert(aliases, checker.Contains, stringid.TruncateID(id))
// ping to first and its network-scoped aliases
_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
c.Assert(err, check.IsNil)
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1")
c.Assert(err, check.IsNil)
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2")
c.Assert(err, check.IsNil)
// ping first container's short-id alias
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1))
c.Assert(err, check.IsNil)
// Restart first container
dockerCmd(c, "restart", "first")
c.Assert(waitRun("first"), check.IsNil)
// ping to first and its network-scoped aliases must succeed
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
c.Assert(err, check.IsNil)
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1")
c.Assert(err, check.IsNil)
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2")
c.Assert(err, check.IsNil)
// ping first container's short-id alias
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1))
c.Assert(err, check.IsNil)
}
// Issue 9677.
func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) {
out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true")
if err != nil {
if !strings.Contains(out, "flag provided but not defined: --exec-opt") { // no daemon (client-only)
c.Fatal(err, out)
}
}
}
// Regression test for #4979
func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) {
var (
out string
exitCode int
)
// Create a file in a volume
if daemonPlatform == "windows" {
out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, WindowsBaseImage, "cmd", "/c", `echo hello > c:\some\dir\file`)
} else {
out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file")
}
if exitCode != 0 {
c.Fatal("1", out, exitCode)
}
// Read the file from another container using --volumes-from to access the volume in the second container
if daemonPlatform == "windows" {
out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", WindowsBaseImage, "cmd", "/c", `type c:\some\dir\file`)
} else {
out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file")
}
if exitCode != 0 {
c.Fatal("2", out, exitCode)
}
}
// Volume path is a symlink which also exists on the host, and the host side is a file not a dir
// But the volume call is just a normal volume, not a bind mount
func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) {
var (
dockerFile string
containerPath string
cmd string
)
// TODO Windows (Post TP5): This test cannot run on a Windows daemon as
// Windows does not support symlinks inside a volume path
testRequires(c, SameHostDaemon, DaemonIsLinux)
name := "test-volume-symlink"
dir, err := ioutil.TempDir("", name)
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(dir)
// In the case of Windows to Windows CI, if the machine is setup so that
// the temp directory is not the C: drive, this test is invalid and will
// not work.
if daemonPlatform == "windows" && strings.ToLower(dir[:1]) != "c" {
c.Skip("Requires TEMP to point to C: drive")
}
f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700)
if err != nil {
c.Fatal(err)
}
f.Close()
if daemonPlatform == "windows" {
dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", WindowsBaseImage, dir, dir)
containerPath = `c:\test\test`
cmd = "tasklist"
} else {
dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir)
containerPath = "/test/test"
cmd = "true"
}
if _, err := buildImage(name, dockerFile, false); err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "-v", containerPath, name, cmd)
}
// Volume path is a symlink in the container
func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) {
var (
dockerFile string
containerPath string
cmd string
)
// TODO Windows (Post TP5): This test cannot run on a Windows daemon as
// Windows does not support symlinks inside a volume path
testRequires(c, SameHostDaemon, DaemonIsLinux)
name := "test-volume-symlink2"
if daemonPlatform == "windows" {
dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", WindowsBaseImage, name, name)
containerPath = `c:\test\test`
cmd = "tasklist"
} else {
dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p /%s\nRUN ln -s /%s /test", name, name)
containerPath = "/test/test"
cmd = "true"
}
if _, err := buildImage(name, dockerFile, false); err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "-v", containerPath, name, cmd)
}
func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) {
// TODO Windows: Temporary check - remove once TP5 support is dropped
if daemonPlatform == "windows" && windowsDaemonKV < 14350 {
c.Skip("Needs later Windows build for RO volumes")
}
if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 {
c.Fatalf("run should fail because volume is ro: exit code %d", code)
}
}
func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) {
// TODO Windows: Temporary check - remove once TP5 support is dropped
if daemonPlatform == "windows" && windowsDaemonKV < 14350 {
c.Skip("Needs later Windows build for RO volumes")
}
var (
volumeDir string
fileInVol string
)
if daemonPlatform == "windows" {
volumeDir = `c:/test` // Forward-slash as using busybox
fileInVol = `c:/test/file`
} else {
testRequires(c, DaemonIsLinux)
volumeDir = "/test"
fileInVol = `/test/file`
}
dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true")
if _, code, err := dockerCmdWithError("run", "--volumes-from", "parent:ro", "busybox", "touch", fileInVol); err == nil || code == 0 {
c.Fatalf("run should fail because volume is ro: exit code %d", code)
}
}
// Regression test for #1201
func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) {
var (
volumeDir string
fileInVol string
)
if daemonPlatform == "windows" {
volumeDir = `c:/test` // Forward-slash as using busybox
fileInVol = `c:/test/file`
} else {
volumeDir = "/test"
fileInVol = "/test/file"
}
dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true")
dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", fileInVol)
if out, _, err := dockerCmdWithError("run", "--volumes-from", "parent:bar", "busybox", "touch", fileInVol); err == nil || !strings.Contains(out, `invalid mode: bar`) {
c.Fatalf("running --volumes-from parent:bar should have failed with invalid mode: %q", out)
}
dockerCmd(c, "run", "--volumes-from", "parent", "busybox", "touch", fileInVol)
}
func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) {
prefix, slash := getPrefixAndSlashFromDaemonPlatform()
// TODO Windows: Temporary check - remove once TP5 support is dropped
if daemonPlatform == "windows" && windowsDaemonKV < 14350 {
c.Skip("Needs later Windows build for RO volumes")
}
dockerCmd(c, "run", "--name", "parent", "-v", prefix+slash+"test:"+prefix+slash+"test:ro", "busybox", "true")
// Expect this "rw" mode to be be ignored since the inherited volume is "ro"
if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent:rw", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil {
c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`")
}
dockerCmd(c, "run", "--name", "parent2", "-v", prefix+slash+"test:"+prefix+slash+"test:ro", "busybox", "true")
// Expect this to be read-only since both are "ro"
if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent2:ro", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil {
c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`")
}
}
// Test for GH#10618
func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) {
path1 := randomTmpDirPath("test1", daemonPlatform)
path2 := randomTmpDirPath("test2", daemonPlatform)
someplace := ":/someplace"
if daemonPlatform == "windows" {
// Windows requires that the source directory exists before calling HCS
testRequires(c, SameHostDaemon)
someplace = `:c:\someplace`
if err := os.MkdirAll(path1, 0755); err != nil {
c.Fatalf("Failed to create %s: %q", path1, err)
}
defer os.RemoveAll(path1)
if err := os.MkdirAll(path2, 0755); err != nil {
c.Fatalf("Failed to create %s: %q", path1, err)
}
defer os.RemoveAll(path2)
}
mountstr1 := path1 + someplace
mountstr2 := path2 + someplace
if out, _, err := dockerCmdWithError("run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil {
c.Fatal("Expected error about duplicate mount definitions")
} else {
if !strings.Contains(out, "Duplicate mount point") {
c.Fatalf("Expected 'duplicate mount point' error, got %v", out)
}
}
// Test for https://github.com/docker/docker/issues/22093
volumename1 := "test1"
volumename2 := "test2"
volume1 := volumename1 + someplace
volume2 := volumename2 + someplace
if out, _, err := dockerCmdWithError("run", "-v", volume1, "-v", volume2, "busybox", "true"); err == nil {
c.Fatal("Expected error about duplicate mount definitions")
} else {
if !strings.Contains(out, "Duplicate mount point") {
c.Fatalf("Expected 'duplicate mount point' error, got %v", out)
}
}
// the failed create should still have created volume volumename1 or volumename2;
// we should be able to remove whichever one was created
out, _ := dockerCmd(c, "volume", "ls")
if strings.Contains(out, volumename1) {
dockerCmd(c, "volume", "rm", volumename1)
} else {
dockerCmd(c, "volume", "rm", volumename2)
}
}
// Test for #1351
func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) {
prefix := ""
if daemonPlatform == "windows" {
prefix = `c:`
}
dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo")
dockerCmd(c, "run", "--volumes-from", "parent", "-v", prefix+"/test", "busybox", "cat", prefix+"/test/foo")
}
func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) {
prefix := ""
if daemonPlatform == "windows" {
prefix = `c:`
}
dockerCmd(c, "run", "--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo")
dockerCmd(c, "run", "--name", "parent2", "-v", prefix+"/other", "busybox", "touch", prefix+"/other/bar")
dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar")
}
// this test verifies the ID format of the container
func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) {
out, exit, err := dockerCmdWithError("run", "-d", "busybox", "true")
if err != nil {
c.Fatal(err)
}
if exit != 0 {
c.Fatalf("expected exit code 0 received %d", exit)
}
match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n"))
if err != nil {
c.Fatal(err)
}
if !match {
c.Fatalf("Invalid container ID: %s", out)
}
}
// Test that creating a container with a volume doesn't crash. Regression test for #995.
func (s *DockerSuite) TestRunCreateVolume(c *check.C) {
prefix := ""
if daemonPlatform == "windows" {
prefix = `c:`
}
dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true")
}
// Test that creating a volume with a symlink in its path works correctly. Test for #5152.
// Note that this bug happens only with symlinks with a target that starts with '/'.
func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) {
// Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...)
testRequires(c, DaemonIsLinux)
image := "docker-test-createvolumewithsymlink"
buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-")
buildCmd.Stdin = strings.NewReader(`FROM busybox
RUN ln -s home /bar`)
buildCmd.Dir = workingDirectory
err := buildCmd.Run()
if err != nil {
c.Fatalf("could not build '%s': %v", image, err)
}
_, exitCode, err := dockerCmdWithError("run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo")
if err != nil || exitCode != 0 {
c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
}
volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo")
c.Assert(err, checker.IsNil)
_, exitCode, err = dockerCmdWithError("rm", "-v", "test-createvolumewithsymlink")
if err != nil || exitCode != 0 {
c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode)
}
_, err = os.Stat(volPath)
if !os.IsNotExist(err) {
c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath)
}
}
// Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`.
func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) {
// TODO Windows (Post TP5): This test cannot run on a Windows daemon as
// Windows does not support symlinks inside a volume path
testRequires(c, DaemonIsLinux)
name := "docker-test-volumesfromsymlinkpath"
prefix := ""
dfContents := `FROM busybox
RUN ln -s home /foo
VOLUME ["/foo/bar"]`
if daemonPlatform == "windows" {
prefix = `c:`
dfContents = `FROM ` + WindowsBaseImage + `
RUN mkdir c:\home
RUN mklink /D c:\foo c:\home
VOLUME ["c:/foo/bar"]
ENTRYPOINT c:\windows\system32\cmd.exe`
}
buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-")
buildCmd.Stdin = strings.NewReader(dfContents)
buildCmd.Dir = workingDirectory
err := buildCmd.Run()
if err != nil {
c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err)
}
out, exitCode, err := dockerCmdWithError("run", "--name", "test-volumesfromsymlinkpath", name)
if err != nil || exitCode != 0 {
c.Fatalf("[run] (volume) err: %v, exitcode: %d, out: %s", err, exitCode, out)
}
_, exitCode, err = dockerCmdWithError("run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls "+prefix+"/foo | grep -q bar")
if err != nil || exitCode != 0 {
c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
}
}
func (s *DockerSuite) TestRunExitCode(c *check.C) {
var (
exit int
err error
)
_, exit, err = dockerCmdWithError("run", "busybox", "/bin/sh", "-c", "exit 72")
if err == nil {
c.Fatal("should not have a non nil error")
}
if exit != 72 {
c.Fatalf("expected exit code 72 received %d", exit)
}
}
func (s *DockerSuite) TestRunUserDefaults(c *check.C) {
expected := "uid=0(root) gid=0(root)"
if daemonPlatform == "windows" {
expected = "uid=1000(ContainerAdministrator) gid=1000(ContainerAdministrator)"
}
out, _ := dockerCmd(c, "run", "busybox", "id")
if !strings.Contains(out, expected) {
c.Fatalf("expected '%s' got %s", expected, out)
}
}
func (s *DockerSuite) TestRunUserByName(c *check.C) {
// TODO Windows: This test cannot run on a Windows daemon as Windows does
// not support the use of -u
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id")
if !strings.Contains(out, "uid=0(root) gid=0(root)") {
c.Fatalf("expected root user got %s", out)
}
}
func (s *DockerSuite) TestRunUserByID(c *check.C) {
// TODO Windows: This test cannot run on a Windows daemon as Windows does
// not support the use of -u
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id")
if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") {
c.Fatalf("expected daemon user got %s", out)
}
}
func (s *DockerSuite) TestRunUserByIDBig(c *check.C) {
// TODO Windows: This test cannot run on a Windows daemon as Windows does
// not support the use of -u
testRequires(c, DaemonIsLinux, NotArm)
out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id")
if err == nil {
c.Fatal("No error, but must be.", out)
}
if !strings.Contains(out, libcontainerUser.ErrRange.Error()) {
c.Fatalf("expected error about uids range, got %s", out)
}
}
func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) {
// TODO Windows: This test cannot run on a Windows daemon as Windows does
// not support the use of -u
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "-u", "-1", "busybox", "id")
if err == nil {
c.Fatal("No error, but must be.", out)
}
if !strings.Contains(out, libcontainerUser.ErrRange.Error()) {
c.Fatalf("expected error about uids range, got %s", out)
}
}
func (s *DockerSuite) TestRunUserByIDZero(c *check.C) {
// TODO Windows: This test cannot run on a Windows daemon as Windows does
// not support the use of -u
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "-u", "0", "busybox", "id")
if err != nil {
c.Fatal(err, out)
}
if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") {
c.Fatalf("expected daemon user got %s", out)
}
}
func (s *DockerSuite) TestRunUserNotFound(c *check.C) {
// TODO Windows: This test cannot run on a Windows daemon as Windows does
// not support the use of -u
testRequires(c, DaemonIsLinux)
_, _, err := dockerCmdWithError("run", "-u", "notme", "busybox", "id")
if err == nil {
c.Fatal("unknown user should cause container to fail")
}
}
func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) {
sleepTime := "2"
group := sync.WaitGroup{}
group.Add(2)
errChan := make(chan error, 2)
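// Start both containers concurrently; the buffered channel lets each goroutine report its error without blocking.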
for i := 0; i < 2; i++ {
go func() {
defer group.Done()
_, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime)
errChan <- err
}()
}
group.Wait()
close(errChan)
for err := range errChan {
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestRunEnvironment(c *check.C) {
// TODO Windows: Environment handling is different between Linux and
// Windows and this test relies currently on unix functionality.
testRequires(c, DaemonIsLinux)
cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env")
cmd.Env = append(os.Environ(),
"TRUE=false",
"TRICKY=tri\ncky\n",
)
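// -e=TRUE and -e=TRICKY carry no value, so their values are imported from the client environment configured above.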
out, _, err := runCommandWithOutput(cmd)
if err != nil {
c.Fatal(err, out)
}
actualEnv := strings.Split(strings.TrimSpace(out), "\n")
sort.Strings(actualEnv)
goodEnv := []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOSTNAME=testing",
"FALSE=true",
"TRUE=false",
"TRICKY=tri",
"cky",
"",
"HOME=/root",
}
sort.Strings(goodEnv)
if len(goodEnv) != len(actualEnv) {
c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", "))
}
for i := range goodEnv {
if actualEnv[i] != goodEnv[i] {
c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
}
}
}
func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) {
// TODO Windows: Environment handling is different between Linux and
// Windows and this test relies currently on unix functionality.
testRequires(c, DaemonIsLinux)
// Test to make sure that when we use -e on env vars that are
// not set in our local env that they're removed (if present) in
// the container
cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env")
cmd.Env = appendBaseEnv(true)
out, _, err := runCommandWithOutput(cmd)
if err != nil {
c.Fatal(err, out)
}
actualEnv := strings.Split(strings.TrimSpace(out), "\n")
sort.Strings(actualEnv)
goodEnv := []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOME=/root",
}
sort.Strings(goodEnv)
if len(goodEnv) != len(actualEnv) {
c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", "))
}
for i := range goodEnv {
if actualEnv[i] != goodEnv[i] {
c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
}
}
}
func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) {
// TODO Windows: Environment handling is different between Linux and
// Windows and this test relies currently on unix functionality.
testRequires(c, DaemonIsLinux)
// Test to make sure that when we use -e on env vars that are
// already in the env that we're overriding them
cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env")
cmd.Env = appendBaseEnv(true, "HOSTNAME=bar")
out, _, err := runCommandWithOutput(cmd)
if err != nil {
c.Fatal(err, out)
}
actualEnv := strings.Split(strings.TrimSpace(out), "\n")
sort.Strings(actualEnv)
goodEnv := []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOME=/root2",
"HOSTNAME=bar",
}
sort.Strings(goodEnv)
if len(goodEnv) != len(actualEnv) {
c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", "))
}
for i := range goodEnv {
if actualEnv[i] != goodEnv[i] {
c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
}
}
}
func (s *DockerSuite) TestRunContainerNetwork(c *check.C) {
if daemonPlatform == "windows" {
// Windows busybox does not have ping. Use built in ping instead.
dockerCmd(c, "run", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1")
} else {
dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1")
}
}
func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) {
// TODO Windows: This is Linux specific as --link is not supported and
// this will be deprecated in favor of container networking model.
testRequires(c, DaemonIsLinux, NotUserNamespace)
dockerCmd(c, "run", "--name", "linked", "busybox", "true")
_, _, err := dockerCmdWithError("run", "--net=host", "--link", "linked:linked", "busybox", "true")
if err == nil {
c.Fatal("Expected error")
}
}
// #7851 hostname outside container shows FQDN, inside only shortname
// For testing purposes it is not required to set host's hostname directly
// and use "--net=host" (as the original issue submitter did), as the same
// codepath is executed with "docker run -h <hostname>". Both were manually
// tested, but this testcase takes the simpler path of using "run -h .."
func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) {
// TODO Windows: -h is not yet functional.
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname")
if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" {
c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual)
}
}
func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) {
// Not applicable for Windows as Windows daemon does not support
// the concept of --privileged, and mknod is a Unix concept.
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
if actual := strings.Trim(out, "\r\n"); actual != "ok" {
c.Fatalf("expected output ok received %s", actual)
}
}
func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) {
// Not applicable for Windows as Windows daemon does not support
// the concept of --privileged, and mknod is a Unix concept.
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
if actual := strings.Trim(out, "\r\n"); actual != "ok" {
c.Fatalf("expected output ok received %s", actual)
}
}
func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) {
// Not applicable for Windows as there is no concept of --cap-drop
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "--cap-drop=CHPASS", "busybox", "ls")
if err == nil {
c.Fatal(err, out)
}
}
func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) {
// Not applicable for Windows as there is no concept of --cap-drop or mknod
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
if err == nil {
c.Fatal(err, out)
}
if actual := strings.Trim(out, "\r\n"); actual == "ok" {
c.Fatalf("expected output not ok received %s", actual)
}
}
func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) {
// Not applicable for Windows as there is no concept of --cap-drop or mknod
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
if err == nil {
c.Fatal(err, out)
}
if actual := strings.Trim(out, "\r\n"); actual == "ok" {
c.Fatalf("expected output not ok received %s", actual)
}
}
func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) {
// Not applicable for Windows as there is no concept of --cap-drop or mknod
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
if err == nil {
c.Fatal(err, out)
}
if actual := strings.Trim(out, "\r\n"); actual == "ok" {
c.Fatalf("expected output not ok received %s", actual)
}
}
func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) {
// Not applicable for Windows as there is no concept of --cap-drop or mknod
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
if actual := strings.Trim(out, "\r\n"); actual != "ok" {
c.Fatalf("expected output ok received %s", actual)
}
}
func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) {
// Not applicable for Windows as there is no concept of --cap-add
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "--cap-add=CHPASS", "busybox", "ls")
if err == nil {
c.Fatal(err, out)
}
}
func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) {
// Not applicable for Windows as there is no concept of --cap-add
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
if actual := strings.Trim(out, "\r\n"); actual != "ok" {
c.Fatalf("expected output ok received %s", actual)
}
}
func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) {
// Not applicable for Windows as there is no concept of --cap-add
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
if actual := strings.Trim(out, "\r\n"); actual != "ok" {
c.Fatalf("expected output ok received %s", actual)
}
}
func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) {
// Not applicable for Windows as there is no concept of --cap-add
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
if err == nil {
c.Fatal(err, out)
}
if actual := strings.Trim(out, "\r\n"); actual == "ok" {
c.Fatalf("expected output not ok received %s", actual)
}
}
func (s *DockerSuite) TestRunGroupAdd(c *check.C) {
// Not applicable for Windows as there is no concept of --group-add
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "--group-add=audio", "--group-add=staff", "--group-add=777", "busybox", "sh", "-c", "id")
groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777"
if actual := strings.Trim(out, "\r\n"); actual != groupsList {
c.Fatalf("expected output %s received %s", groupsList, actual)
}
}
func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) {
// Not applicable for Windows as there is no concept of --privileged
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok")
if actual := strings.Trim(out, "\r\n"); actual != "ok" {
c.Fatalf("expected output ok received %s", actual)
}
}
func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) {
// Not applicable for Windows as there is no concept of unprivileged
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok")
if err == nil {
c.Fatal(err, out)
}
if actual := strings.Trim(out, "\r\n"); actual == "ok" {
c.Fatalf("expected output not ok received %s", actual)
}
}
func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) {
// Not applicable for Windows as there is no concept of unprivileged
testRequires(c, DaemonIsLinux, NotArm)
if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 {
c.Fatal("sys should not be writable in a non privileged container")
}
}
func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) {
// Not applicable for Windows as there is no concept of unprivileged
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
if _, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 {
c.Fatalf("sys should be writable in privileged container")
}
}
func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) {
// Not applicable for Windows as there is no concept of unprivileged
testRequires(c, DaemonIsLinux)
if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/proc/sysrq-trigger"); err == nil || code == 0 {
c.Fatal("proc should not be writable in a non privileged container")
}
}
func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) {
// Not applicable for Windows as there is no concept of --privileged
testRequires(c, DaemonIsLinux, NotUserNamespace)
if _, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "touch /proc/sysrq-trigger"); code != 0 {
c.Fatalf("proc should be writable in privileged container")
}
}
func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) {
// Not applicable on Windows as /dev/ is a Unix specific concept
// TODO: NotUserNamespace could be removed here if "root" "root" is replaced w user
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null")
deviceLineFields := strings.Fields(out)
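// Blank out the varying date/time fields (6-8) before comparing against the expected listing.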
deviceLineFields[6] = ""
deviceLineFields[7] = ""
deviceLineFields[8] = ""
expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"}
if !(reflect.DeepEqual(deviceLineFields, expected)) {
c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out)
}
}
func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) {
// Not applicable on Windows as /dev/ is a Unix specific concept
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero")
if actual := strings.Trim(out, "\r\n"); actual[0] == '0' {
c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual)
}
}
func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) {
// Not applicable on Windows as it does not support chroot
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "busybox", "chroot", "/", "true")
}
func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) {
// Not applicable on Windows as Windows does not support --device
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo")
if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" {
c.Fatalf("expected output /dev/nulo, received %s", actual)
}
}
func (s *DockerSuite) TestRunAddingOptionalDevicesNoSrc(c *check.C) {
// Not applicable on Windows as Windows does not support --device
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, _ := dockerCmd(c, "run", "--device", "/dev/zero:rw", "busybox", "sh", "-c", "ls /dev/zero")
if actual := strings.Trim(out, "\r\n"); actual != "/dev/zero" {
c.Fatalf("expected output /dev/zero, received %s", actual)
}
}
func (s *DockerSuite) TestRunAddingOptionalDevicesInvalidMode(c *check.C) {
// Not applicable on Windows as Windows does not support --device
testRequires(c, DaemonIsLinux, NotUserNamespace)
_, _, err := dockerCmdWithError("run", "--device", "/dev/zero:ro", "busybox", "sh", "-c", "ls /dev/zero")
if err == nil {
c.Fatalf("run container with device mode ro should fail")
}
}
func (s *DockerSuite) TestRunModeHostname(c *check.C) {
// Not applicable on Windows as Windows does not support -h
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname")
if actual := strings.Trim(out, "\r\n"); actual != "testhostname" {
c.Fatalf("expected 'testhostname', but says: %q", actual)
}
out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname")
hostname, err := os.Hostname()
if err != nil {
c.Fatal(err)
}
if actual := strings.Trim(out, "\r\n"); actual != hostname {
c.Fatalf("expected %q, but says: %q", hostname, actual)
}
}
func (s *DockerSuite) TestRunRootWorkdir(c *check.C) {
out, _ := dockerCmd(c, "run", "--workdir", "/", "busybox", "pwd")
expected := "/\n"
if daemonPlatform == "windows" {
expected = "C:" + expected
}
if out != expected {
c.Fatalf("pwd returned %q (expected %s)", s, expected)
}
}
func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) {
if daemonPlatform == "windows" {
// Windows busybox will fail with Permission Denied on items such as pagefile.sys
dockerCmd(c, "run", "-v", `c:\:c:\host`, WindowsBaseImage, "cmd", "-c", "dir", `c:\host`)
} else {
dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host")
}
}
func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) {
mount := "/:/"
targetDir := "/host"
if daemonPlatform == "windows" {
mount = `c:\:c:\`
targetDir = "c:/host" // Forward slash as using busybox
}
out, _, err := dockerCmdWithError("run", "-v", mount, "busybox", "ls", targetDir)
if err == nil {
c.Fatal(out, err)
}
}
// Verify that a container gets default DNS when only localhost resolvers exist
func (s *DockerSuite) TestRunDNSDefaultOptions(c *check.C) {
// Not applicable on Windows as this is testing Unix specific functionality
testRequires(c, SameHostDaemon, DaemonIsLinux)
// preserve original resolv.conf for restoring after test
origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
if os.IsNotExist(err) {
c.Fatalf("/etc/resolv.conf does not exist")
}
// defer restored original conf
defer func() {
if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
c.Fatal(err)
}
}()
// test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost
// 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by
// GetNameservers(), leading to a replacement of nameservers with the default set
tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1")
if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
c.Fatal(err)
}
actual, _ := dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf")
// check that the actual defaults are appended to the commented out
// localhost resolver (which should be preserved)
// NOTE: if we ever change the defaults from google dns, this will break
expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n"
if actual != expected {
c.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual)
}
}
func (s *DockerSuite) TestRunDNSOptions(c *check.C) {
// Not applicable on Windows as Windows does not support --dns*, or
// the Unix-specific functionality of resolv.conf.
testRequires(c, DaemonIsLinux)
out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf")
// The client will get a warning on stderr when setting DNS to a localhost address; verify this:
if !strings.Contains(stderr, "Localhost DNS setting") {
c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr)
}
actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1)
if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" {
c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual)
}
out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf")
actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1)
if actual != "nameserver 127.0.0.1 options ndots:3" {
c.Fatalf("expected 'nameserver 127.0.0.1 options ndots:3', but says: %q", actual)
}
}
func (s *DockerSuite) TestRunDNSRepeatOptions(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf")
actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1)
if actual != "search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3" {
c.Fatalf("expected 'search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3', but says: %q", actual)
}
}
func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *check.C) {
// Not applicable on Windows as testing Unix specific functionality
testRequires(c, SameHostDaemon, DaemonIsLinux)
origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
if os.IsNotExist(err) {
c.Fatalf("/etc/resolv.conf does not exist")
}
hostNameservers := resolvconf.GetNameservers(origResolvConf, types.IP)
hostSearch := resolvconf.GetSearchDomains(origResolvConf)
var out string
out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf")
if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "127.0.0.1" {
c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0]))
}
actualSearch := resolvconf.GetSearchDomains([]byte(out))
if len(actualSearch) != len(hostSearch) {
c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch))
}
for i := range actualSearch {
if actualSearch[i] != hostSearch[i] {
c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i])
}
}
out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP)
if len(actualNameservers) != len(hostNameservers) {
c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNameservers), len(actualNameservers))
}
for i := range actualNameservers {
if actualNameservers[i] != hostNameservers[i] {
c.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNameservers[i])
}
}
if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" {
c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0]))
}
// test with file
tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1")
if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
c.Fatal(err)
}
// put the old resolvconf back
defer func() {
if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
c.Fatal(err)
}
}()
resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
if os.IsNotExist(err) {
c.Fatalf("/etc/resolv.conf does not exist")
}
hostNameservers = resolvconf.GetNameservers(resolvConf, types.IP)
hostSearch = resolvconf.GetSearchDomains(resolvConf)
out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf")
if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 {
c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers)
}
actualSearch = resolvconf.GetSearchDomains([]byte(out))
if len(actualSearch) != len(hostSearch) {
c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch))
}
for i := range actualSearch {
if actualSearch[i] != hostSearch[i] {
c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i])
}
}
}
// Test to see if a non-root user can resolve a DNS name. Also
// check if the container resolv.conf file has at least 0644 perm.
func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) {
// Not applicable on Windows as Windows does not support --user
testRequires(c, SameHostDaemon, Network, DaemonIsLinux, NotArm)
dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org")
cID, err := getIDByName("testperm")
if err != nil {
c.Fatal(err)
}
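// resolv.conf must be at least world-readable (0644) so a non-root user can resolve names.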
fmode := (os.FileMode)(0644)
finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf"))
if err != nil {
c.Fatal(err)
}
if (finfo.Mode() & fmode) != fmode {
c.Fatalf("Expected container resolv.conf mode to be at least %s, instead got %s", fmode.String(), finfo.Mode().String())
}
}
// Test if container resolv.conf gets updated the next time it restarts
// if host /etc/resolv.conf has changed. This only applies if the container
// uses the host's /etc/resolv.conf and does not have any dns options provided.
func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) {
// Not applicable on Windows as testing unix specific functionality
testRequires(c, SameHostDaemon, DaemonIsLinux)
c.Skip("Unstable test, to be re-activated once #19937 is resolved")
tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n")
tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1")
//take a copy of resolv.conf for restoring after test completes
resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
if err != nil {
c.Fatal(err)
}
// This test case is meant to test monitoring resolv.conf when it is
// a regular file, not a bind mount. So we unmount resolv.conf and replace
// it with a file containing the original settings.
cmd := exec.Command("umount", "/etc/resolv.conf")
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
//cleanup
defer func() {
if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
c.Fatal(err)
}
}()
//1. test that a restarting container gets an updated resolv.conf
dockerCmd(c, "run", "--name=first", "busybox", "true")
containerID1, err := getIDByName("first")
if err != nil {
c.Fatal(err)
}
// replace resolv.conf with our temporary copy
bytesResolvConf := []byte(tmpResolvConf)
if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
c.Fatal(err)
}
// start the container again to pickup changes
dockerCmd(c, "start", "first")
// check for update in container
containerResolv, err := readContainerFile(containerID1, "resolv.conf")
if err != nil {
c.Fatal(err)
}
if !bytes.Equal(containerResolv, bytesResolvConf) {
c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
}
/* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
c.Fatal(err)
} */
//2. test that a restarting container does not receive resolv.conf updates
// if it modified the container copy of the starting point resolv.conf
dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf")
containerID2, err := getIDByName("second")
if err != nil {
c.Fatal(err)
}
//make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
c.Fatal(err)
}
// start the container again
dockerCmd(c, "start", "second")
// check for update in container
containerResolv, err = readContainerFile(containerID2, "resolv.conf")
if err != nil {
c.Fatal(err)
}
if bytes.Equal(containerResolv, resolvConfSystem) {
c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv))
}
//3. test that a running container's resolv.conf is not modified while running
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
runningContainerID := strings.TrimSpace(out)
// replace resolv.conf
if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
c.Fatal(err)
}
// check for update in container
containerResolv, err = readContainerFile(runningContainerID, "resolv.conf")
if err != nil {
c.Fatal(err)
}
if bytes.Equal(containerResolv, bytesResolvConf) {
c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
}
//4. test that a running container's resolv.conf is updated upon restart
// (the above container is still running..)
dockerCmd(c, "restart", runningContainerID)
// check for update in container
containerResolv, err = readContainerFile(runningContainerID, "resolv.conf")
if err != nil {
c.Fatal(err)
}
if !bytes.Equal(containerResolv, bytesResolvConf) {
c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv))
}
//5. test that additions of a localhost resolver are cleaned from
// host resolv.conf before updating container's resolv.conf copies
// replace resolv.conf with a localhost-only nameserver copy
bytesResolvConf = []byte(tmpLocalhostResolvConf)
if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
c.Fatal(err)
}
// start the container again to pickup changes
dockerCmd(c, "start", "first")
// our first exited container ID should have been updated, but with default DNS
// after the cleanup of resolv.conf found only a localhost nameserver:
containerResolv, err = readContainerFile(containerID1, "resolv.conf")
if err != nil {
c.Fatal(err)
}
expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n"
if !bytes.Equal(containerResolv, []byte(expected)) {
c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv))
}
//6. Test that replacing (as opposed to modifying) resolv.conf triggers an update
// of containers' resolv.conf.
// Restore the original resolv.conf
if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
c.Fatal(err)
}
// Run the container so it picks up the old settings
dockerCmd(c, "run", "--name=third", "busybox", "true")
containerID3, err := getIDByName("third")
if err != nil {
c.Fatal(err)
}
// Create a modified resolv.conf.aside and override resolv.conf with it
bytesResolvConf = []byte(tmpResolvConf)
if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil {
c.Fatal(err)
}
err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf")
if err != nil {
c.Fatal(err)
}
// start the container again to pickup changes
dockerCmd(c, "start", "third")
// check for update in container
containerResolv, err = readContainerFile(containerID3, "resolv.conf")
if err != nil {
c.Fatal(err)
}
if !bytes.Equal(containerResolv, bytesResolvConf) {
c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
}
//cleanup, restore original resolv.conf happens in defer func()
}
func (s *DockerSuite) TestRunAddHost(c *check.C) {
// Not applicable on Windows as it does not support --add-host
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts")
actual := strings.Trim(out, "\r\n")
if actual != "86.75.30.9\textra" {
c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual)
}
}
// Regression test for #6983
func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) {
_, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true")
if exitCode != 0 {
c.Fatalf("Container should have exited with error code 0")
}
}
// Regression test for #6983
func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) {
_, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "busybox", "true")
if exitCode != 0 {
c.Fatalf("Container should have exited with error code 0")
}
}
// Regression test for #6983
func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) {
_, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true")
if exitCode != 0 {
c.Fatalf("Container should have exited with error code 0")
}
}
// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode
// but using --attach instead of -a to make sure we read the flag correctly
func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true")
_, stderr, _, err := runCommandWithStdoutStderr(cmd)
if err == nil {
c.Fatal("Container should have exited with error code different than 0")
} else if !strings.Contains(stderr, "Conflicting options: -a and -d") {
c.Fatal("Should have been returned an error with conflicting options -a and -d")
}
}
func (s *DockerSuite) TestRunState(c *check.C) {
// TODO Windows: This needs some rework as Windows busybox does not support top
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
id := strings.TrimSpace(out)
state := inspectField(c, id, "State.Running")
if state != "true" {
c.Fatal("Container state is 'not running'")
}
pid1 := inspectField(c, id, "State.Pid")
if pid1 == "0" {
c.Fatal("Container state Pid 0")
}
dockerCmd(c, "stop", id)
state = inspectField(c, id, "State.Running")
if state != "false" {
c.Fatal("Container state is 'running'")
}
pid2 := inspectField(c, id, "State.Pid")
if pid2 == pid1 {
c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
}
dockerCmd(c, "start", id)
state = inspectField(c, id, "State.Running")
if state != "true" {
c.Fatal("Container state is 'not running'")
}
pid3 := inspectField(c, id, "State.Pid")
if pid3 == pid1 {
c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
}
}
// Test for #1737
func (s *DockerSuite) TestRunCopyVolumeUidGid(c *check.C) {
// Not applicable on Windows as it does not support uid or gid in this way
testRequires(c, DaemonIsLinux)
name := "testrunvolumesuidgid"
_, err := buildImage(name,
`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`,
true)
if err != nil {
c.Fatal(err)
}
// Test that the uid and gid are copied from the image to the volume
out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'")
out = strings.TrimSpace(out)
if out != "dockerio:dockerio" {
c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out)
}
}
// Test for #1582
func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) {
// TODO Windows, post TP5. Windows does not yet support volume functionality
// that copies from the image to the volume.
testRequires(c, DaemonIsLinux)
name := "testruncopyvolumecontent"
_, err := buildImage(name,
`FROM busybox
RUN mkdir -p /hello/local && echo hello > /hello/local/world`,
true)
if err != nil {
c.Fatal(err)
}
// Test that the content is copied from the image to the volume
out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello")
if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) {
c.Fatal("Container failed to transfer content to volume")
}
}
func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) {
name := "testrunmdcleanuponentrypoint"
if _, err := buildImage(name,
`FROM busybox
ENTRYPOINT ["echo"]
CMD ["testingpoint"]`,
true); err != nil {
c.Fatal(err)
}
out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name)
if exit != 0 {
c.Fatalf("expected exit code 0 received %d, out: %q", exit, out)
}
out = strings.TrimSpace(out)
expected := "root"
if daemonPlatform == "windows" {
expected = `user manager\containeradministrator`
}
if out != expected {
c.Fatalf("Expected output %s, got %q", expected, out)
}
}
// TestRunWorkdirExistsAndIsFile checks that 'docker run -w' pointing at an existing file is detected and rejected
func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) {
existingFile := "/bin/cat"
expected := "not a directory"
if daemonPlatform == "windows" {
existingFile = `\windows\system32\ntdll.dll`
expected = `Cannot mkdir: \windows\system32\ntdll.dll is not a directory.`
}
out, exitCode, err := dockerCmdWithError("run", "-w", existingFile, "busybox")
if !(err != nil && exitCode == 125 && strings.Contains(out, expected)) {
c.Fatalf("Existing binary as a directory should error out with exitCode 125; we got: %s, exitCode: %d", out, exitCode)
}
}
func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) {
name := "testrunexitonstdinclose"
meow := "/bin/cat"
delay := 60
if daemonPlatform == "windows" {
meow = "cat"
}
runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow)
stdin, err := runCmd.StdinPipe()
if err != nil {
c.Fatal(err)
}
stdout, err := runCmd.StdoutPipe()
if err != nil {
c.Fatal(err)
}
if err := runCmd.Start(); err != nil {
c.Fatal(err)
}
if _, err := stdin.Write([]byte("hello\n")); err != nil {
c.Fatal(err)
}
r := bufio.NewReader(stdout)
line, err := r.ReadString('\n')
if err != nil {
c.Fatal(err)
}
line = strings.TrimSpace(line)
if line != "hello" {
c.Fatalf("Output should be 'hello', got '%q'", line)
}
if err := stdin.Close(); err != nil {
c.Fatal(err)
}
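// Wait for the client in a goroutine so the test can time out instead of hanging if it never exits.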
finish := make(chan error)
go func() {
finish <- runCmd.Wait()
close(finish)
}()
select {
case err := <-finish:
c.Assert(err, check.IsNil)
case <-time.After(time.Duration(delay) * time.Second):
c.Fatal("docker run failed to exit on stdin close")
}
state := inspectField(c, name, "State.Running")
if state != "false" {
c.Fatal("Container must be stopped after stdin closing")
}
}
// Test run -i --restart xxx doesn't hang
func (s *DockerSuite) TestRunInteractiveWithRestartPolicy(c *check.C) {
name := "test-inter-restart"
runCmd := exec.Command(dockerBinary, "run", "-i", "--name", name, "--restart=always", "busybox", "sh")
stdin, err := runCmd.StdinPipe()
c.Assert(err, checker.IsNil)
err = runCmd.Start()
c.Assert(err, checker.IsNil)
c.Assert(waitRun(name), check.IsNil)
_, err = stdin.Write([]byte("exit 11\n"))
c.Assert(err, checker.IsNil)
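// Even though --restart=always restarts the container after 'exit 11', the attached client should still return.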
finish := make(chan error)
go func() {
finish <- runCmd.Wait()
close(finish)
}()
delay := 10 * time.Second
select {
case <-finish:
case <-time.After(delay):
c.Fatal("run -i --restart hangs")
}
c.Assert(waitRun(name), check.IsNil)
dockerCmd(c, "stop", name)
}
// Test for #2267
func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) {
// Cannot run on Windows as Windows does not support diff.
testRequires(c, DaemonIsLinux)
name := "writehosts"
out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts")
if !strings.Contains(out, "test2267") {
c.Fatal("/etc/hosts should contain 'test2267'")
}
out, _ = dockerCmd(c, "diff", name)
if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) {
c.Fatal("diff should be empty")
}
}
func eqToBaseDiff(out string, c *check.C) bool {
name := "eqToBaseDiff" + stringutils.GenerateRandomAlphaOnlyString(32)
dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello")
cID, err := getIDByName(name)
c.Assert(err, check.IsNil)
baseDiff, _ := dockerCmd(c, "diff", cID)
baseArr := strings.Split(baseDiff, "\n")
sort.Strings(baseArr)
outArr := strings.Split(out, "\n")
sort.Strings(outArr)
return sliceEq(baseArr, outArr)
}
func sliceEq(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}
// Test for #2267
func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) {
// Cannot run on Windows as Windows does not support diff.
testRequires(c, DaemonIsLinux)
name := "writehostname"
out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname")
if !strings.Contains(out, "test2267") {
c.Fatal("/etc/hostname should contain 'test2267'")
}
out, _ = dockerCmd(c, "diff", name)
if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) {
c.Fatal("diff should be empty")
}
}
// Test for #2267
func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) {
// Cannot run on Windows as Windows does not support diff.
testRequires(c, DaemonIsLinux)
name := "writeresolv"
out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf")
if !strings.Contains(out, "test2267") {
c.Fatal("/etc/resolv.conf should contain 'test2267'")
}
out, _ = dockerCmd(c, "diff", name)
if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) {
c.Fatal("diff should be empty")
}
}
func (s *DockerSuite) TestRunWithBadDevice(c *check.C) {
// Cannot run on Windows as Windows does not support --device
testRequires(c, DaemonIsLinux)
name := "baddevice"
out, _, err := dockerCmdWithError("run", "--name", name, "--device", "/etc", "busybox", "true")
if err == nil {
c.Fatal("Run should fail with bad device")
}
expected := `"/etc": not a device node`
if !strings.Contains(out, expected) {
c.Fatalf("Output should contain %q, actual out: %q", expected, out)
}
}
func (s *DockerSuite) TestRunEntrypoint(c *check.C) {
name := "entrypoint"
out, _ := dockerCmd(c, "run", "--name", name, "--entrypoint", "echo", "busybox", "-n", "foobar")
expected := "foobar"
if out != expected {
c.Fatalf("Output should be %q, actual out: %q", expected, out)
}
}
func (s *DockerSuite) TestRunBindMounts(c *check.C) {
testRequires(c, SameHostDaemon)
if daemonPlatform == "linux" {
testRequires(c, DaemonIsLinux, NotUserNamespace)
}
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
tmpDir, err := ioutil.TempDir("", "docker-test-container")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
writeFile(path.Join(tmpDir, "touch-me"), "", c)
// TODO Windows: Temporary check - remove once TP5 support is dropped
if daemonPlatform != "windows" || windowsDaemonKV >= 14350 {
// Test reading from a read-only bind mount
out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp")
if !strings.Contains(out, "touch-me") {
c.Fatal("Container failed to read from bind mount")
}
}
// test writing to bind mount
if daemonPlatform == "windows" {
dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla")
} else {
dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla")
}
readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist
// test mounting to an illegal destination directory
_, _, err = dockerCmdWithError("run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".")
if err == nil {
c.Fatal("Container bind mounted illegal directory")
}
// Windows does not (and likely never will) support mounting a single file
if daemonPlatform != "windows" {
// test mount a file
dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla")
content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist
expected := "yotta"
if content != expected {
c.Fatalf("Output should be %q, actual out: %q", expected, content)
}
}
}
// Ensure that CIDFile gets deleted if it's empty
// Perform this test by making `docker run` fail
func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) {
tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tmpCidFile := path.Join(tmpDir, "cid")
image := "emptyfs"
if daemonPlatform == "windows" {
// Windows can't support an emptyfs image. Just use the regular Windows image
image = WindowsBaseImage
}
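// Running an image with no command must fail before a container ID is ever written to the cidfile.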
out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image)
if err == nil {
c.Fatalf("Run without command must fail. out=%s", out)
} else if !strings.Contains(out, "No command specified") {
c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err)
}
if _, err := os.Stat(tmpCidFile); err == nil {
c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile)
}
}
// #2098 - Docker cidFiles only contain short version of the containerId
//sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
// TestRunCidFileCheckIDLength tests that run --cidfile returns the long ID
func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) {
tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
if err != nil {
c.Fatal(err)
}
tmpCidFile := path.Join(tmpDir, "cid")
defer os.RemoveAll(tmpDir)
out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true")
id := strings.TrimSpace(out)
buffer, err := ioutil.ReadFile(tmpCidFile)
if err != nil {
c.Fatal(err)
}
cid := string(buffer)
if len(cid) != 64 {
c.Fatalf("--cidfile should be a long id, not %q", id)
}
if cid != id {
c.Fatalf("cid must be equal to %s, got %s", id, cid)
}
}
func (s *DockerSuite) TestRunSetMacAddress(c *check.C) {
mac := "12:34:56:78:9a:bc"
var out string
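// Read the MAC address back from inside the container using platform-specific tooling.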
if daemonPlatform == "windows" {
out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'")
mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style MACs
} else {
out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'")
}
actualMac := strings.TrimSpace(out)
if actualMac != mac {
c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac)
}
}
func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) {
// TODO Windows. Network settings are not propagated back to inspect.
testRequires(c, DaemonIsLinux)
mac := "12:34:56:78:9a:bc"
out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top")
id := strings.TrimSpace(out)
inspectedMac := inspectField(c, id, "NetworkSettings.Networks.bridge.MacAddress")
if inspectedMac != mac {
c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac)
}
}
// test docker run with an invalid mac address
func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) {
out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox")
//using an invalid mac address should error out
if err == nil || !strings.Contains(out, "is not a valid mac address") {
c.Fatalf("run with an invalid --mac-address should error out")
}
}
func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) {
// TODO Windows. Network settings are not propagated back to inspect.
testRequires(c, SameHostDaemon, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top")
id := strings.TrimSpace(out)
ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress")
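// Simulate a lost rule by manually deleting the container's ACCEPT rule from the DOCKER chain.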
iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip),
"!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT")
out, _, err := runCommandWithOutput(iptCmd)
if err != nil {
c.Fatal(err, out)
}
if err := deleteContainer(id); err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top")
}
func (s *DockerSuite) TestRunPortInUse(c *check.C) {
// TODO Windows. The duplicate NAT message returned by Windows will be
// changing, as it is currently completely undecipherable. This test also
// needs modifying to run sh rather than top, as top isn't in Windows busybox.
testRequires(c, SameHostDaemon, DaemonIsLinux)
port := "1234"
dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top")
out, _, err := dockerCmdWithError("run", "-d", "-p", port+":80", "busybox", "top")
if err == nil {
c.Fatalf("Binding on used port must fail")
}
if !strings.Contains(out, "port is already allocated") {
c.Fatalf("Out must be about \"port is already allocated\", got %s", out)
}
}
// https://github.com/docker/docker/issues/12148
func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) {
// TODO Windows. -P is not yet supported
testRequires(c, DaemonIsLinux)
// allocate a dynamic port to discover the most recently assigned port
out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top")
id := strings.TrimSpace(out)
out, _ = dockerCmd(c, "port", id, "80")
strPort := strings.Split(strings.TrimSpace(out), ":")[1]
port, err := strconv.ParseInt(strPort, 10, 64)
if err != nil {
c.Fatalf("invalid port, got: %s, error: %s", strPort, err)
}
// allocate a static port and a dynamic port together, where the static
// port takes the next port in the dynamic port range.
dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", port+1), "busybox", "top")
}
// Regression test for #7792
func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
// TODO Windows: Post TP5. Updated, but Windows does not support nested mounts currently.
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir2)
// Create a nested directory to mount over the parent mount.
fooDir := filepath.Join(tmpDir, "foo")
if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil {
c.Fatalf("failed to mkdir at %s - %s", fooDir, err)
}
if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil {
c.Fatal(err)
}
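// Mount the same host directories at nested container paths; the daemon must order the mounts so all four marker files are visible.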
dockerCmd(c, "run",
"-v", fmt.Sprintf("%s:"+prefix+"/tmp", tmpDir),
"-v", fmt.Sprintf("%s:"+prefix+"/tmp/foo", fooDir),
"-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2", tmpDir2),
"-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2/foo", fooDir),
"busybox:latest", "sh", "-c",
"ls "+prefix+"/tmp/touch-me && ls "+prefix+"/tmp/foo/touch-me && ls "+prefix+"/tmp/tmp2/touch-me && ls "+prefix+"/tmp/tmp2/foo/touch-me")
}
// Regression test for https://github.com/docker/docker/issues/8259
func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) {
// Not applicable on Windows as Windows does not support volumes
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
linkPath := os.TempDir() + "/testlink2"
if err := os.Symlink(tmpDir, linkPath); err != nil {
c.Fatal(err)
}
defer os.RemoveAll(linkPath)
// Create first container
dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test")
// Create second container with same symlinked path
// This will fail if the referenced issue is hit with a "Volume exists" error
dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test")
}
//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container
func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) {
// While Windows supports volumes, it does not support --add-host hence
// this test is not applicable on Windows.
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf")
if !strings.Contains(out, "nameserver 127.0.0.1") {
c.Fatal("/etc volume mount hides /etc/resolv.conf")
}
out, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname")
if !strings.Contains(out, "test123") {
c.Fatal("/etc volume mount hides /etc/hostname")
}
out, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts")
out = strings.Replace(out, "\n", " ", -1)
if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") {
c.Fatal("/etc volume mount hides /etc/hosts")
}
}
func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) {
// TODO Windows (Post TP5). Windows does not support volumes which
// are pre-populated such as is built in the dockerfile used in this test.
testRequires(c, DaemonIsLinux)
if _, err := buildImage("dataimage",
`FROM busybox
RUN mkdir -p /foo
RUN touch /foo/bar`,
true); err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "--name", "test", "-v", "/foo", "busybox")
if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") {
c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out)
}
tmpDir := randomTmpDirPath("docker_test_bind_mount_copy_data", daemonPlatform)
if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") {
c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out)
}
}
func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) {
// just run with unknown image
cmd := exec.Command(dockerBinary, "run", "asdfsg")
stdout := bytes.NewBuffer(nil)
cmd.Stdout = stdout
if err := cmd.Run(); err == nil {
c.Fatal("Run with unknown image should fail")
}
if stdout.Len() != 0 {
c.Fatalf("Stdout contains output from pull: %s", stdout)
}
}
func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
testRequires(c, SameHostDaemon)
prefix, slash := getPrefixAndSlashFromDaemonPlatform()
if _, err := buildImage("run_volumes_clean_paths",
`FROM busybox
VOLUME `+prefix+`/foo/`,
true); err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths")
out, err := inspectMountSourceField("dark_helmet", prefix+slash+"foo"+slash)
if err != errMountNotFound {
c.Fatalf("Found unexpected volume entry for '%s/foo/' in volumes\n%q", prefix, out)
}
out, err = inspectMountSourceField("dark_helmet", prefix+slash+`foo`)
c.Assert(err, check.IsNil)
if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) {
c.Fatalf("Volume was not defined for %s/foo\n%q", prefix, out)
}
out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar"+slash)
if err != errMountNotFound {
c.Fatalf("Found unexpected volume entry for '%s/bar/' in volumes\n%q", prefix, out)
}
out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar")
c.Assert(err, check.IsNil)
if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) {
c.Fatalf("Volume was not defined for %s/bar\n%q", prefix, out)
}
}
// Regression test for #3631
func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) {
// TODO Windows: This should be able to run on Windows if we can find
// alternatives to /dev/zero and /dev/stdout.
testRequires(c, DaemonIsLinux)
cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv")
stdout, err := cont.StdoutPipe()
if err != nil {
c.Fatal(err)
}
if err := cont.Start(); err != nil {
c.Fatal(err)
}
n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil)
if err != nil {
c.Fatal(err)
}
expected := 2 * 1024 * 2000
if n != expected {
c.Fatalf("Expected %d, got %d", expected, n)
}
}
func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) {
// TODO Windows: -P is not currently supported. Also network
// settings are not propagated back.
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top")
id := strings.TrimSpace(out)
portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports")
var ports nat.PortMap
if err := unmarshalJSON([]byte(portstr), &ports); err != nil {
c.Fatal(err)
}
for port, binding := range ports {
portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0])
if portnum < 3000 || portnum > 3003 {
c.Fatalf("Port %d is out of range ", portnum)
}
if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 {
c.Fatalf("Port is not mapped for the port %s", port)
}
}
}
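// portNumFromMapKey is an illustrative sketch, not called by the suite: keys
// in nat.PortMap have the form "3000/tcp", so the numeric part checked by the
// range assertions above is everything before the first "/". The helper name
// is hypothetical.
func portNumFromMapKey(key string) (int, error) {
return strconv.Atoi(strings.Split(key, "/")[0])
}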
func (s *DockerSuite) TestRunExposePort(c *check.C) {
out, _, err := dockerCmdWithError("run", "--expose", "80000", "busybox")
c.Assert(err, checker.NotNil, check.Commentf("--expose with an invalid port should error out"))
c.Assert(out, checker.Contains, "invalid range format for --expose")
}
func (s *DockerSuite) TestRunUnknownCommand(c *check.C) {
out, _, _ := dockerCmdWithStdoutStderr(c, "create", "busybox", "/bin/nada")
cID := strings.TrimSpace(out)
_, _, err := dockerCmdWithError("start", cID)
// Windows and Linux are different here by architectural design. Linux will
// fail to start the container, so an error is expected. Windows will
// successfully start the container, and once started attempt to execute
// the command which will fail.
if daemonPlatform == "windows" {
// Wait for it to exit.
waitExited(cID, 30*time.Second)
c.Assert(err, check.IsNil)
} else {
c.Assert(err, check.NotNil)
}
rc := inspectField(c, cID, "State.ExitCode")
if rc == "0" {
c.Fatalf("ExitCode(%v) cannot be 0", rc)
}
}
func (s *DockerSuite) TestRunModeIpcHost(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
hostIpc, err := os.Readlink("/proc/1/ns/ipc")
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc")
out = strings.Trim(out, "\n")
if hostIpc != out {
c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out)
}
out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc")
out = strings.Trim(out, "\n")
if hostIpc == out {
c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out)
}
}
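// Note on technique: readlink on /proc/<pid>/ns/ipc yields an identifier such
// as "ipc:[4026531839]"; two processes share a namespace exactly when these
// identifiers are equal, which is what the comparison above (and the pid, uts
// and net variants below) relies on.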
func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top")
id := strings.TrimSpace(out)
state := inspectField(c, id, "State.Running")
if state != "true" {
c.Fatal("Container state is 'not running'")
}
pid1 := inspectField(c, id, "State.Pid")
parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1))
if err != nil {
c.Fatal(err)
}
out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc")
out = strings.Trim(out, "\n")
if parentContainerIpc != out {
c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out)
}
catOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test")
if catOutput != "test" {
c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput)
}
// check that /dev/mqueue is actually of mqueue type
grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts")
if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") {
c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput)
}
lsOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue")
lsOutput = strings.Trim(lsOutput, "\n")
if lsOutput != "toto" {
c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput)
}
}
func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "-d", "--ipc", "container:abcd1234", "busybox", "top")
if !strings.Contains(out, "abcd1234") || err == nil {
c.Fatalf("run IPC from a non exists container should with correct error out")
}
}
func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux)
out, _ := dockerCmd(c, "create", "busybox")
id := strings.TrimSpace(out)
out, _, err := dockerCmdWithError("run", fmt.Sprintf("--ipc=container:%s", id), "busybox")
if err == nil {
c.Fatalf("Run container with ipc mode container should fail with non running container: %s\n%s", out, err)
}
}
func (s *DockerSuite) TestRunModePidContainer(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "top")
id := strings.TrimSpace(out)
state := inspectField(c, id, "State.Running")
if state != "true" {
c.Fatal("Container state is 'not running'")
}
pid1 := inspectField(c, id, "State.Pid")
parentContainerPid, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/pid", pid1))
if err != nil {
c.Fatal(err)
}
out, _ = dockerCmd(c, "run", fmt.Sprintf("--pid=container:%s", id), "busybox", "readlink", "/proc/self/ns/pid")
out = strings.Trim(out, "\n")
if parentContainerPid != out {
c.Fatalf("PID different with --pid=container:%s %s != %s\n", id, parentContainerPid, out)
}
}
func (s *DockerSuite) TestRunModePidContainerNotExists(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "-d", "--pid", "container:abcd1234", "busybox", "top")
if !strings.Contains(out, "abcd1234") || err == nil {
c.Fatalf("run PID from a non exists container should with correct error out")
}
}
func (s *DockerSuite) TestRunModePidContainerNotRunning(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux)
out, _ := dockerCmd(c, "create", "busybox")
id := strings.TrimSpace(out)
out, _, err := dockerCmdWithError("run", fmt.Sprintf("--pid=container:%s", id), "busybox")
if err == nil {
c.Fatalf("Run container with pid mode container should fail with non running container: %s\n%s", out, err)
}
}
func (s *DockerSuite) TestRunMountShmMqueueFromHost(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
dockerCmd(c, "run", "-d", "--name", "shmfromhost", "-v", "/dev/shm:/dev/shm", "-v", "/dev/mqueue:/dev/mqueue", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top")
defer os.Remove("/dev/mqueue/toto")
defer os.Remove("/dev/shm/test")
volPath, err := inspectMountSourceField("shmfromhost", "/dev/shm")
c.Assert(err, checker.IsNil)
if volPath != "/dev/shm" {
c.Fatalf("volumePath should have been /dev/shm, was %s", volPath)
}
out, _ := dockerCmd(c, "run", "--name", "ipchost", "--ipc", "host", "busybox", "cat", "/dev/shm/test")
if out != "test" {
c.Fatalf("Output of /dev/shm/test expected test but found: %s", out)
}
// Check that the mq was created
if _, err := os.Stat("/dev/mqueue/toto"); err != nil {
c.Fatalf("Failed to confirm '/dev/mqueue/toto' presence on host: %s", err.Error())
}
}
func (s *DockerSuite) TestContainerNetworkMode(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
id := strings.TrimSpace(out)
c.Assert(waitRun(id), check.IsNil)
pid1 := inspectField(c, id, "State.Pid")
parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
if err != nil {
c.Fatal(err)
}
out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net")
out = strings.Trim(out, "\n")
if parentContainerNet != out {
c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out)
}
}
func (s *DockerSuite) TestRunModePidHost(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
hostPid, err := os.Readlink("/proc/1/ns/pid")
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid")
out = strings.Trim(out, "\n")
if hostPid != out {
c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out)
}
out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid")
out = strings.Trim(out, "\n")
if hostPid == out {
c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out)
}
}
func (s *DockerSuite) TestRunModeUTSHost(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux)
hostUTS, err := os.Readlink("/proc/1/ns/uts")
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts")
out = strings.Trim(out, "\n")
if hostUTS != out {
c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out)
}
out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts")
out = strings.Trim(out, "\n")
if hostUTS == out {
c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out)
}
out, _ = dockerCmdWithFail(c, "run", "-h=name", "--uts=host", "busybox", "ps")
c.Assert(out, checker.Contains, runconfig.ErrConflictUTSHostname.Error())
}
func (s *DockerSuite) TestRunTLSverify(c *check.C) {
// Remote daemons use TLS and this test is not applicable when TLS is required.
testRequires(c, SameHostDaemon)
if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 {
c.Fatalf("Should have worked: %v:\n%v", err, out)
}
// Regardless of whether we specify true or false, we need to make sure
// TLS is turned on if --tlsverify is specified at all.
out, code, err := dockerCmdWithError("--tlsverify=false", "ps")
if err == nil || code == 0 || !strings.Contains(out, "trying to connect") {
c.Fatalf("Should have failed: \net:%v\nout:%v\nerr:%v", code, out, err)
}
out, code, err = dockerCmdWithError("--tlsverify=true", "ps")
if err == nil || code == 0 || !strings.Contains(out, "cert") {
c.Fatalf("Should have failed: \net:%v\nout:%v\nerr:%v", code, out, err)
}
}
func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) {
// TODO Windows. Once moved to libnetwork/CNM, this may be able to be
// re-instated.
testRequires(c, DaemonIsLinux)
// first find the allocator's current position
out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top")
id := strings.TrimSpace(out)
out, _ = dockerCmd(c, "port", id)
out = strings.TrimSpace(out)
if out == "" {
c.Fatal("docker port command output is empty")
}
out = strings.Split(out, ":")[1]
lastPort, err := strconv.Atoi(out)
if err != nil {
c.Fatal(err)
}
port := lastPort + 1
l, err := net.Listen("tcp", ":"+strconv.Itoa(port))
if err != nil {
c.Fatal(err)
}
defer l.Close()
out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top")
id = strings.TrimSpace(out)
dockerCmd(c, "port", id)
}
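// Note on technique: the test above occupies the allocator's next port with
// net.Listen so the second "docker run -p :80" is forced to skip the in-use
// port; "docker port" succeeding afterwards shows the allocator moved past it.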
func (s *DockerSuite) TestRunTTYWithPipe(c *check.C) {
errChan := make(chan error)
go func() {
defer close(errChan)
cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true")
if _, err := cmd.StdinPipe(); err != nil {
errChan <- err
return
}
expected := "the input device is not a TTY"
if runtime.GOOS == "windows" {
expected += ". If you are using mintty, try prefixing the command with 'winpty'"
}
if out, _, err := runCommandWithOutput(cmd); err == nil {
errChan <- fmt.Errorf("run should have failed")
return
} else if !strings.Contains(out, expected) {
errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected)
return
}
}()
select {
case err := <-errChan:
c.Assert(err, check.IsNil)
case <-time.After(30 * time.Second):
c.Fatal("container is running but should have failed")
}
}
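// runWithTimeoutExample is a minimal sketch of the channel/select timeout
// pattern used by TestRunTTYWithPipe above; it is not called by the suite,
// and the 30-second bound mirrors the test only as an illustrative assumption.
func runWithTimeoutExample(work func() error) error {
// Buffered so the goroutine can still send and exit even after a timeout.
errChan := make(chan error, 1)
go func() { errChan <- work() }()
select {
case err := <-errChan:
return err // work finished (possibly with an error)
case <-time.After(30 * time.Second):
return fmt.Errorf("timed out waiting for work to finish")
}
}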
func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) {
addr := "00:16:3E:08:00:50"
args := []string{"run", "--mac-address", addr}
expected := addr
if daemonPlatform != "windows" {
args = append(args, "busybox", "ifconfig")
} else {
args = append(args, WindowsBaseImage, "ipconfig", "/all")
expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1)
}
if out, _ := dockerCmd(c, args...); !strings.Contains(out, expected) {
c.Fatalf("Output should have contained %q: %s", expected, out)
}
}
func (s *DockerSuite) TestRunNetHost(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
hostNet, err := os.Readlink("/proc/1/ns/net")
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net")
out = strings.Trim(out, "\n")
if hostNet != out {
c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out)
}
out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net")
out = strings.Trim(out, "\n")
if hostNet == out {
c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out)
}
}
func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) {
// TODO Windows. As Windows networking evolves and converges towards
// CNM, this test may be possible to enable on Windows.
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true")
dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true")
}
func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
hostNet, err := os.Readlink("/proc/1/ns/net")
if err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top")
out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net")
out = strings.Trim(out, "\n")
if hostNet != out {
c.Fatalf("Container should have host network namespace")
}
}
func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) {
// TODO Windows. This may be possible to enable in the future. However,
// Windows does not currently support --expose, or populate the network
// settings seen through inspect.
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top")
id := strings.TrimSpace(out)
portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports")
var ports nat.PortMap
err := unmarshalJSON([]byte(portstr), &ports)
c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal: %v", portstr))
for port, binding := range ports {
portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0])
if portnum < 3000 || portnum > 3003 {
c.Fatalf("Port %d is out of range ", portnum)
}
if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 {
c.Fatal("Port is not mapped for the port "+port, out)
}
}
}
func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) {
runSleepingContainer(c, "--name=testrunsetdefaultrestartpolicy")
out := inspectField(c, "testrunsetdefaultrestartpolicy", "HostConfig.RestartPolicy.Name")
if out != "no" {
c.Fatalf("Set default restart policy failed")
}
}
func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) {
out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false")
timeout := 10 * time.Second
if daemonPlatform == "windows" {
timeout = 120 * time.Second
}
id := strings.TrimSpace(string(out))
if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil {
c.Fatal(err)
}
count := inspectField(c, id, "RestartCount")
if count != "3" {
c.Fatalf("Container was restarted %s times, expected %d", count, 3)
}
MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")
if MaximumRetryCount != "3" {
c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3")
}
}
func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) {
dockerCmd(c, "run", "--rm", "busybox", "touch", "/file")
}
func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) {
// Not applicable on Windows which does not support --read-only
testRequires(c, DaemonIsLinux)
testReadOnlyFile(c, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me")
}
func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) {
// Not applicable on Windows due to use of Unix specific functionality, plus
// the use of --read-only which is not supported.
// --read-only + userns has remount issues
testRequires(c, DaemonIsLinux, NotUserNamespace)
// Ensure we have not broken writing /dev/pts
out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount")
if status != 0 {
c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.")
}
expected := "type devpts (rw,"
if !strings.Contains(string(out), expected) {
c.Fatalf("expected output to contain %s but contains %s", expected, out)
}
}
func testReadOnlyFile(c *check.C, filenames ...string) {
// Not applicable on Windows which does not support --read-only
testRequires(c, DaemonIsLinux, NotUserNamespace)
touch := "touch " + strings.Join(filenames, " ")
out, _, err := dockerCmdWithError("run", "--read-only", "--rm", "busybox", "sh", "-c", touch)
c.Assert(err, checker.NotNil)
for _, f := range filenames {
expected := "touch: " + f + ": Read-only file system"
c.Assert(out, checker.Contains, expected)
}
out, _, err = dockerCmdWithError("run", "--read-only", "--privileged", "--rm", "busybox", "sh", "-c", touch)
c.Assert(err, checker.NotNil)
for _, f := range filenames {
expected := "touch: " + f + ": Read-only file system"
c.Assert(out, checker.Contains, expected)
}
}
func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) {
// Not applicable on Windows which does not support --link
// --read-only + userns has remount issues
testRequires(c, DaemonIsLinux, NotUserNamespace)
dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top")
out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts")
if !strings.Contains(string(out), "testlinked") {
c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled")
}
}
func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDNSFlag(c *check.C) {
// Not applicable on Windows which does not support either --read-only or --dns.
// --read-only + userns has remount issues
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf")
if !strings.Contains(string(out), "1.1.1.1") {
c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used")
}
}
func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) {
// Not applicable on Windows which does not support --read-only
// --read-only + userns has remount issues
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts")
if !strings.Contains(string(out), "testreadonly") {
c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used")
}
}
func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) {
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
runSleepingContainer(c, "--name=voltest", "-v", prefix+"/foo")
runSleepingContainer(c, "--name=restarter", "--volumes-from", "voltest")
// Remove the main volume container and restart the consuming container
dockerCmd(c, "rm", "-f", "voltest")
// This should not fail since the volumes-from were already applied
dockerCmd(c, "restart", "restarter")
}
// run container with --rm should remove container if exit code != 0
func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) {
name := "flowers"
out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "ls", "/notexists")
if err == nil {
c.Fatal("Expected docker run to fail", out, err)
}
out, err = getAllContainers()
if err != nil {
c.Fatal(out, err)
}
if out != "" {
c.Fatal("Expected not to have containers", out)
}
}
func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) {
name := "sparkles"
out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "commandNotFound")
if err == nil {
c.Fatal("Expected docker run to fail", out, err)
}
out, err = getAllContainers()
if err != nil {
c.Fatal(out, err)
}
if out != "" {
c.Fatal("Expected not to have containers", out)
}
}
func (s *DockerSuite) TestRunPidHostWithChildIsKillable(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux, NotUserNamespace)
name := "ibuildthecloud"
dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi")
c.Assert(waitRun(name), check.IsNil)
errchan := make(chan error)
go func() {
if out, _, err := dockerCmdWithError("kill", name); err != nil {
errchan <- fmt.Errorf("%v:\n%s", err, out)
}
close(errchan)
}()
select {
case err := <-errchan:
c.Assert(err, check.IsNil)
case <-time.After(5 * time.Second):
c.Fatal("Kill container timed out")
}
}
func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) {
// TODO Windows. This may be possible to enable once Windows supports
// memory limits on containers
testRequires(c, DaemonIsLinux)
// this memory limit is 1 byte less than the min, which is 4MB
// https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22
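// 4MB = 4 * 1024 * 1024 = 4194304 bytes, so 4194303 is one byte below the minimum.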
out, _, err := dockerCmdWithError("run", "-m", "4194303", "busybox")
if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") {
c.Fatalf("expected run to fail when using too low a memory limit: %q", out)
}
}
func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
_, code, err := dockerCmdWithError("run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version")
if err == nil || code == 0 {
c.Fatal("standard container should not be able to write to /proc/asound")
}
}
func (s *DockerSuite) TestRunReadProcTimer(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/timer_stats")
if code != 0 {
return
}
if err != nil {
c.Fatal(err)
}
if strings.Trim(out, "\n ") != "" {
c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out)
}
}
func (s *DockerSuite) TestRunReadProcLatency(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
// some kernels don't have this configured so skip the test if this file is not found
// on the host running the tests.
if _, err := os.Stat("/proc/latency_stats"); err != nil {
c.Skip("kernel doesn't have latency_stats configured")
return
}
out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/latency_stats")
if code != 0 {
return
}
if err != nil {
c.Fatal(err)
}
if strings.Trim(out, "\n ") != "" {
c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out)
}
}
func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace)
testReadPaths := []string{
"/proc/latency_stats",
"/proc/timer_stats",
"/proc/kcore",
}
for i, filePath := range testReadPaths {
name := fmt.Sprintf("procsieve-%d", i)
shellCmd := fmt.Sprintf("exec 3<%s", filePath)
out, exitCode, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
// The open must be denied; a zero exit code means the AppArmor policy
// failed to block the read.
if exitCode == 0 && err == nil {
c.Fatalf("Open FD for read of %s should have failed with permission denied, got: %s", filePath, out)
}
}
}
func (s *DockerSuite) TestMountIntoProc(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
_, code, err := dockerCmdWithError("run", "-v", "/proc//sys", "busybox", "true")
if err == nil || code == 0 {
c.Fatal("container should not be able to mount into /proc")
}
}
func (s *DockerSuite) TestMountIntoSys(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
testRequires(c, NotUserNamespace)
dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true")
}
func (s *DockerSuite) TestRunUnshareProc(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace)
// In this test goroutines are used to run test cases in parallel to prevent the test from taking a long time to run.
errChan := make(chan error)
go func() {
name := "acidburn"
out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount")
if err == nil ||
!(strings.Contains(strings.ToLower(out), "permission denied") ||
strings.Contains(strings.ToLower(out), "operation not permitted")) {
errChan <- fmt.Errorf("unshare with --mount-proc should have failed with 'permission denied' or 'operation not permitted', got: %s, %v", out, err)
} else {
errChan <- nil
}
}()
go func() {
name := "cereal"
out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc")
if err == nil ||
!(strings.Contains(strings.ToLower(out), "mount: cannot mount none") ||
strings.Contains(strings.ToLower(out), "permission denied") ||
strings.Contains(strings.ToLower(out), "operation not permitted")) {
errChan <- fmt.Errorf("unshare and mount of /proc should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err)
} else {
errChan <- nil
}
}()
/* Ensure still fails if running privileged with the default policy */
go func() {
name := "crashoverride"
out, _, err := dockerCmdWithError("run", "--privileged", "--security-opt", "seccomp=unconfined", "--security-opt", "apparmor=docker-default", "--name", name, "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc")
if err == nil ||
!(strings.Contains(strings.ToLower(out), "mount: cannot mount none") ||
strings.Contains(strings.ToLower(out), "permission denied") ||
strings.Contains(strings.ToLower(out), "operation not permitted")) {
errChan <- fmt.Errorf("privileged unshare with apparmor should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err)
} else {
errChan <- nil
}
}()
for i := 0; i < 3; i++ {
err := <-errChan
if err != nil {
c.Fatal(err)
}
}
}
func (s *DockerSuite) TestRunPublishPort(c *check.C) {
// TODO Windows: This may be possible once Windows moves to libnetwork and CNM
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top")
out, _ := dockerCmd(c, "port", "test")
out = strings.Trim(out, "\r\n")
if out != "" {
c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out)
}
}
// Issue #10184.
func (s *DockerSuite) TestDevicePermissions(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
const permissions = "crw-rw-rw-"
out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse")
if status != 0 {
c.Fatalf("expected status 0, got %d", status)
}
if !strings.HasPrefix(out, permissions) {
c.Fatalf("output should begin with %q, got %q", permissions, out)
}
}
func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok")
if actual := strings.Trim(out, "\r\n"); actual != "ok" {
c.Fatalf("expected output ok received %s", actual)
}
}
// https://github.com/docker/docker/pull/14498
func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) {
prefix, slash := getPrefixAndSlashFromDaemonPlatform()
dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true")
// TODO Windows: Temporary check - remove once TP5 support is dropped
if daemonPlatform != "windows" || windowsDaemonKV >= 14350 {
dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true")
}
dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true")
if daemonPlatform != "windows" {
mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test")
c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point"))
if mRO.RW {
c.Fatalf("Expected RO volume was RW")
}
}
mRW, err := inspectMountPoint("test-volumes-2", prefix+slash+"test")
c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point"))
if !mRW.RW {
c.Fatalf("Expected RW volume was RO")
}
}
func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace)
testWritePaths := []string{
/* modprobe and core_pattern should both be denied by generic
* policy of denials for /proc/sys/kernel. These files have been
* picked to be checked as they are particularly sensitive to writes */
"/proc/sys/kernel/modprobe",
"/proc/sys/kernel/core_pattern",
"/proc/sysrq-trigger",
"/proc/kcore",
}
for i, filePath := range testWritePaths {
name := fmt.Sprintf("writeprocsieve-%d", i)
shellCmd := fmt.Sprintf("exec 3>%s", filePath)
out, code, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
// The open must be denied; a zero exit code means the AppArmor policy
// failed to block the write.
if code == 0 && err == nil {
c.Fatalf("Open FD for write of %s should have failed with permission denied, got: %s", filePath, out)
}
}
}
func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, SameHostDaemon, DaemonIsLinux)
expected := "test123"
filename := createTmpFile(c, expected)
defer os.Remove(filename)
nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}
for i := range nwfiles {
actual, _ := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "busybox", "cat", nwfiles[i])
if actual != expected {
c.Fatalf("expected %s be: %q, but was: %q", nwfiles[i], expected, actual)
}
}
}
func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, SameHostDaemon, DaemonIsLinux)
filename := createTmpFile(c, "test123")
defer os.Remove(filename)
nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}
for i := range nwfiles {
_, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "busybox", "touch", nwfiles[i])
if err == nil || exitCode == 0 {
c.Fatalf("run should fail because bind mount of %s is ro: exit code %d", nwfiles[i], exitCode)
}
}
}
func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
// --read-only + userns has remount issues
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
filename := createTmpFile(c, "test123")
defer os.Remove(filename)
nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}
for i := range nwfiles {
_, exitCode := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "--read-only", "busybox", "touch", nwfiles[i])
if exitCode != 0 {
c.Fatalf("run should not fail because %s is mounted writable on read-only root filesystem: exit code %d", nwfiles[i], exitCode)
}
}
for i := range nwfiles {
_, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "--read-only", "busybox", "touch", nwfiles[i])
if err == nil || exitCode == 0 {
c.Fatalf("run should fail because %s is mounted read-only on read-only root filesystem: exit code %d", nwfiles[i], exitCode)
}
}
}
func (s *DockerTrustSuite) TestTrustedRun(c *check.C) {
// Windows does not support this functionality
testRequires(c, DaemonIsLinux)
repoName := s.setupTrustedImage(c, "trusted-run")
// Try run
runCmd := exec.Command(dockerBinary, "run", repoName)
s.trustedCmd(runCmd)
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("Error running trusted run: %s\n%s\n", err, out)
}
if !strings.Contains(string(out), "Tagging") {
c.Fatalf("Missing expected output on trusted push:\n%s", out)
}
dockerCmd(c, "rmi", repoName)
// Try untrusted run to ensure we pushed the tag to the registry
runCmd = exec.Command(dockerBinary, "run", "--disable-content-trust=true", repoName)
s.trustedCmd(runCmd)
out, _, err = runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("Error running trusted run: %s\n%s", err, out)
}
if !strings.Contains(string(out), "Status: Downloaded") {
c.Fatalf("Missing expected output on trusted run with --disable-content-trust:\n%s", out)
}
}
func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) {
// Windows does not support this functionality
testRequires(c, DaemonIsLinux)
repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL)
// tag the image and upload it to the private registry
dockerCmd(c, "tag", "busybox", repoName)
dockerCmd(c, "push", repoName)
dockerCmd(c, "rmi", repoName)
// Try trusted run on untrusted tag
runCmd := exec.Command(dockerBinary, "run", repoName)
s.trustedCmd(runCmd)
out, _, err := runCommandWithOutput(runCmd)
if err == nil {
c.Fatalf("Error expected when running trusted run with:\n%s", out)
}
if !strings.Contains(string(out), "does not have trust data for") {
c.Fatalf("Missing expected output on trusted run:\n%s", out)
}
}
func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) {
// Windows does not support this functionality
testRequires(c, DaemonIsLinux)
c.Skip("Currently changes system time, causing instability")
repoName := s.setupTrustedImage(c, "trusted-run-expired")
// Certificates are valid for 10 years
elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11)
runAtDifferentDate(elevenYearsFromNow, func() {
// Try run
runCmd := exec.Command(dockerBinary, "run", repoName)
s.trustedCmd(runCmd)
out, _, err := runCommandWithOutput(runCmd)
if err == nil {
c.Fatalf("Error running trusted run in the distant future: %s\n%s", err, out)
}
if !strings.Contains(string(out), "could not validate the path to a trusted root") {
c.Fatalf("Missing expected output on trusted run in the distant future:\n%s", out)
}
})
runAtDifferentDate(elevenYearsFromNow, func() {
// Try run
runCmd := exec.Command(dockerBinary, "run", "--disable-content-trust", repoName)
s.trustedCmd(runCmd)
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("Error running untrusted run in the distant future: %s\n%s", err, out)
}
if !strings.Contains(string(out), "Status: Downloaded") {
c.Fatalf("Missing expected output on untrusted run in the distant future:\n%s", out)
}
})
}
func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) {
// Windows does not support this functionality
testRequires(c, DaemonIsLinux)
repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL)
evilLocalConfigDir, err := ioutil.TempDir("", "evilrun-local-config-dir")
if err != nil {
c.Fatalf("Failed to create local temp dir")
}
// tag the image and upload it to the private registry
dockerCmd(c, "tag", "busybox", repoName)
pushCmd := exec.Command(dockerBinary, "push", repoName)
s.trustedCmd(pushCmd)
out, _, err := runCommandWithOutput(pushCmd)
if err != nil {
c.Fatalf("Error running trusted push: %s\n%s", err, out)
}
if !strings.Contains(string(out), "Signing and pushing trust metadata") {
c.Fatalf("Missing expected output on trusted push:\n%s", out)
}
dockerCmd(c, "rmi", repoName)
// Try run
runCmd := exec.Command(dockerBinary, "run", repoName)
s.trustedCmd(runCmd)
out, _, err = runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("Error running trusted run: %s\n%s", err, out)
}
if !strings.Contains(string(out), "Tagging") {
c.Fatalf("Missing expected output on trusted push:\n%s", out)
}
dockerCmd(c, "rmi", repoName)
// Kill the notary server, start a new "evil" one.
s.not.Close()
s.not, err = newTestNotary(c)
if err != nil {
c.Fatalf("Restarting notary server failed.")
}
// In order to make an evil server, let's re-init a client (with a different trust dir) and push new data.
// tag an image and upload it to the private registry
dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName)
// Push up to the new server
pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName)
s.trustedCmd(pushCmd)
out, _, err = runCommandWithOutput(pushCmd)
if err != nil {
c.Fatalf("Error running trusted push: %s\n%s", err, out)
}
if !strings.Contains(string(out), "Signing and pushing trust metadata") {
c.Fatalf("Missing expected output on trusted push:\n%s", out)
}
// Now, try running with the original client from this new trust server. This should fall back to our cached timestamp and metadata.
runCmd = exec.Command(dockerBinary, "run", repoName)
s.trustedCmd(runCmd)
out, _, err = runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("Error falling back to cached trust data: %s\n%s", err, out)
}
if !strings.Contains(string(out), "Error while downloading remote metadata, using cached timestamp") {
c.Fatalf("Missing expected output on trusted push:\n%s", out)
}
}
func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux, SameHostDaemon)
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
id := strings.TrimSpace(out)
c.Assert(waitRun(id), check.IsNil)
pid1 := inspectField(c, id, "State.Pid")
_, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotGCCGO)
// Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace
// itself, but pid>1 should not be able to trace pid1.
_, exitCode, _ := dockerCmdWithError("run", "busybox", "sh", "-c", "sh -c readlink /proc/1/ns/net")
if exitCode == 0 {
c.Fatal("ptrace was not successfully restricted by AppArmor")
}
}
func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux, SameHostDaemon, Apparmor)
_, exitCode, _ := dockerCmdWithError("run", "busybox", "readlink", "/proc/1/ns/net")
if exitCode != 0 {
c.Fatal("ptrace of self failed.")
}
}
func (s *DockerSuite) TestAppArmorDeniesChmodProc(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotUserNamespace)
_, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "744", "/proc/cpuinfo")
if exitCode == 0 {
// If our test failed, attempt to repair the host system...
_, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "444", "/proc/cpuinfo")
if exitCode == 0 {
c.Fatal("AppArmor was unsuccessful in prohibiting chmod of /proc/* files.")
}
}
}
func (s *DockerSuite) TestRunCapAddSYSTIME(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=SYS_TIME", "busybox", "sh", "-c", "grep ^CapEff /proc/self/status | sed 's/^CapEff:\t//' | grep ^0000000002000000$")
}
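// Note on the mask above: CAP_SYS_TIME is capability bit 25, and
// 1 << 25 == 0x2000000, so a CapEff value of 0000000002000000 means SYS_TIME
// is the only effective capability left after --cap-drop=ALL.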
// If container creation fails, 'docker run' should clean up the partially created container
func (s *DockerSuite) TestRunCreateContainerFailedCleanUp(c *check.C) {
// TODO Windows. This may be possible to enable once link is supported
testRequires(c, DaemonIsLinux)
name := "unique_name"
_, _, err := dockerCmdWithError("run", "--name", name, "--link", "nothing:nothing", "busybox")
c.Assert(err, check.NotNil, check.Commentf("Expected docker run to fail!"))
containerID, err := inspectFieldWithError(name, "Id")
c.Assert(err, checker.NotNil, check.Commentf("Expected not to have this container: %s!", containerID))
c.Assert(containerID, check.Equals, "", check.Commentf("Expected not to have this container: %s!", containerID))
}
func (s *DockerSuite) TestRunNamedVolume(c *check.C) {
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "--name=test", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "echo hello > "+prefix+"/foo/bar")
out, _ := dockerCmd(c, "run", "--volumes-from", "test", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar")
c.Assert(strings.TrimSpace(out), check.Equals, "hello")
out, _ = dockerCmd(c, "run", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar")
c.Assert(strings.TrimSpace(out), check.Equals, "hello")
}
func (s *DockerSuite) TestRunWithUlimits(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n")
ul := strings.TrimSpace(out)
if ul != "42" {
c.Fatalf("expected `ulimit -n` to be 42, got %s", ul)
}
}
func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
cgroupParent := "test"
name := "cgroup-test"
out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
if err != nil {
c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
cgroupPaths := parseCgroupPaths(string(out))
if len(cgroupPaths) == 0 {
c.Fatalf("unexpected output - %q", string(out))
}
id, err := getIDByName(name)
c.Assert(err, check.IsNil)
expectedCgroup := path.Join(cgroupParent, id)
found := false
for _, path := range cgroupPaths {
if strings.HasSuffix(path, expectedCgroup) {
found = true
break
}
}
if !found {
c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
}
}
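// cgroupPathFromLine is an illustrative sketch, not called by the suite: each
// line of /proc/self/cgroup has the form "<hierarchy-id>:<controllers>:<path>"
// (e.g. "7:memory:/test/<id>"), and the third field is the path that the
// suffix checks above and below compare against. parseCgroupPaths is assumed
// to extract something equivalent.
func cgroupPathFromLine(line string) string {
parts := strings.SplitN(line, ":", 3)
if len(parts) != 3 {
return ""
}
return parts[2]
}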
func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
cgroupParent := "/cgroup-parent/test"
name := "cgroup-test"
out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
if err != nil {
c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
cgroupPaths := parseCgroupPaths(string(out))
if len(cgroupPaths) == 0 {
c.Fatalf("unexpected output - %q", string(out))
}
id, err := getIDByName(name)
c.Assert(err, check.IsNil)
expectedCgroup := path.Join(cgroupParent, id)
found := false
for _, path := range cgroupPaths {
if strings.HasSuffix(path, expectedCgroup) {
found = true
break
}
}
if !found {
c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
}
}
// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /.
func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
cgroupParent := "../../../../../../../../SHOULD_NOT_EXIST"
cleanCgroupParent := "SHOULD_NOT_EXIST"
name := "cgroup-invalid-test"
out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
if err != nil {
// XXX: This may include a daemon crash.
c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
// We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue.
if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) {
c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!")
}
cgroupPaths := parseCgroupPaths(string(out))
if len(cgroupPaths) == 0 {
c.Fatalf("unexpected output - %q", string(out))
}
id, err := getIDByName(name)
c.Assert(err, check.IsNil)
expectedCgroup := path.Join(cleanCgroupParent, id)
found := false
for _, path := range cgroupPaths {
if strings.HasSuffix(path, expectedCgroup) {
found = true
break
}
}
if !found {
c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
}
}
// TestRunAbsoluteInvalidCgroupParent checks that a specially-crafted absolute cgroup parent doesn't cause Docker to crash or start modifying /.
func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
testRequires(c, DaemonIsLinux)
cgroupParent := "/../../../../../../../../SHOULD_NOT_EXIST"
cleanCgroupParent := "/SHOULD_NOT_EXIST"
name := "cgroup-absolute-invalid-test"
out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
if err != nil {
// XXX: This may include a daemon crash.
c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
// We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue.
if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) {
c.Fatalf("SECURITY: --cgroup-parent with /../../ garbage paths cause files to be created in the host (this is bad) !!")
}
cgroupPaths := parseCgroupPaths(string(out))
if len(cgroupPaths) == 0 {
c.Fatalf("unexpected output - %q", string(out))
}
id, err := getIDByName(name)
c.Assert(err, check.IsNil)
expectedCgroup := path.Join(cleanCgroupParent, id)
found := false
for _, path := range cgroupPaths {
if strings.HasSuffix(path, expectedCgroup) {
found = true
break
}
}
if !found {
c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
}
}
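// exampleCleanCgroupParent is an illustrative sketch, not called by the
// suite: it shows why the two traversal tests above expect the sanitized
// values they do. For an absolute path, Go's path.Clean collapses ".."
// components at the root, and the daemon is assumed to clean the requested
// cgroup parent in an equivalent way, so it cannot be tricked into writing
// outside the cgroup hierarchy.
func exampleCleanCgroupParent() string {
// ".." cannot climb above "/", so this cleans to "/SHOULD_NOT_EXIST".
return path.Clean("/../../../../../../../../SHOULD_NOT_EXIST")
}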
func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
// --read-only + userns has remount issues
testRequires(c, DaemonIsLinux, NotUserNamespace)
filename := "/sys/fs/cgroup/devices/test123"
out, _, err := dockerCmdWithError("run", "busybox", "touch", filename)
if err == nil {
c.Fatal("expected cgroup mount point to be read-only, touch file should fail")
}
expected := "Read-only file system"
if !strings.Contains(out, expected) {
c.Fatalf("expected output from failure to contain %s but contains %s", expected, out)
}
}
func (s *DockerSuite) TestRunContainerNetworkModeToSelf(c *check.C) {
// Not applicable on Windows which does not support --net=container
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "--name=me", "--net=container:me", "busybox", "true")
if err == nil || !strings.Contains(out, "cannot join own network") {
c.Fatalf("using container net mode to self should result in an error\nerr: %q\nout: %s", err, out)
}
}
func (s *DockerSuite) TestRunContainerNetModeWithDNSMacHosts(c *check.C) {
// Not applicable on Windows which does not support --net=container
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "-d", "--name", "parent", "busybox", "top")
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
out, _, err = dockerCmdWithError("run", "--dns", "1.2.3.4", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkAndDNS.Error()) {
c.Fatalf("run --net=container with --dns should error out")
}
out, _, err = dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictContainerNetworkAndMac.Error()) {
c.Fatalf("run --net=container with --mac-address should error out")
}
out, _, err = dockerCmdWithError("run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkHosts.Error()) {
c.Fatalf("run --net=container with --add-host should error out")
}
}
func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) {
// Not applicable on Windows which does not support --net=container
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top")
out, _, err := dockerCmdWithError("run", "-p", "5000:5000", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) {
c.Fatalf("run --net=container with -p should error out")
}
out, _, err = dockerCmdWithError("run", "-P", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) {
c.Fatalf("run --net=container with -P should error out")
}
out, _, err = dockerCmdWithError("run", "--expose", "5000", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkExposePorts.Error()) {
c.Fatalf("run --net=container with --expose should error out")
}
}
func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) {
// Not applicable on Windows which does not support --net=container or --link
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "--name", "test", "-d", "busybox", "top")
dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top")
dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top")
dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top")
dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top")
}
func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) {
// TODO Windows: This may be possible to convert.
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up")
var (
count = 0
parts = strings.Split(out, "\n")
)
for _, l := range parts {
if l != "" {
count++
}
}
if count != 1 {
c.Fatalf("Wrong interface count in container %d", count)
}
if !strings.HasPrefix(out, "1: lo") {
c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out)
}
}
// Issue #4681
func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) {
if daemonPlatform == "windows" {
dockerCmd(c, "run", "--net=none", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1")
} else {
dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1")
}
}
func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) {
// Windows does not support --net=container
testRequires(c, DaemonIsLinux, ExecSupport)
dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top")
out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname")
out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname")
if out1 != out {
c.Fatal("containers with shared net namespace should have same hostname")
}
}
func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) {
// TODO Windows: Network settings are not currently propagated. This may
// be resolved in the future with the move to libnetwork and CNM.
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top")
id := strings.TrimSpace(out)
res := inspectField(c, id, "NetworkSettings.Networks.none.IPAddress")
if res != "" {
c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res)
}
}
func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) {
// Not applicable as Windows does not support --net=host
testRequires(c, DaemonIsLinux, NotUserNamespace)
dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top")
dockerCmd(c, "stop", "first")
dockerCmd(c, "stop", "second")
}
func (s *DockerSuite) TestContainersInUserDefinedNetwork(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork")
dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-t", "--net=testnetwork", "--name=second", "busybox", "ping", "-c", "1", "first")
}
func (s *DockerSuite) TestContainersInMultipleNetworks(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
// Create 2 networks using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
// Run and connect containers to testnetwork1
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Check connectivity between containers in testnetwork1
dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
// Connect containers to testnetwork2
dockerCmd(c, "network", "connect", "testnetwork2", "first")
dockerCmd(c, "network", "connect", "testnetwork2", "second")
// Check connectivity between containers
dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
}
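// Note on addressing used above: on user-defined networks the embedded DNS
// server resolves both the bare container name ("second") and the
// network-scoped form ("second.testnetwork1"), which is what lets these
// tests check connectivity per network.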
func (s *DockerSuite) TestContainersNetworkIsolation(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
// Create 2 networks using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
// Run 1 container in testnetwork1 and another in testnetwork2
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-d", "--net=testnetwork2", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Check isolation between containers: ping must fail
_, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
c.Assert(err, check.NotNil)
// Connect first container to testnetwork2
dockerCmd(c, "network", "connect", "testnetwork2", "first")
// ping must succeed now
_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
c.Assert(err, check.IsNil)
// Disconnect first container from testnetwork2
dockerCmd(c, "network", "disconnect", "testnetwork2", "first")
// ping must fail again
_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
c.Assert(err, check.NotNil)
}
func (s *DockerSuite) TestNetworkRmWithActiveContainers(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace)
// Create a network using the bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
// Run and connect containers to testnetwork1
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Network delete with active containers must fail
_, _, err := dockerCmdWithError("network", "rm", "testnetwork1")
c.Assert(err, check.NotNil)
dockerCmd(c, "stop", "first")
_, _, err = dockerCmdWithError("network", "rm", "testnetwork1")
c.Assert(err, check.NotNil)
}
func (s *DockerSuite) TestContainerRestartInMultipleNetworks(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
// Create 2 networks using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
// Run and connect containers to testnetwork1
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Check connectivity between containers in testnetwork1
dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
// Connect containers to testnetwork2
dockerCmd(c, "network", "connect", "testnetwork2", "first")
dockerCmd(c, "network", "connect", "testnetwork2", "second")
// Check connectivity between containers over testnetwork2
dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
// Stop second container and test ping failures on both networks
dockerCmd(c, "stop", "second")
_, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork1")
c.Assert(err, check.NotNil)
_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork2")
c.Assert(err, check.NotNil)
// Start second container and connectivity must be restored on both networks
dockerCmd(c, "start", "second")
dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
}
func (s *DockerSuite) TestContainerWithConflictingHostNetworks(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace)
// Run a container with --net=host
dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
// Create a network using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
// Connecting to the user defined network must fail
_, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first")
c.Assert(err, check.NotNil)
}
func (s *DockerSuite) TestContainerWithConflictingSharedNetwork(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
// Run second container in first container's network namespace
dockerCmd(c, "run", "-d", "--net=container:first", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Create a network using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
// Connecting to the user defined network must fail
out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "second")
c.Assert(err, check.NotNil)
c.Assert(out, checker.Contains, runconfig.ErrConflictSharedNetwork.Error())
}
func (s *DockerSuite) TestContainerWithConflictingNoneNetwork(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--net=none", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
// Create a network using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
// Connecting to the user defined network must fail
out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first")
c.Assert(err, check.NotNil)
c.Assert(out, checker.Contains, runconfig.ErrConflictNoNetwork.Error())
// create a container connected to testnetwork1
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Connect the second container to the none network; it must fail as well
_, _, err = dockerCmdWithError("network", "connect", "none", "second")
c.Assert(err, check.NotNil)
}
// #11957 - stdin with no tty does not exit if stdin is not closed even though container exited
func (s *DockerSuite) TestRunStdinBlockedAfterContainerExit(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "-i", "--name=test", "busybox", "true")
in, err := cmd.StdinPipe()
c.Assert(err, check.IsNil)
defer in.Close()
stdout := bytes.NewBuffer(nil)
cmd.Stdout = stdout
cmd.Stderr = stdout
c.Assert(cmd.Start(), check.IsNil)
waitChan := make(chan error)
go func() {
waitChan <- cmd.Wait()
}()
select {
case err := <-waitChan:
c.Assert(err, check.IsNil, check.Commentf(stdout.String()))
case <-time.After(30 * time.Second):
c.Fatal("timeout waiting for command to exit")
}
}
func (s *DockerSuite) TestRunWrongCpusetCpusFlagValue(c *check.C) {
// TODO Windows: This needs validation (error out) in the daemon.
testRequires(c, DaemonIsLinux)
out, exitCode, err := dockerCmdWithError("run", "--cpuset-cpus", "1-10,11--", "busybox", "true")
c.Assert(err, check.NotNil)
expected := "Error response from daemon: Invalid value 1-10,11-- for cpuset cpus.\n"
if !(strings.Contains(out, expected) || exitCode == 125) {
c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode)
}
}
func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) {
// TODO Windows: This needs validation (error out) in the daemon.
testRequires(c, DaemonIsLinux)
out, exitCode, err := dockerCmdWithError("run", "--cpuset-mems", "1-42--", "busybox", "true")
c.Assert(err, check.NotNil)
expected := "Error response from daemon: Invalid value 1-42-- for cpuset mems.\n"
if !(strings.Contains(out, expected) || exitCode == 125) {
c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode)
}
}
// TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127
func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) {
name := "testNonExecutableCmd"
runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "foo")
_, exit, _ := runCommandWithOutput(runCmd)
stateExitCode := findContainerExitCode(c, name)
if !(exit == 127 && strings.Contains(stateExitCode, "127")) {
c.Fatalf("Run non-executable command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode)
}
}
// TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127.
func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) {
name := "testNonExistingCmd"
runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/bin/foo")
_, exit, _ := runCommandWithOutput(runCmd)
stateExitCode := findContainerExitCode(c, name)
if !(exit == 127 && strings.Contains(stateExitCode, "127")) {
c.Fatalf("Run non-existing command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode)
}
}
// TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or
// 127 on Windows. The difference is that on Windows the container must be started
// first, as that's when the check is made (and yes, that is by design...)
func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) {
expected := 126
if daemonPlatform == "windows" {
expected = 127
}
name := "testCmdCannotBeInvoked"
runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/etc")
_, exit, _ := runCommandWithOutput(runCmd)
stateExitCode := findContainerExitCode(c, name)
if !(exit == expected && strings.Contains(stateExitCode, strconv.Itoa(expected))) {
c.Fatalf("Run cmd that cannot be invoked should have errored with code %d, but we got exit: %d, State.ExitCode: %s", expected, exit, stateExitCode)
}
}
// TestRunNonExistingImage checks that 'docker run foo' exits with code 125 and the output contains 'Unable to find image'
func (s *DockerSuite) TestRunNonExistingImage(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "foo")
out, exit, err := runCommandWithOutput(runCmd)
if !(err != nil && exit == 125 && strings.Contains(out, "Unable to find image")) {
c.Fatalf("Run non-existing image should have errored with 'Unable to find image' code 125, but we got out: %s, exit: %d, err: %s", out, exit, err)
}
}
// TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal docker run failed
func (s *DockerSuite) TestDockerFails(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "-foo", "busybox")
out, exit, err := runCommandWithOutput(runCmd)
if !(err != nil && exit == 125) {
c.Fatalf("Docker run with flag not defined should exit with 125, but we got out: %s, exit: %d, err: %s", out, exit, err)
}
}
// TestRunInvalidReference invokes docker run with a bad reference.
func (s *DockerSuite) TestRunInvalidReference(c *check.C) {
out, exit, _ := dockerCmdWithError("run", "busybox@foo")
if exit == 0 {
c.Fatalf("expected non-zero exist code; received %d", exit)
}
if !strings.Contains(out, "Error parsing reference") {
c.Fatalf(`Expected "Error parsing reference" in output; got: %s`, out)
}
}
// Test fix for issue #17854
func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) {
// Not applicable on Windows as it does not support Linux uid/gid ownership
testRequires(c, DaemonIsLinux)
name := "testetcfileownership"
_, err := buildImage(name,
`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN chown dockerio:dockerio /etc`,
true)
if err != nil {
c.Fatal(err)
}
// Test that dockerio ownership of /etc is retained at runtime
out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc")
out = strings.TrimSpace(out)
if out != "dockerio:dockerio" {
c.Fatalf("Wrong /etc ownership: expected dockerio:dockerio, got %q", out)
}
}
func (s *DockerSuite) TestRunWithOomScoreAdj(c *check.C) {
testRequires(c, DaemonIsLinux)
expected := "642"
out, _ := dockerCmd(c, "run", "--oom-score-adj", expected, "busybox", "cat", "/proc/self/oom_score_adj")
oomScoreAdj := strings.TrimSpace(out)
if oomScoreAdj != "642" {
c.Fatalf("Expected oom_score_adj set to %q, got %q instead", expected, oomScoreAdj)
}
}
func (s *DockerSuite) TestRunWithOomScoreAdjInvalidRange(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "--oom-score-adj", "1001", "busybox", "true")
c.Assert(err, check.NotNil)
expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]."
if !strings.Contains(out, expected) {
c.Fatalf("Expected output to contain %q, got %q instead", expected, out)
}
out, _, err = dockerCmdWithError("run", "--oom-score-adj", "-1001", "busybox", "true")
c.Assert(err, check.NotNil)
expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]."
if !strings.Contains(out, expected) {
c.Fatalf("Expected output to contain %q, got %q instead", expected, out)
}
}
func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) {
// Volume propagation is linux only. Also it creates directories for
// bind mounting, so needs to be same host.
testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace)
// Prepare a source directory to bind mount
tmpDir, err := ioutil.TempDir("", "volume-source")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil {
c.Fatal(err)
}
// Convert this directory into a shared mount point so that we do
// not rely on propagation properties of parent mount.
cmd := exec.Command("mount", "--bind", tmpDir, tmpDir)
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir)
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1")
// Make sure a bind mount under a shared volume propagated to host.
if mounted, _ := mount.Mounted(path.Join(tmpDir, "mnt1")); !mounted {
c.Fatalf("Bind mount under shared volume did not propagate to host")
}
mount.Unmount(path.Join(tmpDir, "mnt1"))
}
func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) {
// Volume propagation is linux only. Also it creates directories for
// bind mounting, so needs to be same host.
testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace)
// Prepare a source directory to bind mount
tmpDir, err := ioutil.TempDir("", "volume-source")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil {
c.Fatal(err)
}
// Prepare a source directory with a file in it. We will bind mount this
// directory and see if the file shows up.
tmpDir2, err := ioutil.TempDir("", "volume-source2")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir2)
if err := ioutil.WriteFile(path.Join(tmpDir2, "slave-testfile"), []byte("Test"), 0644); err != nil {
c.Fatal(err)
}
// Convert this directory into a shared mount point so that we do
// not rely on propagation properties of parent mount.
cmd := exec.Command("mount", "--bind", tmpDir, tmpDir)
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir)
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top")
// Bind mount tmpDir2/ onto tmpDir/mnt1. If mount propagates inside
// container then contents of tmpDir2/slave-testfile should become
// visible at "/volume-dest/mnt1/slave-testfile"
cmd = exec.Command("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1"))
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile")
mount.Unmount(path.Join(tmpDir, "mnt1"))
if out != "Test" {
c.Fatalf("Bind mount under slave volume did not propagate to container")
}
}
func (s *DockerSuite) TestRunNamedVolumesMountedAsShared(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, exitcode, _ := dockerCmdWithError("run", "-v", "foo:/test:shared", "busybox", "touch", "/test/somefile")
if exitcode == 0 {
c.Fatalf("expected non-zero exit code; received %d", exitcode)
}
if expected := "Invalid volume specification"; !strings.Contains(out, expected) {
c.Fatalf(`Expected %q in output; got: %s`, expected, out)
}
}
func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) {
testRequires(c, DaemonIsLinux)
testImg := "testvolumecopy"
_, err := buildImage(testImg, `
FROM busybox
RUN mkdir -p /foo && echo hello > /foo/hello
`, true)
c.Assert(err, check.IsNil)
dockerCmd(c, "run", "-v", "foo:/foo", testImg)
out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello")
c.Assert(strings.TrimSpace(out), check.Equals, "hello")
}
func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) {
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
dockerCmd(c, "volume", "create", "--name", "test")
dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true")
dockerCmd(c, "volume", "inspect", "test")
out, _ := dockerCmd(c, "volume", "ls", "-q")
c.Assert(strings.TrimSpace(out), checker.Equals, "test")
dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true")
dockerCmd(c, "rm", "-fv", "test")
dockerCmd(c, "volume", "inspect", "test")
out, _ = dockerCmd(c, "volume", "ls", "-q")
c.Assert(strings.TrimSpace(out), checker.Equals, "test")
}
func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) {
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
dockerCmd(c, "volume", "create", "--name", "test")
dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true")
dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true")
// Remove the parent so there are no other references to the volumes
dockerCmd(c, "rm", "-f", "parent")
// now remove the child and ensure the named volume (and only the named volume) still exists
dockerCmd(c, "rm", "-fv", "child")
dockerCmd(c, "volume", "inspect", "test")
out, _ := dockerCmd(c, "volume", "ls", "-q")
c.Assert(strings.TrimSpace(out), checker.Equals, "test")
}
func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) {
nroutines, err := getGoroutineNumber()
c.Assert(err, checker.IsNil)
runSleepingContainer(c, "--name=test", "-p", "8000:8000")
// Wait until container is fully up and running
c.Assert(waitRun("test"), check.IsNil)
out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true")
// We will need the following `inspect` to diagnose the issue if test fails (#21247)
out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test")
out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail")
c.Assert(err, checker.NotNil, check.Commentf("Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", out, err1, out1, err2, out2))
// check for windows error as well
// TODO Windows Post TP5. Fix the error message string
c.Assert(strings.Contains(string(out), "port is already allocated") ||
strings.Contains(string(out), "were not connected because a duplicate name exists") ||
strings.Contains(string(out), "HNS failed with error : Failed to create endpoint") ||
strings.Contains(string(out), "HNS failed with error : The object already exists"), checker.Equals, true, check.Commentf("Output: %s", out))
dockerCmd(c, "rm", "-f", "test")
// NGoroutines is not updated right away, so we need to wait before failing
c.Assert(waitForGoroutines(nroutines), checker.IsNil)
}
// Test for one character directory name case (#20122)
func (s *DockerSuite) TestRunVolumeWithOneCharacter(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-v", "/tmp/q:/foo", "busybox", "sh", "-c", "find /foo")
c.Assert(strings.TrimSpace(out), checker.Equals, "/foo")
}
func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support copying data from image to the volume
_, err := buildImage("volumecopy",
`FROM busybox
RUN mkdir /foo && echo hello > /foo/bar
CMD cat /foo/bar`,
true,
)
c.Assert(err, checker.IsNil)
dockerCmd(c, "volume", "create", "--name=test")
// test with the nocopy flag
out, _, err := dockerCmdWithError("run", "-v", "test:/foo:nocopy", "volumecopy")
c.Assert(err, checker.NotNil, check.Commentf(out))
// test default behavior which is to copy for non-binds
out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy")
c.Assert(strings.TrimSpace(out), checker.Equals, "hello")
// error out when the volume is already populated
out, _, err = dockerCmdWithError("run", "-v", "test:/foo:copy", "volumecopy")
c.Assert(err, checker.NotNil, check.Commentf(out))
// do not error out when copy isn't explicitly set even though it's already populated
out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy")
c.Assert(strings.TrimSpace(out), checker.Equals, "hello")
// do not allow copy modes on volumes-from
dockerCmd(c, "run", "--name=test", "-v", "/foo", "busybox", "true")
out, _, err = dockerCmdWithError("run", "--volumes-from=test:copy", "busybox", "true")
c.Assert(err, checker.NotNil, check.Commentf(out))
out, _, err = dockerCmdWithError("run", "--volumes-from=test:nocopy", "busybox", "true")
c.Assert(err, checker.NotNil, check.Commentf(out))
// do not allow copy modes on binds
out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:copy", "busybox", "true")
c.Assert(err, checker.NotNil, check.Commentf(out))
out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:nocopy", "busybox", "true")
c.Assert(err, checker.NotNil, check.Commentf(out))
}
func (s *DockerSuite) TestRunTooLongHostname(c *check.C) {
// Test case in #21445
hostname1 := "this-is-a-way-too-long-hostname-but-it-should-give-a-nice-error.local"
out, _, err := dockerCmdWithError("run", "--hostname", hostname1, "busybox", "echo", "test")
c.Assert(err, checker.NotNil, check.Commentf("Expected docker run to fail!"))
c.Assert(out, checker.Contains, "invalid hostname format:", check.Commentf("Expected to have 'invalid hostname format:' in the output, get: %s!", out))
// Additional test cases
validHostnames := map[string]string{
"hostname": "hostname",
"host-name": "host-name",
"hostname123": "hostname123",
"123hostname": "123hostname",
"hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error",
}
for hostname := range validHostnames {
dockerCmd(c, "run", "--hostname", hostname, "busybox", "echo", "test")
}
invalidHostnames := map[string]string{
"^hostname": "invalid hostname format: ^hostname",
"hostname%": "invalid hostname format: hostname%",
"host&name": "invalid hostname format: host&name",
"-hostname": "invalid hostname format: -hostname",
"host_name": "invalid hostname format: host_name",
"hostname-of-64-bytes-long-should-be-invalid-and-be-with-an-error": "invalid hostname format: hostname-of-64-bytes-long-should-be-invalid-and-be-with-an-error",
}
for hostname, expectedError := range invalidHostnames {
out, _, err = dockerCmdWithError("run", "--hostname", hostname, "busybox", "echo", "test")
c.Assert(err, checker.NotNil, check.Commentf("Expected docker run to fail!"))
c.Assert(out, checker.Contains, expectedError, check.Commentf("Expected to have '%s' in the output, got: %s!", expectedError, out))
}
}
// Test case for #21976
func (s *DockerSuite) TestRunDNSInHostMode(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace)
expectedOutput := "nameserver 127.0.0.1"
expectedWarning := "Localhost DNS setting"
out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf")
c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))
c.Assert(stderr, checker.Contains, expectedWarning, check.Commentf("Expected warning on stderr about localhost resolver, but got %q", stderr))
expectedOutput = "nameserver 1.2.3.4"
out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf")
c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))
expectedOutput = "search example.com"
out, _ = dockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", "/etc/resolv.conf")
c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))
expectedOutput = "options timeout:3"
out, _ = dockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf")
c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))
expectedOutput1 := "nameserver 1.2.3.4"
expectedOutput2 := "search example.com"
expectedOutput3 := "options timeout:3"
out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf")
c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out))
c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out))
c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out))
}
// Test case for #21976
func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace)
expectedOutput := "1.2.3.4\textra"
out, _ := dockerCmd(c, "run", "--add-host=extra:1.2.3.4", "--net=host", "busybox", "cat", "/etc/hosts")
c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))
}
|
eqToBaseDiff
|
size.rs
|
//! Size evaluation and manipulation primitives.
use crate::device::Context;
use crate::ir;
use crate::search_space::{NumSet, SearchSpace};
use num::{bigint::ToBigUint, Integer, ToPrimitive, Zero};
use utils::*;
/// A span of values.
#[derive(Debug, Copy, Clone)]
pub struct Range {
pub min: u64,
pub max: u64,
}
impl Range {
pub const ZERO: Self = Range { min: 0, max: 0 };
pub const ONE: Self = Range { min: 1, max: 1 };
/// Creates a `Range` containing a single value.
pub fn new_fixed(val: u64) -> Self {
Range { min: val, max: val }
}
/// Indicates if the `Range` contains a single value.
pub fn is_constrained(&self) -> bool {
self.min == self.max
}
}
/// Bounds the values a size can take, in the given context.
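/// The minimum multiplies the minima of all factors and divides by each divisor's
/// maximum (through a gcd so the result stays a positive integer); the maximum
/// multiplies the maxima and divides by each divisor's minimum.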
pub fn
|
(size: &ir::PartialSize, space: &SearchSpace, ctx: &dyn Context) -> Range {
let (factor, param_factors, dim_size_factors) = size.factors();
let divisors = size.divisors();
let factor = param_factors
.iter()
.map(|p| u64::from(ctx.param_as_size(&p.name).unwrap()))
.product::<u64>()
* u64::from(factor);
let mut total_min = factor.to_biguint().unwrap();
let mut total_max = total_min.clone();
for &dim in dim_size_factors {
let size = dim_bounds(dim, space);
total_min *= size.min;
total_max *= size.max;
}
for &dim in divisors {
let size = dim_bounds(dim, space);
total_min /= size.max.to_biguint().unwrap().gcd(&total_min);
total_max /= size.min;
}
assert!(!total_min.is_zero());
assert!(!total_max.is_zero());
Range {
min: total_min.to_u64().unwrap(),
max: total_max.to_u64().unwrap(),
}
}
/// Returns the `Range` a static dimension size can take.
pub fn dim_bounds(dim: ir::DimId, space: &SearchSpace) -> Range {
let size = space.domain().get_size(dim);
let universe = unwrap!(space.ir_instance().dim(dim).possible_sizes());
Range {
min: size.min_value(universe).into(),
max: size.max_value(universe).into(),
}
}
/// A span of values, in terms of factors. The actual value is a multiple of `gcd`
/// and a divisor of `lcm`.
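/// For example, `FactorRange { gcd: 4, lcm: 48 }` (an illustrative value, not
/// taken from a real search space) describes a size that is a multiple of 4
/// and a divisor of 48, i.e. one of 4, 8, 12, 16, 24 or 48.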
#[derive(Debug, Copy, Clone)]
pub struct FactorRange {
pub gcd: u64,
pub lcm: u64,
}
impl FactorRange {
pub const ZERO: Self = FactorRange { gcd: 0, lcm: 0 };
/// Create a `FactorRange` containing a single point.
pub fn new_fixed(val: u64) -> Self {
FactorRange { gcd: val, lcm: val }
}
}
/// Returns a guaranteed factor and a guaranteed multiple of `size`.
pub fn factors(
size: &ir::PartialSize,
space: &SearchSpace,
ctx: &dyn Context,
) -> FactorRange {
let (factor, param_factors, dim_size_factors) = size.factors();
let divisors = size.divisors();
let factor = param_factors
.iter()
.map(|p| u64::from(ctx.param_as_size(&p.name).unwrap()))
.product::<u64>()
* u64::from(factor);
let mut total_gcd = factor.to_biguint().unwrap();
let mut total_lcm = total_gcd.clone();
for &dim in dim_size_factors {
let size = dim_factors(dim, space);
total_gcd *= size.gcd;
total_lcm *= size.lcm;
}
for &dim in divisors {
let size = dim_factors(dim, space);
total_gcd /= size.lcm.to_biguint().unwrap().gcd(&total_gcd);
total_lcm /= size.gcd;
}
FactorRange {
gcd: total_gcd.to_u64().unwrap(),
lcm: total_lcm.to_u64().unwrap(),
}
}
/// Returns the `FactorRange` a static dimension size can take.
pub fn dim_factors(dim: ir::DimId, space: &SearchSpace) -> FactorRange {
let size = space.domain().get_size(dim);
let universe = unwrap!(space.ir_instance().dim(dim).possible_sizes());
FactorRange {
gcd: size.gcd(universe).into(),
lcm: size.lcm(universe).into(),
}
}
|
bounds
|
events.py
|
import logging
import discord
from discord.ext import commands
from core.state import global_state as gstate
from core import (
bot_utility as utility,
consts,
timers,
play_requests
)
from core.play_requests import PlayRequestCategory
from riot import riot_utility
logger = logging.getLogger('events')
class EventCog(commands.Cog):
"""Cog that handles all events. Used for
features like Auto-React, Auto-DM etc.
"""
def __init__(self, bot):
self.bot = bot
""" Dicts are mutable objects, which means
that 'self.play_requests = gstate.play_requests'
makes self.play_requests a pointer to gstate.play_requests
so every change to play_requests also changes
gstate.play_requests. Technically we can make play_requests
local, but I feel like we might need play_requests in a global
scope sometime. Same for message_cache.
"""
self.play_requests = gstate.play_requests
self.message_cache = gstate.message_cache
self.game_selection_message_id = None
@commands.Cog.listener()
async def on_ready(self):
logger.info('We have logged in as %s', self.bot.user)
if gstate.CONFIG["TOGGLE_GAME_SELECTOR"]:
game_selection_channel = discord.utils.find(lambda x: x.name == 'game-selection', self.bot.guilds[0].channels)
game_selector_message_list = await game_selection_channel.history(limit=1).flatten()
self.game_selection_message_id = game_selector_message_list[0].id
@commands.Cog.listener()
async def on_member_join(self, member):
"""Automatically assigns lowest role to
anyone that joins the server.
"""
logger.info('New member joined: %s', member.name)
await member.edit(roles=utility.get_auto_role_list(member))
@commands.Cog.listener()
async def on_message(self, message):
if isinstance(message.channel, discord.DMChannel):
return
# add messages in the play-requests channel to gstate.message_cache (for auto delete)
if gstate.CONFIG["TOGGLE_AUTO_DELETE"] and utility.is_in_channel(message, consts.CHANNEL_PLAY_REQUESTS):
utility.insert_in_message_cache(self.message_cache, message.id, message.channel.id)
@commands.Cog.listener()
async def on_message_delete(self, message):
utility.clear_play_requests(message)
if message.id in self.message_cache:
utility.clear_message_cache(message.id, self.message_cache)
else:
logger.info('Manually deleted message %s', message.id)
@commands.Cog.listener()
async def on_reaction_add(self, reaction, user):
# auto dm
logger.debug("Reaction added to %s by %s", reaction.message.id, user.name)
if utility.is_user_bot(user, self.bot):
return
if not gstate.CONFIG["TOGGLE_AUTO_DM"]:
return
if reaction.message.id not in self.play_requests:
logger.debug("Message is not a play request. Ignore reaction")
return
play_request = self.play_requests[reaction.message.id]
if utility.is_play_request_author(user.id, play_request):
logger.info("Remove reaction from a play_request_author")
await reaction.remove(user)
return
if str(reaction.emoji) == consts.EMOJI_PASS:
for player_id in play_request.generate_all_players():
if user.id == player_id:
logger.info("Remove %s from play_request %s", user.name, reaction.message.id)
play_request.remove_subscriber_id(user.id)
return
if utility.is_already_subscriber(user, play_request):
return
utility.add_subscriber_to_play_request(user, play_request)
author = self.bot.get_user(play_request.author_id)
# send auto dms to subscribers and author
logger.info("Send auto dms to play_request subscribers")
for player_id in play_request.generate_all_players():
if player_id == play_request.author_id and player_id != user.id:
await author.send(
consts.MESSAGE_AUTO_DM_CREATOR.format(user.name, str(reaction.emoji))
)
elif player_id != user.id:
player = self.bot.get_user(player_id)
await player.send(
consts.MESSAGE_AUTO_DM_SUBSCRIBER.format(
user.name, author.name, str(reaction.emoji)))
if len(play_request.subscriber_ids) + 1 == 5 and play_request.category == PlayRequestCategory.CLASH:
logger.info("Clash has 5 Members")
await reaction.channel.send(consts.MESSAGE_CLASH_FULL.format(
author, play_request.clash_date, utility.pretty_print_list([self.bot.get_user(player_id) for player_id in play_request.subscriber_ids], author)
))
if len(play_request.subscriber_ids) + 1 > 5 and play_request.category == PlayRequestCategory.CLASH:
logger.info("Remove reaction because clash has 5 Members")
await reaction.remove(user)
await user.send('Unfortunately, the Clash team is already full at the moment.')
if len(play_request.subscriber_ids) + 1 == 6 and play_request.category == PlayRequestCategory.INTERN:
logger.info("Create internal play request")
await reaction.channel.send(
utility.switch_to_internal_play_request(reaction.message, play_request))
# TODO no logging
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
if gstate.CONFIG["TOGGLE_GAME_SELECTOR"] and payload.message_id == self.game_selection_message_id:
member = discord.utils.find(lambda x: x.id == payload.user_id, list(self.bot.get_all_members()))
member_roles = member.roles.copy()
for role in member_roles:
if role.id == consts.GAME_NAME_TO_ROLE_ID_DICT[payload.emoji.name.upper()]:
return
member_roles.append(discord.utils.find(lambda x: x.name == payload.emoji.name.upper(), member.guild.roles))
await member.edit(roles=member_roles)
return
@commands.Cog.listener()
async def on_raw_reaction_remove(self, payload):
if gstate.CONFIG["TOGGLE_GAME_SELECTOR"] and payload.message_id == self.game_selection_message_id:
member = discord.utils.find(lambda x: x.id == payload.user_id, list(self.bot.get_all_members()))
member_roles = member.roles.copy()
for role in member_roles:
if role.id == consts.GAME_NAME_TO_ROLE_ID_DICT[payload.emoji.name.upper()]:
member_roles.remove(role)
await member.edit(roles=member_roles)
return
@commands.Cog.listener()
async def
|
(self, channel):
if channel.id in gstate.tmp_channel_ids:
logger.info("Temporary channel was deleted manually.")
gstate.tmp_channel_ids[channel.id]["deleted"] = True
@commands.Cog.listener()
async def on_voice_state_update(self, member: discord.Member, before: discord.VoiceState, after: discord.VoiceState):
# Checks if the user changed the channel and returns if the user didn't
if before.channel == after.channel:
return
else:
everyone_role = discord.utils.find(lambda m: m.id == consts.ROLE_EVERYONE_ID, member.guild.roles)
await update_channels_visibility(everyone_role, before.channel, False)
await update_channels_visibility(everyone_role, after.channel, True)
async def update_channels_visibility(role, channel: discord.VoiceChannel, bool_after_channel=False):
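"""Show or hide a channel category for `role` based on occupancy: the
category stays visible while any of its voice channels has a member.
`bool_after_channel` is True for the channel the member just joined,
which counts as occupied without scanning."""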
if channel is not None and channel.category.id in consts.CATEGORY_IDS:
category_channel = channel.category
bool_make_visible = False
if not bool_after_channel:
for voice_channel in category_channel.voice_channels:
if len(voice_channel.members) >= 1:
bool_make_visible = True
break
else:
bool_make_visible = True
await category_channel.set_permissions(role, read_messages=bool_make_visible)
logger.info("Channel category %s is %s visible for everybody", category_channel.name, "" if bool_make_visible else "not")
def setup(bot: commands.Bot):
bot.add_cog(EventCog(bot))
logger.info('Event cogs loaded')
|
on_guild_channel_delete
|
assets.go
|
//go:generate -command asset go run asset.go
//go:generate asset afl_3.0.txt
//go:generate asset agpl_3.0.txt
//go:generate asset apache_2.0.txt
//go:generate asset artistic_2.0.txt
//go:generate asset bsd_2_clause.txt
|
//go:generate asset cc0_1.0.txt
//go:generate asset epl_1.0.txt
//go:generate asset gpl_2.0.txt
//go:generate asset gpl_3.0.txt
//go:generate asset isc.txt
//go:generate asset lgpl_2.1.txt
//go:generate asset lgpl_3.0.txt
//go:generate asset mit.txt
//go:generate asset mpl_2.0.txt
//go:generate asset ms_pl.txt
//go:generate asset ms_rl.txt
//go:generate asset no_license.txt
//go:generate asset ofl_1.1.txt
//go:generate asset osl_3.0.txt
//go:generate asset unlicense.txt
//go:generate asset wtfpl.txt
package assets
var (
Assets = []asset{}
)
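// txt registers a license asset in Assets and returns it unchanged, so each
// generated file (presumably produced by the go:generate directives above)
// can register itself in a single expression.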
func txt(a asset) asset {
Assets = append(Assets, a)
return a
}
|
//go:generate asset bsd_3_clause_clear.txt
//go:generate asset bsd_3_clause.txt
|
test_agent_in_hostgroup.py
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.220
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import controlm_py
from controlm_py.models.agent_in_hostgroup import AgentInHostgroup # noqa: E501
from controlm_py.rest import ApiException
class TestAgentInHostgroup(unittest.TestCase):
|
if __name__ == '__main__':
unittest.main()
|
"""AgentInHostgroup unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAgentInHostgroup(self):
"""Test AgentInHostgroup"""
# FIXME: construct object with mandatory attributes with example values
# model = controlm_py.models.agent_in_hostgroup.AgentInHostgroup() # noqa: E501
pass
|
PpiNetworkPrismPredictions.py
|
import time
t1 = time.time()
geneIdNetworkFile = open('PPI_Network.txt', 'r')
PpiNetworkPredictionsFile = open('PPI_Network_Prism_Predictions.txt', 'w')
prismPredictionsFile = open('PrismPredictions.txt', 'r')
prismPredictions = prismPredictionsFile.readlines()
prismPredictionsFile.close()
alternativeConformationsFile = open('clusteredGeneIDToPDBMapping.txt', 'r')
|
alternativeConformations = alternativeConformationsFile.readlines()
alternativeConformationsFile.close()
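# For each PPI edge (a pair of gene IDs), collect the clustered alternative
# PDB conformations of both proteins; when both are found, write the pair and
# every PRISM prediction whose chain pair matches one of the conformation pairs.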
for PPI in geneIdNetworkFile:
splittedPPI = PPI.split(' ')
protein1 = splittedPPI[0]
protein2 = splittedPPI[2][:-2]
#print(protein1)
#print(protein2)
alternativeConfs1 = []
alternativeConfs2 = []
found1 = False
found2 = False
for geneId in alternativeConformations:
splittedGeneId = geneId.split('\t')
#print(splittedGeneId[1])
if len(splittedGeneId) > 2:
alternativeConfs = splittedGeneId[2].split(',')
if splittedGeneId[0] == protein1:
for i in range(0, int(splittedGeneId[1])):
alternativeConfs1.append(alternativeConfs[i][:4] + alternativeConfs[i][5:6])
found1 = True
elif splittedGeneId[0] == protein2:
for i in range(0, int(splittedGeneId[1])):
alternativeConfs2.append(alternativeConfs[i][:4] + alternativeConfs[i][5:6])
found2 = True
if found1 and found2:
PpiNetworkPredictionsFile.write(protein1 + '-' + protein2 + '\n')
for alternativeConf1 in alternativeConfs1:
PpiNetworkPredictionsFile.write(alternativeConf1 + '\t')
PpiNetworkPredictionsFile.write('\n')
for alternativeConf2 in alternativeConfs2:
PpiNetworkPredictionsFile.write(alternativeConf2 + '\t')
PpiNetworkPredictionsFile.write('\n')
for alternativeConf1 in alternativeConfs1:
for alternativeConf2 in alternativeConfs2:
for index in range(4, len(prismPredictions)):
splittedPrediction = prismPredictions[index].split()
#print(splittedPrediction[0])
#print(splittedPrediction[1])
#print(alternativeConf1)
#print(alternativeConf2)
if splittedPrediction[0].upper() == alternativeConf1.upper() and splittedPrediction[1].upper() == alternativeConf2.upper():
PpiNetworkPredictionsFile.write(prismPredictions[index])
break
geneIdNetworkFile.close()
PpiNetworkPredictionsFile.close()
t2 = time.time()
print('Elapsed time = %f' %(t2-t1))
| |
logged_out_page.tsx
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { EuiButton, EuiIcon, EuiSpacer, EuiTitle } from '@elastic/eui';
import { FormattedMessage } from '@kbn/i18n/react';
import React, { Component } from 'react';
interface Props {
addBasePath: (path: string) => string;
}
export class LoggedOutPage extends Component<Props, {}> {
public render() {
return (
<div className="loggedOut">
<header className="loggedOut__header">
<div className="loggedOut__content eui-textCenter">
<EuiSpacer size="xxl" />
|
<EuiIcon type="logoKibana" size="xxl" />
</span>
<EuiTitle size="l" className="loggedOut__title">
<h1>
<FormattedMessage
id="xpack.security.loggedOut.title"
defaultMessage="Successfully logged out"
/>
</h1>
</EuiTitle>
<EuiSpacer size="xl" />
</div>
</header>
<div className="loggedOut__content eui-textCenter">
<EuiButton href={this.props.addBasePath('/')}>
<FormattedMessage id="xpack.security.loggedOut.login" defaultMessage="Login" />
</EuiButton>
</div>
</div>
);
}
}
|
<span className="loggedOut__logo">
|
signature.go
|
package core
import (
"github.com/jaeles-project/jaeles/database"
"github.com/jaeles-project/jaeles/libs"
"github.com/jaeles-project/jaeles/utils"
"github.com/thoas/go-funk"
"path"
"path/filepath"
"regexp"
"strings"
)
// @NOTE: Signatures allow executing commands on your machine,
// so make sure you read a signature before you run it
// SelectSign selects signatures by one or more selectors
func SelectSign(signName string) []string {
var Signs []string
// return the default signs if no selector is set
if signName == "**" {
Signs = database.SelectSign("")
return Signs
}
signs := SingleSign(strings.TrimSpace(signName))
if len(signs) > 0 {
Signs = append(Signs, signs...)
}
Signs = funk.UniqString(Signs)
return Signs
}
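// Illustrative selectors (hypothetical paths, not shipped defaults):
//   "**"                            -> every signature in the database
//   "~/signatures/cves/jira.yaml"   -> a single signature file
//   "~/signatures/cves/"            -> every signature in a folder
//   "~/signatures/cves/jira-.*"     -> signatures whose base name matches a regex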
// SingleSign selects signatures by a single selector
func SingleSign(signName string) []string {
signName = utils.NormalizePath(signName)
var Signs []string
// in case selector is file
if strings.HasSuffix(signName, ".yaml") && !strings.Contains(signName, "*") {
if utils.FileExists(signName) {
Signs = append(Signs, signName)
}
return Signs
}
// in case selector is a folder
if utils.FolderExists(signName) {
signName = path.Join(path.Clean(signName), ".*")
}
// get more signature
if strings.Contains(signName, "*") && strings.Contains(signName, "/")
|
return Signs
}
// AltResolveRequest resolves all request parts, but looks for the [[ ]] delimiter
func AltResolveRequest(req *libs.Request) {
target := req.Target
if len(req.Values) > 0 {
for _, value := range req.Values {
for k, v := range value {
if strings.Contains(v, "{{.") && strings.Contains(v, "}}") {
v = ResolveVariable(v, target)
}
// variable as a script
if strings.Contains(v, "(") && strings.Contains(v, ")") {
newValue := RunVariables(v)
if len(newValue) > 0 {
target[k] = newValue[0]
}
} else {
target[k] = v
}
}
}
}
// resolve all parts again, but with the secondary [[ ]] delimiter
req.URL = AltResolveVariable(req.URL, target)
req.Body = AltResolveVariable(req.Body, target)
req.Headers = AltResolveHeader(req.Headers, target)
req.Detections = AltResolveDetection(req.Detections, target)
req.Generators = AltResolveDetection(req.Generators, target)
req.Middlewares = AltResolveDetection(req.Middlewares, target)
}
// ResolveDetection resolve detection part in YAML signature file
func ResolveDetection(detections []string, target map[string]string) []string {
var realDetections []string
for _, detect := range detections {
realDetections = append(realDetections, ResolveVariable(detect, target))
}
return realDetections
}
// AltResolveDetection resolve detection part in YAML signature file
func AltResolveDetection(detections []string, target map[string]string) []string {
var realDetections []string
for _, detect := range detections {
realDetections = append(realDetections, AltResolveVariable(detect, target))
}
return realDetections
}
// ResolveHeader resolve headers part in YAML signature file
func ResolveHeader(headers []map[string]string, target map[string]string) []map[string]string {
// realHeaders := headers
var realHeaders []map[string]string
for _, head := range headers {
realHeader := make(map[string]string)
for key, value := range head {
realKey := ResolveVariable(key, target)
realVal := ResolveVariable(value, target)
realHeader[realKey] = realVal
}
realHeaders = append(realHeaders, realHeader)
}
return realHeaders
}
// AltResolveHeader resolve headers part in YAML signature file
func AltResolveHeader(headers []map[string]string, target map[string]string) []map[string]string {
var realHeaders []map[string]string
for _, head := range headers {
realHeader := make(map[string]string)
for key, value := range head {
realKey := AltResolveVariable(key, target)
realVal := AltResolveVariable(value, target)
realHeader[realKey] = realVal
}
realHeaders = append(realHeaders, realHeader)
}
return realHeaders
}
|
{
absPath, _ := filepath.Abs(signName)
baseSelect := filepath.Base(signName)
rawSigns := utils.GetFileNames(filepath.Dir(absPath), "yaml")
for _, signFile := range rawSigns {
baseSign := filepath.Base(signFile)
if len(baseSign) == 1 && baseSign == "*" {
Signs = append(Signs, signFile)
continue
}
r, err := regexp.Compile(baseSelect)
if err != nil {
// fall back to substring matching when the selector is not a valid regex,
// and skip the regex check below since r is nil on error
if strings.Contains(signFile, baseSelect) {
Signs = append(Signs, signFile)
}
continue
}
if r.MatchString(baseSign) {
Signs = append(Signs, signFile)
}
}
}
|
resourcenavigationlinks.go
|
package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// ResourceNavigationLinksClient is the network Client
type ResourceNavigationLinksClient struct {
BaseClient
}
// NewResourceNavigationLinksClient creates an instance of the ResourceNavigationLinksClient client.
func NewResourceNavigationLinksClient(subscriptionID string) ResourceNavigationLinksClient {
return NewResourceNavigationLinksClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewResourceNavigationLinksClientWithBaseURI creates an instance of the ResourceNavigationLinksClient client using a
// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds,
// Azure stack).
func NewResourceNavigationLinksClientWithBaseURI(baseURI string, subscriptionID string) ResourceNavigationLinksClient {
return ResourceNavigationLinksClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// List gets a list of resource navigation links for a subnet.
// Parameters:
// resourceGroupName - the name of the resource group.
// virtualNetworkName - the name of the virtual network.
// subnetName - the name of the subnet.
func (client ResourceNavigationLinksClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (result ResourceNavigationLinksListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ResourceNavigationLinksClient.List")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListPreparer(ctx, resourceGroupName, virtualNetworkName, subnetName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ResourceNavigationLinksClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ResourceNavigationLinksClient", "List", resp, "Failure sending request")
return
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ResourceNavigationLinksClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client ResourceNavigationLinksClient) ListPreparer(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subnetName": autorest.Encode("path", subnetName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
}
const APIVersion = "2019-06-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/ResourceNavigationLinks", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
|
func (client ResourceNavigationLinksClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client ResourceNavigationLinksClient) ListResponder(resp *http.Response) (result ResourceNavigationLinksListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
|
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
|
node.go
|
package storagenode
import (
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/rcrowley/go-metrics"
)
func NewStorageNode(config *Config) (*StorageNode, error) {
node := &StorageNode{
Config: config,
Datasources: make(map[string]*DatasourceInstance),
registry: metrics.NewPrefixedChildRegistry(metrics.DefaultRegistry, "storagenode."),
}
// TODO: error if no datasources?
for datasourceName, datasourceConfig := range config.Datasources {
datasourceConfig.Registry = metrics.NewPrefixedChildRegistry(node.registry, datasourceName+".")
if datasource, err := NewDatasourceInstanceDefault(datasourceConfig); err == nil {
node.Datasources[datasourceName] = datasource
} else {
return nil, err
}
}
|
// This node is responsible for handling all of the queries for a specific storage node
// This is also responsible for maintaining schema, indexes, etc. from the metadata store
// and applying them to the actual storage subsystem
type StorageNode struct {
Config *Config
Datasources map[string]*DatasourceInstance
registry metrics.Registry
}
// TODO: have a stop?
func (s *StorageNode) Start() error {
// initialize the http api (since at this point we are ready to go!)
router := httprouter.New()
api := NewHTTPApi(s)
api.Start(router)
return http.ListenAndServe(s.Config.HTTP.Addr, router)
}
|
return node, nil
}
|
tests.rs
|
use super::*;
use ckb_testtool::context::Context;
use ckb_testtool::ckb_types::{
bytes::Bytes,
core::TransactionBuilder,
packed::*,
prelude::*,
};
use ckb_testtool::ckb_error::assert_error_eq;
use ckb_testtool::ckb_script::ScriptError;
const MAX_CYCLES: u64 = 10_000_000;
// error numbers
const ERROR_EMPTY_ARGS: i8 = 5;
#[test]
fn test_success()
|
#[test]
fn test_empty_args() {
// deploy contract
let mut context = Context::default();
let contract_bin: Bytes = Loader::default().load_binary("contract1");
let out_point = context.deploy_cell(contract_bin);
// prepare scripts
let lock_script = context
.build_script(&out_point, Default::default())
.expect("script");
let lock_script_dep = CellDep::new_builder()
.out_point(out_point)
.build();
// prepare cells
let input_out_point = context.create_cell(
CellOutput::new_builder()
.capacity(1000u64.pack())
.lock(lock_script.clone())
.build(),
Bytes::new(),
);
let input = CellInput::new_builder()
.previous_output(input_out_point)
.build();
let outputs = vec![
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script.clone())
.build(),
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script)
.build(),
];
let outputs_data = vec![Bytes::new(); 2];
// build transaction
let tx = TransactionBuilder::default()
.input(input)
.outputs(outputs)
.outputs_data(outputs_data.pack())
.cell_dep(lock_script_dep)
.build();
let tx = context.complete_tx(tx);
// run
let err = context
.verify_tx(&tx, MAX_CYCLES)
.unwrap_err();
// we expect an error raised from 0-indexed cell's lock script
let script_cell_index = 0;
assert_error_eq!(
err,
ScriptError::ValidationFailure(ERROR_EMPTY_ARGS).input_lock_script(script_cell_index)
);
}
|
{
// deploy contract
let mut context = Context::default();
let contract_bin: Bytes = Loader::default().load_binary("contract1");
let out_point = context.deploy_cell(contract_bin);
// prepare scripts
let lock_script = context
.build_script(&out_point, Bytes::from(vec![42]))
.expect("script");
let lock_script_dep = CellDep::new_builder()
.out_point(out_point)
.build();
// prepare cells
let input_out_point = context.create_cell(
CellOutput::new_builder()
.capacity(1000u64.pack())
.lock(lock_script.clone())
.build(),
Bytes::new(),
);
let input = CellInput::new_builder()
.previous_output(input_out_point)
.build();
let outputs = vec![
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script.clone())
.build(),
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script)
.build(),
];
let outputs_data = vec![Bytes::new(); 2];
// build transaction
let tx = TransactionBuilder::default()
.input(input)
.outputs(outputs)
.outputs_data(outputs_data.pack())
.cell_dep(lock_script_dep)
.build();
let tx = context.complete_tx(tx);
// run
let cycles = context
.verify_tx(&tx, MAX_CYCLES)
.expect("pass verification");
println!("consume cycles: {}", cycles);
}
|
run-gat-2-8.py
|
import main
from common import Task, STOP, GNN_TYPE
from attrdict import AttrDict
from experiment import Experiment
import torch
override_params = {
2: {'batch_size': 64, 'eval_every': 1000},
3: {'batch_size': 64},
4: {'batch_size': 1024},
5: {'batch_size': 1024},
6: {'batch_size': 1024},
7: {'batch_size': 2048},
8: {'batch_size': 1024, 'accum_grad': 2}, # effective batch size of 2048, with less GPU memory
}
class Results:
def __init__(self, train_acc, test_acc, epoch):
self.train_acc = train_acc
self.test_acc = test_acc
self.epoch = epoch
if __name__ == '__main__':
|
task = Task.DICTIONARY
gnn_type = GNN_TYPE.GAT
stopping_criterion = STOP.TRAIN
min_depth = 2
max_depth = 8
results_all_depths = {}
for depth in range(min_depth, max_depth + 1):
num_layers = depth + 1
args = main.get_fake_args(task=task, depth=depth, num_layers=num_layers, loader_workers=7,
type=gnn_type, stop=stopping_criterion,
no_activation=True, no_residual=False)
if depth in override_params:
for key, value in AttrDict(override_params[depth]).items():
args[key] = value
train_acc, test_acc, epoch = Experiment(args).run()
torch.cuda.empty_cache()
results_all_depths[depth] = Results(train_acc=train_acc, test_acc=test_acc, epoch=epoch)
print()
print(f'Task: {task}')
print('depth, train_acc, test_acc, epoch')
for depth in range(min_depth, max_depth + 1):
res = results_all_depths[depth]
print(f'{depth}, {res.train_acc}, {res.test_acc}, {res.epoch}')
|
|
EA_A_03_2LFact_Data.py
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,scripts//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.1.6
# kernelspec:
# display_name: Python [conda env:thesis] *
# language: python
# name: conda-env-thesis-py
# ---
# %% [raw]
# \author{Eloy Ruiz-Donayre}
# \title{TESTCASE A - 2-Level 6-Factor Full Factorial (With 30 replicates) - Data Generation}
# \date{\today}
# \maketitle
# %% [raw]
# \tableofcontents
# %% [markdown]
# # Preliminaries
# %% [markdown]
# Importing python packages and setting display parameters
# %%
import numpy as np
import pandas as pd
import itertools as it
import scipy.stats as stats
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import thesis_EAfunc as EAf
import thesis_visfunc as EAv
# %%
plt.style.use("bmh")
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
pd.set_option("display.latex.repr", True)
pd.set_option("display.latex.longtable", True)
# %% [markdown] {"toc-hr-collapsed": false}
# # Fitness Landscape Definition
# %%
# Problem domain
x_min = -15
x_max = 15
y_min = -15
y_max = 15
# Known minimum
x_point = -1
y_point = -1
domain = (x_min, x_max, y_min, y_max)
point = (x_point, y_point)
img_size = (8.5, 4.25)
# Problem definition
def f(x, y):
|
# %%
# Testing the minimum
print(f(-1, -1))
# %%
# Testing the function
print(f(-1.0, -1.0), f(-11.0, -9.0), f(11.0, 3.0), f(-6.0, 9.0))
# %% [markdown] {"toc-hr-collapsed": false}
# # Setting up the experiment
# 64 experiments:
# - In each experiment, one set of parameters is used.
#   - 30 replicates per experiment.
#     - Each replicate is different due to randomness effects.
# %%
# starting seed
np.random.seed(42)
# %% [markdown]
# ## Initializing data storage
# %%
mult_fit_cols = (
["exp"]
+ ["pop_s"]
+ ["b"]
+ ["mut_p"]
+ ["mut_s"]
+ ["p_sel"]
+ ["s_sel"]
+ ["run", "generation", "fitness_min", "fitness_max", "fitness_mean", "fitness_std"]
)
multi_fit = pd.DataFrame(columns=mult_fit_cols)
multi_fit = multi_fit.infer_objects()
# %% [markdown] {"toc-hr-collapsed": false}
# ## Parameter space for the experiment
# %% [markdown]
# ### Initializing
# %%
# Algorithm parameters
# Number of replicates, and generations per experiment
rep_n = 30
gen_f = 200
# Population size
pop_s = [10, 160]
# Parent subpopulation's selection method and size
par_selection = ["uniform", "tournament_k3"]
b = [0.5, 5]
par_s = [z * y for z in pop_s for y in b]
# Progeny subpopulation's size
prog_s = par_s
# Crossover Method
crossover = "uniform"
# Mutation method, probability and size
mutation = "random_all_gau_dis"
mut_p = [0.1, 0.9]
mut_s = [0.5, 5]
# New population selection method
sur_selection = ["fitness_proportional_selection", "uniform"]
# %% [markdown]
# ### 2-Level Factors encoded values
# %%
inputs_labels = {
"pop_s": "Population size",
"b": "Progeny-to-population ratio",
"mut_p": "Mutation Probability",
"mut_s": "Mutation size",
"p_sel": "Parent selection",
"s_sel": "Survivor selection method",
}
dat = [
("pop_s", 10, 160, -1, 1, "Numerical"),
("b", 0.5, 5, -1, 1, "Numerical"),
("mut_p", 0.1, 0.9, -1, 1, "Numerical (<1)"),
("mut_s", 0.5, 5, -1, 1, "Numerical"),
("p_sel", "uniform", "tournament k3", -1, 1, "Categorical"),
("s_sel", "fitness proportional", "uniform", -1, 1, "Categorical"),
]
inputs_df = pd.DataFrame(
dat,
columns=[
"Factor",
"Value_low",
"Value_high",
"encoded_low",
"encoded_high",
"Variable type",
],
)
inputs_df = inputs_df.set_index(["Factor"])
inputs_df["Label"] = inputs_df.index.map(lambda z: inputs_labels[z])
inputs_df = inputs_df[
["Label", "Variable type", "Value_low", "Value_high", "encoded_low", "encoded_high"]
]
inputs_df
# %% [markdown]
# ### Combining the 2-level Factors
# %% [markdown]
# We create a list with all the possible combinations of the 2-level factors
# %%
exp_par = list(it.product(pop_s, b, mut_p, mut_s, par_selection, sur_selection))
print('Number of parameter combinations in "exp_par": ' + str(len(exp_par)))
print()
print('First and last parameter combinations in "exp_par":')
print("Sequence (pop_s, b, mut_p, mut_s, p_sel, s_sel)")
print(exp_par[0])
print(exp_par[63])
# %% [markdown]
# # Experiment execution
# %%
# %%time
exp_n = 1
for (zz, yy, xx, vv, uu, tt) in exp_par:
sur_selection = tt
par_selection = uu
mut_s = vv
mut_p = xx
b = yy
pop_s = zz
prog_s = int(b * pop_s)
par_s = prog_s
fitness_res = EAf.EA_exp_only_fitness(
rep_n,
gen_f,
f,
domain,
pop_s,
par_s,
prog_s,
mut_p,
mut_s,
par_selection,
crossover,
mutation,
sur_selection,
)
fitness_res.insert(0, "s_sel", tt)
fitness_res.insert(0, "p_sel", uu)
fitness_res.insert(0, "mut_s", vv)
fitness_res.insert(0, "mut_p", xx)
fitness_res.insert(0, "b", yy)
fitness_res.insert(0, "pop_s", zz)
fitness_res.insert(0, "exp", exp_n)
multi_fit = multi_fit.append(fitness_res, ignore_index=True, sort=False)
multi_fit = multi_fit.infer_objects()
exp_n += 1
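# multi_fit now holds the fitness traces of all 64 experiments
# x 30 replicates = 1920 runs (one row per logged generation of each run).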
# %% [markdown]
# ## Data storage
# %% [markdown]
# Writing the Data Frame to a pickle file
# %%
multi_fit.to_pickle("./Data/TEST_A_2L_FitData.gz", compression="gzip")
# %% [markdown]
# Reading the Data Frame from a pickle file
# %%
multi_fit = pd.read_pickle("./Data/TEST_A_2L_FitData.gz", compression="gzip")
# %%
multi_fit.tail()
# %% [markdown]
# # Processing data for DOE Analysis
# %% [markdown]
# Storing the latest generation's population of each replicate
# %%
query = multi_fit["generation"] == gen_f
multi_final_fitness_res = multi_fit[query]
# %% [markdown]
# Reordering columns
# %%
multi_final_fitness_res = multi_final_fitness_res.drop(
["exp", "generation", "run", "seed"], axis=1
)
multi_final_fitness_res.columns = [
"pop_s",
"b",
"mut_p",
"mut_s",
"p_sel",
"s_sel",
"f_min",
"f_max",
"f_mean",
"f_std",
]
multi_final_fitness_res = multi_final_fitness_res[
[
"pop_s",
"b",
"mut_p",
"mut_s",
"p_sel",
"s_sel",
"f_min",
"f_max",
"f_mean",
"f_std",
]
]
multi_final_fitness_res = multi_final_fitness_res.reset_index(drop=True)
# %% [markdown]
# Encoding values for DOE's Factors
# %%
multi_final_fitness_res["pop_s"] = (
multi_final_fitness_res["pop_s"].replace([10, 160], [-1, 1]).infer_objects()
)
multi_final_fitness_res["b"] = (
multi_final_fitness_res["b"].replace([0.5, 5], [-1, 1]).infer_objects()
)
multi_final_fitness_res["mut_p"] = (
multi_final_fitness_res["mut_p"].replace([0.1, 0.9], [-1, 1]).infer_objects()
)
multi_final_fitness_res["mut_s"] = (
multi_final_fitness_res["mut_s"].replace([0.5, 5], [-1, 1]).infer_objects()
)
multi_final_fitness_res["p_sel"] = (
multi_final_fitness_res["p_sel"]
.replace(["uniform", "tournament_k3"], [-1, 1])
.infer_objects()
)
multi_final_fitness_res["s_sel"] = (
multi_final_fitness_res["s_sel"]
.replace(["fitness_proportional_selection", "uniform"], [-1, 1])
.infer_objects()
)
# %% [markdown]
# Exploring the Data Frame
# %%
multi_final_fitness_res.head()
# %%
multi_final_fitness_res.tail()
# %% [markdown]
# Storing the Factor Coding and DOE results Data Frames
# %%
inputs_df.to_pickle("./Data/TEST_A_DOE_code.gz", compression="gzip")
multi_final_fitness_res.to_pickle("./Data/TEST_A_DOE_data.gz", compression="gzip")
# %%
|
D = 2
alpha = 1 / 8
x = (x - 5) / 6
y = (y - 5) / 6
a = np.abs(x ** 2 + y ** 2 - D) ** (alpha * D)
b = (0.5 * (x ** 2 + y ** 2) + (x + y)) / D
return a + b + 0.5
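# Worked check of the known minimum at (x, y) = (-1, -1):
# x' = (-1 - 5) / 6 = -1, y' = -1
# a = |1 + 1 - 2| ** (1/8 * 2) = 0
# b = (0.5 * (1 + 1) + (-1 - 1)) / 2 = -0.5
# f(-1, -1) = 0 + (-0.5) + 0.5 = 0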
|
answer.py
|
from typing import List
class Solution:
def numMagicSquaresInside(self, grid: List[List[int]]) -> int:
|
for c in range(C-2):
r1c1 = grid[r][c]
r1c2 = grid[r][c+1]
r1c3 = grid[r][c+2]
r2c1 = grid[r+1][c]
r2c2 = grid[r+1][c+1]
r2c3 = grid[r+1][c+2]
r3c1 = grid[r+2][c]
r3c2 = grid[r+2][c+1]
r3c3 = grid[r+2][c+2]
result = sorted([r1c1, r1c2, r1c3, r2c1, r2c2,
r2c3, r3c1, r3c2, r3c3]) == list(range(1, 10))
result &= (r1c1 + r1c2 + r1c3 == 15)
result &= (r2c1 + r2c2 + r2c3 == 15)
result &= (r3c1 + r3c2 + r3c3 == 15)
result &= (r1c1 + r2c1 + r3c1 == 15)
result &= (r1c2 + r2c2 + r3c2 == 15)
result &= (r1c3 + r2c3 + r3c3 == 15)
result &= (r1c1 + r2c2 + r3c3 == 15)
result &= (r1c3 + r2c2 + r3c1 == 15)
count += 1 if result else 0
return count
if __name__ == "__main__":
s = Solution()
result = s.numMagicSquaresInside(
[[4, 7, 8], [9, 5, 1], [2, 3, 6]])
print(result)
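# Prints 0: the candidate grid contains 1..9 exactly once, but its rows sum to
# 19, 15 and 11, so it is not a magic square.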
|
R = len(grid)
C = len(grid[0])
count = 0
for r in range(R-2):
|
image.rs
|
#![allow(clippy::too_many_arguments)]
use std::convert::TryFrom;
use std::io;
use std::io::Read;
use std::ops::{Deref, DerefMut};
use std::path::Path;
use crate::buffer::{ImageBuffer, Pixel};
use crate::color::{ColorType, ExtendedColorType};
use crate::error::{ImageError, ImageResult};
use crate::math::Rect;
use crate::animation::Frames;
#[cfg(feature = "pnm")]
use crate::pnm::PNMSubtype;
/// An enumeration of supported image formats.
/// Not all formats support both encoding and decoding.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum ImageFormat {
/// An Image in PNG Format
Png,
/// An Image in JPEG Format
Jpeg,
/// An Image in GIF Format
Gif,
/// An Image in WEBP Format
WebP,
/// An Image in general PNM Format
Pnm,
/// An Image in TIFF Format
Tiff,
/// An Image in TGA Format
Tga,
/// An Image in DDS Format
Dds,
/// An Image in BMP Format
Bmp,
/// An Image in ICO Format
Ico,
/// An Image in Radiance HDR Format
Hdr,
#[doc(hidden)]
__NonExhaustive(crate::utils::NonExhaustiveMarker),
}
impl ImageFormat {
/// Return the image format specified by the path's file extension.
pub fn from_path<P>(path: P) -> ImageResult<Self> where P : AsRef<Path> {
// thin wrapper function to strip generics before calling from_path_impl
crate::io::free_functions::guess_format_from_path_impl(path.as_ref())
.map_err(Into::into)
}
}
/// An enumeration of supported image formats for encoding.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum ImageOutputFormat {
#[cfg(feature = "png")]
/// An Image in PNG Format
Png,
#[cfg(feature = "jpeg")]
/// An Image in JPEG Format with specified quality
Jpeg(u8),
#[cfg(feature = "pnm")]
/// An Image in one of the PNM Formats
Pnm(PNMSubtype),
#[cfg(feature = "gif")]
/// An Image in GIF Format
Gif,
#[cfg(feature = "ico")]
/// An Image in ICO Format
Ico,
#[cfg(feature = "bmp")]
/// An Image in BMP Format
Bmp,
/// A value for signalling an error: an unsupported format was requested
// Note: When TryFrom is stabilized, this value should not be needed, and
// a TryInto<ImageOutputFormat> should be used instead of an Into<ImageOutputFormat>.
Unsupported(String),
#[doc(hidden)]
__NonExhaustive(crate::utils::NonExhaustiveMarker),
}
impl From<ImageFormat> for ImageOutputFormat {
fn from(fmt: ImageFormat) -> Self {
match fmt {
#[cfg(feature = "png")]
ImageFormat::Png => ImageOutputFormat::Png,
#[cfg(feature = "jpeg")]
ImageFormat::Jpeg => ImageOutputFormat::Jpeg(75),
#[cfg(feature = "pnm")]
ImageFormat::Pnm => ImageOutputFormat::Pnm(PNMSubtype::ArbitraryMap),
#[cfg(feature = "gif")]
ImageFormat::Gif => ImageOutputFormat::Gif,
#[cfg(feature = "ico")]
ImageFormat::Ico => ImageOutputFormat::Ico,
#[cfg(feature = "bmp")]
ImageFormat::Bmp => ImageOutputFormat::Bmp,
f => ImageOutputFormat::Unsupported(format!(
"Image format {:?} not supported for encoding.",
f
)),
}
}
}
// This struct manages buffering associated with implementing `Read` and `Seek` on decoders that
// must decode ranges of bytes at a time.
pub(crate) struct ImageReadBuffer {
scanline_bytes: usize,
buffer: Vec<u8>,
consumed: usize,
total_bytes: u64,
offset: u64,
}
impl ImageReadBuffer {
/// Create a new ImageReadBuffer.
///
/// Panics if scanline_bytes doesn't fit into a usize, because that would mean reading anything
/// from the image would take more RAM than the entire virtual address space. In other words,
/// actually using this struct would instantly OOM so just get it out of the way now.
pub(crate) fn new(scanline_bytes: u64, total_bytes: u64) -> Self {
Self {
scanline_bytes: usize::try_from(scanline_bytes).unwrap(),
buffer: Vec::new(),
consumed: 0,
total_bytes,
offset: 0,
}
}
pub(crate) fn read<F>(&mut self, buf: &mut [u8], mut read_scanline: F) -> io::Result<usize>
where
F: FnMut(&mut [u8]) -> io::Result<usize>,
{
if self.buffer.len() == self.consumed {
if self.offset == self.total_bytes {
return Ok(0);
} else if buf.len() >= self.scanline_bytes {
// If there is nothing buffered and the user requested a full scanline worth of
// data, skip buffering.
let bytes_read = read_scanline(&mut buf[..self.scanline_bytes])?;
self.offset += u64::try_from(bytes_read).unwrap();
return Ok(bytes_read);
} else {
// Lazily allocate buffer the first time that read is called with a buffer smaller
// than the scanline size.
if self.buffer.is_empty() {
self.buffer.resize(self.scanline_bytes, 0);
}
self.consumed = 0;
let bytes_read = read_scanline(&mut self.buffer[..])?;
self.buffer.resize(bytes_read, 0);
self.offset += u64::try_from(bytes_read).unwrap();
assert!(bytes_read == self.scanline_bytes || self.offset == self.total_bytes);
}
}
// Finally, copy bytes into output buffer.
let bytes_buffered = self.buffer.len() - self.consumed;
if bytes_buffered > buf.len() {
crate::copy_memory(&self.buffer[self.consumed..][..buf.len()], &mut buf[..]);
self.consumed += buf.len();
Ok(buf.len())
} else {
crate::copy_memory(&self.buffer[self.consumed..], &mut buf[..bytes_buffered]);
self.consumed = self.buffer.len();
Ok(bytes_buffered)
}
}
}
/// Decodes a specific region of the image, represented by the rectangle
/// starting from (`x`, `y`) and having the given `width` and `height`
pub(crate) fn load_rect<'a, D, F, F1, F2, E>(x: u32, y: u32, width: u32, height: u32, buf: &mut [u8],
progress_callback: F,
decoder: &mut D,
mut seek_scanline: F1,
mut read_scanline: F2) -> ImageResult<()>
where D: ImageDecoder<'a>,
F: Fn(Progress),
F1: FnMut(&mut D, u64) -> io::Result<()>,
F2: FnMut(&mut D, &mut [u8]) -> Result<usize, E>,
ImageError: From<E>,
{
let (x, y, width, height) = (u64::from(x), u64::from(y), u64::from(width), u64::from(height));
let dimensions = decoder.dimensions();
let bytes_per_pixel = u64::from(decoder.color_type().bytes_per_pixel());
let row_bytes = bytes_per_pixel * u64::from(dimensions.0);
let scanline_bytes = decoder.scanline_bytes();
let total_bytes = width * height * bytes_per_pixel;
let mut bytes_read = 0u64;
let mut current_scanline = 0;
let mut tmp = Vec::new();
{
// Read a range of the image starting from byte number `start` and continuing until byte
// number `end`. Updates `current_scanline` and `bytes_read` appropriately.
let mut read_image_range = |start: u64, end: u64| -> ImageResult<()> {
let target_scanline = start / scanline_bytes;
if target_scanline != current_scanline {
seek_scanline(decoder, target_scanline)?;
current_scanline = target_scanline;
}
let mut position = current_scanline * scanline_bytes;
while position < end {
if position >= start && end - position >= scanline_bytes {
read_scanline(decoder, &mut buf[(bytes_read as usize)..]
[..(scanline_bytes as usize)])?;
bytes_read += scanline_bytes;
} else {
tmp.resize(scanline_bytes as usize, 0u8);
read_scanline(decoder, &mut tmp)?;
let offset = start.saturating_sub(position);
let len = (end - start)
.min(scanline_bytes - offset)
.min(end - position);
buf[(bytes_read as usize)..][..len as usize]
.copy_from_slice(&tmp[offset as usize..][..len as usize]);
bytes_read += len;
}
current_scanline += 1;
position += scanline_bytes;
progress_callback(Progress {current: bytes_read, total: total_bytes});
}
Ok(())
};
if x + width > u64::from(dimensions.0) || y + height > u64::from(dimensions.1)
|| width == 0 || height == 0 {
return Err(ImageError::DimensionError);
}
if scanline_bytes > usize::max_value() as u64 {
return Err(ImageError::InsufficientMemory);
}
progress_callback(Progress {current: 0, total: total_bytes});
if x == 0 && width == u64::from(dimensions.0) {
let start = x * bytes_per_pixel + y * row_bytes;
let end = (x + width) * bytes_per_pixel + (y + height - 1) * row_bytes;
read_image_range(start, end)?;
} else {
for row in y..(y+height) {
let start = x * bytes_per_pixel + row * row_bytes;
let end = (x + width) * bytes_per_pixel + row * row_bytes;
read_image_range(start, end)?;
}
}
}
// Seek back to the start
Ok(seek_scanline(decoder, 0)?)
}
/// Reads all of the bytes of a decoder into a Vec<T>. No particular alignment
/// of the output buffer is guaranteed.
///
/// Panics if there isn't enough memory to decode the image.
pub(crate) fn decoder_to_vec<'a, T>(decoder: impl ImageDecoder<'a>) -> ImageResult<Vec<T>>
where
T: crate::traits::Primitive + bytemuck::Pod,
{
let mut buf = vec![num_traits::Zero::zero(); usize::try_from(decoder.total_bytes()).unwrap() / std::mem::size_of::<T>()];
decoder.read_image(bytemuck::cast_slice_mut(buf.as_mut_slice()))?;
Ok(buf)
}
/// Represents the progress of an image operation.
///
/// Note that this is not necessarily accurate and no change to the values passed to the progress
/// function during decoding will be considered breaking. A decoder could in theory report the
/// progress `(0, 0)` if progress is unknown, without violating the interface contract of the type.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Progress {
current: u64,
total: u64,
}
impl Progress {
/// A measure of completed decoding.
pub fn current(self) -> u64 {
self.current
}
/// A measure of all necessary decoding work.
///
/// This is in general greater or equal than `current`.
pub fn total(self) -> u64 {
self.total
}
/// Calculate a measure for remaining decoding work.
pub fn remaining(self) -> u64 {
self.total.max(self.current) - self.current
}
}
/// The trait that all decoders implement
pub trait ImageDecoder<'a>: Sized {
/// The type of reader produced by `into_reader`.
type Reader: Read + 'a;
/// Returns a tuple containing the width and height of the image
fn dimensions(&self) -> (u32, u32);
/// Returns the color type of the image data produced by this decoder
fn color_type(&self) -> ColorType;
/// Returns the color type of the image file before decoding
fn original_color_type(&self) -> ExtendedColorType {
self.color_type().into()
}
/// Returns a reader that can be used to obtain the bytes of the image. For the best
/// performance, always try to read at least `scanline_bytes` from the reader at a time. Reading
/// fewer bytes will cause the reader to perform internal buffering.
fn into_reader(self) -> ImageResult<Self::Reader>;
/// Returns the total number of bytes in the decoded image.
///
/// This is the size of the buffer that must be passed to `read_image` or
/// `read_image_with_progress`. The returned value may exceed usize::MAX, in
/// which case it isn't actually possible to construct a buffer to decode all the image data
/// into.
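///
/// For example, a 100x100 `ColorType::Rgb8` image (3 bytes per pixel) has
/// `100 * 100 * 3 = 30_000` total bytes.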
fn total_bytes(&self) -> u64 {
let dimensions = self.dimensions();
u64::from(dimensions.0) * u64::from(dimensions.1) * u64::from(self.color_type().bytes_per_pixel())
}
/// Returns the minimum number of bytes that can be efficiently read from this decoder. This may
/// be as few as 1 or as many as `total_bytes()`.
fn scanline_bytes(&self) -> u64 {
self.total_bytes()
}
/// Returns all the bytes in the image.
///
/// This function takes a slice of bytes and writes the pixel data of the image into it.
/// Although not required, for certain color types callers may want to pass buffers which are
/// aligned to 2 or 4 byte boundaries so that the slice can be cast to a [u16] or [u32]. To
/// accommodate such casts, the returned contents will always be in native endian.
///
/// # Panics
///
/// This function panics if buf.len() != self.total_bytes().
///
/// # Examples
///
/// ```no_build
/// use zerocopy::{AsBytes, FromBytes};
/// fn read_16bit_image(decoder: impl ImageDecoder) -> Vec<u16> {
///     let mut buf: Vec<u16> = vec![0; decoder.total_bytes() as usize / 2];
///     decoder.read_image(buf.as_bytes_mut());
///     buf
/// }
/// ```
fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
self.read_image_with_progress(buf, |_| {})
}
/// Same as `read_image` but periodically calls the provided callback to give updates on loading
/// progress.
fn read_image_with_progress<F: Fn(Progress)>(
self,
buf: &mut [u8],
progress_callback: F,
) -> ImageResult<()> {
assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
let total_bytes = self.total_bytes() as usize;
let scanline_bytes = self.scanline_bytes() as usize;
let target_read_size = if scanline_bytes < 4096 {
(4096 / scanline_bytes) * scanline_bytes
} else {
scanline_bytes
};
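// Read in scanline-aligned chunks of roughly 4 KiB at a time to amortize the
// per-call overhead of the underlying reader.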
let mut reader = self.into_reader()?;
let mut bytes_read = 0;
while bytes_read < total_bytes {
let read_size = target_read_size.min(total_bytes - bytes_read);
reader.read_exact(&mut buf[bytes_read..][..read_size])?;
bytes_read += read_size;
progress_callback(Progress {
current: bytes_read as u64,
total: total_bytes as u64,
});
}
Ok(())
}
}
/// ImageDecoderExt trait
pub trait ImageDecoderExt<'a>: ImageDecoder<'a> + Sized {
/// Read a rectangular section of the image.
fn read_rect(
&mut self,
x: u32,
y: u32,
width: u32,
height: u32,
buf: &mut [u8],
) -> ImageResult<()> {
self.read_rect_with_progress(x, y, width, height, buf, |_|{})
}
/// Read a rectangular section of the image, periodically reporting progress.
fn read_rect_with_progress<F: Fn(Progress)>(
&mut self,
x: u32,
y: u32,
width: u32,
height: u32,
buf: &mut [u8],
progress_callback: F,
) -> ImageResult<()>;
}
/// AnimationDecoder trait
pub trait AnimationDecoder<'a> {
/// Consume the decoder producing a series of frames.
fn into_frames(self) -> Frames<'a>;
}
/// The trait all encoders implement
pub trait ImageEncoder {
/// Writes all the bytes in an image to the encoder.
///
/// This function takes a slice of bytes of the pixel data of the image
/// and encodes them. Unlike particular format encoders inherent impl encode
/// methods where endianness is not specified, here image data bytes should
/// always be in native endian. The implementor will reorder the endianess
/// as necessary for the target encoding format.
///
/// See also `ImageDecoder::read_image` which reads byte buffers into
/// native endian.
fn write_image(
self,
buf: &[u8],
width: u32,
height: u32,
color_type: ColorType,
) -> ImageResult<()>;
}
/// Immutable pixel iterator
pub struct Pixels<'a, I: ?Sized + 'a> {
image: &'a I,
x: u32,
y: u32,
width: u32,
height: u32,
}
impl<'a, I: GenericImageView> Iterator for Pixels<'a, I> {
type Item = (u32, u32, I::Pixel);
fn next(&mut self) -> Option<(u32, u32, I::Pixel)> {
if self.x >= self.width {
self.x = 0;
self.y += 1;
}
if self.y >= self.height {
None
} else {
let pixel = self.image.get_pixel(self.x, self.y);
let p = (self.x, self.y, pixel);
self.x += 1;
Some(p)
}
}
}
/// Trait to inspect an image.
pub trait GenericImageView {
/// The type of pixel.
type Pixel: Pixel;
/// Underlying image type. This is mainly used by SubImages in order to
/// always have a reference to the original image. This allows for fewer
/// indirections and eases the use of nested SubImages.
type InnerImageView: GenericImageView<Pixel = Self::Pixel>;
/// The width and height of this image.
fn dimensions(&self) -> (u32, u32);
/// The width of this image.
fn width(&self) -> u32 {
let (w, _) = self.dimensions();
w
}
/// The height of this image.
fn height(&self) -> u32 {
let (_, h) = self.dimensions();
h
}
/// The bounding rectangle of this image.
fn bounds(&self) -> (u32, u32, u32, u32);
/// Returns true if this x, y coordinate is contained inside the image.
fn in_bounds(&self, x: u32, y: u32) -> bool {
let (ix, iy, iw, ih) = self.bounds();
x >= ix && x < ix + iw && y >= iy && y < iy + ih
}
/// Returns the pixel located at (x, y)
///
/// # Panics
///
/// Panics if `(x, y)` is out of bounds.
///
/// TODO: change this signature to &P
fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel;
/// Returns the pixel located at (x, y)
///
/// This function can be implemented in a way that ignores bounds checking.
unsafe fn unsafe_get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
self.get_pixel(x, y)
}
/// Returns an Iterator over the pixels of this image.
/// The iterator yields the coordinates of each pixel
/// along with their value
fn pixels(&self) -> Pixels<Self> {
let (width, height) = self.dimensions();
Pixels {
image: self,
x: 0,
y: 0,
width,
height,
}
}
/// Returns a reference to the underlying image.
fn inner(&self) -> &Self::InnerImageView;
/// Returns a subimage that is an immutable view into this image.
/// You can use [`GenericImage::sub_image`] if you need a mutable view instead.
fn view(&self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&Self::InnerImageView> {
SubImage::new(self.inner(), x, y, width, height)
}
}
/// A trait for manipulating images.
pub trait GenericImage: GenericImageView {
/// Underlying image type. This is mainly used by SubImages in order to
/// always have a reference to the original image. This allows for fewer
/// indirections and eases the use of nested SubImages.
type InnerImage: GenericImage<Pixel = Self::Pixel>;
/// Gets a reference to the mutable pixel at location `(x, y)`
///
/// # Panics
///
/// Panics if `(x, y)` is out of bounds.
fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel;
/// Put a pixel at location (x, y)
///
/// # Panics
///
/// Panics if `(x, y)` is out of bounds.
fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel);
/// Puts a pixel at location (x, y)
///
/// This function can be implemented in a way that ignores bounds checking.
unsafe fn unsafe_put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
self.put_pixel(x, y, pixel);
}
/// Put a pixel at location (x, y), taking into account alpha channels
///
/// DEPRECATED: This method will be removed. Blend the pixel directly instead.
fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel);
/// Copies all of the pixels from another image into this image.
///
/// The other image is copied with the top-left corner of the
/// other image placed at (x, y).
///
/// In order to copy only a piece of the other image, use [`GenericImageView::view`].
///
/// # Returns
/// Returns an error if the image is too large to be copied at the given position
fn copy_from<O>(&mut self, other: &O, x: u32, y: u32) -> ImageResult<()>
where
O: GenericImageView<Pixel = Self::Pixel>,
{
// Do bounds checking here so we can use the non-bounds-checking
// functions to copy pixels.
if self.width() < other.width() + x || self.height() < other.height() + y {
return Err(ImageError::DimensionError);
}
for i in 0..other.width() {
for k in 0..other.height() {
let p = other.get_pixel(i, k);
self.put_pixel(i + x, k + y, p);
}
}
Ok(())
}
/// Copies all of the pixels from one part of this image to another part of this image.
///
/// The destination rectangle of the copy is specified with the top-left corner placed at (x, y).
///
/// # Returns
/// `true` if the copy was successful, `false` if the image could not
/// be copied due to size constraints.
fn copy_within(&mut self, source: Rect, x: u32, y: u32) -> bool {
let Rect { x: sx, y: sy, width, height } = source;
let dx = x;
let dy = y;
assert!(sx < self.width() && dx < self.width());
assert!(sy < self.height() && dy < self.height());
if self.width() - dx.max(sx) < width || self.height() - dy.max(sy) < height {
return false;
}
// since `.rev()` creates a new type we would either have to go with dynamic dispatch for the ranges
// or have quite a lot of code bloat. A macro gives us static dispatch with less visible bloat.
macro_rules! copy_within_impl_ {
($xiter:expr, $yiter:expr) => {
for y in $yiter {
let sy = sy + y;
let dy = dy + y;
for x in $xiter {
let sx = sx + x;
let dx = dx + x;
let pixel = self.get_pixel(sx, sy);
self.put_pixel(dx, dy, pixel);
}
}
};
}
// check how the target and source rectangles relate to each other so we don't overwrite data before copying it.
match (sx < dx, sy < dy) {
(true, true) => copy_within_impl_!((0..width).rev(), (0..height).rev()),
(true, false) => copy_within_impl_!((0..width).rev(), 0..height),
(false, true) => copy_within_impl_!(0..width, (0..height).rev()),
(false, false) => copy_within_impl_!(0..width, 0..height),
}
true
}
/// Returns a mutable reference to the underlying image.
fn inner_mut(&mut self) -> &mut Self::InnerImage;
/// Returns a mutable subimage that is a view into this image.
/// If you want an immutable subimage instead, use [`GenericImageView::view`]
fn sub_image(
&mut self,
x: u32,
y: u32,
width: u32,
height: u32,
) -> SubImage<&mut Self::InnerImage> {
SubImage::new(self.inner_mut(), x, y, width, height)
}
}
/// A View into another image
///
/// Instances of this struct can be created using:
/// - [`GenericImage::sub_image`] to create a mutable view,
/// - [`GenericImageView::view`] to create an immutable view,
/// - [`SubImage::new`] to instantiate the struct directly.
pub struct SubImage<I> {
image: I,
xoffset: u32,
yoffset: u32,
xstride: u32,
ystride: u32,
}
/// Alias to access Pixel behind a reference
type DerefPixel<I> = <<I as Deref>::Target as GenericImageView>::Pixel;
/// Alias to access Subpixel behind a reference
type DerefSubpixel<I> = <DerefPixel<I> as Pixel>::Subpixel;
impl<I> SubImage<I> {
/// Construct a new subimage
pub fn new(image: I, x: u32, y: u32, width: u32, height: u32) -> SubImage<I> {
SubImage {
image,
xoffset: x,
yoffset: y,
xstride: width,
ystride: height,
}
}
/// Change the coordinates of this subimage.
pub fn change_bounds(&mut self, x: u32, y: u32, width: u32, height: u32) {
self.xoffset = x;
self.yoffset = y;
self.xstride = width;
self.ystride = height;
}
/// Convert this subimage to an ImageBuffer
pub fn to_image(&self) -> ImageBuffer<DerefPixel<I>, Vec<DerefSubpixel<I>>>
where
I: Deref,
I::Target: GenericImage + 'static,
{
let mut out = ImageBuffer::new(self.xstride, self.ystride);
let borrowed = self.image.deref();
for y in 0..self.ystride {
for x in 0..self.xstride {
let p = borrowed.get_pixel(x + self.xoffset, y + self.yoffset);
out.put_pixel(x, y, p);
}
}
out
}
}
#[allow(deprecated)]
impl<I> GenericImageView for SubImage<I>
where
I: Deref,
I::Target: GenericImageView + Sized,
{
type Pixel = DerefPixel<I>;
type InnerImageView = I::Target;
fn dimensions(&self) -> (u32, u32) {
(self.xstride, self.ystride)
}
fn bounds(&self) -> (u32, u32, u32, u32) {
(self.xoffset, self.yoffset, self.xstride, self.ystride)
}
fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
self.image.get_pixel(x + self.xoffset, y + self.yoffset)
}
fn view(&self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&Self::InnerImageView> {
let x = self.xoffset + x;
let y = self.yoffset + y;
SubImage::new(self.inner(), x, y, width, height)
}
fn
|
(&self) -> &Self::InnerImageView {
&self.image
}
}
#[allow(deprecated)]
impl<I> GenericImage for SubImage<I>
where
I: DerefMut,
I::Target: GenericImage + Sized,
{
type InnerImage = I::Target;
fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel {
self.image.get_pixel_mut(x + self.xoffset, y + self.yoffset)
}
fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
self.image
.put_pixel(x + self.xoffset, y + self.yoffset, pixel)
}
/// DEPRECATED: This method will be removed. Blend the pixel directly instead.
fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
self.image
.blend_pixel(x + self.xoffset, y + self.yoffset, pixel)
}
fn sub_image(
&mut self,
x: u32,
y: u32,
width: u32,
height: u32,
) -> SubImage<&mut Self::InnerImage> {
let x = self.xoffset + x;
let y = self.yoffset + y;
SubImage::new(self.inner_mut(), x, y, width, height)
}
fn inner_mut(&mut self) -> &mut Self::InnerImage {
&mut self.image
}
}
#[cfg(test)]
mod tests {
use std::io;
use std::path::Path;
use super::{ColorType, ImageDecoder, ImageResult, GenericImage, GenericImageView, load_rect, ImageFormat};
use crate::buffer::{GrayImage, ImageBuffer};
use crate::color::Rgba;
use crate::math::Rect;
#[test]
/// Test that alpha blending works as expected
fn test_image_alpha_blending() {
let mut target = ImageBuffer::new(1, 1);
target.put_pixel(0, 0, Rgba([255u8, 0, 0, 255]));
assert!(*target.get_pixel(0, 0) == Rgba([255, 0, 0, 255]));
target.blend_pixel(0, 0, Rgba([0, 255, 0, 255]));
assert!(*target.get_pixel(0, 0) == Rgba([0, 255, 0, 255]));
// Blending an alpha channel onto a solid background
target.blend_pixel(0, 0, Rgba([255, 0, 0, 127]));
assert!(*target.get_pixel(0, 0) == Rgba([127, 127, 0, 255]));
// Blending two alpha channels
target.put_pixel(0, 0, Rgba([0, 255, 0, 127]));
target.blend_pixel(0, 0, Rgba([255, 0, 0, 127]));
assert!(*target.get_pixel(0, 0) == Rgba([169, 85, 0, 190]));
}
#[test]
fn test_in_bounds() {
let mut target = ImageBuffer::new(2, 2);
target.put_pixel(0, 0, Rgba([255u8, 0, 0, 255]));
assert!(target.in_bounds(0, 0));
assert!(target.in_bounds(1, 0));
assert!(target.in_bounds(0, 1));
assert!(target.in_bounds(1, 1));
assert!(!target.in_bounds(2, 0));
assert!(!target.in_bounds(0, 2));
assert!(!target.in_bounds(2, 2));
}
#[test]
fn test_can_subimage_clone_nonmut() {
let mut source = ImageBuffer::new(3, 3);
source.put_pixel(1, 1, Rgba([255u8, 0, 0, 255]));
// A non-mutable copy of the source image
let source = source.clone();
// Clone a view into non-mutable to a separate buffer
let cloned = source.view(1, 1, 1, 1).to_image();
assert!(cloned.get_pixel(0, 0) == source.get_pixel(1, 1));
}
#[test]
fn test_can_nest_views() {
let mut source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
{
let mut sub1 = source.sub_image(0, 0, 2, 2);
let mut sub2 = sub1.sub_image(1, 1, 1, 1);
sub2.put_pixel(0, 0, Rgba([0, 0, 0, 0]));
}
assert_eq!(*source.get_pixel(1, 1), Rgba([0, 0, 0, 0]));
let view1 = source.view(0, 0, 2, 2);
assert_eq!(*source.get_pixel(1, 1), view1.get_pixel(1, 1));
let view2 = view1.view(1, 1, 1, 1);
assert_eq!(*source.get_pixel(1, 1), view2.get_pixel(0, 0));
}
#[test]
fn test_load_rect() {
struct MockDecoder {scanline_number: u64, scanline_bytes: u64}
impl<'a> ImageDecoder<'a> for MockDecoder {
type Reader = Box<dyn io::Read>;
fn dimensions(&self) -> (u32, u32) {(5, 5)}
fn color_type(&self) -> ColorType { ColorType::L8 }
fn into_reader(self) -> ImageResult<Self::Reader> {unimplemented!()}
fn scanline_bytes(&self) -> u64 { self.scanline_bytes }
}
const DATA: [u8; 25] = [0, 1, 2, 3, 4,
5, 6, 7, 8, 9,
10, 11, 12, 13, 14,
15, 16, 17, 18, 19,
20, 21, 22, 23, 24];
fn seek_scanline(m: &mut MockDecoder, n: u64) -> io::Result<()> {
m.scanline_number = n;
Ok(())
}
fn read_scanline(m: &mut MockDecoder, buf: &mut [u8]) -> io::Result<usize> {
let bytes_read = m.scanline_number * m.scanline_bytes;
if bytes_read >= 25 {
return Ok(0);
}
let len = m.scanline_bytes.min(25 - bytes_read);
buf[..(len as usize)].copy_from_slice(&DATA[(bytes_read as usize)..][..(len as usize)]);
m.scanline_number += 1;
Ok(len as usize)
}
for scanline_bytes in 1..30 {
let mut output = [0u8; 26];
load_rect(0, 0, 5, 5, &mut output, |_|{},
&mut MockDecoder{scanline_number:0, scanline_bytes},
seek_scanline, read_scanline).unwrap();
assert_eq!(output[0..25], DATA);
assert_eq!(output[25], 0);
output = [0u8; 26];
load_rect(3, 2, 1, 1, &mut output, |_|{},
&mut MockDecoder{scanline_number:0, scanline_bytes},
seek_scanline, read_scanline).unwrap();
assert_eq!(output[0..2], [13, 0]);
output = [0u8; 26];
load_rect(3, 2, 2, 2, &mut output, |_|{},
&mut MockDecoder{scanline_number:0, scanline_bytes},
seek_scanline, read_scanline).unwrap();
assert_eq!(output[0..5], [13, 14, 18, 19, 0]);
output = [0u8; 26];
load_rect(1, 1, 2, 4, &mut output, |_|{},
&mut MockDecoder{scanline_number:0, scanline_bytes},
seek_scanline, read_scanline).unwrap();
assert_eq!(output[0..9], [6, 7, 11, 12, 16, 17, 21, 22, 0]);
}
}
#[test]
fn test_image_format_from_path() {
fn from_path(s: &str) -> ImageResult<ImageFormat> {
ImageFormat::from_path(Path::new(s))
}
assert_eq!(from_path("./a.jpg").unwrap(), ImageFormat::Jpeg);
assert_eq!(from_path("./a.jpeg").unwrap(), ImageFormat::Jpeg);
assert_eq!(from_path("./a.JPEG").unwrap(), ImageFormat::Jpeg);
assert_eq!(from_path("./a.pNg").unwrap(), ImageFormat::Png);
assert_eq!(from_path("./a.gif").unwrap(), ImageFormat::Gif);
assert_eq!(from_path("./a.webp").unwrap(), ImageFormat::WebP);
assert_eq!(from_path("./a.tiFF").unwrap(), ImageFormat::Tiff);
assert_eq!(from_path("./a.tif").unwrap(), ImageFormat::Tiff);
assert_eq!(from_path("./a.tga").unwrap(), ImageFormat::Tga);
assert_eq!(from_path("./a.dds").unwrap(), ImageFormat::Dds);
assert_eq!(from_path("./a.bmp").unwrap(), ImageFormat::Bmp);
assert_eq!(from_path("./a.Ico").unwrap(), ImageFormat::Ico);
assert_eq!(from_path("./a.hdr").unwrap(), ImageFormat::Hdr);
assert_eq!(from_path("./a.pbm").unwrap(), ImageFormat::Pnm);
assert_eq!(from_path("./a.pAM").unwrap(), ImageFormat::Pnm);
assert_eq!(from_path("./a.Ppm").unwrap(), ImageFormat::Pnm);
assert_eq!(from_path("./a.pgm").unwrap(), ImageFormat::Pnm);
assert!(from_path("./a.txt").is_err());
assert!(from_path("./a").is_err());
}
#[test]
fn test_generic_image_copy_within_oob() {
let mut image: GrayImage = ImageBuffer::from_raw(4, 4, vec![0u8; 16]).unwrap();
assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 0, width: 5, height: 4 }, 0, 0));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 0, width: 4, height: 5 }, 0, 0));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 1, y: 0, width: 4, height: 4 }, 0, 0));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 0, width: 4, height: 4 }, 1, 0));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 1, width: 4, height: 4 }, 0, 0));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 0, width: 4, height: 4 }, 0, 1));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 1, y: 1, width: 4, height: 4 }, 0, 0));
}
#[test]
fn test_generic_image_copy_within_tl() {
let data = &[
00, 01, 02, 03,
04, 05, 06, 07,
08, 09, 10, 11,
12, 13, 14, 15
];
let expected = [
00, 01, 02, 03,
04, 00, 01, 02,
08, 04, 05, 06,
12, 08, 09, 10,
];
let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
assert!(image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 0, width: 3, height: 3 }, 1, 1));
assert_eq!(&image.into_raw(), &expected);
}
#[test]
fn test_generic_image_copy_within_tr() {
let data = &[
00, 01, 02, 03,
04, 05, 06, 07,
08, 09, 10, 11,
12, 13, 14, 15
];
let expected = [
00, 01, 02, 03,
01, 02, 03, 07,
05, 06, 07, 11,
09, 10, 11, 15
];
let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
assert!(image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 1, y: 0, width: 3, height: 3 }, 0, 1));
assert_eq!(&image.into_raw(), &expected);
}
#[test]
fn test_generic_image_copy_within_bl() {
let data = &[
00, 01, 02, 03,
04, 05, 06, 07,
08, 09, 10, 11,
12, 13, 14, 15
];
let expected = [
00, 04, 05, 06,
04, 08, 09, 10,
08, 12, 13, 14,
12, 13, 14, 15
];
let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
assert!(image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 1, width: 3, height: 3 }, 1, 0));
assert_eq!(&image.into_raw(), &expected);
}
#[test]
fn test_generic_image_copy_within_br() {
let data = &[
00, 01, 02, 03,
04, 05, 06, 07,
08, 09, 10, 11,
12, 13, 14, 15
];
let expected = [
05, 06, 07, 03,
09, 10, 11, 07,
13, 14, 15, 11,
12, 13, 14, 15
];
let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
assert!(image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 1, y: 1, width: 3, height: 3 }, 0, 0));
assert_eq!(&image.into_raw(), &expected);
}
}
|
inner
|
geometry.rs
|
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
use crate::Precision;
use crate::{
AsRaw, AsRawMut, ContextHandle, ContextHandling, ContextInteractions, CoordSeq,
PreparedGeometry, WKTWriter,
};
use c_vec::CVec;
use context_handle::PtrWrap;
use enums::*;
use error::{Error, GResult, PredicateType};
use functions::*;
use geos_sys::*;
use std::borrow::Borrow;
use std::convert::TryFrom;
use std::ffi::CString;
use std::sync::Arc;
use std::{self, str};
/// Representation of a GEOS geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 3.5)")
/// .expect("Invalid geometry");
/// assert_eq!(point_geom.get_x(), Ok(2.5));
/// assert_eq!(point_geom.get_y(), Ok(3.5));
/// ```
pub struct Geometry<'a> {
pub(crate) ptr: PtrWrap<*mut GEOSGeometry>,
pub(crate) context: Arc<ContextHandle<'a>>,
}
/// Representation of a GEOS geometry. Since it's only a view over another GEOS geometry's data,
/// only immutable operations are implemented on it.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0),\
/// (1 1, 2 1, 2 5, 1 5, 1 1),\
/// (8 5, 8 4, 9 4, 9 5, 8 5))")
/// .expect("Invalid geometry");
/// let point_geom = geom
/// .get_interior_ring_n(0)
/// .expect("failed to get const geometry");
/// ```
pub struct ConstGeometry<'a, 'b> {
pub(crate) ptr: PtrWrap<*const GEOSGeometry>,
pub(crate) original: &'b Geometry<'a>,
}
pub trait Geom<'a>:
AsRaw<RawType = GEOSGeometry> + ContextHandling<Context = Arc<ContextHandle<'a>>>
{
/// Returns the type of the geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 1 1, 1 2, 1 1, 0 0))")
/// .expect("Invalid geometry");
/// assert_eq!(geom.get_type(), Ok("Polygon".to_owned()));
/// ```
fn get_type(&self) -> GResult<String>;
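/// Returns the type of the geometry as a [`GeometryTypes`] enum variant.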
fn geometry_type(&self) -> GeometryTypes;
/// Checks if the geometry is valid.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 1 1, 1 2, 1 1, 0 0))")
/// .expect("Invalid geometry");
/// assert!(geom.is_valid() == false);
/// ```
fn is_valid(&self) -> bool;
/// Returns an explanation on why the geometry is invalid.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 1 1, 1 2, 1 1, 0 0))")
/// .expect("Invalid geometry");
/// assert_eq!(
/// geom.is_valid_reason(),
/// Ok("Self-intersection[0 0]".to_owned()),
/// );
/// ```
fn is_valid_reason(&self) -> GResult<String>;
/// Get the underlying geos CoordSeq object from the geometry
///
/// Note: this clones the underlying CoordSeq to avoid double free
/// (because CoordSeq handles the object ptr and the CoordSeq is still owned by the geos
/// geometry) if this method's performance becomes a bottleneck, feel free to open an issue,
/// we could skip this clone with cleaner code.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POINT (2 3)")
/// .expect("Invalid geometry");
/// let coord_seq = geom.get_coord_seq().expect("get_coord_seq failed");
///
/// assert_eq!(coord_seq.get_x(0), Ok(2.));
/// assert_eq!(coord_seq.get_y(0), Ok(3.));
/// ```
fn get_coord_seq(&self) -> GResult<CoordSeq<'a>>;
/// Returns the area of the geometry. Units are specified by the SRID of the given geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0))")
/// .expect("Invalid geometry");
/// assert_eq!(geom1.area(), Ok(60.));
/// ```
fn area(&self) -> GResult<f64>;
/// Returns a WKT representation of the geometry. It defaults to 2 dimensions output. Use
/// [`WKTWriter`] type directly if you want more control.
///
/// # Examples
///
/// ```
/// use geos::{Geom, Geometry, OutputDimension, WKTWriter};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)")
/// .expect("Invalid geometry");
/// assert_eq!(
/// point_geom.to_wkt().unwrap(),
/// "POINT (2.5000000000000000 2.5000000000000000)",
/// );
///
/// // A three dimension point will be output just as a 2 dimension:
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5 3)")
/// .expect("Invalid geometry");
/// assert_eq!(
/// point_geom.to_wkt().unwrap(),
/// "POINT (2.5000000000000000 2.5000000000000000)",
/// );
///
/// // To "fix" it, use `WKTWriter` instead:
/// let mut wkt_writer = WKTWriter::new()
/// .expect("Failed to create WKTWriter");
/// wkt_writer.set_output_dimension(OutputDimension::ThreeD);
/// assert_eq!(
/// wkt_writer.write(&point_geom).unwrap(),
/// "POINT Z (2.5000000000000000 2.5000000000000000 3.0000000000000000)",
/// );
/// ```
fn to_wkt(&self) -> GResult<String>;
/// Returns a WKT representation of the geometry with the given `precision`. It is a wrapper
/// around [`WKTWriter::set_rounding_precision`].
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry, WKTWriter};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
/// assert_eq!(point_geom.to_wkt_precision(2).unwrap(), "POINT (2.50 2.50)");
///
/// // It is a wrapper around:
/// let mut writer = WKTWriter::new().expect("Failed to create WKTWriter");
/// writer.set_rounding_precision(2);
/// assert_eq!(writer.write(&point_geom).unwrap(), "POINT (2.50 2.50)");
/// ```
fn to_wkt_precision(&self, precision: u32) -> GResult<String>;
/// Returns `true` if the geometry is a ring.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let circle = Geometry::new_from_wkt("LINESTRING(0 0, 0 1, 1 1, 0 0)").expect("Invalid geometry");
/// assert_eq!(circle.is_ring(), Ok(true));
/// ```
fn is_ring(&self) -> GResult<bool>;
/// Returns `true` if `self` shares any portion of space with `other`. So if any of this is
/// `true`:
///
/// * `self` overlaps `other`
/// * `self` touches `other`
/// * `self` is within `other`
///
/// Then `intersects` will return `true` as well.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT(0 0)").expect("invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING(2 0, 0 2)").expect("invalid geometry");
/// let geom3 = Geometry::new_from_wkt("LINESTRING(0 0, 0 2)").expect("invalid geometry");
///
/// assert_eq!(geom1.intersects(&geom2), Ok(false));
/// assert_eq!(geom1.intersects(&geom3), Ok(true));
/// ```
fn intersects<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool>;
/// Returns `true` if `self` and `other` spatially cross, that is, they share
/// some but not all interior points.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("LINESTRING(1 1,2 2)").expect("invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING(2 1,1 2)").expect("invalid geometry");
///
/// assert_eq!(geom1.crosses(&geom2), Ok(true));
/// ```
fn crosses<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool>;
/// Returns `true` if `self` doesn't:
///
/// * Overlap `other`
/// * Touch `other`
/// * Is within `other`
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT(0 0)").expect("invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING(2 0, 0 2)").expect("invalid geometry");
/// let geom3 = Geometry::new_from_wkt("LINESTRING(0 0, 0 2)").expect("invalid geometry");
///
/// assert_eq!(geom1.disjoint(&geom2), Ok(true));
/// assert_eq!(geom1.disjoint(&geom3), Ok(false));
/// ```
fn disjoint<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool>;
/// Returns `true` if the only points in common between `self` and `other` lie in the union of
/// the boundaries of `self` and `other`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("LINESTRING(0 0, 1 1, 0 2)").expect("invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT(1 1)").expect("invalid geometry");
///
/// assert_eq!(geom1.touches(&geom2), Ok(false));
///
/// let geom2 = Geometry::new_from_wkt("POINT(0 2)").expect("invalid geometry");
///
/// assert_eq!(geom1.touches(&geom2), Ok(true));
/// ```
fn touches<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool>;
/// Returns `true` if `self` spatially overlaps `other`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT(1 0.5)").expect("invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING(1 0, 1 1, 3 5)").expect("invalid geometry");
///
/// assert_eq!(geom1.overlaps(&geom2), Ok(false));
///
/// let geom1 = geom1.buffer(3., 8).expect("buffer failed");
/// let geom2 = geom2.buffer(0.5, 8).expect("buffer failed");
///
/// assert_eq!(geom1.overlaps(&geom2), Ok(true));
/// ```
fn overlaps<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool>;
/// Returns `true` if `self` is completely inside `other`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POINT(50 50)").expect("invalid geometry");
/// let small_geom = geom.buffer(20., 8).expect("buffer failed");
/// let big_geom = geom.buffer(40., 8).expect("buffer failed");
///
/// assert_eq!(small_geom.within(&small_geom), Ok(true));
/// assert_eq!(small_geom.within(&big_geom), Ok(true));
/// assert_eq!(big_geom.within(&small_geom), Ok(false));
/// ```
fn within<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool>;
/// Checks if the two [`Geometry`] objects are equal.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT (3.8 3.8)").expect("Invalid geometry");
/// let geom3 = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
///
/// assert!(geom1.equals(&geom2) == Ok(false));
/// assert!(geom1.equals(&geom3) == Ok(true));
/// ```
///
/// Note that you can also use this method through the `PartialEq` trait:
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT (3.8 3.8)").expect("Invalid geometry");
/// let geom3 = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
///
/// assert!(geom1 != geom2);
/// assert!(geom1 == geom3);
/// ```
fn equals<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool>;
/// Checks if the two [`Geometry`] objects are exactly equal.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT (3.8 3.8)").expect("Invalid geometry");
/// let geom3 = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
///
/// assert_eq!(geom1.equals_exact(&geom2, 0.1), Ok(false));
/// assert_eq!(geom1.equals_exact(&geom3, 0.1), Ok(true));
/// ```
fn equals_exact<'b, G: Geom<'b>>(&self, other: &G, precision: f64) -> GResult<bool>;
/// Returns `true` if no point of `other` is outside of `self`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POINT (1 2)").expect("Invalid geometry");
/// let little_geom = geom.buffer(10., 8).expect("buffer failed");
/// let big_geom = geom.buffer(20., 8).expect("buffer failed");
///
/// assert_eq!(little_geom.covers(&big_geom), Ok(false));
/// assert_eq!(big_geom.covers(&little_geom), Ok(true));
/// ```
fn covers<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool>;
/// Returns `true` if no point of `self` is outside of `other`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POINT (1 2)").expect("Invalid geometry");
/// let little_geom = geom.buffer(10., 8).expect("buffer failed");
/// let big_geom = geom.buffer(20., 8).expect("buffer failed");
///
/// assert_eq!(little_geom.covered_by(&big_geom), Ok(true));
/// assert_eq!(big_geom.covered_by(&little_geom), Ok(false));
/// ```
fn covered_by<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool>;
/// Returns `true` if no points of the `other` geometry is outside the exterior of `self`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0))").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
///
/// assert_eq!(geom1.contains(&geom2), Ok(true));
/// ```
fn contains<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool>;
/// Returns a geometry which represents all points whose distance from `self` is less than or
/// equal to distance.
///
/// You can find nice examples about this in [postgis](https://postgis.net/docs/ST_Buffer.html)
/// documentation.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POINT(1 3)").expect("Invalid geometry");
/// let buffer_geom = geom.buffer(50., 2).expect("buffer failed");
///
/// assert_eq!(buffer_geom.to_wkt_precision(1).unwrap(),
/// "POLYGON ((51.0 3.0, 36.4 -32.4, 1.0 -47.0, -34.4 -32.4, -49.0 3.0, -34.4 38.4, \
/// 1.0 53.0, 36.4 38.4, 51.0 3.0))");
/// ```
fn buffer(&self, width: f64, quadsegs: i32) -> GResult<Geometry<'a>>;
/// Returns `true` if the given geometry is empty.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::create_empty_polygon().expect("Invalid geometry");
/// assert_eq!(geom.is_empty(), Ok(true));
///
/// let geom = Geometry::new_from_wkt("POLYGON EMPTY").expect("Invalid geometry");
/// assert_eq!(geom.is_empty(), Ok(true));
///
/// let geom = Geometry::new_from_wkt("POINT(1 3)").expect("Invalid geometry");
/// assert_eq!(geom.is_empty(), Ok(false));
/// ```
fn is_empty(&self) -> GResult<bool>;
/// Returns true if the given geometry has no anomalous geometric points, such as self
/// intersection or self tangency.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POINT (2.5 2.5)")
/// .expect("Invalid geometry");
/// assert_eq!(geom.is_simple(), Ok(true));
///
/// let geom = Geometry::new_from_wkt("LINESTRING(1 1,2 2,2 3.5,1 3,1 2,2 1)")
/// .expect("Invalid geometry");
/// assert_eq!(geom.is_simple(), Ok(false));
/// ```
fn is_simple(&self) -> GResult<bool>;
/// Returns a geometry which represents part of `self` that doesn't intersect with `other`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("LINESTRING(50 100, 50 200)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING(50 50, 50 150)").expect("Invalid geometry");
///
/// let difference_geom = geom1.difference(&geom2).expect("envelope failed");
///
/// assert_eq!(difference_geom.to_wkt_precision(1).unwrap(),
/// "LINESTRING (50.0 150.0, 50.0 200.0)");
/// ```
fn difference<'b, G: Geom<'b>>(&self, other: &G) -> GResult<Geometry<'a>>;
/// Returns the minimum bounding box of the given geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POINT(1 3)").expect("Invalid geometry");
/// let envelope_geom = geom.envelope().expect("envelope failed");
///
/// assert_eq!(envelope_geom.to_wkt_precision(1).unwrap(), "POINT (1.0 3.0)");
///
/// let geom = Geometry::new_from_wkt("LINESTRING(0 0, 1 3)").expect("Invalid geometry");
/// let envelope_geom = geom.envelope().expect("envelope failed");
///
/// assert_eq!(envelope_geom.to_wkt_precision(1).unwrap(),
/// "POLYGON ((0.0 0.0, 1.0 0.0, 1.0 3.0, 0.0 3.0, 0.0 0.0))");
/// ```
fn envelope(&self) -> GResult<Geometry<'a>>;
/// Returns a geometry which represents the parts of `self` and `other` that don't intersect.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("LINESTRING(50 100, 50 200)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING(50 50, 50 150)").expect("Invalid geometry");
///
/// let sym_diff_geom = geom1.sym_difference(&geom2).expect("sym_difference failed");
///
/// assert_eq!(sym_diff_geom.to_wkt_precision(1).unwrap(),
/// "MULTILINESTRING ((50.0 150.0, 50.0 200.0), (50.0 50.0, 50.0 100.0))");
/// ```
fn sym_difference<'b, G: Geom<'b>>(&self, other: &G) -> GResult<Geometry<'a>>;
/// Aggregates the given geometry with another one.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT(1 2)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT(3 4)").expect("Invalid geometry");
///
/// let union_geom = geom1.union(&geom2).expect("union failed");
///
/// assert_eq!(union_geom.to_wkt_precision(1).unwrap(), "MULTIPOINT (1.0 2.0, 3.0 4.0)");
/// ```
fn union<'b, G: Geom<'b>>(&self, other: &G) -> GResult<Geometry<'a>>;
/// Returns the geometric center or (equivalently) the center of mass of the given geometry as
/// a point.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("MULTIPOINT(-1 0, -1 2, -1 3, -1 4, -1 7, 0 1, 0 3, 1 1)")
/// .expect("Invalid geometry");
/// let centroid = geom.get_centroid().expect("failed to get centroid");
///
/// assert_eq!(centroid.to_wkt_precision(1).unwrap(), "POINT (-0.5 2.6)");
/// ```
fn get_centroid(&self) -> GResult<Geometry<'a>>;
/// Documentation from [postgis](https://postgis.net/docs/ST_UnaryUnion.html):
///
/// > Unlike ST_Union, ST_UnaryUnion does dissolve boundaries between components of a
/// > multipolygon (invalid) and does perform union between the components of a
/// > geometrycollection. Each component of the input geometry is assumed to be valid, so you
/// > won't get a valid multipolygon out of a bow-tie polygon (invalid).
/// >
/// > You may use this function to node a set of linestrings. You may mix ST_UnaryUnion with
/// > ST_Collect to fine-tune how many geometries at once you want to dissolve to be nice on
/// > both memory size and CPU time, finding the balance between ST_Union and ST_MemUnion.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0))")
/// .expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POLYGON((1 1, 2 1, 2 5, 1 5, 1 1))")
/// .expect("Invalid geometry");
///
/// let geom = Geometry::create_multipolygon(vec![geom1, geom2])
/// .expect("Failed to build multipolygon");
///
/// let union_geom = geom.unary_union().expect("unary_union failed");
///
/// assert_eq!(union_geom.to_wkt_precision(1).unwrap(),
/// "POLYGON ((0.0 0.0, 0.0 6.0, 10.0 6.0, 10.0 0.0, 0.0 0.0))");
/// ```
fn unary_union(&self) -> GResult<Geometry<'a>>;
/// Create a voronoi diagram.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let input = Geometry::new_from_wkt("MULTIPOINT((100 200), (105 202), (110 200), (140 230),
/// (210 240), (220 190), (170 170), (170 260),
/// (213 245), (220 190))")
/// .expect("Invalid geometry");
/// let mut expected = Geometry::new_from_wkt(
/// "GEOMETRYCOLLECTION(POLYGON ((-20 50, -20 380, -3.75 380, 105 235, 105 115, 77.14285714285714 50, -20 50)),\
/// POLYGON ((247 50, 77.14285714285714 50, 105 115, 145 195, 178.33333333333334 211.66666666666666, 183.51851851851853 208.7037037037037, 247 50)),\
/// POLYGON ((-3.75 380, 20.000000000000007 380, 176.66666666666666 223.33333333333334, 178.33333333333334 211.66666666666666, 145 195, 105 235, -3.75 380)),\
/// POLYGON ((105 115, 105 235, 145 195, 105 115)),\
/// POLYGON ((20.000000000000007 380, 255 380, 176.66666666666666 223.33333333333334, 20.000000000000007 380)),\
/// POLYGON ((255 380, 340 380, 340 240, 183.51851851851853 208.7037037037037, 178.33333333333334 211.66666666666666, 176.66666666666666 223.33333333333334, 255 380)),\
/// POLYGON ((340 240, 340 50, 247 50, 183.51851851851853 208.7037037037037, 340 240)))")
/// .expect("Invalid geometry");
///
/// let mut voronoi = input.voronoi(None::<&Geometry>, 6., false)
/// .expect("voronoi failed");
///
/// expected.normalize().expect("normalize failed");
/// voronoi.normalize().expect("normalize failed");
///
/// assert_eq!(expected.equals(&voronoi), Ok(true));
/// ```
fn voronoi<'b, G: Geom<'b>>(
&self,
envelope: Option<&G>,
tolerance: f64,
only_edges: bool,
) -> GResult<Geometry<'a>>;
/// Returns a geometry representing the intersection between `self` and `other`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let mut geom1 = Geometry::new_from_wkt("POINT(0 0)").expect("Invalid geometry");
/// let mut geom2 = Geometry::new_from_wkt("LINESTRING(2 0, 0 2)").expect("Invalid geometry");
///
/// let intersection_geom = geom1.intersection(&geom2).expect("intersection failed");
///
/// // No intersection.
/// assert_eq!(intersection_geom.is_empty(), Ok(true));
///
/// // We slightly change the linestring so we have an intersection:
/// let mut geom2 = Geometry::new_from_wkt("LINESTRING(0 0, 0 2)").expect("Invalid geometry");
///
/// let intersection_geom = geom1.intersection(&geom2).expect("intersection failed");
///
/// // Intersection!
/// assert_eq!(intersection_geom.to_wkt_precision(1).unwrap(), "POINT (0.0 0.0)");
/// ```
fn intersection<'b, G: Geom<'b>>(&self, other: &G) -> GResult<Geometry<'a>>;
/// Documentation from [postgis](https://postgis.net/docs/ST_ConvexHull.html):
///
/// > The convex hull of a geometry represents the minimum convex geometry that encloses all
/// > geometries within the set.
/// >
/// > One can think of the convex hull as the geometry you get by wrapping an elastic band
/// > around a set of geometries. This is different from a concave hull which is analogous to
/// > shrink-wrapping your geometries.
/// >
/// > It is usually used with MULTI and Geometry Collections. Although it is not an aggregate -
/// > you can use it in conjunction with ST_Collect to get the convex hull of a set of points.
/// > ST_ConvexHull(ST_Collect(somepointfield)).
/// >
/// > It is often used to determine an affected area based on a set of point observations.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let mut geom1 = Geometry::new_from_wkt("MULTILINESTRING((100 190,10 8),
/// (150 10, 20 30))")
/// .expect("Invalid geometry");
/// let mut geom2 = Geometry::new_from_wkt("MULTIPOINT(50 5, 150 30, 50 10, 10 10)")
/// .expect("Invalid geometry");
///
/// let geom = geom1.union(&geom2).expect("union failed");
/// let convex_hull_geom = geom.convex_hull().expect("convex_hull failed");
///
/// assert_eq!(convex_hull_geom.to_wkt_precision(1).unwrap(),
/// "POLYGON ((50.0 5.0, 10.0 8.0, 10.0 10.0, 100.0 190.0, 150.0 30.0, 150.0 10.0, 50.0 5.0))");
/// ```
fn convex_hull(&self) -> GResult<Geometry<'a>>;
/// Returns the closure of the combinatorial boundary of `self`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING(1 1,0 0, -1 1)").expect("Invalid geometry");
/// let boundary_geom = geom.boundary().expect("boundary failed");
///
/// assert_eq!(boundary_geom.to_wkt_precision(1).unwrap(), "MULTIPOINT (1.0 1.0, -1.0 1.0)");
/// ```
fn boundary(&self) -> GResult<Geometry<'a>>;
/// Returns `true` if `self` has a Z coordinate.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POINT(1 2 3)").expect("Invalid geometry");
/// assert_eq!(geom.has_z(), Ok(true));
///
/// let geom = Geometry::new_from_wkt("POINT(1 2)").expect("Invalid geometry");
/// assert_eq!(geom.has_z(), Ok(false));
/// ```
fn has_z(&self) -> GResult<bool>;
/// Returns `true` if the start and end points are coincident.
///
/// Only works on `LineString` and `MultiLineString`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING(0 0, 1 1)").expect("Invalid geometry");
/// assert_eq!(geom.is_closed(), Ok(false));
///
/// let geom = Geometry::new_from_wkt("LINESTRING(0 0, 0 1, 1 1, 0 0)").expect("Invalid geometry");
/// assert_eq!(geom.is_closed(), Ok(true));
///
/// let geom = Geometry::new_from_wkt("MULTILINESTRING((0 0, 0 1, 1 1, 0 0),(0 0, 1 1))")
/// .expect("Invalid geometry");
/// assert_eq!(geom.is_closed(), Ok(false));
/// ```
fn is_closed(&self) -> GResult<bool>;
/// Returns the length of `self`. The unit depends on the SRID.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING(743238 2967416,743238 2967450)")
/// .expect("Invalid geometry");
///
/// assert_eq!(
/// geom.length().map(|x| format!("{:.2}", x)).unwrap(),
/// "34.00",
/// );
/// ```
fn length(&self) -> GResult<f64>;
/// Returns the distance between `self` and `other`. The unit depends on the SRID.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT (1 2)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT (2 2)").expect("Invalid geometry");
///
/// assert_eq!(geom1.distance(&geom2).map(|x| format!("{:.2}", x)).unwrap(), "1.00");
/// ```
fn distance<'b, G: Geom<'b>>(&self, other: &G) -> GResult<f64>;
/// Returns the distance between `self` and `other`, computed using a spatial index
/// for better performance. The unit depends on the SRID.
///
/// Available using the `v3_7_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT (1 2)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT (2 2)").expect("Invalid geometry");
///
/// assert_eq!(geom1.distance_indexed(&geom2).map(|x| format!("{:.2}", x)).unwrap(), "1.00");
/// ```
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn distance_indexed<'b, G: Geom<'b>>(&self, other: &G) -> GResult<f64>;
/// Returns the Hausdorff distance between `self` and `other`. The unit depends on the SRID.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT (1 2)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT (2 2)").expect("Invalid geometry");
///
/// assert_eq!(geom1.hausdorff_distance(&geom2).map(|x| format!("{:.2}", x)).unwrap(), "1.00");
/// ```
fn hausdorff_distance<'b, G: Geom<'b>>(&self, other: &G) -> GResult<f64>;
/// Returns the Hausdorff distance between `self` and `other`, densifying the
/// segments by the given fraction first. The unit depends on the SRID.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT (1 2)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT (2 2)").expect("Invalid geometry");
///
/// assert_eq!(geom1.hausdorff_distance_densify(&geom2, 1.).map(|x| format!("{:.2}", x))
/// .unwrap(), "1.00");
/// ```
fn hausdorff_distance_densify<'b, G: Geom<'b>>(
&self,
other: &G,
distance_frac: f64,
) -> GResult<f64>;
/// Returns the Fréchet distance between `self` and `other`. The unit depends on the SRID.
///
/// Available using the `v3_7_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("LINESTRING (0 0, 100 0)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING (0 0, 50 50, 100 0)").expect("Invalid geometry");
///
/// assert_eq!(geom1.frechet_distance(&geom2).map(|x| format!("{:.2}", x)).unwrap(), "70.71");
/// ```
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn frechet_distance<'b, G: Geom<'b>>(&self, other: &G) -> GResult<f64>;
/// Returns the Fréchet distance between `self` and `other`, densifying the
/// segments by the given fraction first. The unit depends on the SRID.
///
/// Available using the `v3_7_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("LINESTRING (0 0, 100 0)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING (0 0, 50 50, 100 0)").expect("Invalid geometry");
///
/// assert_eq!(geom1.frechet_distance_densify(&geom2, 1.).map(|x| format!("{:.2}", x))
/// .unwrap(), "70.71");
/// ```
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn frechet_distance_densify<'b, G: Geom<'b>>(
&self,
other: &G,
distance_frac: f64,
) -> GResult<f64>;
/// Returns the length of the given geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING (1 2, 3 4, 5 6)")
/// .expect("Invalid geometry");
///
/// assert_eq!(geom.get_length().map(|x| format!("{:.2}", x)).unwrap(), "5.66");
/// ```
fn get_length(&self) -> GResult<f64>;
/// Documentation from [postgis](https://postgis.net/docs/ST_Snap.html):
///
/// > Snaps the vertices and segments of a geometry to another Geometry's vertices. A snap
/// > distance tolerance is used to control where snapping is performed. The result geometry is
/// > the input geometry with the vertices snapped. If no snapping occurs then the input
/// > geometry is returned unchanged.
/// >
/// > Snapping one geometry to another can improve robustness for overlay operations by
/// > eliminating nearly-coincident edges (which cause problems during noding and intersection
/// > calculation).
/// >
/// > Too much snapping can result in invalid topology being created, so the number and location
/// > of snapped vertices is decided using heuristics to determine when it is safe to snap. This
/// > can result in some potential snaps being omitted, however.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("MULTIPOLYGON(((26 125, 26 200, 126 200, 126 125, 26 125),
/// (51 150, 101 150, 76 175, 51 150)),
/// ((151 100, 151 200, 176 175, 151 100)))")
/// .expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING(5 107, 54 84, 101 100)")
/// .expect("Invalid geometry");
///
/// let distance = geom1.distance(&geom2).expect("distance failed");
/// let snap_geom = geom1.snap(&geom2, distance * 1.25).expect("snap failed");
///
/// assert_eq!(snap_geom.to_wkt_precision(1).unwrap(),
/// "MULTIPOLYGON (((5.0 107.0, 26.0 200.0, 126.0 200.0, 126.0 125.0, 101.0 100.0, 54.0 84.0, 5.0 107.0), \
/// (51.0 150.0, 101.0 150.0, 76.0 175.0, 51.0 150.0)), \
/// ((151.0 100.0, 151.0 200.0, 176.0 175.0, 151.0 100.0)))");
/// ```
fn snap<'b, G: Geom<'b>>(&self, other: &G, tolerance: f64) -> GResult<Geometry<'a>>;
/// Returns the unique points of `self`.
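///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 0 1, 1 1, 0 0))")
///     .expect("Invalid geometry");
/// let unique_points = geom.extract_unique_points().expect("extract_unique_points failed");
///
/// assert_eq!(unique_points.to_wkt_precision(1).unwrap(),
///            "MULTIPOINT (0.0 0.0, 0.0 1.0, 1.0 1.0)");
/// ```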
fn extract_unique_points(&self) -> GResult<Geometry<'a>>;
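/// Returns the nearest points of `self` and `other` as a [`CoordSeq`] containing two
/// coordinates: the first one lies on `self`, the second one on `other`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT (1 1)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING (3 1, 3 4)").expect("Invalid geometry");
///
/// let nearest = geom1.nearest_points(&geom2).expect("nearest_points failed");
/// assert_eq!(nearest.get_x(0), Ok(1.));
/// assert_eq!(nearest.get_x(1), Ok(3.));
/// ```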
fn nearest_points<'b, G: Geom<'b>>(&self, other: &G) -> GResult<CoordSeq<'a>>;
/// Returns the X position. The given `Geometry` must be a `Point`, otherwise it'll fail.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (1.5 2.5 3.5)").expect("Invalid geometry");
/// assert!(point_geom.get_x() == Ok(1.5));
/// ```
fn get_x(&self) -> GResult<f64>;
/// Returns the Y position. The given `Geometry` must be a `Point`, otherwise it'll fail.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (1.5 2.5 3.5)").expect("Invalid geometry");
/// assert!(point_geom.get_y() == Ok(2.5));
/// ```
fn get_y(&self) -> GResult<f64>;
/// Returns the Z position. The given `Geometry` must be a `Point`, otherwise it'll fail.
///
/// Available using the `v3_7_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5 4.0)").expect("Invalid geometry");
/// assert!(point_geom.get_z() == Ok(4.0));
/// ```
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn get_z(&self) -> GResult<f64>;
/// Returns the nth point of the given geometry.
///
/// The given `Geometry` must be a `LineString`, otherwise it'll fail.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING (1 2, 3 4, 5 6)")
/// .expect("Invalid geometry");
/// let nth_point = geom.get_point_n(1).expect("get_point_n failed");
///
/// assert_eq!(nth_point.to_wkt_precision(1).unwrap(), "POINT (3.0 4.0)");
/// ```
fn get_point_n(&self, n: usize) -> GResult<Geometry<'a>>;
/// Returns the start point of `self`.
///
/// The given `Geometry` must be a `LineString`, otherwise it'll fail.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING (1 2, 3 4)")
/// .expect("Invalid geometry");
/// let start_point = geom.get_start_point().expect("get_start_point failed");
///
/// assert_eq!(start_point.to_wkt_precision(1).unwrap(), "POINT (1.0 2.0)");
/// ```
fn get_start_point(&self) -> GResult<Geometry<'a>>;
/// Returns the end point of `self`.
///
/// The given `Geometry` must be a `LineString`, otherwise it'll fail.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING (1 2, 3 4)")
/// .expect("Invalid geometry");
/// let end_point = geom.get_end_point().expect("get_end_point failed");
///
/// assert_eq!(end_point.to_wkt_precision(1).unwrap(), "POINT (3.0 4.0)");
/// ```
fn get_end_point(&self) -> GResult<Geometry<'a>>;
/// Returns the number of points of `self`.
///
/// The given `Geometry` must be a `LineString`, otherwise it'll fail.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING (1 2, 3 4)")
/// .expect("Invalid geometry");
///
/// assert_eq!(geom.get_num_points(), Ok(2));
/// ```
fn get_num_points(&self) -> GResult<usize>;
/// Returns the number of interior rings.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0),\
/// (1 1, 2 1, 2 5, 1 5, 1 1),\
/// (8 5, 8 4, 9 4, 9 5, 8 5))")
/// .expect("Invalid geometry");
///
/// assert_eq!(geom.get_num_interior_rings(), Ok(2));
/// ```
fn get_num_interior_rings(&self) -> GResult<usize>;
/// Returns the number of coordinates inside `self`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0))")
/// .expect("Invalid geometry");
///
/// assert_eq!(geom.get_num_coordinates(), Ok(5));
/// ```
fn get_num_coordinates(&self) -> GResult<usize>;
/// Returns the number of dimensions used in `self`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0))")
/// .expect("Invalid geometry");
///
/// assert_eq!(geom.get_num_dimensions(), Ok(2));
/// ```
fn get_num_dimensions(&self) -> GResult<usize>;
/// Returns the coordinate dimension of the geometry.
///
/// # Example
///
/// ```
/// use geos::{Dimensions, Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5 4.0)").expect("Invalid geometry");
/// assert!(point_geom.get_coordinate_dimension() == Ok(Dimensions::ThreeD));
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 4.0)").expect("Invalid geometry");
/// assert!(point_geom.get_coordinate_dimension() == Ok(Dimensions::TwoD));
/// ```
fn get_coordinate_dimension(&self) -> GResult<Dimensions>;
/// This function attempts to return a valid representation of `self`.
///
/// Available using the `v3_8_0` feature.
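///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// // A self-intersecting "bow-tie" polygon:
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 1 1, 1 0, 0 1, 0 0))")
///     .expect("Invalid geometry");
/// assert_eq!(geom.is_valid(), false);
///
/// let valid_geom = geom.make_valid().expect("make_valid failed");
/// assert_eq!(valid_geom.is_valid(), true);
/// ```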
#[cfg(any(feature = "v3_8_0", feature = "dox"))]
fn make_valid(&self) -> GResult<Geometry<'a>>;
/// Returns the number of geometries.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING(77.29 29.07,77.42 29.26,77.27 29.31,77.29 29.07)")
/// .expect("Invalid geometry");
/// assert_eq!(geom.get_num_geometries(), Ok(1));
///
/// let geom = Geometry::new_from_wkt("GEOMETRYCOLLECTION(MULTIPOINT(-2 3 , -2 2),\
/// LINESTRING(5 5 ,10 10),\
/// POLYGON((-7 4.2,-7.1 5,-7.1 4.3,-7 4.2)))")
/// .expect("Invalid geometry");
/// assert_eq!(geom.get_num_geometries(), Ok(3));
/// ```
fn get_num_geometries(&self) -> GResult<usize>;
/// Returns the SRID of `self`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let mut point_geom = Geometry::new_from_wkt("POINT (2.5 2.5 4.0)")
/// .expect("Invalid geometry");
/// point_geom.set_srid(4326);
/// assert_eq!(point_geom.get_srid(), Ok(4326));
/// ```
fn get_srid(&self) -> GResult<usize>;
/// Returns the precision of `self`.
///
/// Available using the `v3_6_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5 4.0)").expect("Invalid geometry");
/// assert_eq!(point_geom.get_precision().map(|x| format!("{:.2}", x)).unwrap(), "0.00");
/// ```
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn get_precision(&self) -> GResult<f64>;
/// Sets the precision of the geometry, returning a new `Geometry` with the given
/// precision (`self` itself is left unchanged).
///
/// Available using the `v3_6_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry, Precision};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5 4.0)").expect("Invalid geometry");
///
/// let new_geom = point_geom.set_precision(1., Precision::KeepCollapsed)
///                          .expect("set_precision failed");
/// assert_eq!(new_geom.get_precision().map(|x| format!("{:.2}", x)).unwrap(), "1.00");
/// ```
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn set_precision(&self, grid_size: f64, flags: Precision) -> GResult<Geometry<'a>>;
/// Returns the biggest X of the geometry.
///
/// Available using the `v3_7_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let line = Geometry::new_from_wkt("LINESTRING(1 3 4, 5 6 7)").expect("Invalid WKT");
/// assert_eq!(line.get_x_max(), Ok(5.));
/// ```
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn get_x_max(&self) -> GResult<f64>;
/// Returns the smallest X of the geometry.
///
/// Available using the `v3_7_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let line = Geometry::new_from_wkt("LINESTRING(1 3 4, 5 6 7)").expect("Invalid WKT");
/// assert_eq!(line.get_x_min(), Ok(1.));
/// ```
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn get_x_min(&self) -> GResult<f64>;
/// Returns the biggest Y of the geometry.
///
/// Available using the `v3_7_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let line = Geometry::new_from_wkt("LINESTRING(1 3 4, 5 6 7)").expect("Invalid WKT");
/// assert_eq!(line.get_y_max(), Ok(6.));
/// ```
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn get_y_max(&self) -> GResult<f64>;
/// Returns the smallest Y of the geometry.
///
/// Available using the `v3_7_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let line = Geometry::new_from_wkt("LINESTRING(1 3 4, 5 6 7)").expect("Invalid WKT");
/// assert_eq!(line.get_y_min(), Ok(3.));
/// ```
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn get_y_min(&self) -> GResult<f64>;
/// Returns the smallest distance by which a vertex of `self` could be moved to produce an
/// invalid geometry.
///
/// Available using the `v3_6_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING(1 3 4, 5 6 7)").expect("Invalid WKT");
/// assert_eq!(geom.minimum_clearance().map(|x| format!("{:.8}", x)).unwrap(), "5.00000000");
/// ```
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn minimum_clearance(&self) -> GResult<f64>;
/// Returns the two-point LineString spanning `self`'s minimum clearance.
///
/// Available using the `v3_6_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON ((0 0, 1 0, 1 1, 0.5 3.2e-4, 0 0))")
/// .expect("Invalid WKT");
/// let line = geom.minimum_clearance_line().expect("minimum_clearance_line failed");
/// assert_eq!(line.to_wkt_precision(1).unwrap(), "LINESTRING (0.5 0.0, 0.5 0.0)");
/// ```
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn minimum_clearance_line(&self) -> GResult<Geometry<'a>>;
/// Returns the minimum rotated rectangle enclosing `self`.
///
/// Available using the `v3_6_0` feature.
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn minimum_rotated_rectangle(&self) -> GResult<Geometry<'a>>;
/// Returns a two-point LineString whose length is the minimum width of `self`.
///
/// Available using the `v3_6_0` feature.
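///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 5, 0 5, 0 0))")
///     .expect("Invalid geometry");
/// let width_line = geom.minimum_width().expect("minimum_width failed");
///
/// // The length of the returned two-point LineString is the minimum width (5 here).
/// assert_eq!(width_line.length().map(|x| format!("{:.2}", x)).unwrap(), "5.00");
/// ```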
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn minimum_width(&self) -> GResult<Geometry<'a>>;
/// Returns a [Delaunay triangulation](https://en.wikipedia.org/wiki/Delaunay_triangulation)
/// around the vertices of `self`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POLYGON((175 150, 20 40, 50 60, 125 100, 175 150))")
/// .expect("Invalid WKT");
/// let geom2 = Geometry::new_from_wkt("POINT(110 170)").expect("Invalid WKT");
/// let geom2 = geom2.buffer(20., 8).expect("buffer failed");
///
/// let geom = geom1.union(&geom2).expect("union failed");
///
/// let final_geom = geom.delaunay_triangulation(0.001, false).expect("delaunay_triangulation failed");
/// ```
fn delaunay_triangulation(&self, tolerance: f64, only_edges: bool) -> GResult<Geometry<'a>>;
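/// Returns a point `d` units along `self`, measured from its first point. `self`
/// must be a `LineString`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING(0 0, 10 0)").expect("Invalid geometry");
/// let point = geom.interpolate(5.).expect("interpolate failed");
///
/// assert_eq!(point.to_wkt_precision(1).unwrap(), "POINT (5.0 0.0)");
/// ```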
fn interpolate(&self, d: f64) -> GResult<Geometry<'a>>;
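/// Same as `interpolate`, except `d` is a fraction of the total length of `self`
/// (between 0 and 1) rather than an absolute distance.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINESTRING(0 0, 10 0)").expect("Invalid geometry");
/// let point = geom.interpolate_normalized(0.5).expect("interpolate_normalized failed");
///
/// assert_eq!(point.to_wkt_precision(1).unwrap(), "POINT (5.0 0.0)");
/// ```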
fn interpolate_normalized(&self, d: f64) -> GResult<Geometry<'a>>;
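/// Returns the distance, measured along `self` from its first point, of the point on
/// `self` closest to the given point `p`. `p` must be a `Point`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let line = Geometry::new_from_wkt("LINESTRING(0 0, 10 0)").expect("Invalid geometry");
/// let point = Geometry::new_from_wkt("POINT(5 3)").expect("Invalid geometry");
///
/// assert_eq!(line.project(&point), Ok(5.));
/// ```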
fn project<'b, G: Geom<'b>>(&self, p: &G) -> GResult<f64>;
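/// Same as `project`, except the returned value is a fraction of the total length of
/// `self` (between 0 and 1) rather than an absolute distance.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let line = Geometry::new_from_wkt("LINESTRING(0 0, 10 0)").expect("Invalid geometry");
/// let point = Geometry::new_from_wkt("POINT(5 3)").expect("Invalid geometry");
///
/// assert_eq!(line.project_normalized(&point), Ok(0.5));
/// ```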
fn project_normalized<'b, G: Geom<'b>>(&self, p: &G) -> GResult<f64>;
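/// Nodes the linework of `self`: lines are split at every intersection and duplicated
/// segments are removed.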
fn node(&self) -> GResult<Geometry<'a>>;
/// Returns an offset line at a given distance and side of the input line. All points
/// of the returned geometry are within the given distance of the input geometry.
///
/// ### Parameters description:
///
/// #### width
///
/// * If `width` is positive, the offset will be at the left side of the input line and retain
/// the same direction.
/// * If `width` is negative, it'll be at the right side and in the opposite direction.
///
/// #### quadrant_segments
///
/// * If `quadrant_segments` is >= 1, joins are round, and `quadrant_segments` indicates the
/// number of segments to use to approximate a quarter-circle.
/// * If `quadrant_segments` == 0, joins are bevelled (flat).
/// * If `quadrant_segments` < 0, joins are mitred, and the value of `quadrant_segments`
/// indicates the mitre ratio limit as `mitre_limit = |quadrant_segments|`.
///
/// #### mitre_limit
///
/// The mitre ratio is the ratio of the distance from the corner to the end of the mitred offset
/// corner. When two line segments meet at a sharp angle, a miter join will extend far beyond
/// the original geometry (and in the extreme case will be infinitely far). To prevent
/// unreasonable geometry, the mitre limit allows controlling the maximum length of the join
/// corner. Corners with a ratio which exceeds the limit will be beveled.
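///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry, JoinStyle};
///
/// let line = Geometry::new_from_wkt("LINESTRING(0 0, 10 0)").expect("Invalid geometry");
/// let offset = line.offset_curve(2., 8, JoinStyle::Round, 5.)
///                  .expect("offset_curve failed");
///
/// // With a positive width, the offset lies on the left side of the input line:
/// assert_eq!(offset.to_wkt_precision(1).unwrap(), "LINESTRING (0.0 2.0, 10.0 2.0)");
/// ```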
fn offset_curve(
&self,
width: f64,
quadrant_segments: i32,
join_style: JoinStyle,
mitre_limit: f64,
) -> GResult<Geometry<'a>>;
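/// Returns a point that is guaranteed to lie on the surface of `self`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))")
///     .expect("Invalid geometry");
/// let point = geom.point_on_surface().expect("point_on_surface failed");
///
/// assert_eq!(point.to_wkt_precision(1).unwrap(), "POINT (2.0 2.0)");
/// ```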
fn point_on_surface(&self) -> GResult<Geometry<'a>>;
/// Returns, in the tuple elements order:
///
/// 1. The polygonized geometry.
/// 2. The cuts geometries collection.
/// 3. The dangles geometries collection.
/// 4. The invalid geometries collection.
#[allow(clippy::type_complexity)]
fn polygonize_full(
&self,
) -> GResult<(
Geometry<'a>,
Option<Geometry<'a>>,
Option<Geometry<'a>>,
Option<Geometry<'a>>,
)>;
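/// Returns a `GEOMETRYCOLLECTION` containing the paths shared by `self` and `other`:
/// its first element holds the shared paths going in the same direction, its second
/// element the ones going in the opposite direction.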
fn shared_paths<'b, G: Geom<'b>>(&self, other: &G) -> GResult<Geometry<'a>>;
/// Converts a [`Geometry`] to the HEX format. For more control over the generated output,
/// use the [`WKBWriter`] type.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)")
/// .expect("Invalid geometry");
/// let hex_buf = point_geom.to_hex().expect("conversion to HEX failed");
/// ```
fn to_hex(&self) -> GResult<CVec<u8>>;
/// Converts a [`Geometry`] to the WKB format. For more control over the generated output,
/// use the [`WKBWriter`] type.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)")
/// .expect("Invalid geometry");
/// let wkb_buf = point_geom.to_wkb().expect("conversion to WKB failed");
/// ```
fn to_wkb(&self) -> GResult<CVec<u8>>;
/// Creates a new [`PreparedGeometry`] from the current `Geometry`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)")
/// .expect("Invalid geometry");
/// let prepared_geom = point_geom.to_prepared_geom().expect("failed to create prepared geom");
/// ```
#[allow(clippy::needless_lifetimes)]
fn to_prepared_geom<'c>(&'c self) -> GResult<PreparedGeometry<'c>>;
/// Clones the geometry; the context is also passed to the newly created `Geometry`.
fn clone(&self) -> Geometry<'a>;
/// Returns the nth geometry of `self`, zero-based.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("MULTIPOINT(1 1, 2 2, 3 3, 4 4)")
/// .expect("Invalid geometry");
/// let point_nb3 = geom
/// .get_geometry_n(2)
/// .expect("failed to get third point");
/// assert_eq!(
/// point_nb3.to_wkt().unwrap(),
/// "POINT (3.0000000000000000 3.0000000000000000)",
/// );
/// ```
fn get_geometry_n<'c>(&'c self, n: usize) -> GResult<ConstGeometry<'a, 'c>>;
/// Returns the nth interior ring.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0),\
/// (1 1, 2 1, 2 5, 1 5, 1 1),\
/// (8 5, 8 4, 9 4, 9 5, 8 5))")
/// .expect("Invalid geometry");
/// let interior = geom
/// .get_interior_ring_n(0)
/// .expect("failed to get interior ring");
/// assert_eq!(interior.to_wkt().unwrap(),
/// "LINEARRING (1.0000000000000000 1.0000000000000000, \
/// 2.0000000000000000 1.0000000000000000, \
/// 2.0000000000000000 5.0000000000000000, \
/// 1.0000000000000000 5.0000000000000000, \
/// 1.0000000000000000 1.0000000000000000)");
/// ```
fn get_interior_ring_n<'c>(&'c self, n: u32) -> GResult<ConstGeometry<'a, 'c>>;
/// Returns the exterior ring.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0),\
/// (1 1, 2 1, 2 5, 1 5, 1 1))")
/// .expect("Invalid geometry");
///
/// let exterior = point_geom
/// .get_exterior_ring()
/// .expect("failed to get exterior ring");
/// assert_eq!(exterior.to_wkt().unwrap(),
/// "LINEARRING (0.0000000000000000 0.0000000000000000, \
/// 10.0000000000000000 0.0000000000000000, \
/// 10.0000000000000000 6.0000000000000000, \
/// 0.0000000000000000 6.0000000000000000, \
/// 0.0000000000000000 0.0000000000000000)");
/// ```
fn get_exterior_ring<'c>(&'c self) -> GResult<ConstGeometry<'a, 'c>>;
}
macro_rules! impl_geom {
($ty_name:ident) => (
impl_geom!($ty_name,,);
);
($ty_name:ident, $lt:lifetime) => (
impl_geom!($ty_name, $lt, original);
);
($ty_name:ident, $($lt:lifetime)?, $($field:ident)?) => (
impl<'a$(, $lt)?> Geom<'a> for $ty_name<'a$(, $lt)?> {
fn get_type(&self) -> GResult<String> {
unsafe {
let ptr = GEOSGeomType_r(self.get_raw_context(), self.as_raw());
managed_string(ptr, self.get_context_handle(), "GGeom::get_type")
}
}
fn geometry_type(&self) -> GeometryTypes {
let type_geom = unsafe { GEOSGeomTypeId_r(self.get_raw_context(), self.as_raw()) as i32 };
GeometryTypes::try_from(type_geom).expect("Failed to convert to GeometryTypes")
}
fn is_valid(&self) -> bool {
unsafe { GEOSisValid_r(self.get_raw_context(), self.as_raw()) == 1 }
}
fn is_valid_reason(&self) -> GResult<String> {
unsafe {
let ptr = GEOSisValidReason_r(self.get_raw_context(), self.as_raw());
managed_string(ptr, self.get_context_handle(), "GGeom::is_valid_reason")
}
}
fn get_coord_seq(&self) -> GResult<CoordSeq<'a>> {
let type_geom = self.geometry_type();
match type_geom {
GeometryTypes::Point | GeometryTypes::LineString | GeometryTypes::LinearRing => unsafe {
let coord = GEOSGeom_getCoordSeq_r(self.get_raw_context(), self.as_raw());
let t = GEOSCoordSeq_clone_r(self.get_raw_context(), coord);
let mut size = 0;
let mut dims = 0;
if GEOSCoordSeq_getSize_r(self.get_raw_context(), coord, &mut size) == 0 {
return Err(Error::GenericError("GEOSCoordSeq_getSize_r failed".to_owned()));
}
if GEOSCoordSeq_getDimensions_r(self.get_raw_context(), coord, &mut dims) == 0 {
return Err(Error::GenericError("GEOSCoordSeq_getDimensions_r failed".to_owned()));
}
CoordSeq::new_from_raw(t, self.clone_context(), size, dims, "get_coord_seq")
},
_ => Err(Error::ImpossibleOperation(
"Geometry must be a Point, LineString or LinearRing to extract its coordinates"
.into(),
)),
}
}
fn area(&self) -> GResult<f64> {
let mut n = 0.;
let res = unsafe { GEOSArea_r(self.get_raw_context(), self.as_raw(), &mut n) };
if res != 1 {
Err(Error::GeosError(format!("area failed with code {}", res)))
} else {
Ok(n)
}
}
fn to_wkt(&self) -> GResult<String> {
match WKTWriter::new_with_context(self.clone_context()) {
Ok(mut w) => w.write(self),
Err(e) => Err(e),
}
}
fn to_wkt_precision(&self, precision: u32) -> GResult<String> {
unsafe {
let writer = GEOSWKTWriter_create_r(self.get_raw_context());
GEOSWKTWriter_setRoundingPrecision_r(self.get_raw_context(), writer, precision as _);
let c_result = GEOSWKTWriter_write_r(self.get_raw_context(), writer, self.as_raw());
GEOSWKTWriter_destroy_r(self.get_raw_context(), writer);
managed_string(c_result, self.get_context_handle(), "GResult::to_wkt_precision")
}
}
fn is_ring(&self) -> GResult<bool> {
let rv = unsafe { GEOSisRing_r(self.get_raw_context(), self.as_raw()) };
check_geos_predicate(rv as _, PredicateType::IsRing)
}
fn intersects<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool> {
let ret_val = unsafe {
GEOSIntersects_r(self.get_raw_context(), self.as_raw(), other.as_raw())
};
check_geos_predicate(ret_val as _, PredicateType::Intersects)
}
fn crosses<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool> {
let ret_val = unsafe {
GEOSCrosses_r(self.get_raw_context(), self.as_raw(), other.as_raw())
};
check_geos_predicate(ret_val as _, PredicateType::Crosses)
}
fn disjoint<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool> {
let ret_val = unsafe {
GEOSDisjoint_r(self.get_raw_context(), self.as_raw(), other.as_raw())
};
check_geos_predicate(ret_val as _, PredicateType::Disjoint)
}
fn touches<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool> {
let ret_val = unsafe {
GEOSTouches_r(self.get_raw_context(), self.as_raw(), other.as_raw())
};
check_geos_predicate(ret_val as _, PredicateType::Touches)
}
fn overlaps<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool> {
let ret_val = unsafe {
GEOSOverlaps_r(self.get_raw_context(), self.as_raw(), other.as_raw())
};
check_geos_predicate(ret_val as _, PredicateType::Overlaps)
}
fn within<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool> {
let ret_val = unsafe {
GEOSWithin_r(self.get_raw_context(), self.as_raw(), other.as_raw())
};
check_geos_predicate(ret_val as _, PredicateType::Within)
}
fn equals<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool> {
let ret_val = unsafe {
GEOSEquals_r(self.get_raw_context(), self.as_raw(), other.as_raw())
};
check_geos_predicate(ret_val as _, PredicateType::Equals)
}
fn equals_exact<'b, G: Geom<'b>>(&self, other: &G, precision: f64) -> GResult<bool> {
let ret_val = unsafe {
GEOSEqualsExact_r(self.get_raw_context(), self.as_raw(), other.as_raw(), precision)
};
check_geos_predicate(ret_val as _, PredicateType::EqualsExact)
}
fn covers<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool> {
let ret_val = unsafe {
GEOSCovers_r(self.get_raw_context(), self.as_raw(), other.as_raw())
};
check_geos_predicate(ret_val as _, PredicateType::Covers)
}
fn covered_by<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool> {
let ret_val = unsafe {
GEOSCoveredBy_r(self.get_raw_context(), self.as_raw(), other.as_raw())
};
check_geos_predicate(ret_val as _, PredicateType::CoveredBy)
}
fn contains<'b, G: Geom<'b>>(&self, other: &G) -> GResult<bool> {
let ret_val = unsafe {
GEOSContains_r(self.get_raw_context(), self.as_raw(), other.as_raw())
};
check_geos_predicate(ret_val as _, PredicateType::Contains)
}
fn buffer(&self, width: f64, quadsegs: i32) -> GResult<Geometry<'a>> {
assert!(quadsegs > 0);
unsafe {
let ptr = GEOSBuffer_r(
self.get_raw_context(),
self.as_raw(),
width,
quadsegs as _,
);
Geometry::new_from_raw(ptr, self.clone_context(), "buffer")
}
}
fn is_empty(&self) -> GResult<bool> {
let ret_val = unsafe { GEOSisEmpty_r(self.get_raw_context(), self.as_raw()) };
check_geos_predicate(ret_val as _, PredicateType::IsEmpty)
}
fn is_simple(&self) -> GResult<bool> {
let ret_val = unsafe { GEOSisSimple_r(self.get_raw_context(), self.as_raw()) };
check_geos_predicate(ret_val as _, PredicateType::IsSimple)
}
fn difference<'b, G: Geom<'b>>(&self, other: &G) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSDifference_r(self.get_raw_context(), self.as_raw(), other.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "difference")
}
}
fn envelope(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSEnvelope_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "envelope")
}
}
fn sym_difference<'b, G: Geom<'b>>(&self, other: &G) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSSymDifference_r(self.get_raw_context(), self.as_raw(), other.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "sym_difference")
}
}
fn union<'b, G: Geom<'b>>(&self, other: &G) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSUnion_r(self.get_raw_context(), self.as_raw(), other.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "union")
}
}
fn get_centroid(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSGetCentroid_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "get_centroid")
}
}
fn unary_union(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSUnaryUnion_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "unary_union")
}
}
fn voronoi<'b, G: Geom<'b>>(
&self,
envelope: Option<&G>,
tolerance: f64,
only_edges: bool,
) -> GResult<Geometry<'a>> {
unsafe {
let raw_voronoi = GEOSVoronoiDiagram_r(
self.get_raw_context(),
self.as_raw(),
envelope
.map(|e| e.as_raw())
.unwrap_or(std::ptr::null_mut()),
tolerance,
only_edges as _,
);
Geometry::new_from_raw(raw_voronoi, self.clone_context(), "voronoi")
}
}
fn intersection<'b, G: Geom<'b>>(&self, other: &G) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSIntersection_r(self.get_raw_context(), self.as_raw(), other.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "intersection")
}
}
fn convex_hull(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSConvexHull_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "convex_hull")
}
}
fn boundary(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSBoundary_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "boundary")
}
}
fn has_z(&self) -> GResult<bool> {
let ret_val = unsafe { GEOSHasZ_r(self.get_raw_context(), self.as_raw()) };
check_geos_predicate(ret_val as _, PredicateType::IsSimple)
}
fn is_closed(&self) -> GResult<bool> {
if self.geometry_type() != GeometryTypes::LineString &&
self.geometry_type() != GeometryTypes::MultiLineString {
return Err(Error::GenericError("Geometry must be a LineString or a MultiLineString".to_owned()));
}
let ret_val = unsafe { GEOSisClosed_r(self.get_raw_context(), self.as_raw()) };
check_geos_predicate(ret_val as _, PredicateType::IsSimple)
}
fn length(&self) -> GResult<f64> {
let mut length = 0.;
unsafe {
let ret = GEOSLength_r(self.get_raw_context(), self.as_raw(), &mut length);
check_ret(ret, PredicateType::IsSimple).map(|_| length)
}
}
fn distance<'b, G: Geom<'b>>(&self, other: &G) -> GResult<f64> {
let mut distance = 0.;
unsafe {
let ret = GEOSDistance_r(
self.get_raw_context(),
self.as_raw(),
other.as_raw(),
&mut distance);
check_ret(ret, PredicateType::IsSimple).map(|_| distance)
}
}
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn distance_indexed<'b, G: Geom<'b>>(&self, other: &G) -> GResult<f64> {
unsafe {
let mut distance = 0.;
if GEOSDistanceIndexed_r(self.get_raw_context(),
self.as_raw(),
other.as_raw(),
&mut distance) != 1 {
Err(Error::GenericError("GEOSDistanceIndexed_r failed".to_owned()))
} else {
Ok(distance)
}
}
}
fn hausdorff_distance<'b, G: Geom<'b>>(&self, other: &G) -> GResult<f64> {
let mut distance = 0.;
unsafe {
let ret = GEOSHausdorffDistance_r(
self.get_raw_context(),
self.as_raw(),
other.as_raw(),
&mut distance);
check_ret(ret, PredicateType::IsSimple).map(|_| distance)
}
}
fn hausdorff_distance_densify<'b, G: Geom<'b>>(&self, other: &G, distance_frac: f64) -> GResult<f64> {
let mut distance = 0.;
unsafe {
let ret = GEOSHausdorffDistanceDensify_r(
self.get_raw_context(),
self.as_raw(),
other.as_raw(),
distance_frac,
&mut distance);
check_ret(ret, PredicateType::IsSimple).map(|_| distance)
}
}
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn frechet_distance<'b, G: Geom<'b>>(&self, other: &G) -> GResult<f64> {
let mut distance = 0.;
unsafe {
let ret = GEOSFrechetDistance_r(
self.get_raw_context(),
self.as_raw(),
other.as_raw(),
&mut distance);
check_ret(ret, PredicateType::IsSimple).map(|_| distance)
}
}
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn frechet_distance_densify<'b, G: Geom<'b>>(&self, other: &G, distance_frac: f64) -> GResult<f64> {
let mut distance = 0.;
unsafe {
let ret = GEOSFrechetDistanceDensify_r(
self.get_raw_context(),
self.as_raw(),
other.as_raw(),
distance_frac,
&mut distance);
check_ret(ret, PredicateType::IsSimple).map(|_| distance)
}
}
fn get_length(&self) -> GResult<f64> {
let mut length = 0.;
unsafe {
let ret = GEOSGeomGetLength_r(self.get_raw_context(), self.as_raw(), &mut length);
check_ret(ret, PredicateType::IsSimple).map(|_| length)
}
}
fn snap<'b, G: Geom<'b>>(&self, other: &G, tolerance: f64) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSSnap_r(self.get_raw_context(), self.as_raw(), other.as_raw(), tolerance);
Geometry::new_from_raw(ptr, self.clone_context(), "snap")
}
}
fn extract_unique_points(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSGeom_extractUniquePoints_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "extract_unique_points")
}
}
fn nearest_points<'b, G: Geom<'b>>(&self, other: &G) -> GResult<CoordSeq<'a>> {
unsafe {
let ptr = GEOSNearestPoints_r(
self.get_raw_context(),
self.as_raw(),
other.as_raw(),
);
let mut size = 0;
let mut dims = 0;
if GEOSCoordSeq_getSize_r(self.get_raw_context(), ptr, &mut size) == 0 {
return Err(Error::GenericError("GEOSCoordSeq_getSize_r failed".to_owned()));
}
if GEOSCoordSeq_getDimensions_r(self.get_raw_context(), ptr, &mut dims) == 0 {
return Err(Error::GenericError("GEOSCoordSeq_getDimensions_r failed".to_owned()));
}
CoordSeq::new_from_raw(ptr, self.clone_context(), size, dims, "nearest_points")
}
}
fn get_x(&self) -> GResult<f64> {
if self.geometry_type() != GeometryTypes::Point {
return Err(Error::GenericError("Geometry must be a point".to_owned()));
}
let mut x = 0.;
unsafe {
if GEOSGeomGetX_r(self.get_raw_context(), self.as_raw(), &mut x) == 1 {
Ok(x)
} else {
Err(Error::GenericError("GEOSGeomGetX_r failed".to_owned()))
}
}
}
fn get_y(&self) -> GResult<f64> {
if self.geometry_type() != GeometryTypes::Point {
return Err(Error::GenericError("Geometry must be a point".to_owned()));
}
let mut y = 0.;
unsafe {
if GEOSGeomGetY_r(self.get_raw_context(), self.as_raw(), &mut y) == 1 {
Ok(y)
} else {
Err(Error::GenericError("GEOSGeomGetY_r failed".to_owned()))
}
}
}
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn get_z(&self) -> GResult<f64> {
if self.geometry_type() != GeometryTypes::Point {
return Err(Error::GenericError("Geometry must be a point".to_owned()));
}
let mut z = 0.;
unsafe {
if GEOSGeomGetZ_r(self.get_raw_context(), self.as_raw(), &mut z) == 1 {
Ok(z)
} else {
Err(Error::GenericError("GEOSGeomGetZ_r failed".to_owned()))
}
}
}
fn get_point_n(&self, n: usize) -> GResult<Geometry<'a>> {
if self.geometry_type() != GeometryTypes::LineString {
return Err(Error::GenericError("Geometry must be a LineString".to_owned()));
}
unsafe {
let ptr = GEOSGeomGetPointN_r(self.get_raw_context(), self.as_raw(), n as _);
Geometry::new_from_raw(ptr, self.clone_context(), "get_point_n")
}
}
fn get_start_point(&self) -> GResult<Geometry<'a>> {
if self.geometry_type() != GeometryTypes::LineString {
return Err(Error::GenericError("Geometry must be a LineString".to_owned()));
}
unsafe {
let ptr = GEOSGeomGetStartPoint_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "get_start_point")
}
}
fn get_end_point(&self) -> GResult<Geometry<'a>> {
if self.geometry_type() != GeometryTypes::LineString {
return Err(Error::GenericError("Geometry must be a LineString".to_owned()));
}
unsafe {
let ptr = GEOSGeomGetEndPoint_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "get_end_point")
}
}
fn get_num_points(&self) -> GResult<usize> {
if self.geometry_type() != GeometryTypes::LineString {
return Err(Error::GenericError("Geometry must be a LineString".to_owned()));
}
unsafe {
let ret = GEOSGeomGetNumPoints_r(self.get_raw_context(), self.as_raw());
if ret == -1 {
Err(Error::GenericError("GEOSGeomGetNumPoints_r failed".to_owned()))
} else {
Ok(ret as _)
}
}
}
fn get_num_interior_rings(&self) -> GResult<usize> {
unsafe {
let ret = GEOSGetNumInteriorRings_r(self.get_raw_context(), self.as_raw());
if ret == -1 {
Err(Error::GenericError("GEOSGetNumInteriorRings_r failed".to_owned()))
} else {
Ok(ret as _)
}
}
}
fn get_num_coordinates(&self) -> GResult<usize> {
unsafe {
let ret = GEOSGetNumCoordinates_r(self.get_raw_context(), self.as_raw());
if ret == -1 {
Err(Error::GenericError("GEOSGetNumCoordinates_r failed".to_owned()))
} else {
Ok(ret as _)
}
}
}
fn get_num_dimensions(&self) -> GResult<usize> {
unsafe {
let ret = GEOSGeom_getDimensions_r(self.get_raw_context(), self.as_raw());
if ret == -1 {
Err(Error::GenericError("GEOSGeom_getDimensions_r failed".to_owned()))
} else {
Ok(ret as _)
}
}
}
fn get_coordinate_dimension(&self) -> GResult<Dimensions> {
unsafe {
let ret = GEOSGeom_getCoordinateDimension_r(self.get_raw_context(), self.as_raw());
if ret != 2 && ret != 3 {
Err(Error::GenericError("GEOSGeom_getCoordinateDimension_r failed".to_owned()))
} else {
Ok(Dimensions::try_from(ret).expect("Failed to convert to Dimensions"))
}
}
}
#[cfg(any(feature = "v3_8_0", feature = "dox"))]
fn make_valid(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSMakeValid_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "make_valid")
}
}
fn get_num_geometries(&self) -> GResult<usize> {
unsafe {
let ret = GEOSGetNumGeometries_r(self.get_raw_context(), self.as_raw());
if ret < 1 {
Err(Error::GenericError("GEOSGetNumGeometries_r failed".to_owned()))
} else {
Ok(ret as _)
}
}
}
fn get_srid(&self) -> GResult<usize> {
unsafe {
let ret = GEOSGetSRID_r(self.get_raw_context(), self.as_raw());
if ret < 1 {
Err(Error::GenericError("GEOSGetSRID_r failed".to_owned()))
} else {
Ok(ret as _)
}
}
}
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn get_precision(&self) -> GResult<f64> {
unsafe {
let ret = GEOSGeom_getPrecision_r(self.get_raw_context(), self.as_raw());
if ret == -1. {
Err(Error::GenericError("GEOSGeom_getPrecision_r failed".to_owned()))
} else {
Ok(ret)
}
}
}
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn set_precision(&self, grid_size: f64, flags: Precision) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSGeom_setPrecision_r(self.get_raw_context(),
self.as_raw(),
grid_size,
flags.into());
Geometry::new_from_raw(ptr, self.clone_context(), "set_precision")
}
}
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn get_x_max(&self) -> GResult<f64> {
unsafe {
let mut value = 0.;
if GEOSGeom_getXMax_r(self.get_raw_context(), self.as_raw(), &mut value) == 0 {
Err(Error::GenericError("GEOSGeom_getXMax_r failed".to_owned()))
} else {
Ok(value)
}
}
}
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn get_x_min(&self) -> GResult<f64> {
unsafe {
let mut value = 0.;
if GEOSGeom_getXMin_r(self.get_raw_context(), self.as_raw(), &mut value) == 0 {
Err(Error::GenericError("GEOSGeom_getXMin_r failed".to_owned()))
} else {
Ok(value)
}
}
}
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn get_y_max(&self) -> GResult<f64> {
unsafe {
let mut value = 0.;
if GEOSGeom_getYMax_r(self.get_raw_context(), self.as_raw(), &mut value) == 0 {
Err(Error::GenericError("GEOSGeom_getYMax_r failed".to_owned()))
} else {
Ok(value)
}
}
}
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
fn get_y_min(&self) -> GResult<f64> {
unsafe {
let mut value = 0.;
if GEOSGeom_getYMin_r(self.get_raw_context(), self.as_raw(), &mut value) == 0 {
Err(Error::GenericError("GEOSGeom_getYMin_r failed".to_owned()))
} else {
Ok(value)
}
}
}
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn minimum_clearance(&self) -> GResult<f64> {
unsafe {
let mut value = 0.;
if GEOSMinimumClearance_r(self.get_raw_context(), self.as_raw(), &mut value) != 0 {
Err(Error::GenericError("GEOSMinimumClearance_r failed".to_owned()))
} else {
Ok(value)
}
}
}
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn minimum_clearance_line(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSMinimumClearanceLine_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "minimum_clearance_line")
}
}
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn minimum_rotated_rectangle(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSMinimumRotatedRectangle_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "minimum_rotated_rectangle")
}
}
#[cfg(any(feature = "v3_6_0", feature = "dox"))]
fn minimum_width(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSMinimumWidth_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "minimum_width")
}
}
fn delaunay_triangulation(&self, tolerance: f64, only_edges: bool) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSDelaunayTriangulation_r(
self.get_raw_context(),
self.as_raw(),
tolerance,
only_edges as _,
);
Geometry::new_from_raw(ptr, self.clone_context(), "delaunay_triangulation")
}
}
fn interpolate(&self, d: f64) -> GResult<Geometry<'a>> {
if self.geometry_type() != GeometryTypes::LineString {
return Err(Error::GenericError("Geometry must be a LineString".to_owned()));
}
unsafe {
let ptr = GEOSInterpolate_r(self.get_raw_context(), self.as_raw(), d);
Geometry::new_from_raw(ptr, self.clone_context(), "interpolate")
}
}
fn interpolate_normalized(&self, d: f64) -> GResult<Geometry<'a>> {
if self.geometry_type() != GeometryTypes::LineString {
return Err(Error::GenericError("Geometry must be a LineString".to_owned()));
}
unsafe {
let ptr = GEOSInterpolateNormalized_r(self.get_raw_context(), self.as_raw(), d);
Geometry::new_from_raw(ptr, self.clone_context(), "interpolate_normalized")
}
}
fn project<'b, G: Geom<'b>>(&self, p: &G) -> GResult<f64> {
if p.geometry_type() != GeometryTypes::Point {
return Err(Error::GenericError("Second geometry must be a Point".to_owned()));
}
unsafe {
let ret = GEOSProject_r(self.get_raw_context(), self.as_raw(), p.as_raw());
if (ret - -1.).abs() < 0.001 {
Err(Error::GenericError("GEOSProject_r failed".to_owned()))
} else {
Ok(ret)
}
}
}
fn project_normalized<'b, G: Geom<'b>>(&self, p: &G) -> GResult<f64> {
if p.geometry_type() != GeometryTypes::Point {
return Err(Error::GenericError("Second geometry must be a Point".to_owned()));
}
unsafe {
let ret = GEOSProjectNormalized_r(self.get_raw_context(), self.as_raw(), p.as_raw());
if (ret - -1.).abs() < 0.001 {
Err(Error::GenericError("GEOSProjectNormalized_r failed".to_owned()))
} else {
Ok(ret)
}
}
}
fn node(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSNode_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "node")
}
}
fn offset_curve(
&self,
width: f64,
quadrant_segments: i32,
join_style: JoinStyle,
mitre_limit: f64,
) -> GResult<Geometry<'a>> {
if self.geometry_type() != GeometryTypes::LineString {
return Err(Error::GenericError("Geometry must be a LineString".to_owned()));
}
unsafe {
let ptr = GEOSOffsetCurve_r(self.get_raw_context(), self.as_raw(), width,
quadrant_segments, join_style.into(), mitre_limit);
Geometry::new_from_raw(ptr, self.clone_context(), "offset_curve")
}
}
fn point_on_surface(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSPointOnSurface_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "point_on_surface")
}
}
fn polygonize_full(
&self,
) -> GResult<(Geometry<'a>, Option<Geometry<'a>>, Option<Geometry<'a>>, Option<Geometry<'a>>)> {
let mut cuts: *mut GEOSGeometry = ::std::ptr::null_mut();
let mut dangles: *mut GEOSGeometry = ::std::ptr::null_mut();
let mut invalids: *mut GEOSGeometry = ::std::ptr::null_mut();
unsafe {
let ptr = GEOSPolygonize_full_r(
self.get_raw_context(),
self.as_raw(),
&mut cuts,
&mut dangles,
&mut invalids,
);
let cuts = if !cuts.is_null() {
Geometry::new_from_raw(cuts, self.clone_context(), "polygonize_full").ok()
} else {
None
};
let dangles = if !dangles.is_null() {
Geometry::new_from_raw(dangles, self.clone_context(), "polygonize_full").ok()
} else {
None
};
let invalids = if !invalids.is_null() {
Geometry::new_from_raw(invalids, self.clone_context(), "polygonize_full").ok()
} else {
None
};
Geometry::new_from_raw(ptr, self.clone_context(), "polygonize_full")
.map(|x| (x, cuts, dangles, invalids))
}
}
fn shared_paths<'b, G: Geom<'b>>(&self, other: &G) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSSharedPaths_r(self.get_raw_context(), self.as_raw(), other.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "shared_paths")
}
}
fn to_hex(&self) -> GResult<CVec<u8>> {
let mut size = 0;
unsafe {
let ptr = GEOSGeomToHEX_buf_r(self.get_raw_context(), self.as_raw(), &mut size);
if ptr.is_null() {
Err(Error::NoConstructionFromNullPtr(
"Geometry::to_hex failed: GEOSGeomToHEX_buf_r returned null pointer".to_owned())
)
} else {
Ok(CVec::new(ptr, size as _))
}
}
}
fn to_wkb(&self) -> GResult<CVec<u8>> {
let mut size = 0;
unsafe {
let ptr = GEOSGeomToWKB_buf_r(self.get_raw_context(), self.as_raw(), &mut size);
if ptr.is_null() {
Err(Error::NoConstructionFromNullPtr(
"Geometry::to_wkb failed: GEOSGeomToWKB_buf_r returned null pointer".to_owned())
)
} else {
Ok(CVec::new(ptr, size as _))
}
}
}
#[allow(clippy::needless_lifetimes)]
fn to_prepared_geom<'c>(&'c self) -> GResult<PreparedGeometry<'c>> {
PreparedGeometry::new(self)
}
fn clone(&self) -> Geometry<'a> {
let context = self.clone_context();
let ptr = unsafe { GEOSGeom_clone_r(context.as_raw(), self.as_raw()) };
if ptr.is_null() {
panic!("Couldn't clone geometry...");
}
Geometry {
ptr: PtrWrap(ptr),
context,
}
}
fn get_geometry_n<'c>(&'c self, n: usize) -> GResult<ConstGeometry<'a, 'c>> {
unsafe {
let ptr = GEOSGetGeometryN_r(self.get_raw_context(), self.as_raw(), n as _);
ConstGeometry::new_from_raw(ptr, self$(.$field)?, "get_geometry_n")
}
}
fn get_interior_ring_n<'c>(&'c self, n: u32) -> GResult<ConstGeometry<'a, 'c>> {
unsafe {
let ptr = GEOSGetInteriorRingN_r(self.get_raw_context(), self.as_raw(), n as _);
ConstGeometry::new_from_raw(ptr, self$(.$field)?, "get_interior_ring_n")
}
}
fn get_exterior_ring<'c>(&'c self) -> GResult<ConstGeometry<'a, 'c>> {
unsafe {
let ptr = GEOSGetExteriorRing_r(self.get_raw_context(), self.as_raw());
ConstGeometry::new_from_raw(ptr, self$(.$field)?, "get_exterior_ring")
}
}
}
impl<'a, 'b$(, $lt)?, G: Geom<'b>> PartialEq<G> for $ty_name<'a$(, $lt)?> {
fn eq(&self, other: &G) -> bool {
self.equals(other).unwrap_or(false)
}
}
unsafe impl<'a$(, $lt)?> Send for $ty_name<'a$(, $lt)?> {}
unsafe impl<'a$(, $lt)?> Sync for $ty_name<'a$(, $lt)?> {}
)
}
impl_geom!(Geometry);
impl_geom!(ConstGeometry, 'd);
impl<'a> Geometry<'a> {
/// Creates a `Geometry` from the WKT format.
///
/// # Example
///
/// ```
/// use geos::Geometry;
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
/// ```
pub fn new_from_wkt(wkt: &str) -> GResult<Geometry<'a>> {
match ContextHandle::init_e(Some("Geometry::new_from_wkt")) {
Ok(context_handle) => match CString::new(wkt) {
Ok(c_str) => unsafe {
let reader = GEOSWKTReader_create_r(context_handle.as_raw());
let ptr = GEOSWKTReader_read_r(context_handle.as_raw(), reader, c_str.as_ptr());
GEOSWKTReader_destroy_r(context_handle.as_raw(), reader);
Geometry::new_from_raw(ptr, Arc::new(context_handle), "new_from_wkt")
},
Err(e) => Err(Error::GenericError(format!(
"Conversion to CString failed: {}",
e
))),
},
Err(e) => Err(e),
}
}
/// Create a new [`Geometry`] from the HEX format.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
/// let hex_buf = point_geom.to_hex().expect("conversion to HEX failed");
///
/// // The interesting part is here:
/// let new_geom = Geometry::new_from_hex(hex_buf.as_ref())
/// .expect("conversion from HEX failed");
/// assert_eq!(point_geom.equals(&new_geom), Ok(true));
/// ```
pub fn new_from_hex(hex: &[u8]) -> GResult<Geometry<'a>> {
match ContextHandle::init_e(Some("Geometry::new_from_hex")) {
Ok(context) => unsafe {
let ptr = GEOSGeomFromHEX_buf_r(context.as_raw(), hex.as_ptr(), hex.len());
Geometry::new_from_raw(ptr, Arc::new(context), "new_from_hex")
},
Err(e) => Err(e),
}
}
/// Create a new [`Geometry`] from the WKB format.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
/// let wkb_buf = point_geom.to_wkb().expect("conversion to WKB failed");
///
/// // The interesting part is here:
/// let new_geom = Geometry::new_from_wkb(wkb_buf.as_ref())
/// .expect("conversion from WKB failed");
/// assert_eq!(point_geom.equals(&new_geom), Ok(true));
/// ```
pub fn new_from_wkb(wkb: &[u8]) -> GResult<Geometry<'a>> {
match ContextHandle::init_e(Some("Geometry::new_from_wkb")) {
Ok(context) => unsafe {
let ptr = GEOSGeomFromWKB_buf_r(context.as_raw(), wkb.as_ptr(), wkb.len());
Geometry::new_from_raw(ptr, Arc::new(context), "new_from_wkb")
},
Err(e) => Err(e),
}
}
/// Creates an areal geometry formed by the constituent linework of the given geometry.
///
/// You can find illustrations in the [postgis](https://postgis.net/docs/ST_BuildArea.html)
/// documentation.
///
/// Available using the `v3_8_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("POINT(100 90)").expect("Invalid geometry");
/// let small_geom = geom.buffer(25., 8).expect("buffer failed");
/// let big_geom = geom.buffer(50., 8).expect("buffer failed");
///
/// let union_geom = small_geom.union(&big_geom).expect("union failed");
/// let build_area_geom = union_geom.build_area().expect("build_area failed");
///
/// // The small buffer lies inside the big one, so their union is simply the bigger circle:
/// assert_eq!(union_geom.to_wkt_precision(1).unwrap(),
/// "POLYGON ((150.0 90.0, 149.0 80.2, 146.2 70.9, 141.6 62.2, 135.4 54.6, \
/// 127.8 48.4, 119.1 43.8, 109.8 41.0, 100.0 40.0, 90.2 41.0, \
/// 80.9 43.8, 72.2 48.4, 64.6 54.6, 58.4 62.2, 53.8 70.9, 51.0 80.2, \
/// 50.0 90.0, 51.0 99.8, 53.8 109.1, 58.4 117.8, 64.6 125.4, \
/// 72.2 131.6, 80.9 136.2, 90.2 139.0, 100.0 140.0, 109.8 139.0, \
/// 119.1 136.2, 127.8 131.6, 135.4 125.4, 141.6 117.8, 146.2 109.1, \
/// 149.0 99.8, 150.0 90.0))");
/// ```
#[cfg(any(feature = "v3_8_0", feature = "dox"))]
pub fn build_area(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSBuildArea_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "build_area")
}
}
/// Description from [postgis](https://postgis.net/docs/ST_Polygonize.html):
///
/// > Creates a GeometryCollection containing possible polygons formed from the constituent
/// > linework of a set of geometries.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POLYGON((-71.040878 42.285678,\
/// -71.040943 42.2856,\
/// -71.04096 42.285752,\
/// -71.040878 42.285678))")
/// .expect("Failed to create geometry");
/// let geom2 = Geometry::new_from_wkt("POLYGON((-71.17166 42.353675,\
/// -71.172026 42.354044,\
/// -71.17239 42.354358,\
/// -71.171794 42.354971,\
/// -71.170511 42.354855,\
/// -71.17112 42.354238,\
/// -71.17166 42.353675))")
/// .expect("Failed to create geometry");
///
/// let polygonized = Geometry::polygonize(&[geom1, geom2]).expect("polygonize failed");
/// assert_eq!(polygonized.to_wkt().unwrap(),
/// "GEOMETRYCOLLECTION (POLYGON ((-71.0408780000000064 42.2856779999999972, \
/// -71.0409429999999986 42.2856000000000023, \
/// -71.0409599999999983 42.2857520000000022, \
/// -71.0408780000000064 42.2856779999999972)), \
/// POLYGON ((-71.1716600000000028 42.3536750000000026, \
/// -71.1720260000000025 42.3540440000000018, \
/// -71.1723899999999929 42.3543579999999977, \
/// -71.1717940000000056 42.3549709999999990, \
/// -71.1705110000000047 42.3548550000000006, \
/// -71.1711200000000019 42.3542380000000023, \
/// -71.1716600000000028 42.3536750000000026)))");
/// ```
pub fn polygonize<T: Borrow<Geometry<'a>>>(geometries: &[T]) -> GResult<Geometry<'a>> {
unsafe {
let context = match geometries.get(0) {
Some(g) => g.borrow().clone_context(),
None => match ContextHandle::init_e(Some("Geometry::polygonize")) {
Ok(context) => Arc::new(context),
Err(e) => return Err(e),
},
};
|
.iter()
.map(|g| g.borrow().as_raw() as *const _)
.collect::<Vec<_>>();
let ptr = GEOSPolygonize_r(context.as_raw(), geoms.as_ptr(), geoms.len() as _);
Geometry::new_from_raw(ptr, context, "polygonize")
}
}
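    /// Returns the "cut edges" computed during polygonization: edges which are
    /// connected at both ends but which do not form part of any polygon.
    ///
    /// (Description paraphrased from the underlying `GEOSPolygonizer_getCutEdges`
    /// C API; see the GEOS documentation for details.)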
pub fn polygonizer_get_cut_edges<T: Borrow<Geometry<'a>>>(
&self,
geometries: &[T],
) -> GResult<Geometry<'a>> {
unsafe {
let context = match geometries.get(0) {
Some(g) => g.borrow().clone_context(),
None => match ContextHandle::init_e(Some("Geometry::polygonizer_get_cut_edges")) {
Ok(context) => Arc::new(context),
Err(e) => return Err(e),
},
};
let geoms = geometries
.iter()
.map(|g| g.borrow().as_raw() as *const _)
.collect::<Vec<_>>();
let ptr =
GEOSPolygonizer_getCutEdges_r(context.as_raw(), geoms.as_ptr(), geoms.len() as _);
Geometry::new_from_raw(ptr, context, "polygonizer_get_cut_edges")
}
}
    /// Merges a `Multi Line String` geometry into a (set of) `Line String`.
    ///
    /// ### Warning
    ///
    /// If you use this function on something other than a `Multi Line String` or a
    /// `Line String`, it will return an empty `Geometry collection`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let lines = Geometry::new_from_wkt("MULTILINESTRING((-29 -27,-30 -29.7,-36 -31,-45 -33),\
/// (-45 -33,-46 -32))")
/// .expect("Invalid geometry");
/// let lines_merged = lines.line_merge().expect("line merge failed");
/// assert_eq!(
/// lines_merged.to_wkt_precision(1).unwrap(),
/// "LINESTRING (-29.0 -27.0, -30.0 -29.7, -36.0 -31.0, -45.0 -33.0, -46.0 -32.0)",
/// );
/// ```
pub fn line_merge(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSLineMerge_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "line_merge")
}
}
/// Reverses the order of the vertexes.
///
/// Available using the `v3_7_0` feature.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let line = Geometry::new_from_wkt("LINESTRING(1 10,1 2)")
/// .expect("invalid geometry");
/// let reversed_line = line.reverse().expect("reverse failed");
///
/// assert_eq!(
/// reversed_line.to_wkt_precision(1).unwrap(),
/// "LINESTRING (1.0 2.0, 1.0 10.0)",
/// );
/// ```
#[cfg(any(feature = "v3_7_0", feature = "dox"))]
pub fn reverse(&self) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSReverse_r(self.get_raw_context(), self.as_raw());
Geometry::new_from_raw(ptr, self.clone_context(), "reverse")
}
}
/// Returns a simplified version of the given geometry.
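    ///
    /// # Example
    ///
    /// A minimal usage sketch (the tolerance value is illustrative):
    ///
    /// ```
    /// use geos::Geometry;
    ///
    /// let geom = Geometry::new_from_wkt("LINESTRING (0 0, 1 0.1, 2 0)")
    ///     .expect("Invalid geometry");
    /// let simplified = geom.simplify(0.5).expect("simplify failed");
    /// ```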
pub fn simplify(&self, tolerance: f64) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSSimplify_r(self.get_raw_context(), self.as_raw(), tolerance);
Geometry::new_from_raw(ptr, self.clone_context(), "simplify")
}
}
/// Returns a simplified version of the given geometry. It will avoid creating invalid derived
/// geometries.
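    ///
    /// # Example
    ///
    /// A minimal usage sketch (the tolerance value is illustrative):
    ///
    /// ```
    /// use geos::Geometry;
    ///
    /// let geom = Geometry::new_from_wkt("LINESTRING (0 0, 1 0.1, 2 0)")
    ///     .expect("Invalid geometry");
    /// let simplified = geom.topology_preserve_simplify(0.5)
    ///     .expect("topology_preserve_simplify failed");
    /// ```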
pub fn topology_preserve_simplify(&self, tolerance: f64) -> GResult<Geometry<'a>> {
unsafe {
let ptr =
GEOSTopologyPreserveSimplify_r(self.get_raw_context(), self.as_raw(), tolerance);
Geometry::new_from_raw(ptr, self.clone_context(), "topology_preserve_simplify")
}
}
pub(crate) unsafe fn new_from_raw(
ptr: *mut GEOSGeometry,
context: Arc<ContextHandle<'a>>,
caller: &str,
) -> GResult<Geometry<'a>> {
if ptr.is_null() {
let extra = if let Some(x) = context.get_last_error() {
format!("\nLast error: {}", x)
} else {
String::new()
};
return Err(Error::NoConstructionFromNullPtr(format!(
"Geometry::{}{}",
caller, extra
)));
}
Ok(Geometry {
ptr: PtrWrap(ptr),
context,
})
}
/// Set SRID of `self`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let mut point_geom = Geometry::new_from_wkt("POINT (2.5 2.5 4.0)")
/// .expect("Invalid geometry");
/// point_geom.set_srid(4326);
/// assert_eq!(point_geom.get_srid(), Ok(4326));
/// ```
pub fn set_srid(&mut self, srid: usize) {
unsafe { GEOSSetSRID_r(self.get_raw_context(), self.as_raw_mut(), srid as _) }
}
    /// Converts `self` to its normalized/canonical form. May reorder vertices in polygon rings,
    /// rings in a polygon, and elements in a multi-geometry complex.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let mut geom = Geometry::new_from_wkt(
/// "GEOMETRYCOLLECTION(POINT(2 3), MULTILINESTRING((0 0, 1 1),(2 2, 3 3)))",
/// ).expect("Invalid geometry");
///
/// geom.normalize().expect("normalize failed");
///
/// assert_eq!(geom.to_wkt_precision(1).unwrap(),
/// "GEOMETRYCOLLECTION (MULTILINESTRING ((2.0 2.0, 3.0 3.0), (0.0 0.0, 1.0 1.0)), \
/// POINT (2.0 3.0))");
/// ```
pub fn normalize(&mut self) -> GResult<()> {
let ret_val = unsafe { GEOSNormalize_r(self.get_raw_context(), self.as_raw_mut()) };
if ret_val == -1 {
Err(Error::GeosFunctionError(PredicateType::Normalize, ret_val))
} else {
Ok(())
}
}
/// Creates an empty polygon geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::create_empty_polygon().expect("Failed to build empty polygon");
///
/// assert_eq!(geom.to_wkt().unwrap(), "POLYGON EMPTY");
/// ```
pub fn create_empty_polygon() -> GResult<Geometry<'a>> {
match ContextHandle::init_e(Some("Geometry::create_empty_polygon")) {
Ok(context) => unsafe {
let ptr = GEOSGeom_createEmptyPolygon_r(context.as_raw());
Geometry::new_from_raw(ptr, Arc::new(context), "create_empty_polygon")
},
Err(e) => Err(e),
}
}
/// Creates an empty point geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::create_empty_point().expect("Failed to build empty point");
///
/// assert_eq!(geom.to_wkt().unwrap(), "POINT EMPTY");
/// ```
pub fn create_empty_point() -> GResult<Geometry<'a>> {
match ContextHandle::init_e(Some("Geometry::create_empty_point")) {
Ok(context) => unsafe {
let ptr = GEOSGeom_createEmptyPoint_r(context.as_raw());
Geometry::new_from_raw(ptr, Arc::new(context), "create_empty_point")
},
Err(e) => Err(e),
}
}
/// Creates an empty line string geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::create_empty_line_string().expect("Failed to build empty line string");
///
/// assert_eq!(geom.to_wkt().unwrap(), "LINESTRING EMPTY");
/// ```
pub fn create_empty_line_string() -> GResult<Geometry<'a>> {
match ContextHandle::init_e(Some("Geometry::create_empty_line_string")) {
Ok(context) => unsafe {
let ptr = GEOSGeom_createEmptyLineString_r(context.as_raw());
Geometry::new_from_raw(ptr, Arc::new(context), "create_empty_line_string")
},
Err(e) => Err(e),
}
}
/// Creates an empty collection.
///
/// The `type_` must be one of:
///
/// * [`GeometryTypes::GeometryCollection`]
/// * [`GeometryTypes::MultiPoint`]
/// * [`GeometryTypes::MultiLineString`]
/// * [`GeometryTypes::MultiPolygon`]
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry, GeometryTypes};
///
/// let geom = Geometry::create_empty_collection(GeometryTypes::MultiPolygon)
/// .expect("Failed to build empty collection");
///
/// assert_eq!(geom.to_wkt().unwrap(), "MULTIPOLYGON EMPTY");
/// ```
pub fn create_empty_collection(type_: GeometryTypes) -> GResult<Geometry<'a>> {
match type_ {
GeometryTypes::GeometryCollection
| GeometryTypes::MultiPoint
| GeometryTypes::MultiLineString
| GeometryTypes::MultiPolygon => {}
_ => return Err(Error::GenericError("Invalid geometry type".to_owned())),
}
match ContextHandle::init_e(Some("Geometry::create_empty_collection")) {
Ok(context) => unsafe {
let ptr = GEOSGeom_createEmptyCollection_r(context.as_raw(), type_.into());
Geometry::new_from_raw(ptr, Arc::new(context), "create_empty_collection")
},
Err(e) => Err(e),
}
}
/// Creates a polygon formed by the given shell and array of holes.
///
/// ### Note
///
/// `exterior` must be a `LinearRing`.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom = Geometry::new_from_wkt("LINEARRING(75.15 29.53,77 29,77.6 29.5, 75.15 29.53)")
/// .expect("Invalid geometry");
/// let polygon_geom = Geometry::create_polygon(geom, vec![])
/// .expect("create_polygon failed");
///
/// assert_eq!(
/// polygon_geom.to_wkt_precision(1).unwrap(),
/// "POLYGON ((75.2 29.5, 77.0 29.0, 77.6 29.5, 75.2 29.5))",
/// );
/// ```
pub fn create_polygon<'b>(
mut exterior: Geometry<'a>,
mut interiors: Vec<Geometry<'b>>,
) -> GResult<Geometry<'a>> {
if exterior.geometry_type() != GeometryTypes::LinearRing {
return Err(Error::GenericError(
"exterior must be a LinearRing".to_owned(),
));
}
let context_handle = exterior.clone_context();
let nb_interiors = interiors.len();
let res = unsafe {
let mut geoms: Vec<*mut GEOSGeometry> =
interiors.iter_mut().map(|g| g.as_raw_mut()).collect();
let ptr = GEOSGeom_createPolygon_r(
context_handle.as_raw(),
exterior.as_raw_mut(),
geoms.as_mut_ptr() as *mut *mut GEOSGeometry,
nb_interiors as _,
);
Geometry::new_from_raw(ptr, context_handle, "create_polygon")
};
        // We transferred the ownership of the ptr to the new Geometry,
        // so the old ones need to forget their C ptr to avoid a double free.
exterior.ptr = PtrWrap(::std::ptr::null_mut());
for i in interiors.iter_mut() {
i.ptr = PtrWrap(::std::ptr::null_mut());
}
res
}
/// Create a geometry collection.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0))")
/// .expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT (3.0 4.0)").expect("Invalid geometry");
///
/// let geom = Geometry::create_geometry_collection(vec![geom1, geom2])
/// .expect("Failed to build multipolygon");
///
/// assert_eq!(geom.to_wkt_precision(1).unwrap(),
/// "GEOMETRYCOLLECTION (POLYGON ((0.0 0.0, 10.0 0.0, 10.0 6.0, 0.0 6.0, 0.0 0.0)), \
/// POINT (3.0 4.0))");
/// ```
pub fn create_geometry_collection(geoms: Vec<Geometry<'a>>) -> GResult<Geometry<'a>> {
create_multi_geom(geoms, GeometryTypes::GeometryCollection)
}
/// Create a multi polygon geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POLYGON((0 0, 10 0, 10 6, 0 6, 0 0))")
/// .expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POLYGON((3 3, 10 3, 10 6, 3 6, 3 3))")
/// .expect("Invalid geometry");
///
/// let geom = Geometry::create_multipolygon(vec![geom1, geom2])
/// .expect("Failed to build multipolygon");
///
/// assert_eq!(geom.to_wkt_precision(1).unwrap(),
/// "MULTIPOLYGON (((0.0 0.0, 10.0 0.0, 10.0 6.0, 0.0 6.0, 0.0 0.0)), \
/// ((3.0 3.0, 10.0 3.0, 10.0 6.0, 3.0 6.0, 3.0 3.0)))");
/// ```
pub fn create_multipolygon(polygons: Vec<Geometry<'a>>) -> GResult<Geometry<'a>> {
if !check_same_geometry_type(&polygons, GeometryTypes::Polygon) {
return Err(Error::ImpossibleOperation(
"all the provided geometry have to be of type Polygon".to_owned(),
));
}
create_multi_geom(polygons, GeometryTypes::MultiPolygon)
}
/// Create a multiline string geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("LINESTRING (1.0 2.0, 3.0 4.0)").expect("invalid geometry");
/// let geom2 = Geometry::new_from_wkt("LINESTRING (5.0 6.0, 7.0 8.0)").expect("invalid geometry");
///
/// let geom = Geometry::create_multiline_string(vec![geom1, geom2])
/// .expect("Failed to build multiline string");
///
/// assert_eq!(geom.to_wkt_precision(1).unwrap(),
/// "MULTILINESTRING ((1.0 2.0, 3.0 4.0), (5.0 6.0, 7.0 8.0))");
/// ```
pub fn create_multiline_string(linestrings: Vec<Geometry<'a>>) -> GResult<Geometry<'a>> {
if !check_same_geometry_type(&linestrings, GeometryTypes::LineString) {
return Err(Error::ImpossibleOperation(
"all the provided geometry have to be of type LineString".to_owned(),
));
}
create_multi_geom(linestrings, GeometryTypes::MultiLineString)
}
/// Creates a multi point geometry.
///
/// # Example
///
/// ```
/// use geos::{Geom, Geometry};
///
/// let geom1 = Geometry::new_from_wkt("POINT (1.0 2.0)").expect("Invalid geometry");
/// let geom2 = Geometry::new_from_wkt("POINT (3.0 4.0)").expect("Invalid geometry");
///
/// let geom = Geometry::create_multipoint(vec![geom1, geom2])
/// .expect("Failed to build multipoint");
///
/// assert_eq!(geom.to_wkt_precision(1).unwrap(), "MULTIPOINT (1.0 2.0, 3.0 4.0)");
/// ```
pub fn create_multipoint(points: Vec<Geometry<'a>>) -> GResult<Geometry<'a>> {
if !check_same_geometry_type(&points, GeometryTypes::Point) {
return Err(Error::ImpossibleOperation(
"all the provided geometry have to be of type Point".to_owned(),
));
}
create_multi_geom(points, GeometryTypes::MultiPoint)
}
/// Creates a point geometry.
///
/// # Example
///
/// ```
/// use geos::{CoordDimensions, CoordSeq, Geom, Geometry};
///
/// let coords = CoordSeq::new_from_vec(&[&[1., 2.]])
/// .expect("failed to create CoordSeq");
///
/// let geom = Geometry::create_point(coords).expect("Failed to create a point");
///
/// assert_eq!(geom.to_wkt_precision(1).unwrap(), "POINT (1.0 2.0)");
/// ```
pub fn create_point(mut s: CoordSeq<'a>) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSGeom_createPoint_r(s.get_raw_context(), s.as_raw_mut());
let res = Geometry::new_from_raw(ptr, s.clone_context(), "create_point");
s.ptr = PtrWrap(::std::ptr::null_mut());
res
}
}
/// Creates a line string geometry.
///
/// # Example
///
/// ```
/// use geos::{CoordDimensions, CoordSeq, Geom, Geometry};
///
/// let coords = CoordSeq::new_from_vec(&[&[1., 2.], &[3., 4.]])
/// .expect("failed to create CoordSeq");
///
/// let geom = Geometry::create_line_string(coords).expect("Failed to create a line string");
///
/// assert_eq!(geom.to_wkt_precision(1).unwrap(), "LINESTRING (1.0 2.0, 3.0 4.0)");
/// ```
pub fn create_line_string(mut s: CoordSeq<'a>) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSGeom_createLineString_r(s.get_raw_context(), s.as_raw_mut());
let res = Geometry::new_from_raw(ptr, s.clone_context(), "create_line_string");
s.ptr = PtrWrap(::std::ptr::null_mut());
res
}
}
/// Creates a linear ring geometry.
///
/// # Example
///
/// ```
/// use geos::{CoordDimensions, CoordSeq, Geom, Geometry};
///
/// let coords = CoordSeq::new_from_vec(&[&[75.15, 29.53],
/// &[77., 29.],
/// &[77.6, 29.5],
/// &[75.15, 29.53]])
/// .expect("failed to create CoordSeq");
///
/// let geom = Geometry::create_linear_ring(coords)
    ///                .expect("Failed to create a linear ring");
///
/// assert_eq!(geom.to_wkt_precision(1).unwrap(),
/// "LINEARRING (75.2 29.5, 77.0 29.0, 77.6 29.5, 75.2 29.5)");
/// ```
pub fn create_linear_ring(mut s: CoordSeq<'a>) -> GResult<Geometry<'a>> {
unsafe {
let ptr = GEOSGeom_createLinearRing_r(s.get_raw_context(), s.as_raw_mut());
let res = Geometry::new_from_raw(ptr, s.clone_context(), "create_linear_ring");
s.ptr = PtrWrap(::std::ptr::null_mut());
res
}
}
}
impl<'a, 'b> ConstGeometry<'a, 'b> {
pub(crate) unsafe fn new_from_raw(
ptr: *const GEOSGeometry,
original: &'b Geometry<'a>,
caller: &str,
) -> GResult<ConstGeometry<'a, 'b>> {
if ptr.is_null() {
let extra = if let Some(x) = original.context.get_last_error() {
format!("\nLast error: {}", x)
} else {
String::new()
};
return Err(Error::NoConstructionFromNullPtr(format!(
"ConstGeometry::{}{}",
caller, extra
)));
}
Ok(ConstGeometry {
ptr: PtrWrap(ptr),
original,
})
}
/// Get the context handle of the geometry.
///
/// ```
/// use geos::{ContextInteractions, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
/// let context = point_geom.get_context_handle();
/// context.set_notice_message_handler(Some(Box::new(|s| println!("new message: {}", s))));
/// ```
pub fn get_context_handle(&self) -> &ContextHandle<'a> {
&self.original.context
}
}
impl<'a> Clone for Geometry<'a> {
/// Also passes the context to the newly created `Geometry`.
fn clone(&self) -> Geometry<'a> {
Geom::clone(self)
}
}
impl<'a> Drop for Geometry<'a> {
fn drop(&mut self) {
if !self.ptr.is_null() {
unsafe { GEOSGeom_destroy_r(self.get_raw_context(), self.as_raw_mut()) }
}
}
}
impl<'a> ContextInteractions<'a> for Geometry<'a> {
/// Set the context handle to the geometry.
///
/// ```
/// use geos::{ContextInteractions, ContextHandle, Geometry};
///
/// let context_handle = ContextHandle::init().expect("invalid init");
/// context_handle.set_notice_message_handler(Some(Box::new(|s| println!("new message: {}", s))));
/// let mut point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
/// point_geom.set_context_handle(context_handle);
/// ```
fn set_context_handle(&mut self, context: ContextHandle<'a>) {
self.context = Arc::new(context);
}
/// Get the context handle of the geometry.
///
/// ```
/// use geos::{ContextInteractions, Geometry};
///
/// let point_geom = Geometry::new_from_wkt("POINT (2.5 2.5)").expect("Invalid geometry");
/// let context = point_geom.get_context_handle();
/// context.set_notice_message_handler(Some(Box::new(|s| println!("new message: {}", s))));
/// ```
fn get_context_handle(&self) -> &ContextHandle<'a> {
&self.context
}
}
impl<'a> AsRaw for Geometry<'a> {
type RawType = GEOSGeometry;
fn as_raw(&self) -> *const Self::RawType {
*self.ptr
}
}
impl<'a> AsRawMut for Geometry<'a> {
type RawType = GEOSGeometry;
unsafe fn as_raw_mut_override(&self) -> *mut Self::RawType {
*self.ptr
}
}
impl<'a> ContextHandling for Geometry<'a> {
type Context = Arc<ContextHandle<'a>>;
fn get_raw_context(&self) -> GEOSContextHandle_t {
self.context.as_raw()
}
fn clone_context(&self) -> Arc<ContextHandle<'a>> {
Arc::clone(&self.context)
}
}
impl<'a, 'd> AsRaw for ConstGeometry<'a, 'd> {
type RawType = GEOSGeometry;
fn as_raw(&self) -> *const Self::RawType {
*self.ptr
}
}
impl<'a, 'd> ContextHandling for ConstGeometry<'a, 'd> {
type Context = Arc<ContextHandle<'a>>;
fn get_raw_context(&self) -> GEOSContextHandle_t {
self.original.context.as_raw()
}
fn clone_context(&self) -> Arc<ContextHandle<'a>> {
Arc::clone(&self.original.context)
}
}
|
let geoms = geometries
|
control_utils.py
|
#
# MIT License
#
# Copyright (c) 2020-2021 NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import math
import numpy as np
import torch
from torch.distributions.multivariate_normal import MultivariateNormal
import ghalton
def scale_ctrl(ctrl, action_lows, action_highs, squash_fn='clamp'):
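    # Map controls into [action_lows, action_highs]. 'clamp' and 'identity'
    # return in the original units; 'clamp_rescale' and 'tanh' first squash
    # into [-1, 1] and are then rescaled around the action mid-range below.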
if len(ctrl.shape) == 1:
ctrl = ctrl[np.newaxis, :, np.newaxis]
act_half_range = (action_highs - action_lows) / 2.0
act_mid_range = (action_highs + action_lows) / 2.0
if squash_fn == 'clamp':
# ctrl = torch.clamp(ctrl, action_lows[0], action_highs[0])
ctrl = torch.max(torch.min(ctrl, action_highs), action_lows)
return ctrl
elif squash_fn == 'clamp_rescale':
ctrl = torch.clamp(ctrl, -1.0, 1.0)
elif squash_fn == 'tanh':
ctrl = torch.tanh(ctrl)
elif squash_fn == 'identity':
return ctrl
return act_mid_range.unsqueeze(0) + ctrl * act_half_range.unsqueeze(0)
#######################
## STOMP Covariance ##
#######################
def get_stomp_cov(horizon, d_action,
tensor_args={'device':torch.device('cpu'),'dtype':torch.float32},
cov_mode='vel', RETURN_R=False):
""" Computes the covariance matrix following STOMP motion planner
Coefficients from here: https://en.wikipedia.org/wiki/Finite_difference_coefficient
More info here: https://github.com/ros-industrial/stomp_ros/blob/7fe40fbe6ad446459d8d4889916c64e276dbf882/stomp_core/src/utils.cpp#L36
"""
    acc_fd_array = [0, -1/12, 4/3, -5/2, 4/3, -1/12, 0]
    #acc_fd_array = [1/90, -3/20, 3/2, -49/18, 3/2, -3/20, 1/90]
    #jerk_fd_array = [0, 1/12.0, -17/12.0, 46/12.0, -46/12.0, 17/12.0, -1/12.0]
    jerk_fd_array = [1/8.0, -1, 13/8, 0, -13/8, 1, -1/8]
    #snap_fd_array = [-1/6, 2.0, -13/2, 28/3, -13/2, 2, -1/6]
    snap_fd_array = [0, 1, -4, 6, -4, 1, 0]
    #vel_fd_array = [0, 1.0/12.0, -2.0/3.0, 0, 2.0/3.0, -1.0/12.0, 0]
    vel_fd_array = [0, 0, 1, -2, 1, 0, 0]
    fd_array = acc_fd_array
A = torch.zeros((d_action * horizon, d_action * horizon), device=tensor_args['device'],dtype=torch.float64)
if(cov_mode == 'vel'):
for k in range(d_action):
for i in range(0, horizon):
for j in range(-3,4):
#print(j)
index = i + j
if(index < 0):
index = 0
continue
if(index >= horizon):
index = horizon - 1
continue
A[k * horizon + i,k * horizon + index] = fd_array[j + 3]
elif(cov_mode == 'acc'):
for k in range(d_action):
for i in range(0, horizon):
for j in range(-3,4):
#print(j)
index = i + j
if(index < 0):
index = 0
continue
if(index >= horizon):
index = horizon - 1
continue
if(index >= horizon/2):
#print(k * horizon + index - horizon//2)
A[k * horizon + i,k * horizon - index - horizon//2 -1] = fd_array[j + 3] #* float((horizon-index) / horizon)
else:
A[k * horizon + i,k * horizon + index] = fd_array[j + 3] #* float(index/horizon)
#plt.imshow(A)
#plt.show()
R = torch.matmul(A.transpose(-2,-1), A)
#print(R[:horizon, :horizon])
#plt.imshow(R)
#plt.show()
#print(R)
#print(torch.det(R))
cov = torch.inverse(R)
cov = cov / torch.max(torch.abs(cov))
#plt.imshow(cov)
#plt.show()
# also compute the cholesky decomposition:
scale_tril = torch.zeros((d_action * horizon, d_action * horizon), **tensor_args)
scale_tril = torch.linalg.cholesky(cov)
'''
k = 0
act_cov_matrix = cov[k * horizon:k * horizon + horizon, k * horizon:k * horizon + horizon]
print(act_cov_matrix.shape)
print(torch.det(act_cov_matrix))
local_cholesky = matrix_cholesky(act_cov_matrix)
for k in range(d_action):
scale_tril[k * horizon:k * horizon + horizon,k * horizon:k * horizon + horizon] = local_cholesky
'''
cov = cov.to(**tensor_args)
scale_tril = scale_tril.to(**tensor_args) #* 0.1
scale_tril = scale_tril / torch.max(scale_tril)
if(RETURN_R):
return cov, scale_tril, R
return cov, scale_tril
#######################
## Gaussian Sampling ##
#######################
def generate_noise(cov, shape, base_seed, filter_coeffs=None, device=torch.device('cpu')):
"""
Generate correlated Gaussian samples using autoregressive process
"""
torch.manual_seed(base_seed)
    N = cov.shape[0]
    m = MultivariateNormal(loc=torch.zeros(N).to(device), covariance_matrix=cov)
    eps = m.sample(sample_shape=shape)
    # eps = np.random.multivariate_normal(mean=np.zeros((N,)), cov = cov, size=shape)
    if filter_coeffs is not None:
        # Unpack only when filtering is requested; unpacking None would raise a TypeError.
        beta_0, beta_1, beta_2 = filter_coeffs
        for i in range(2, eps.shape[1]):
            eps[:,i,:] = beta_0*eps[:,i,:] + beta_1*eps[:,i-1,:] + beta_2*eps[:,i-2,:]
    return eps
def generate_noise_np(cov, shape, base_seed, filter_coeffs=None):
"""
Generate correlated noisy samples using autoregressive process
"""
np.random.seed(base_seed)
    N = cov.shape[0]
    eps = np.random.multivariate_normal(mean=np.zeros((N,)), cov = cov, size=shape)
    if filter_coeffs is not None:
        # Unpack only when filtering is requested; unpacking None would raise a TypeError.
        beta_0, beta_1, beta_2 = filter_coeffs
        for i in range(2, eps.shape[1]):
            eps[:,i,:] = beta_0*eps[:,i,:] + beta_1*eps[:,i-1,:] + beta_2*eps[:,i-2,:]
    return eps
###########################
## Quasi-Random Sampling ##
###########################
def generate_prime_numbers(num):
def is_prime(n):
        for j in range(2, (n // 2) + 1):
if n % j == 0:
return False
return True
primes = [0] * num #torch.zeros(num, device=device)
primes[0] = 2
curr_num = 1
for i in range(1, num):
while True:
curr_num += 2
if is_prime(curr_num):
primes[i] = curr_num
break
return primes
def generate_van_der_corput_sample(idx, base):
f, r = 1.0, 0
while idx > 0:
f /= base*1.0
r += f * (idx % base)
idx = idx // base
return r
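# Worked example: generate_van_der_corput_sample(4, 2) reflects binary 100
# about the radix point, giving 0.001 in base 2, i.e. it returns 0.125.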
def generate_van_der_corput_samples_batch(idx_batch, base):
inp_device = idx_batch.device
batch_size = idx_batch.shape[0]
f = 1.0 #torch.ones(batch_size, device=inp_device)
r = torch.zeros(batch_size, device=inp_device)
while torch.any(idx_batch > 0):
f /= base*1.0
r += f * (idx_batch % base) #* (idx_batch > 0)
idx_batch = idx_batch // base
return r
# def generate_van_der_corput_samples_batch_2(idx_batch, bases):
# inp_device = idx_batch.device
# batch_size = idx_batch.shape[0]
# f = torch.ones(batch_size, device=inp_device)
# r = torch.zeros(batch_size, device=inp_device)
# while torch.any(idx_batch > 0):
# f /= bases*1.0
# r += f * (idx_batch % base) #* (idx_batch > 0)
# idx_batch = idx_batch // base
# return r
def generate_halton_samples(num_samples, ndims, bases=None, use_ghalton=True, seed_val=123, device=torch.device('cpu'), float_dtype=torch.float64):
if not use_ghalton:
samples = torch.zeros(num_samples, ndims, device=device, dtype=float_dtype)
if not bases:
bases = generate_prime_numbers(ndims)
idx_batch = torch.arange(1,num_samples+1, device=device)
for dim in range(ndims):
samples[:, dim] = generate_van_der_corput_samples_batch(idx_batch, bases[dim])
else:
if ndims <= 100:
perms = ghalton.EA_PERMS[:ndims]
sequencer = ghalton.GeneralizedHalton(perms)
else:
sequencer = ghalton.GeneralizedHalton(ndims, seed_val)
samples = torch.tensor(sequencer.get(num_samples), device=device, dtype=float_dtype)
return samples
def generate_gaussian_halton_samples(num_samples, ndims, bases=None, use_ghalton=True, seed_val=123, device=torch.device('cpu'), float_dtype=torch.float64):
uniform_halton_samples = generate_halton_samples(num_samples, ndims, bases, use_ghalton, seed_val, device, float_dtype)
gaussian_halton_samples = torch.sqrt(torch.tensor([2.0],device=device,dtype=float_dtype)) * torch.erfinv(2 * uniform_halton_samples - 1)
return gaussian_halton_samples
def generate_gaussian_sobol_samples(num_samples, ndims, seed_val, device=torch.device('cpu'), float_dtype=torch.float64):
soboleng = torch.quasirandom.SobolEngine(dimension=ndims, scramble=True, seed=seed_val)
uniform_sobol_samples = soboleng.draw(num_samples).to(device)
gaussian_sobol_samples = torch.sqrt(torch.tensor([2.0],device=device,dtype=float_dtype)) * torch.erfinv(2 * uniform_sobol_samples - 1)
return gaussian_sobol_samples
########################
## Gaussian Utilities ##
########################
def gaussian_logprob(mean, cov, x, cov_type="full"):
|
def gaussian_logprobgrad(mean, cov, x, cov_type="full"):
if cov_type == "diagonal":
cov_inv = np.diag(1.0/cov.diagonal())
else:
cov_inv = np.linalg.inv(cov)
diff = (x - mean).T
grad = diff @ cov_inv
return grad
def gaussian_entropy(cov=None, L=None): #, cov_type="full"):
"""
Entropy of multivariate gaussian given either covariance
or cholesky decomposition of covariance
"""
if cov is not None:
inp_device = cov.device
cov_logdet = torch.log(torch.det(cov))
# print(np.linalg.det(cov.cpu().numpy()))
# print(torch.det(cov))
N = cov.shape[0]
else:
inp_device = L.device
cov_logdet = 2.0 * torch.sum(torch.log(torch.diagonal(L)))
N = L.shape[0]
# if cov_type == "diagonal":
# cov_logdet = np.sum(np.log(cov.diagonal()))
# else:
# cov_logdet = np.log(np.linalg.det(cov))
term1 = 0.5 * cov_logdet
# pi = torch.tensor([math.pi], device=inp_device)
# pre-calculate 1.0 + torch.log(2.0*pi) = 2.837877066
term2 = 0.5 * N * 2.837877066
ent = term1 + term2
return ent.to(inp_device)
def gaussian_kl(mean0, cov0, mean1, cov1, cov_type="full"):
"""
KL-divergence between Gaussians given mean and covariance
KL(p||q) = E_{p}[log(p) - log(q)]
"""
N = cov0.shape[0]
if cov_type == "diagonal":
cov1_diag = cov1.diagonal()
cov1_inv = np.diag(1.0 / cov1_diag)
cov0_logdet = np.sum(np.log(cov0.diagonal()))
cov1_logdet = np.sum(np.log(cov1_diag))
else:
cov1_inv = np.linalg.inv(cov1)
cov0_logdet = np.log(np.linalg.det(cov0))
cov1_logdet = np.log(np.linalg.det(cov1))
term1 = 0.5 * np.trace(cov1_inv @ cov0)
diff = (mean1 - mean0).T
mahalanobis_dist = 0.5 * np.sum((diff @ cov1_inv) * diff, axis=1)
term3 = 0.5 * (-1.0*N + cov1_logdet - cov0_logdet)
return term1 + mahalanobis_dist + term3
def cost_to_go(cost_seq, gamma_seq):
"""
Calculate (discounted) cost to go for given cost sequence
"""
# if torch.any(gamma_seq == 0):
# return cost_seq
cost_seq = gamma_seq * cost_seq # discounted cost sequence
# cost_seq = torch.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])
cost_seq = torch.fliplr(torch.cumsum(torch.fliplr(cost_seq), axis=-1)) # cost to go (but scaled by [1 , gamma, gamma*2 and so on])
cost_seq /= gamma_seq # un-scale it to get true discounted cost to go
return cost_seq
def cost_to_go_np(cost_seq, gamma_seq):
"""
Calculate (discounted) cost to go for given cost sequence
"""
# if np.any(gamma_seq == 0):
# return cost_seq
    cost_seq = gamma_seq * cost_seq # discounted cost sequence
cost_seq = np.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])
cost_seq /= gamma_seq # un-scale it to get true discounted cost to go
return cost_seq
############
##Cholesky##
############
def matrix_cholesky(A):
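    # Cholesky-Banachiewicz recurrence: for i > j,
    #   L[i][j] = (A[i][j] - sum_k L[i][k] * L[j][k]) / L[j][j]
    # with a square root on the diagonal. A must be symmetric positive-definite.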
L = torch.zeros_like(A)
for i in range(A.shape[-1]):
for j in range(i+1):
s = 0.0
for k in range(j):
s = s + L[i,k] * L[j,k]
L[i,j] = torch.sqrt(A[i,i] - s) if (i == j) else \
(1.0 / L[j,j] * (A[i,j] - s))
return L
# Batched Cholesky decomp
def batch_cholesky(A):
L = torch.zeros_like(A)
for i in range(A.shape[-1]):
for j in range(i+1):
s = 0.0
for k in range(j):
s = s + L[...,i,k] * L[...,j,k]
L[...,i,j] = torch.sqrt(A[...,i,i] - s) if (i == j) else \
(1.0 / L[...,j,j] * (A[...,i,j] - s))
return L
|
"""
Calculate gaussian log prob for given input batch x
Parameters
----------
mean (np.ndarray): [N x num_samples] batch of means
cov (np.ndarray): [N x N] covariance matrix
x (np.ndarray): [N x num_samples] batch of sample values
Returns
--------
    log_prob (np.ndarray): [num_samples] log probability of each sample
"""
N = cov.shape[0]
if cov_type == "diagonal":
cov_diag = cov.diagonal()
cov_inv = np.diag(1.0 / cov_diag)
cov_logdet = np.sum(np.log(cov_diag))
else:
cov_logdet = np.log(np.linalg.det(cov))
cov_inv = np.linalg.inv(cov)
diff = (x - mean).T
mahalanobis_dist = -0.5 * np.sum((diff @ cov_inv) * diff, axis=1)
const1 = -0.5 * N * np.log(2.0 * np.pi)
const2 = -0.5*cov_logdet
log_prob = mahalanobis_dist + const1 + const2
return log_prob
|
pipeline_selector_cookie.js
|
/*
* Copyright 2021 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var Blacklist = Class.create({
initialize: function(pipelineNames){
if(pipelineNames) {
this.hiddenPipelineNames = pipelineNames;
} else {
this._restoreHiddenPipelineNames();
}
},
_restoreHiddenPipelineNames: function(){
var name_array_string = getCookie('hidden-pipeline-names');
if(name_array_string && !name_array_string.blank()){
this.hiddenPipelineNames = $A(name_array_string.split(',')).invoke('strip');
} else {
this.hiddenPipelineNames = $A();
}
},
persist: function() {
setCookie('pipeline-selector-strategy', 'blacklist');
setCookie('hidden-pipeline-names', this.hiddenPipelineNames.join(','));
},
_cleanCookie: function(){
deleteCookie('hidden-pipeline-names');
},
isPipelineHidden: function(pipelineName) {
return this.hiddenPipelineNames.include(pipelineName);
},
showPipeline: function(pipelineName) {
this.hiddenPipelineNames = this.hiddenPipelineNames.without(pipelineName);
},
showPipelines: function(pipelineNames) {
var shouldHide = $A();
for(var i = 0; i < this.hiddenPipelineNames.length; i++) {
if(!pipelineNames.include(this.hiddenPipelineNames[i])){
shouldHide.push(this.hiddenPipelineNames[i]);
}
}
this.hiddenPipelineNames = shouldHide;
},
|
this.hiddenPipelineNames.push(pipelineName);
},
hidePipelines: function(pipelineNames) {
for(var i = 0; i < pipelineNames.length; i++) {
this.hidePipeline(pipelineNames[i]);
}
},
hasNoHiddenPipeline: function() {
return this.hiddenPipelineNames.length == 0;
},
hasNoShownPipeline: function(totalPipelinesCount) {
return this.hiddenPipelineNames.length == totalPipelinesCount;
},
pipelinesCount: function() {
return this.hiddenPipelineNames.length;
},
inverse: function(allPipelineNames) {
this._cleanCookie();
return new Whitelist(arrayMinus(allPipelineNames, this.hiddenPipelineNames));
},
toString: function() {
return 'blacklist';
}
});
function arrayMinus(array1, array2) {
var result = $A();
for(var i = 0; i < array1.length; i++) {
if(!array2.include(array1[i])) {
result.push(array1[i]);
}
}
return result;
}
var Whitelist = Class.create({
initialize: function(pipelineNames){
if(pipelineNames) {
this.shownPipelineNames = pipelineNames;
} else {
this._restoreShownPipelineNames();
}
},
_restoreShownPipelineNames: function(){
var name_array_string = getCookie('shown-pipeline-names');
if(name_array_string && !name_array_string.blank()){
this.shownPipelineNames = $A(name_array_string.split(',')).invoke('strip');
} else {
this.shownPipelineNames = $A();
}
},
persist: function() {
setCookie('pipeline-selector-strategy', 'whitelist');
setCookie('shown-pipeline-names', this.shownPipelineNames.join(','));
},
_cleanCookie: function(){
deleteCookie('shown-pipeline-names');
},
isPipelineHidden: function(pipelineName) {
return !this.shownPipelineNames.include(pipelineName);
},
showPipeline: function(pipelineName) {
this.shownPipelineNames.push(pipelineName);
},
showPipelines: function(pipelineNames) {
for(var i = 0; i < pipelineNames.length; i++) {
this.showPipeline(pipelineNames[i]);
}
},
hidePipeline: function(pipelineName) {
this.shownPipelineNames = this.shownPipelineNames.without(pipelineName);
},
hidePipelines: function(pipelineNames) {
var shouldShow = $A();
for(var i = 0; i < this.shownPipelineNames.length; i++) {
if(!pipelineNames.include(this.shownPipelineNames[i])){
shouldShow.push(this.shownPipelineNames[i]);
}
}
this.shownPipelineNames = shouldShow;
},
hasNoHiddenPipeline: function(totalPipelinesCount) {
return this.shownPipelineNames.length == totalPipelinesCount;
},
hasNoShownPipeline: function() {
return this.shownPipelineNames.length == 0;
},
pipelinesCount: function() {
return this.shownPipelineNames.length;
},
inverse: function(allPipelineNames) {
this._cleanCookie();
return new Blacklist(arrayMinus(allPipelineNames, this.shownPipelineNames));
},
toString: function() {
return 'whitelist';
}
});
var PipelineSelectorCookie = Class.create({
initialize: function(){
if(getCookie('pipeline-selector-strategy') == 'whitelist'){
this.strategy = new Whitelist();
} else {
this.strategy = new Blacklist();
}
},
persist: function(allPipelineNames){
if(allPipelineNames) {
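            // Keep the cookie small: with fewer than 70 pipelines always store
            // the blacklist; otherwise store whichever list (hidden or shown)
            // is the shorter one.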
            var alwaysUseBlacklist = allPipelineNames.length < 70;
            if(alwaysUseBlacklist){
if(this.strategy.toString() == 'whitelist'){
this.strategy = this.strategy.inverse(allPipelineNames);
}
} else if (this.strategy.pipelinesCount() > allPipelineNames.length/2){
this.strategy = this.strategy.inverse(allPipelineNames);
}
}
this.strategy.persist();
},
isPipelineHidden: function(pipelineName) {
return this.strategy.isPipelineHidden(pipelineName);
},
showPipeline: function(pipelineName) {
this.strategy.showPipeline(pipelineName);
},
showPipelines: function(pipelineNames) {
this.strategy.showPipelines(pipelineNames);
},
hidePipeline: function(pipelineName) {
this.strategy.hidePipeline(pipelineName);
},
hidePipelines: function(pipelineNames) {
this.strategy.hidePipelines(pipelineNames);
},
hasNoHiddenPipeline: function(totalPipelinesCount) {
return this.strategy.hasNoHiddenPipeline(totalPipelinesCount);
},
hasNoShownPipeline: function(totalPipelinesCount) {
return this.strategy.hasNoShownPipeline(totalPipelinesCount);
}
});
|
hidePipeline: function(pipelineName) {
|
cookie-utils.go
|
package easyequities
import (
"encoding/base64"
"encoding/json"
)
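// These helpers round-trip session cookies through an opaque auth token
// (JSON-encoded, then base64). Example (values are illustrative):
//
//	token, _ := encodeToAuthToken(map[string]string{"session": "abc"})
//	cookies, _ := decodeFromAuthToken(token)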
func encodeToAuthToken(cookies map[string]string) (string, error) {
jsonObj, err := json.Marshal(cookies)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(jsonObj), nil
}
func decodeFromAuthToken(authToken string) (map[string]string, error) {
jsonObj, err := base64.StdEncoding.DecodeString(authToken)
if err != nil {
return nil, err
}
var cookies map[string]string
err = json.Unmarshal(jsonObj, &cookies)
if err != nil
|
return cookies, nil
}
|
{
return nil, err
}
|
0026_auto_20170819_0854.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-18 23:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
|
dependencies = [
('user', '0025_remove_user_user_todo'),
]
operations = [
migrations.RenameField(
model_name='skill',
old_name='user_todo',
new_name='skilltodo',
),
migrations.RenameField(
model_name='skill',
old_name='frame_user',
new_name='userskill',
),
]
|
|
install.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
import os
import shutil
import sys
import textwrap
import llnl.util.filesystem as fs
import llnl.util.tty as tty
import spack.build_environment
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.fetch_strategy
import spack.monitor
import spack.paths
import spack.report
from spack.error import SpackError
from spack.installer import PackageInstaller
description = "build and install packages"
section = "build"
level = "short"
def update_kwargs_from_args(args, kwargs):
"""Parse cli arguments and construct a dictionary
that will be passed to the package installer."""
kwargs.update({
'fail_fast': args.fail_fast,
'keep_prefix': args.keep_prefix,
'keep_stage': args.keep_stage,
'restage': not args.dont_restage,
'install_source': args.install_source,
'verbose': args.verbose,
'fake': args.fake,
'dirty': args.dirty,
'use_cache': args.use_cache,
'cache_only': args.cache_only,
'include_build_deps': args.include_build_deps,
'explicit': True, # Always true for install command
'stop_at': args.until,
'unsigned': args.unsigned,
'full_hash_match': args.full_hash_match,
})
kwargs.update({
'install_deps': ('dependencies' in args.things_to_install),
'install_package': ('package' in args.things_to_install)
})
if hasattr(args, 'setup'):
setups = set()
for arglist_s in args.setup:
for arg in [x.strip() for x in arglist_s.split(',')]:
setups.add(arg)
kwargs['setup'] = setups
tty.msg('Setup={0}'.format(kwargs['setup']))
def setup_parser(subparser):
subparser.add_argument(
'--only',
default='package,dependencies',
dest='things_to_install',
choices=['package', 'dependencies'],
help="""select the mode of installation.
the default is to install the package along with all its dependencies.
alternatively one can decide to install only the package or only
the dependencies"""
)
subparser.add_argument(
'-u', '--until', type=str, dest='until', default=None,
help="phase to stop after when installing (default None)")
arguments.add_common_arguments(subparser, ['jobs'])
subparser.add_argument(
'--overwrite', action='store_true',
help="reinstall an existing spec, even if it has dependents")
subparser.add_argument(
'--fail-fast', action='store_true',
help="stop all builds if any build fails (default is best effort)")
subparser.add_argument(
'--keep-prefix', action='store_true',
help="don't remove the install prefix if installation fails")
subparser.add_argument(
'--keep-stage', action='store_true',
help="don't remove the build stage if installation succeeds")
subparser.add_argument(
'--dont-restage', action='store_true',
help="if a partial install is detected, don't delete prior state")
cache_group = subparser.add_mutually_exclusive_group()
cache_group.add_argument(
'--use-cache', action='store_true', dest='use_cache', default=True,
help="check for pre-built Spack packages in mirrors (default)")
cache_group.add_argument(
'--no-cache', action='store_false', dest='use_cache', default=True,
help="do not check for pre-built Spack packages in mirrors")
cache_group.add_argument(
'--cache-only', action='store_true', dest='cache_only', default=False,
help="only install package from binary mirrors")
monitor_group = spack.monitor.get_monitor_group(subparser) # noqa
subparser.add_argument(
'--include-build-deps', action='store_true', dest='include_build_deps',
default=False, help="""include build deps when installing from cache,
which is useful for CI pipeline troubleshooting""")
subparser.add_argument(
'--no-check-signature', action='store_true',
dest='unsigned', default=False,
help="do not check signatures of binary packages")
subparser.add_argument(
'--require-full-hash-match', action='store_true',
dest='full_hash_match', default=False, help="""when installing from
binary mirrors, do not install binary package unless the full hash of the
remote spec matches that of the local spec""")
subparser.add_argument(
'--show-log-on-error', action='store_true',
help="print full build log to stderr if build fails")
subparser.add_argument(
'--source', action='store_true', dest='install_source',
help="install source files in prefix")
arguments.add_common_arguments(subparser, ['no_checksum', 'deprecated'])
subparser.add_argument(
'-v', '--verbose', action='store_true',
help="display verbose build output while installing")
subparser.add_argument(
'--fake', action='store_true',
help="fake install for debug purposes.")
subparser.add_argument(
'--only-concrete', action='store_true', default=False,
help='(with environment) only install already concretized specs')
subparser.add_argument(
'--no-add', action='store_true', default=False,
help="""(with environment) only install specs provided as argument
if they are already in the concretized environment""")
subparser.add_argument(
'-f', '--file', action='append', default=[],
dest='specfiles', metavar='SPEC_YAML_FILE',
help="install from file. Read specs to install from .yaml files")
cd_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
testing = subparser.add_mutually_exclusive_group()
testing.add_argument(
'--test', default=None,
choices=['root', 'all'],
help="""If 'root' is chosen, run package tests during
installation for top-level packages (but skip tests for dependencies).
    If 'all' is chosen, run package tests during installation for all
    packages. If neither is chosen, don't run tests for any packages."""
)
testing.add_argument(
'--run-tests', action='store_true',
help='run package tests during installation (same as --test=all)'
)
subparser.add_argument(
'--log-format',
default=None,
choices=spack.report.valid_formats,
help="format to be used for log files"
)
subparser.add_argument(
'--log-file',
default=None,
help="filename for the log file. if not passed a default will be used"
)
subparser.add_argument(
'--help-cdash',
action='store_true',
help="Show usage instructions for CDash reporting"
)
arguments.add_cdash_args(subparser, False)
arguments.add_common_arguments(subparser, ['yes_to_all', 'spec'])
def default_log_file(spec):
"""Computes the default filename for the log file and creates
the corresponding directory if not present
"""
fmt = 'test-{x.name}-{x.version}-{hash}.xml'
basename = fmt.format(x=spec, hash=spec.dag_hash())
dirname = fs.os.path.join(spack.paths.reports_path, 'junit')
fs.mkdirp(dirname)
return fs.os.path.join(dirname, basename)
def install_specs(cli_args, kwargs, specs):
"""Do the actual installation.
Args:
cli_args (argparse.Namespace): argparse namespace with command arguments
kwargs (dict): keyword arguments
specs (list): list of (abstract, concrete) spec tuples
"""
# handle active environment, if any
env = ev.get_env(cli_args, 'install')
try:
if env:
specs_to_install = []
specs_to_add = []
for abstract, concrete in specs:
# This won't find specs added to the env since last
# concretize, therefore should we consider enforcing
# concretization of the env before allowing to install
# specs?
m_spec = env.matching_spec(abstract)
                # If there is any ambiguity in the above call to matching_spec
                # (i.e. if more than one spec in the environment matches), then
                # SpackEnvironmentError is raised, with a message listing the
                # matches. Getting to this point means there were either
                # no matches or exactly one match.
if not m_spec:
tty.debug('{0} matched nothing in the env'.format(
abstract.name))
# no matches in the env
if cli_args.no_add:
msg = ('You asked to install {0} without adding it ' +
'(--no-add), but no such spec exists in ' +
'environment').format(abstract.name)
tty.die(msg)
else:
tty.debug('adding {0} as a root'.format(abstract.name))
specs_to_add.append((abstract, concrete))
continue
tty.debug('exactly one match for {0} in env -> {1}'.format(
m_spec.name, m_spec.dag_hash()))
if m_spec in env.roots() or cli_args.no_add:
                    # either the single match is a root spec (and --no-add is
                    # the default for roots) or --no-add was stated explicitly
tty.debug('just install {0}'.format(m_spec.name))
specs_to_install.append(m_spec)
else:
# the single match is not a root (i.e. it's a dependency),
# and --no-add was not specified, so we'll add it as a
# root before installing
tty.debug('add {0} then install it'.format(m_spec.name))
specs_to_add.append((abstract, concrete))
if specs_to_add:
tty.debug('Adding the following specs as roots:')
for abstract, concrete in specs_to_add:
tty.debug(' {0}'.format(abstract.name))
with env.write_transaction():
specs_to_install.append(
env.concretize_and_add(abstract, concrete))
env.write(regenerate=False)
# Install the validated list of cli specs
if specs_to_install:
tty.debug('Installing the following cli specs:')
for s in specs_to_install:
tty.debug(' {0}'.format(s.name))
env.install_specs(specs_to_install, args=cli_args, **kwargs)
else:
installs = [(concrete.package, kwargs) for _, concrete in specs]
builder = PackageInstaller(installs)
builder.install()
except spack.build_environment.InstallError as e:
if cli_args.show_log_on_error:
e.print_context()
if not os.path.exists(e.pkg.build_log_path):
tty.error("'spack install' created no log.")
else:
sys.stderr.write('Full build log:\n')
with open(e.pkg.build_log_path) as log:
shutil.copyfileobj(log, sys.stderr)
raise
def install(parser, args, **kwargs):
if args.help_cdash:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
|
epilog=textwrap.dedent('''\
environment variables:
SPACK_CDASH_AUTH_TOKEN
authentication token to present to CDash
'''))
arguments.add_cdash_args(parser, True)
parser.print_help()
return
# The user wants to monitor builds using github.com/spack/spack-monitor
if args.use_monitor:
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
disable_auth=args.monitor_disable_auth,
tags=args.monitor_tags,
save_local=args.monitor_save_local,
)
reporter = spack.report.collect_info(
spack.package.PackageInstaller, '_install_task', args.log_format, args)
if args.log_file:
reporter.filename = args.log_file
if args.run_tests:
tty.warn("Deprecated option: --run-tests: use --test=all instead")
def get_tests(specs):
if args.test == 'all' or args.run_tests:
return True
elif args.test == 'root':
return [spec.name for spec in specs]
else:
return False
if not args.spec and not args.specfiles:
# if there are no args but an active environment
# then install the packages from it.
env = ev.get_env(args, 'install')
if env:
tests = get_tests(env.user_specs)
kwargs['tests'] = tests
if not args.only_concrete:
with env.write_transaction():
concretized_specs = env.concretize(tests=tests)
ev.display_specs(concretized_specs)
# save view regeneration for later, so that we only do it
# once, as it can be slow.
env.write(regenerate=False)
specs = env.all_specs()
if not args.log_file and not reporter.filename:
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
# Tell the monitor about the specs
if args.use_monitor and specs:
monitor.new_configuration(specs)
tty.msg("Installing environment {0}".format(env.name))
with reporter('build'):
env.install_all(args, **kwargs)
tty.debug("Regenerating environment views for {0}"
.format(env.name))
with env.write_transaction():
# write env to trigger view generation and modulefile
# generation
env.write()
return
else:
msg = "install requires a package argument or active environment"
if 'spack.yaml' in os.listdir(os.getcwd()):
# There's a spack.yaml file in the working dir, the user may
# have intended to use that
msg += "\n\n"
msg += "Did you mean to install using the `spack.yaml`"
msg += " in this directory? Try: \n"
msg += " spack env activate .\n"
msg += " spack install\n"
msg += " OR\n"
msg += " spack --env . install"
tty.die(msg)
if args.no_checksum:
spack.config.set('config:checksum', False, scope='command_line')
if args.deprecated:
spack.config.set('config:deprecated', True, scope='command_line')
# Parse cli arguments and construct a dictionary
# that will be passed to the package installer
update_kwargs_from_args(args, kwargs)
# 1. Abstract specs from cli
abstract_specs = spack.cmd.parse_specs(args.spec)
tests = get_tests(abstract_specs)
kwargs['tests'] = tests
try:
specs = spack.cmd.parse_specs(
args.spec, concretize=True, tests=tests)
except SpackError as e:
tty.debug(e)
reporter.concretization_report(e.message)
raise
# 2. Concrete specs from yaml files
for file in args.specfiles:
with open(file, 'r') as f:
s = spack.spec.Spec.from_yaml(f)
concretized = s.concretized()
if concretized.dag_hash() != s.dag_hash():
msg = 'skipped invalid file "{0}". '
msg += 'The file does not contain a concrete spec.'
tty.warn(msg.format(file))
continue
abstract_specs.append(s)
specs.append(concretized)
if len(specs) == 0:
tty.die('The `spack install` command requires a spec to install.')
if not args.log_file and not reporter.filename:
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
with reporter('build'):
if args.overwrite:
installed = list(filter(lambda x: x,
map(spack.store.db.query_one, specs)))
if not args.yes_to_all:
display_args = {
'long': True,
'show_flags': True,
'variants': True
}
if installed:
tty.msg('The following package specs will be '
'reinstalled:\n')
spack.cmd.display_specs(installed, **display_args)
not_installed = list(filter(lambda x: x not in installed,
specs))
if not_installed:
tty.msg('The following package specs are not installed and'
' the --overwrite flag was given. The package spec'
' will be newly installed:\n')
spack.cmd.display_specs(not_installed, **display_args)
# We have some specs, so one of the above must have been true
answer = tty.get_yes_or_no(
'Do you want to proceed?', default=False
)
if not answer:
tty.die('Reinstallation aborted.')
# overwrite all concrete explicit specs from this build
kwargs['overwrite'] = [spec.dag_hash() for spec in specs]
# Update install_args with the monitor args, needed for build task
kwargs.update({
"monitor_disable_auth": args.monitor_disable_auth,
"monitor_keep_going": args.monitor_keep_going,
"monitor_host": args.monitor_host,
"use_monitor": args.use_monitor,
"monitor_prefix": args.monitor_prefix,
})
# If we are using the monitor, we send configs. and create build
# The full_hash is the main package id, the build_hash for others
if args.use_monitor and specs:
monitor.new_configuration(specs)
install_specs(args, kwargs, zip(abstract_specs, specs))
| |
discordevents.go
|
package bot
import (
"runtime/debug"
"sync/atomic"
"time"
"emperror.dev/errors"
"github.com/Nsadow311/stranger/bot/eventsystem"
"github.com/Nsadow311/stranger/bot/joinedguildsupdater"
"github.com/Nsadow311/stranger/bot/models"
"github.com/Nsadow311/stranger/common"
"github.com/Nsadow311/stranger/common/featureflags"
"github.com/Nsadow311/stranger/common/pubsub"
"github.com/jonas747/discordgo/v2"
"github.com/mediocregopher/radix/v3"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
"github.com/volatiletech/sqlboiler/boil"
)
func addBotHandlers() {
eventsystem.AddHandlerFirstLegacy(BotPlugin, HandleReady, eventsystem.EventReady)
eventsystem.AddHandlerFirstLegacy(BotPlugin, HandleMessageCreateUpdateFirst, eventsystem.EventMessageCreate, eventsystem.EventMessageUpdate)
eventsystem.AddHandlerSecondLegacy(BotPlugin, StateHandler, eventsystem.EventAll)
eventsystem.AddHandlerAsyncLastLegacy(BotPlugin, EventLogger.handleEvent, eventsystem.EventAll)
eventsystem.AddHandlerAsyncLast(BotPlugin, HandleGuildCreate, eventsystem.EventGuildCreate)
eventsystem.AddHandlerAsyncLast(BotPlugin, HandleGuildDelete, eventsystem.EventGuildDelete)
eventsystem.AddHandlerAsyncLast(BotPlugin, HandleGuildUpdate, eventsystem.EventGuildUpdate)
eventsystem.AddHandlerAsyncLast(BotPlugin, handleInvalidateCacheEvent,
eventsystem.EventGuildRoleCreate,
eventsystem.EventGuildRoleUpdate,
eventsystem.EventGuildRoleDelete,
eventsystem.EventChannelCreate,
eventsystem.EventChannelUpdate,
eventsystem.EventChannelDelete,
eventsystem.EventGuildMemberUpdate)
eventsystem.AddHandlerAsyncLast(BotPlugin, HandleGuildMemberAdd, eventsystem.EventGuildMemberAdd)
eventsystem.AddHandlerAsyncLast(BotPlugin, HandleGuildMemberRemove, eventsystem.EventGuildMemberRemove)
eventsystem.AddHandlerAsyncLastLegacy(BotPlugin, HandleGuildMembersChunk, eventsystem.EventGuildMembersChunk)
eventsystem.AddHandlerAsyncLastLegacy(BotPlugin, HandleReactionAdd, eventsystem.EventMessageReactionAdd)
eventsystem.AddHandlerAsyncLastLegacy(BotPlugin, HandleMessageCreate, eventsystem.EventMessageCreate)
eventsystem.AddHandlerAsyncLastLegacy(BotPlugin, HandleRatelimit, eventsystem.EventRateLimit)
eventsystem.AddHandlerAsyncLastLegacy(BotPlugin, ReadyTracker.handleReadyOrResume, eventsystem.EventReady, eventsystem.EventResumed)
eventsystem.AddHandlerAsyncLastLegacy(BotPlugin, handleResumed, eventsystem.EventResumed)
}
var (
connectedGuildsCache = common.CacheSet.RegisterSlot("bot_connected_guilds", func(_ interface{}) (interface{}, error) {
var listedServers []int64
err := common.RedisPool.Do(radix.Cmd(&listedServers, "SMEMBERS", "connected_guilds"))
return listedServers, err
}, 0)
)
func HandleReady(data *eventsystem.EventData) {
evt := data.Ready()
commonEventsTotal.With(prometheus.Labels{"type": "Ready"}).Inc()
RefreshStatus(ContextSession(data.Context()))
// We pass the common.Session to the command system and that needs the user from the state
common.BotSession.State.Lock()
ready := discordgo.Ready{
Version: evt.Version,
SessionID: evt.SessionID,
User: evt.User,
}
common.BotSession.State.Ready = ready
common.BotSession.State.Unlock()
var listedServers []int64
if listedServersI, err := connectedGuildsCache.Get(0); err == nil {
listedServers = listedServersI.([]int64)
} else {
logger.WithError(err).Error("Failed retrieving connected servers")
}
numShards := ShardManager.GetNumShards()
OUTER:
for _, v := range listedServers {
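		// Discord assigns a guild to shard (guild_id >> 22) % num_shards;
		// skip guilds that belong to a different shard.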
shard := (v >> 22) % int64(numShards)
if int(shard) != data.Session.ShardID {
continue
}
for _, readyGuild := range evt.Guilds {
if readyGuild.ID == v {
continue OUTER
}
}
logger.Info("Left server while bot was down: ", v)
go guildRemoved(v)
}
guilds := make([]int64, len(evt.Guilds))
for i, v := range evt.Guilds {
guilds[i] = v.ID
}
featureflags.BatchInitCache(guilds)
}
var guildJoinHandler = joinedguildsupdater.NewUpdater()
var metricsJoinedGuilds = promauto.NewCounter(prometheus.CounterOpts{
Name: "yagpdb_joined_guilds",
Help: "Guilds yagpdb newly joined",
})
var commonEventsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "bot_events_total",
Help: "Common bot events",
}, []string{"type"})
func HandleGuildCreate(evt *eventsystem.EventData) (retry bool, err error) {
g := evt.GuildCreate()
logger.WithFields(logrus.Fields{
"g_name": g.Name,
"guild": g.ID,
}).Debug("Joined guild")
saddRes := 0
isBanned := false
err = common.RedisPool.Do(radix.Pipeline(
radix.Cmd(&saddRes, "SADD", "connected_guilds", discordgo.StrID(g.ID)),
radix.Cmd(&isBanned, "SISMEMBER", "banned_servers", discordgo.StrID(g.ID)),
))
if err != nil {
return true, errors.WithStackIf(err)
}
// check if this server is new
if saddRes > 0 {
logger.WithField("g_name", g.Name).WithField("guild", g.ID).Info("Joined new guild!")
go eventsystem.EmitEvent(eventsystem.NewEventData(nil, eventsystem.EventNewGuild, g), eventsystem.EventNewGuild)
metricsJoinedGuilds.Inc()
commonEventsTotal.With(prometheus.Labels{"type": "Guild Create"}).Inc()
}
// check if the server is banned from using the bot
if isBanned {
logger.WithField("guild", g.ID).Info("Banned server tried to add bot back")
common.BotSession.ChannelMessageSend(g.ID, "This server is banned from using this bot. Join the support server for more info.")
err = common.BotSession.GuildLeave(g.ID)
if err != nil {
return CheckDiscordErrRetry(err), errors.WithStackIf(err)
}
}
guildJoinHandler.Incoming <- evt
return false, nil
}
func HandleGuildDelete(evt *eventsystem.EventData) (retry bool, err error) {
if evt.GuildDelete().Unavailable {
// Just a guild outage
return
}
logger.WithFields(logrus.Fields{
"guild": evt.GuildDelete().ID,
}).Info("Left guild")
go guildRemoved(evt.GuildDelete().ID)
return false, nil
}
func HandleGuildMemberAdd(evt *eventsystem.EventData) (retry bool, err error) {
// ma := evt.GuildMemberAdd()
// failedUsersCache.Delete(discordgo.StrID(ma.GuildID) + ":" + discordgo.StrID(ma.User.ID))
guildJoinHandler.Incoming <- evt
return false, nil
}
func HandleGuildMemberRemove(evt *eventsystem.EventData) (retry bool, err error) {
guildJoinHandler.Incoming <- evt
return false, nil
}
// StateHandler updates the world state
// use AddHandlerBefore to add a handler before this one, otherwise it will always run after
func StateHandler(evt *eventsystem.EventData) {
stateTracker.HandleEvent(evt.Session, evt.EvtInterface)
// State.HandleEvent(ContextSession(evt.Context()), evt.EvtInterface)
}
func HandleGuildUpdate(evt *eventsystem.EventData) (retry bool, err error)
|
func handleInvalidateCacheEvent(evt *eventsystem.EventData) (bool, error) {
if evt.GS == nil {
return false, nil
}
userID := int64(0)
if evt.Type == eventsystem.EventGuildMemberUpdate {
userID = evt.GuildMemberUpdate().User.ID
}
InvalidateCache(evt.GS.ID, userID)
return false, nil
}
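// InvalidateCache evicts the cached guild, guild-channels, and user-guilds
// entries from Redis; pass 0 for either ID to skip that part of the cache.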
func InvalidateCache(guildID, userID int64) {
if userID != 0 {
if err := common.RedisPool.Do(radix.Cmd(nil, "DEL", common.CacheKeyPrefix+discordgo.StrID(userID)+":guilds")); err != nil {
logger.WithField("guild", guildID).WithField("user", userID).WithError(err).Error("failed invalidating user guilds cache")
}
}
if guildID != 0 {
if err := common.RedisPool.Do(radix.Cmd(nil, "DEL", common.CacheKeyPrefix+common.KeyGuild(guildID))); err != nil {
logger.WithField("guild", guildID).WithField("user", userID).WithError(err).Error("failed invalidating guild cache")
}
if err := common.RedisPool.Do(radix.Cmd(nil, "DEL", common.CacheKeyPrefix+common.KeyGuildChannels(guildID))); err != nil {
logger.WithField("guild", guildID).WithField("user", userID).WithError(err).Error("failed invalidating guild channels cache")
}
}
}
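// ConcurrentEventHandler wraps a legacy handler so each event runs in its own
// goroutine, recovering from (and logging) panics instead of crashing the bot.
// A sketch of hypothetical usage, with a made-up handler name:
//
//	eventsystem.AddHandlerAsyncLastLegacy(BotPlugin,
//		ConcurrentEventHandler(mySlowHandler), eventsystem.EventMessageCreate)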
func ConcurrentEventHandler(inner eventsystem.HandlerFuncLegacy) eventsystem.HandlerFuncLegacy {
return eventsystem.HandlerFuncLegacy(func(evt *eventsystem.EventData) {
go func() {
defer func() {
if err := recover(); err != nil {
stack := string(debug.Stack())
logger.WithField(logrus.ErrorKey, err).WithField("evt", evt.Type.String()).Error("Recovered from panic in (concurrent) event handler\n" + stack)
}
}()
inner(evt)
}()
})
}
func LimitedConcurrentEventHandler(inner eventsystem.HandlerFuncLegacy, limit int64, sleepDur time.Duration) eventsystem.HandlerFuncLegacy {
counter := new(int64)
return eventsystem.HandlerFuncLegacy(func(evt *eventsystem.EventData) {
go func() {
defer func() {
atomic.AddInt64(counter, -1)
if err := recover(); err != nil {
stack := string(debug.Stack())
logger.WithField(logrus.ErrorKey, err).WithField("evt", evt.Type.String()).Error("Recovered from panic in (concurrent) event handler\n" + stack)
}
}()
for {
				// Spin-lock style admission: reserve a slot by incrementing the
				// counter; if that pushes us over the limit, release the slot
				// and sleep before retrying.
if atomic.AddInt64(counter, 1) <= limit {
break
} else {
atomic.AddInt64(counter, -1)
time.Sleep(sleepDur)
}
}
inner(evt)
}()
})
}
func HandleReactionAdd(evt *eventsystem.EventData) {
ra := evt.MessageReactionAdd()
if ra.GuildID != 0 {
return
}
if ra.UserID == common.BotUser.ID {
return
}
err := pubsub.Publish("dm_reaction", -1, ra)
if err != nil {
logger.WithError(err).Error("failed publishing dm reaction")
}
}
func HandleMessageCreate(evt *eventsystem.EventData) {
commonEventsTotal.With(prometheus.Labels{"type": "Message Create"}).Inc()
mc := evt.MessageCreate()
if mc.GuildID != 0 {
return
}
if mc.Author == nil || mc.Author.ID == common.BotUser.ID {
return
}
err := pubsub.Publish("dm_message", -1, mc)
if err != nil {
logger.WithError(err).Error("failed publishing dm message")
}
}
// HandleMessageCreateUpdateFirst transforms the message events a little to make them easier to deal with
// Message.Member.User is null from the API, so we populate it from Message.Author
func HandleMessageCreateUpdateFirst(evt *eventsystem.EventData) {
if evt.GS == nil {
return
}
if evt.Type == eventsystem.EventMessageCreate {
msg := evt.MessageCreate()
if !IsNormalUserMessage(msg.Message) {
return
}
if msg.Member != nil {
msg.Member.User = msg.Author
msg.Member.GuildID = msg.GuildID
}
} else {
edit := evt.MessageUpdate()
if !IsNormalUserMessage(edit.Message) {
return
}
		if edit.Member != nil {
			edit.Member.User = edit.Author
			edit.Member.GuildID = edit.GuildID
		}
}
}
func HandleRatelimit(evt *eventsystem.EventData) {
rl := evt.RateLimit()
if !rl.TooManyRequests.Global {
return
}
pubsub.PublishRatelimit(rl)
}
func handleResumed(evt *eventsystem.EventData) {
commonEventsTotal.With(prometheus.Labels{"type": "Resumed"}).Inc()
}
|
{
InvalidateCache(evt.GuildUpdate().Guild.ID, 0)
g := evt.GuildUpdate().Guild
gm := &models.JoinedGuild{
ID: g.ID,
MemberCount: int64(g.MemberCount),
OwnerID: g.OwnerID,
JoinedAt: time.Now(),
Name: g.Name,
Avatar: g.Icon,
}
err = gm.Upsert(evt.Context(), common.PQ, true, []string{"id"}, boil.Whitelist("name", "avatar", "owner_id"), boil.Infer())
if err != nil {
return true, errors.WithStackIf(err)
}
return false, nil
}
|
pncounter-test.js
|
/*
* Copyright 2019 Lightbend Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const should = require("chai").should();
const Long = require("long");
const PNCounter = require("../../src/crdts/pncounter");
const protobufHelper = require("../../src/protobuf-helper");
const CrdtDelta = protobufHelper.moduleRoot.cloudstate.crdt.CrdtDelta;
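// Round-trips a delta through the protobuf wire format (encode, then decode),
// so the tests exercise what a peer would actually receive.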
function
|
(delta) {
return CrdtDelta.decode(CrdtDelta.encode(delta).finish());
}
describe("PNCounter", () => {
it("should have a value of zero when instantiated", () => {
const counter = new PNCounter();
counter.value.should.equal(0);
should.equal(counter.getAndResetDelta(), null);
});
it("should reflect a delta update", () => {
const counter = new PNCounter();
counter.applyDelta(roundTripDelta({
pncounter: {
change: 10
}
}));
counter.value.should.equal(10);
// Try incrementing it again
counter.applyDelta(roundTripDelta({
pncounter: {
change: -3
}
}));
counter.value.should.equal(7);
});
it("should generate deltas", () => {
const counter = new PNCounter();
counter.increment(10);
counter.value.should.equal(10);
roundTripDelta(counter.getAndResetDelta()).pncounter.change.toNumber().should.equal(10);
should.equal(counter.getAndResetDelta(), null);
counter.decrement(3);
counter.value.should.equal(7);
counter.decrement(4);
counter.value.should.equal(3);
roundTripDelta(counter.getAndResetDelta()).pncounter.change.toNumber().should.equal(-7);
should.equal(counter.getAndResetDelta(), null);
});
it("should support long values", () => {
const impossibleDouble = Long.ZERO.add(Number.MAX_SAFE_INTEGER).add(1);
const counter = new PNCounter();
counter.increment(Number.MAX_SAFE_INTEGER);
counter.increment(1);
counter.longValue.should.eql(impossibleDouble);
roundTripDelta(counter.getAndResetDelta()).pncounter.change.should.eql(impossibleDouble);
});
it("should support incrementing by long values", () => {
const impossibleDouble = Long.ZERO.add(Number.MAX_SAFE_INTEGER).add(1);
const counter = new PNCounter();
counter.increment(impossibleDouble);
counter.longValue.should.eql(impossibleDouble);
roundTripDelta(counter.getAndResetDelta()).pncounter.change.should.eql(impossibleDouble);
});
it("should support empty initial deltas (for ORMap added)", () => {
const counter = new PNCounter();
counter.value.should.equal(0);
should.equal(counter.getAndResetDelta(), null);
roundTripDelta(counter.getAndResetDelta(/* initial = */ true)).pncounter.change.toNumber().should.equal(0);
});
});
|
roundTripDelta
|
main.go
|
package main
import (
"flag"
"log"
"time"
"github.com/weaveworks/flagger/pkg/loadtester"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/signals"
"go.uber.org/zap"
)
var VERSION = "0.13.0"
var (
logLevel string
port string
timeout time.Duration
zapReplaceGlobals bool
zapEncoding string
)
func init() {
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
flag.StringVar(&port, "port", "9090", "Port to listen on.")
flag.DurationVar(&timeout, "timeout", time.Hour, "Load test exec timeout.")
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
}
func main()
|
{
flag.Parse()
logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
if zapReplaceGlobals {
zap.ReplaceGlobals(logger.Desugar())
}
defer logger.Sync()
stopCh := signals.SetupSignalHandler()
taskRunner := loadtester.NewTaskRunner(logger, timeout)
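	// Run the task runner in the background, polling for queued load-test
	// tasks every 100ms until the stop channel closes.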
go taskRunner.Start(100*time.Millisecond, stopCh)
logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
gateStorage := loadtester.NewGateStorage("in-memory")
loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, gateStorage, stopCh)
}
|
|
asset_service_pb2.py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/services/asset_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v3.proto.resources import asset_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/services/asset_service.proto',
package='google.ads.googleads.v3.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v3.servicesB\021AssetServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V3.Services\312\002 Google\\Ads\\GoogleAds\\V3\\Services\352\002$Google::Ads::GoogleAds::V3::Services'),
serialized_pb=_b('\n:google/ads/googleads_v3/proto/services/asset_service.proto\x12 google.ads.googleads.v3.services\x1a\x33google/ads/googleads_v3/proto/resources/asset.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"P\n\x0fGetAssetRequest\x12=\n\rresource_name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1egoogleads.googleapis.com/Asset\"z\n\x13MutateAssetsRequest\x12\x18\n\x0b\x63ustomer_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12I\n\noperations\x18\x02 \x03(\x0b\x32\x30.google.ads.googleads.v3.services.AssetOperationB\x03\xe0\x41\x02\"Y\n\x0e\x41ssetOperation\x12:\n\x06\x63reate\x18\x01 \x01(\x0b\x32(.google.ads.googleads.v3.resources.AssetH\x00\x42\x0b\n\toperation\"\\\n\x14MutateAssetsResponse\x12\x44\n\x07results\x18\x02 \x03(\x0b\x32\x33.google.ads.googleads.v3.services.MutateAssetResult\"*\n\x11MutateAssetResult\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xa8\x03\n\x0c\x41ssetService\x12\xa9\x01\n\x08GetAsset\x12\x31.google.ads.googleads.v3.services.GetAssetRequest\x1a(.google.ads.googleads.v3.resources.Asset\"@\x82\xd3\xe4\x93\x02*\x12(/v3/{resource_name=customers/*/assets/*}\xda\x41\rresource_name\x12\xce\x01\n\x0cMutateAssets\x12\x35.google.ads.googleads.v3.services.MutateAssetsRequest\x1a\x36.google.ads.googleads.v3.services.MutateAssetsResponse\"O\x82\xd3\xe4\x93\x02\x30\"+/v3/customers/{customer_id=*}/assets:mutate:\x01*\xda\x41\x16\x63ustomer_id,operations\x1a\x1b\xca\x41\x18googleads.googleapis.comB\xf8\x01\n$com.google.ads.googleads.v3.servicesB\x11\x41ssetServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V3.Services\xca\x02 Google\\Ads\\GoogleAds\\V3\\Services\xea\x02$Google::Ads::GoogleAds::V3::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETASSETREQUEST = _descriptor.Descriptor(
name='GetAssetRequest',
full_name='google.ads.googleads.v3.services.GetAssetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.GetAssetRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002\372A \n\036googleads.googleapis.com/Asset'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=264,
serialized_end=344,
)
_MUTATEASSETSREQUEST = _descriptor.Descriptor(
name='MutateAssetsRequest',
full_name='google.ads.googleads.v3.services.MutateAssetsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v3.services.MutateAssetsRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operations', full_name='google.ads.googleads.v3.services.MutateAssetsRequest.operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=346,
serialized_end=468,
)
_ASSETOPERATION = _descriptor.Descriptor(
name='AssetOperation',
full_name='google.ads.googleads.v3.services.AssetOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='create', full_name='google.ads.googleads.v3.services.AssetOperation.create', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v3.services.AssetOperation.operation',
index=0, containing_type=None, fields=[]),
],
serialized_start=470,
serialized_end=559,
)
_MUTATEASSETSRESPONSE = _descriptor.Descriptor(
name='MutateAssetsResponse',
full_name='google.ads.googleads.v3.services.MutateAssetsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='google.ads.googleads.v3.services.MutateAssetsResponse.results', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=561,
serialized_end=653,
)
_MUTATEASSETRESULT = _descriptor.Descriptor(
name='MutateAssetResult',
full_name='google.ads.googleads.v3.services.MutateAssetResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.MutateAssetResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=655,
serialized_end=697,
)
_MUTATEASSETSREQUEST.fields_by_name['operations'].message_type = _ASSETOPERATION
_ASSETOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2._ASSET
_ASSETOPERATION.oneofs_by_name['operation'].fields.append(
_ASSETOPERATION.fields_by_name['create'])
_ASSETOPERATION.fields_by_name['create'].containing_oneof = _ASSETOPERATION.oneofs_by_name['operation']
_MUTATEASSETSRESPONSE.fields_by_name['results'].message_type = _MUTATEASSETRESULT
DESCRIPTOR.message_types_by_name['GetAssetRequest'] = _GETASSETREQUEST
DESCRIPTOR.message_types_by_name['MutateAssetsRequest'] = _MUTATEASSETSREQUEST
DESCRIPTOR.message_types_by_name['AssetOperation'] = _ASSETOPERATION
DESCRIPTOR.message_types_by_name['MutateAssetsResponse'] = _MUTATEASSETSRESPONSE
DESCRIPTOR.message_types_by_name['MutateAssetResult'] = _MUTATEASSETRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAssetRequest = _reflection.GeneratedProtocolMessageType('GetAssetRequest', (_message.Message,), dict(
DESCRIPTOR = _GETASSETREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Request message for
[AssetService.GetAsset][google.ads.googleads.v3.services.AssetService.GetAsset]
Attributes:
resource_name:
Required. The resource name of the asset to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.GetAssetRequest)
))
_sym_db.RegisterMessage(GetAssetRequest)
MutateAssetsRequest = _reflection.GeneratedProtocolMessageType('MutateAssetsRequest', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETSREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Request message for
[AssetService.MutateAssets][google.ads.googleads.v3.services.AssetService.MutateAssets]
Attributes:
customer_id:
Required. The ID of the customer whose assets are being
modified.
operations:
Required. The list of operations to perform on individual
assets.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.MutateAssetsRequest)
))
_sym_db.RegisterMessage(MutateAssetsRequest)
AssetOperation = _reflection.GeneratedProtocolMessageType('AssetOperation', (_message.Message,), dict(
DESCRIPTOR = _ASSETOPERATION,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """A single operation to create an asset. Supported asset types are
YoutubeVideoAsset, MediaBundleAsset, ImageAsset, and LeadFormAsset.
TextAsset should be created with Ad inline.
Attributes:
operation:
The mutate operation.
create:
Create operation: No resource name is expected for the new
asset.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.AssetOperation)
))
_sym_db.RegisterMessage(AssetOperation)
MutateAssetsResponse = _reflection.GeneratedProtocolMessageType('MutateAssetsResponse', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETSRESPONSE,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Response message for an asset mutate.
Attributes:
results:
All results for the mutate.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.MutateAssetsResponse)
))
_sym_db.RegisterMessage(MutateAssetsResponse)
MutateAssetResult = _reflection.GeneratedProtocolMessageType('MutateAssetResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETRESULT,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """The result for the asset mutate.
Attributes:
resource_name:
The resource name returned for successful operations.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.MutateAssetResult)
))
_sym_db.RegisterMessage(MutateAssetResult)
DESCRIPTOR._options = None
_GETASSETREQUEST.fields_by_name['resource_name']._options = None
_MUTATEASSETSREQUEST.fields_by_name['customer_id']._options = None
_MUTATEASSETSREQUEST.fields_by_name['operations']._options = None
_ASSETSERVICE = _descriptor.ServiceDescriptor(
name='AssetService',
full_name='google.ads.googleads.v3.services.AssetService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=700,
serialized_end=1124,
methods=[
_descriptor.MethodDescriptor(
name='GetAsset',
full_name='google.ads.googleads.v3.services.AssetService.GetAsset',
index=0,
containing_service=None,
input_type=_GETASSETREQUEST,
output_type=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2._ASSET,
serialized_options=_b('\202\323\344\223\002*\022(/v3/{resource_name=customers/*/assets/*}\332A\rresource_name'),
),
_descriptor.MethodDescriptor(
name='MutateAssets',
full_name='google.ads.googleads.v3.services.AssetService.MutateAssets',
|
index=1,
containing_service=None,
input_type=_MUTATEASSETSREQUEST,
output_type=_MUTATEASSETSRESPONSE,
serialized_options=_b('\202\323\344\223\0020\"+/v3/customers/{customer_id=*}/assets:mutate:\001*\332A\026customer_id,operations'),
),
])
_sym_db.RegisterServiceDescriptor(_ASSETSERVICE)
DESCRIPTOR.services_by_name['AssetService'] = _ASSETSERVICE
# @@protoc_insertion_point(module_scope)
| |
snapzblock.go
|
package bubt
import "fmt"
import "bytes"
import "encoding/binary"
//---- znode for reading entries.
type zsnap []byte
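// findkey performs a recursive binary search over the block's offset index,
// narrowing the window [adjust, adjust+len(index)) until a single candidate
// entry is compared directly against key.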
func (z zsnap) findkey(
adjust int, index blkindex,
key []byte) (
idx int, actualkey []byte, lv lazyvalue, seqno uint64, del, ok bool) {
//fmt.Printf("zfindkey %v %v %q\n", adjust, len(index), key)
var cmp int
switch len(index) {
case 0:
panic(fmt.Errorf("impossible situation"))
case 1:
cmp, actualkey, lv, seqno, del = z.compareat(adjust, key)
if cmp == 0 { // adjust+half >= key
//fmt.Printf("zfindkey-1 %v %v %q\n", adjust, 0, actualkey)
return adjust, actualkey, lv, seqno, del, true
}
// cmp < 0
//fmt.Printf("zfindkey-2 %v %v %q\n", adjust, -1, actualkey)
return adjust + 1, actualkey, lv, 0, false, false
default:
half := len(index) / 2
arg1 := adjust + half
cmp, actualkey, lv, seqno, del = z.compareat(arg1, key)
if cmp == 0 {
//fmt.Println("zfindkey", adjust+half, 0)
return adjust + half, actualkey, lv, seqno, del, true
} else if cmp < 0 { // adjust+half < key
return z.findkey(adjust+half, index[half:], key)
}
return z.findkey(adjust, index[:half], key)
}
panic("unreachable code")
}
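// compareat compares key against the i-th entry. Block layout, as read here:
// a u32 entry count, then one u32 offset per entry; each offset points at a
// zentry header followed by the key bytes and either an inline value or an
// 8-byte vlog position.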
func (z zsnap) compareat(
i int, key []byte) (
cmp int, currkey []byte, lv lazyvalue, cas uint64, deleted bool) {
offset := 4 + (i * 4)
x := int(binary.BigEndian.Uint32(z[offset : offset+4]))
ze := zentry(z[x : x+zentrysize])
ln := int(ze.keylen())
x += zentrysize
currkey, cas, deleted = z[x:x+ln], 0, false
cmp = bytes.Compare(currkey, key)
//fmt.Printf("z.compareat %v %s %s %v\n", i, key, z[x:x+ln], cmp)
lv.setfields(0, 0, nil)
if cmp >= 0 {
x, ln = x+ln, int(ze.valuelen())
cas, deleted = ze.seqno(), ze.isdeleted()
if ze.isvlog() {
vlogpos := int64(binary.BigEndian.Uint64(z[x : x+8]))
lv.setfields(int64(ln), vlogpos, nil)
} else if ln > 0 {
lv.setfields(int64(ln), 0, z[x:x+ln])
}
}
return cmp, currkey, lv, cas, deleted
}
func (z zsnap) getindex(index blkindex) blkindex {
nums, n := binary.BigEndian.Uint32(z[:4]), 4
for i := uint32(0); i < nums; i++ {
index = append(index, binary.BigEndian.Uint32(z[n:n+4]))
n += 4
}
return index
}
func (z zsnap) entryat(
index int) (key []byte, lv lazyvalue, seqno uint64, deleted bool) {
x := int((index * 4) + 4)
x = int(binary.BigEndian.Uint32(z[x : x+4]))
ze := zentry(z[x : x+zentrysize])
seqno, deleted = ze.seqno(), ze.isdeleted()
vlogok := ze.isvlog()
keylen, valuelen := int(ze.keylen()), int(ze.valuelen())
x += zentrysize
//fmt.Printf("z-entryat %v %v %v\n", index, x, keylen)
key = z[x : x+keylen]
x += keylen
if vlogok {
vlogpos := int64(binary.BigEndian.Uint64(z[x : x+8]))
lv.setfields(int64(valuelen), vlogpos, nil)
} else if valuelen > 0
|
else {
lv.setfields(0, 0, nil)
}
return
}
func (z zsnap) getnext(
index int) (key []byte, lv lazyvalue, seqno uint64, deleted bool) {
if index >= 0 && z.isbounded(index+1) {
return z.entryat(index + 1)
}
return key, lv, 0, false
}
func (z zsnap) isbounded(index int) bool {
idxlen := int(binary.BigEndian.Uint32(z[:4]))
return (index >= 0) && (index < idxlen)
}
|
{
lv.setfields(int64(valuelen), 0, z[x:x+valuelen])
}
|
ArrayFieldItem.test.tsx
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
import React from 'react';
import { render, fireEvent, screen } from '@botframework-composer/test-utils';
import assign from 'lodash/assign';
import { ArrayFieldItem } from '../ArrayFieldItem';
import { fieldProps } from './testUtils';
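// Renders ArrayFieldItem with safe defaults (all actions disabled, jest.fn
// callbacks); individual tests override only the props they care about.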
function renderSubject(overrides = {}) {
const props = assign(
{
index: 0,
canMoveUp: false,
canMoveDown: false,
canRemove: false,
onReorder: jest.fn(),
onRemove: jest.fn(),
},
fieldProps(),
overrides
);
return render(<ArrayFieldItem {...props} />);
}
describe('<ArrayFieldItem />', () => {
describe('context menu', () => {
it('disables the action if prop is false', () => {
const { getByLabelText } = renderSubject();
const menu = getByLabelText('Item actions');
fireEvent.click(menu);
const items = screen.getAllByRole('menuitem');
expect(items).toHaveLength(3);
items.forEach((item) => {
expect(item).toHaveAttribute('aria-disabled', 'true');
});
});
it('allows moving up, down and removing', () => {
const onReorder = jest.fn();
const onRemove = jest.fn();
const { getByLabelText } = renderSubject({
index: 2,
canRemove: true,
canMoveDown: true,
canMoveUp: true,
onRemove,
onReorder,
});
const menu = getByLabelText('Item actions');
fireEvent.click(menu);
const moveUp = screen.getByText('Move up');
|
expect(onReorder).toHaveBeenCalledWith(1);
fireEvent.click(menu);
const moveDown = screen.getByText('Move down');
fireEvent.click(moveDown);
expect(onReorder).toHaveBeenCalledWith(3);
fireEvent.click(menu);
const remove = screen.getByText('Remove');
fireEvent.click(remove);
expect(onRemove).toHaveBeenCalled();
});
});
it('shows a label if the items are stacked', () => {
const { getByLabelText } = renderSubject({
schema: { type: 'object', properties: { foo: { title: 'Foo Title' } } },
stackArrayItems: true,
});
expect(getByLabelText('Foo Title')).toBeInTheDocument();
});
it('passes correct error message', async () => {
const { findByText } = renderSubject({
rawErrors: ['error 1', 'error 2'],
index: 1,
});
expect(await findByText('Test Name error 2')).toBeInTheDocument();
});
});
|
fireEvent.click(moveUp);
|
pipeline_factory.go
|
package dbng
import (
sq "github.com/Masterminds/squirrel"
"github.com/concourse/atc/db/lock"
)
//go:generate counterfeiter . PipelineFactory
type PipelineFactory interface {
GetPipelineByID(teamID int, pipelineID int) Pipeline
PublicPipelines() ([]Pipeline, error)
}
type pipelineFactory struct {
conn Conn
lockFactory lock.LockFactory
}
func NewPipelineFactory(conn Conn, lockFactory lock.LockFactory) PipelineFactory
|
func (f *pipelineFactory) GetPipelineByID(teamID int, pipelineID int) Pipeline {
// XXX: construct a real one using the regular pipeline constructors; don't just set teamID etc inline
return &pipeline{
id: pipelineID,
teamID: teamID,
conn: f.conn,
lockFactory: f.lockFactory,
}
}
func (f *pipelineFactory) PublicPipelines() ([]Pipeline, error) {
rows, err := pipelinesQuery.
Where(sq.Eq{"p.public": true}).
OrderBy("t.name, ordering").
RunWith(f.conn).
Query()
if err != nil {
return nil, err
}
pipelines, err := scanPipelines(f.conn, f.lockFactory, rows)
if err != nil {
return nil, err
}
return pipelines, nil
}
|
{
return &pipelineFactory{
conn: conn,
lockFactory: lockFactory,
}
}
|
18b9d421fbde_add_initial_models.py
|
"""add initial models
Revision ID: 18b9d421fbde
Revises:
Create Date: 2022-03-19 12:36:16.067795
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "18b9d421fbde"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"stats",
sa.Column("user", sa.BigInteger(), nullable=False),
sa.Column("count", sa.BigInteger(), nullable=True),
sa.PrimaryKeyConstraint("user"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
|
op.drop_table("stats")
# ### end Alembic commands ###
|
|
tree.rs
|
//! Used to build a SCPI command tree
use crate::command::Command;
use crate::error::{ErrorCode, Result};
use crate::response::Formatter;
use crate::tokenizer::Tokenizer;
use crate::Context;
#[macro_export]
macro_rules! scpi_tree {
($($node:expr),*) => {
&Node{name: b"ROOT", optional: false, handler: None, sub: &[
$(
$node
),*
]}
};
}
/// A SCPI command node
/// These nodes are structured as a command tree where each node represents a SCPI header mnemonic.
///
/// # Example
///
/// ```
/// use scpi::tree::Node;
/// use scpi::scpi_tree;
/// use scpi::ieee488::commands::*;
///
/// let root = scpi_tree![
/// Node{name: b"*IDN?", optional: false, handler: Some(&IdnCommand{
/// manufacturer: b"GPA-Robotics",
/// model: b"Potato",
/// serial: b"42",
/// firmware: b"0"
/// }), sub: &[]}
/// //...
/// ];
/// ```
/// Note that all strings are ASCII byte strings; only ASCII is defined in SCPI, so
/// Rust's normal UTF-8 `&str` would be improper. To send a Unicode string you can use
/// Arbitrary Block Data (or this parser's alternative arbitrary-data header `#s"..."`,
/// which allows and checks UTF-8 data inside the quotes).
///
pub struct Node<'a> {
    /// Mnemonic of this node; must follow SCPI notation (eg `LARGEsmall[<index>]` etc)
pub name: &'static [u8],
    /// Command handler. If None, the parser will return an UndefinedHeader error if the node is called (it may still be traversed)
pub handler: Option<&'a dyn Command>,
    /// Subnodes. The node may have no subcommands or an array of subcommands. If a message attempts to traverse
    /// this node and it does not have any subnodes (eg `IMhelping:THISnode:DONTexist`), an UndefinedHeader error will be returned.
pub sub: &'a [Node<'a>],
    /// Marks the node as optional (called "default", with inverse behaviour, in IEEE 488)
pub optional: bool,
}
impl<'a> Node<'a> {
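    /// Execute this node: dispatch to its handler if one is present; otherwise
    /// fall through to an optional (default) child, or report a command header
    /// error.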
pub(crate) fn exec<FMT>(
&self,
context: &mut Context,
args: &mut Tokenizer,
response: &mut FMT,
query: bool,
) -> Result<()>
where
FMT: Formatter,
{
if let Some(handler) = self.handler {
//Execute self
if query {
handler.query(context, args, &mut response.response_unit()?)
} else {
handler.event(context, args)
}
} else if !self.sub.is_empty()
|
else {
Err(ErrorCode::CommandHeaderError.into())
}
}
}
|
{
//No handler, check for a default child
for child in self.sub {
if child.optional {
return child.exec(context, args, response, query);
}
}
//No optional child
Err(ErrorCode::CommandHeaderError.into())
}
|
client.go
|
// Copyright (c) 2016, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
|
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"github.com/jlmucb/cloudproxy/go/apps/roughtime/agl_roughtime/config"
"github.com/jlmucb/cloudproxy/go/apps/roughtime"
)
var (
chainFile = flag.String("chain-file", "roughtime-chain.json", "The name of a file in which the query chain will be maintained")
maxChainSize = flag.Int("max-chain-size", 128, "The maximum number of entries to maintain in the chain file")
serversFile = flag.String("servers-file", "roughtime-servers.json", "The name of a file that lists trusted Roughtime servers")
configPath = flag.String("config", "tao.config", "Path to domain configuration file.")
)
const (
// defaultServerQuorum is the default number of overlapping responses
// that are required to establish the current time.
defaultServerQuorum = 2
)
func main() {
serversData, err := ioutil.ReadFile(*serversFile)
if err != nil {
log.Fatal(err)
}
servers, numServersSkipped, err := roughtime.LoadServers(serversData)
if err != nil {
log.Fatal(err)
}
if numServersSkipped > 0 {
fmt.Fprintf(os.Stderr, "Ignoring %d unsupported servers\n", numServersSkipped)
}
	c, err := roughtime.NewClient(*configPath, "tcp", defaultServerQuorum, servers)
	if err != nil {
		log.Fatal(err)
	}
	// Read existing chain, if one exists
chain := &config.Chain{}
chainData, err := ioutil.ReadFile(*chainFile)
if err == nil {
if chain, err = roughtime.LoadChain(chainData); err != nil {
log.Fatal(err)
}
} else if !os.IsNotExist(err) {
log.Fatal(err)
}
chain, err = c.Do(chain)
if err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
chainBytes, err := json.MarshalIndent(chain, "", " ")
if err != nil {
log.Fatal(err)
}
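	// Write the updated chain to a temp file in the same directory and rename
	// it into place, so the chain file is replaced atomically.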
tempFile, err := ioutil.TempFile(filepath.Dir(*chainFile), filepath.Base(*chainFile))
if err != nil {
log.Fatal(err)
}
defer tempFile.Close()
if _, err := tempFile.Write(chainBytes); err != nil {
log.Fatal(err)
}
if err := os.Rename(tempFile.Name(), *chainFile); err != nil {
log.Fatal(err)
}
}
|
// limitations under the License.
// This is an adapted version of the client code to connect to CloudProxy.
|
authenticateTokenResponseDto.ts
|
export interface AuthenticateTokenResponseDto {
|
}
|
|
config.rs
|
use std::net::SocketAddr;
use std::rc::Rc;
use actix_http::Extensions;
use actix_router::ResourceDef;
use actix_service::{boxed, IntoServiceFactory, ServiceFactory};
use crate::data::{Data, DataFactory};
use crate::error::Error;
use crate::guard::Guard;
use crate::resource::Resource;
use crate::rmap::ResourceMap;
use crate::route::Route;
use crate::service::{
AppServiceFactory, HttpServiceFactory, ServiceFactoryWrapper, ServiceRequest,
ServiceResponse,
};
type Guards = Vec<Box<dyn Guard>>;
type HttpNewService =
boxed::BoxServiceFactory<(), ServiceRequest, ServiceResponse, Error, ()>;
/// Application configuration
pub struct AppService {
config: AppConfig,
root: bool,
default: Rc<HttpNewService>,
services: Vec<(
ResourceDef,
HttpNewService,
Option<Guards>,
Option<Rc<ResourceMap>>,
)>,
service_data: Rc<[Box<dyn DataFactory>]>,
}
impl AppService {
    /// Create server settings instance
pub(crate) fn new(
config: AppConfig,
default: Rc<HttpNewService>,
service_data: Rc<[Box<dyn DataFactory>]>,
) -> Self {
AppService {
config,
default,
service_data,
root: true,
services: Vec::new(),
}
}
/// Check if root is being configured
pub fn is_root(&self) -> bool {
self.root
}
pub(crate) fn into_services(
self,
) -> (
AppConfig,
Vec<(
ResourceDef,
HttpNewService,
Option<Guards>,
Option<Rc<ResourceMap>>,
)>,
) {
(self.config, self.services)
}
pub(crate) fn clone_config(&self) -> Self {
AppService {
config: self.config.clone(),
default: self.default.clone(),
services: Vec::new(),
root: false,
service_data: self.service_data.clone(),
}
}
/// Service configuration
pub fn config(&self) -> &AppConfig {
&self.config
}
/// Default resource
pub fn
|
(&self) -> Rc<HttpNewService> {
self.default.clone()
}
/// Set global route data
pub fn set_service_data(&self, extensions: &mut Extensions) -> bool {
for f in self.service_data.iter() {
f.create(extensions);
}
!self.service_data.is_empty()
}
/// Register http service
pub fn register_service<F, S>(
&mut self,
rdef: ResourceDef,
guards: Option<Vec<Box<dyn Guard>>>,
factory: F,
nested: Option<Rc<ResourceMap>>,
) where
F: IntoServiceFactory<S>,
S: ServiceFactory<
Config = (),
Request = ServiceRequest,
Response = ServiceResponse,
Error = Error,
InitError = (),
> + 'static,
{
self.services.push((
rdef,
boxed::factory(factory.into_factory()),
guards,
nested,
));
}
}
/// Application connection config
#[derive(Clone)]
pub struct AppConfig(Rc<AppConfigInner>);
struct AppConfigInner {
secure: bool,
host: String,
addr: SocketAddr,
}
impl AppConfig {
pub(crate) fn new(secure: bool, addr: SocketAddr, host: String) -> Self {
AppConfig(Rc::new(AppConfigInner { secure, addr, host }))
}
/// Server host name.
///
    /// The host name is used by the application router as a hostname for URL generation.
    /// Check the [ConnectionInfo](./struct.ConnectionInfo.html#method.host)
    /// documentation for more information.
    ///
    /// By default the host name is set to "localhost".
pub fn host(&self) -> &str {
&self.0.host
}
    /// Returns true if the connection is secure (https)
pub fn secure(&self) -> bool {
self.0.secure
}
/// Returns the socket address of the local half of this TCP connection
pub fn local_addr(&self) -> SocketAddr {
self.0.addr
}
}
impl Default for AppConfig {
fn default() -> Self {
AppConfig::new(
false,
"127.0.0.1:8080".parse().unwrap(),
"localhost:8080".to_owned(),
)
}
}
/// Service config is used for external configuration.
/// Part of the application configuration can be offloaded
/// to a set of external methods, which helps with
/// modularizing a large application configuration.
pub struct ServiceConfig {
pub(crate) services: Vec<Box<dyn AppServiceFactory>>,
pub(crate) data: Vec<Box<dyn DataFactory>>,
pub(crate) external: Vec<ResourceDef>,
}
impl ServiceConfig {
pub(crate) fn new() -> Self {
Self {
services: Vec::new(),
data: Vec::new(),
external: Vec::new(),
}
}
/// Set application data. Application data could be accessed
/// by using `Data<T>` extractor where `T` is data type.
///
    /// This is the same as the `App::data()` method.
pub fn data<S: 'static>(&mut self, data: S) -> &mut Self {
self.data.push(Box::new(Data::new(data)));
self
}
/// Configure route for a specific path.
///
    /// This is the same as the `App::route()` method.
pub fn route(&mut self, path: &str, mut route: Route) -> &mut Self {
self.service(
Resource::new(path)
.add_guards(route.take_guards())
.route(route),
)
}
/// Register http service.
///
    /// This is the same as the `App::service()` method.
pub fn service<F>(&mut self, factory: F) -> &mut Self
where
F: HttpServiceFactory + 'static,
{
self.services
.push(Box::new(ServiceFactoryWrapper::new(factory)));
self
}
/// Register an external resource.
///
/// External resources are useful for URL generation purposes only
/// and are never considered for matching at request time. Calls to
/// `HttpRequest::url_for()` will work as expected.
///
    /// This is the same as the `App::external_service()` method.
pub fn external_resource<N, U>(&mut self, name: N, url: U) -> &mut Self
where
N: AsRef<str>,
U: AsRef<str>,
{
let mut rdef = ResourceDef::new(url.as_ref());
*rdef.name_mut() = name.as_ref().to_string();
self.external.push(rdef);
self
}
}
#[cfg(test)]
mod tests {
use actix_service::Service;
use bytes::Bytes;
use super::*;
use crate::http::{Method, StatusCode};
use crate::test::{call_service, init_service, read_body, TestRequest};
use crate::{web, App, HttpRequest, HttpResponse};
#[actix_rt::test]
async fn test_data() {
let cfg = |cfg: &mut ServiceConfig| {
cfg.data(10usize);
};
let mut srv =
init_service(App::new().configure(cfg).service(
web::resource("/").to(|_: web::Data<usize>| HttpResponse::Ok()),
))
.await;
let req = TestRequest::default().to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
}
// #[actix_rt::test]
// async fn test_data_factory() {
// let cfg = |cfg: &mut ServiceConfig| {
// cfg.data_factory(|| {
// sleep(std::time::Duration::from_millis(50)).then(|_| {
// println!("READY");
// Ok::<_, ()>(10usize)
// })
// });
// };
// let mut srv =
// init_service(App::new().configure(cfg).service(
// web::resource("/").to(|_: web::Data<usize>| HttpResponse::Ok()),
// ));
// let req = TestRequest::default().to_request();
// let resp = srv.call(req).await.unwrap();
// assert_eq!(resp.status(), StatusCode::OK);
// let cfg2 = |cfg: &mut ServiceConfig| {
// cfg.data_factory(|| Ok::<_, ()>(10u32));
// };
// let mut srv = init_service(
// App::new()
// .service(web::resource("/").to(|_: web::Data<usize>| HttpResponse::Ok()))
// .configure(cfg2),
// );
// let req = TestRequest::default().to_request();
// let resp = srv.call(req).await.unwrap();
// assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR);
// }
#[actix_rt::test]
async fn test_external_resource() {
let mut srv = init_service(
App::new()
.configure(|cfg| {
cfg.external_resource(
"youtube",
"https://youtube.com/watch/{video_id}",
);
})
.route(
"/test",
web::get().to(|req: HttpRequest| {
HttpResponse::Ok().body(
req.url_for("youtube", &["12345"]).unwrap().to_string(),
)
}),
),
)
.await;
let req = TestRequest::with_uri("/test").to_request();
let resp = call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let body = read_body(resp).await;
assert_eq!(body, Bytes::from_static(b"https://youtube.com/watch/12345"));
}
#[actix_rt::test]
async fn test_service() {
let mut srv = init_service(App::new().configure(|cfg| {
cfg.service(
web::resource("/test").route(web::get().to(HttpResponse::Created)),
)
.route("/index.html", web::get().to(HttpResponse::Ok));
}))
.await;
let req = TestRequest::with_uri("/test")
.method(Method::GET)
.to_request();
let resp = call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::CREATED);
let req = TestRequest::with_uri("/index.html")
.method(Method::GET)
.to_request();
let resp = call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::OK);
}
}
|
default_service
|
ser.rs
|
// Copyright 2016 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Serialization and deserialization layer specialized for binary encoding.
//! Ensures consistency and safety. Basically a minimal subset of
//! rustc_serialize customized for our needs.
//!
//! To use it simply implement `Writeable` or `Readable` and then use the
//! `serialize` or `deserialize` functions on them as appropriate.
use std::{error, fmt};
use std::io::{self, Write, Read};
use byteorder::{ByteOrder, ReadBytesExt, BigEndian};
/// Possible errors deriving from serializing or deserializing.
#[derive(Debug)]
pub enum Error {
/// Wraps an io error produced when reading or writing
IOErr(io::Error),
/// Expected a given value that wasn't found
UnexpectedData {
expected: Vec<u8>,
received: Vec<u8>,
},
/// Data wasn't in a consumable format
CorruptedData,
/// When asked to read too much data
TooLargeReadErr(String),
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Error {
Error::IOErr(e)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::IOErr(ref e) => write!(f, "{}", e),
Error::UnexpectedData { expected: ref e, received: ref r } => {
write!(f, "expected {:?}, got {:?}", e, r)
}
Error::CorruptedData => f.write_str("corrupted data"),
Error::TooLargeReadErr(ref s) => f.write_str(&s),
}
}
}
impl error::Error for Error {
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::IOErr(ref e) => Some(e),
_ => None,
}
}
fn description(&self) -> &str {
match *self {
Error::IOErr(ref e) => error::Error::description(e),
Error::UnexpectedData { expected: _, received: _ } => "unexpected data",
Error::CorruptedData => "corrupted data",
Error::TooLargeReadErr(ref s) => s,
}
}
}
/// Useful trait to implement on types that can be translated to byte slices
/// directly. Allows the use of `write_fixed_bytes` on them.
pub trait AsFixedBytes {
/// The slice representation of self
fn as_fixed_bytes(&self) -> &[u8];
}
/// Signal to a serializable object how much of its data should be serialized
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum SerializationMode {
/// Serialize everything sufficiently to fully reconstruct the object
Full,
/// Serialize the data that defines the object
Hash,
/// Serialize everything that a signer of the object should know
SigHash,
}
/// Implementations define how different numbers and binary structures are
/// written to an underlying stream or container (depending on implementation).
pub trait Writer {
/// The mode this serializer is writing in
fn serialization_mode(&self) -> SerializationMode;
/// Writes a u8 as bytes
fn write_u8(&mut self, n: u8) -> Result<(), Error> {
self.write_fixed_bytes(&[n])
}
/// Writes a u16 as bytes
fn write_u16(&mut self, n: u16) -> Result<(), Error> {
let mut bytes = [0; 2];
BigEndian::write_u16(&mut bytes, n);
self.write_fixed_bytes(&bytes)
}
/// Writes a u32 as bytes
fn write_u32(&mut self, n: u32) -> Result<(), Error> {
let mut bytes = [0; 4];
BigEndian::write_u32(&mut bytes, n);
self.write_fixed_bytes(&bytes)
}
/// Writes a u64 as bytes
fn write_u64(&mut self, n: u64) -> Result<(), Error> {
let mut bytes = [0; 8];
BigEndian::write_u64(&mut bytes, n);
self.write_fixed_bytes(&bytes)
}
/// Writes a i64 as bytes
fn write_i64(&mut self, n: i64) -> Result<(), Error> {
let mut bytes = [0; 8];
BigEndian::write_i64(&mut bytes, n);
self.write_fixed_bytes(&bytes)
}
/// Writes a variable number of bytes. The length is encoded as a 64-bit
/// prefix.
fn write_bytes(&mut self, bytes: &AsFixedBytes) -> Result<(), Error> {
try!(self.write_u64(bytes.as_fixed_bytes().len() as u64));
self.write_fixed_bytes(bytes)
}
/// Writes a fixed number of bytes from something that can turn itself into
/// a `&[u8]`. The reader is expected to know the actual length on read.
fn write_fixed_bytes(&mut self, fixed: &AsFixedBytes) -> Result<(), Error>;
}
/// Implementations define how different numbers and binary structures are
/// read from an underlying stream or container (depending on implementation).
pub trait Reader {
/// Read a u8 from the underlying Read
fn read_u8(&mut self) -> Result<u8, Error>;
/// Read a u16 from the underlying Read
fn read_u16(&mut self) -> Result<u16, Error>;
/// Read a u32 from the underlying Read
fn read_u32(&mut self) -> Result<u32, Error>;
/// Read a u64 from the underlying Read
fn read_u64(&mut self) -> Result<u64, Error>;
	/// Read an i64 from the underlying Read
fn read_i64(&mut self) -> Result<i64, Error>;
	/// Read a variable size vector from the underlying Read. Expects a u64 length
	/// prefix first, before the data bytes.
fn read_vec(&mut self) -> Result<Vec<u8>, Error>;
/// Read a fixed number of bytes from the underlying reader.
fn read_fixed_bytes(&mut self, length: usize) -> Result<Vec<u8>, Error>;
/// Convenience function to read 32 fixed bytes
fn read_32_bytes(&mut self) -> Result<Vec<u8>, Error>;
/// Convenience function to read 33 fixed bytes
fn read_33_bytes(&mut self) -> Result<Vec<u8>, Error>;
/// Consumes a byte from the reader, producing an error if it doesn't have
/// the expected value
fn expect_u8(&mut self, val: u8) -> Result<u8, Error>;
}
/// Trait that every type that can be serialized as binary must implement.
/// Writes directly to a Writer, a utility type thinly wrapping an
/// underlying Write implementation.
pub trait Writeable {
/// Write the data held by this Writeable to the provided writer
fn write(&self, writer: &mut Writer) -> Result<(), Error>;
}
/// Trait that every type that can be deserialized from binary must implement.
/// Reads directly to a Reader, a utility type thinly wrapping an
/// underlying Read implementation.
pub trait Readable<T> {
/// Reads the data necessary to this Readable from the provided reader
fn read(reader: &mut Reader) -> Result<T, Error>;
}
/// Deserializes a Readable from any std::io::Read implementation.
pub fn deserialize<T: Readable<T>>(mut source: &mut Read) -> Result<T, Error> {
let mut reader = BinReader { source: source };
T::read(&mut reader)
}
/// Serializes a Writeable into any std::io::Write implementation.
pub fn serialize(mut sink: &mut Write, thing: &Writeable) -> Result<(), Error> {
let mut writer = BinWriter { sink: sink };
thing.write(&mut writer)
}
/// Utility function to serialize a writeable directly in memory using a
/// Vec<u8>.
pub fn ser_vec(thing: &Writeable) -> Result<Vec<u8>, Error> {
let mut vec = Vec::new();
try!(serialize(&mut vec, thing));
Ok(vec)
}
struct BinReader<'a> {
source: &'a mut Read,
}
/// Utility wrapper for an underlying byte Reader. Defines higher level methods
/// to read numbers, byte vectors, hashes, etc.
impl<'a> Reader for BinReader<'a> {
fn read_u8(&mut self) -> Result<u8, Error> {
self.source.read_u8().map_err(Error::IOErr)
}
fn read_u16(&mut self) -> Result<u16, Error> {
self.source.read_u16::<BigEndian>().map_err(Error::IOErr)
}
fn read_u32(&mut self) -> Result<u32, Error> {
self.source.read_u32::<BigEndian>().map_err(Error::IOErr)
}
fn read_u64(&mut self) -> Result<u64, Error> {
self.source.read_u64::<BigEndian>().map_err(Error::IOErr)
}
fn read_i64(&mut self) -> Result<i64, Error> {
self.source.read_i64::<BigEndian>().map_err(Error::IOErr)
}
	/// Read a variable size vector from the underlying Read. Expects a u64 length prefix first, before the data bytes.
fn
|
(&mut self) -> Result<Vec<u8>, Error> {
let len = try!(self.read_u64());
self.read_fixed_bytes(len as usize)
}
fn read_fixed_bytes(&mut self, length: usize) -> Result<Vec<u8>, Error> {
// not reading more than 100k in a single read
if length > 100000 {
return Err(Error::TooLargeReadErr(format!("fixed bytes length too large: {}", length)));
}
let mut buf = vec![0; length];
self.source.read_exact(&mut buf).map(move |_| buf).map_err(Error::IOErr)
}
fn read_32_bytes(&mut self) -> Result<Vec<u8>, Error> {
self.read_fixed_bytes(32)
}
fn read_33_bytes(&mut self) -> Result<Vec<u8>, Error> {
self.read_fixed_bytes(33)
}
fn expect_u8(&mut self, val: u8) -> Result<u8, Error> {
let b = try!(self.read_u8());
if b == val {
Ok(b)
} else {
Err(Error::UnexpectedData {
expected: vec![val],
received: vec![b],
})
}
}
}
/// Utility wrapper for an underlying byte Writer. Defines higher level methods
/// to write numbers, byte vectors, hashes, etc.
struct BinWriter<'a> {
sink: &'a mut Write,
}
impl<'a> Writer for BinWriter<'a> {
fn serialization_mode(&self) -> SerializationMode {
SerializationMode::Full
}
fn write_fixed_bytes(&mut self, fixed: &AsFixedBytes) -> Result<(), Error> {
let bs = fixed.as_fixed_bytes();
try!(self.sink.write_all(bs));
Ok(())
}
}
macro_rules! impl_slice_bytes {
($byteable: ty) => {
impl AsFixedBytes for $byteable {
fn as_fixed_bytes(&self) -> &[u8] {
&self[..]
}
}
}
}
impl_slice_bytes!(::secp::key::SecretKey);
impl_slice_bytes!(::secp::Signature);
impl_slice_bytes!(::secp::pedersen::Commitment);
impl_slice_bytes!(Vec<u8>);
impl_slice_bytes!([u8; 1]);
impl_slice_bytes!([u8; 2]);
impl_slice_bytes!([u8; 4]);
impl_slice_bytes!([u8; 8]);
impl<'a> AsFixedBytes for &'a [u8] {
fn as_fixed_bytes(&self) -> &[u8] {
*self
}
}
impl<'a> AsFixedBytes for String {
fn as_fixed_bytes(&self) -> &[u8] {
self.as_bytes()
}
}
impl AsFixedBytes for ::core::hash::Hash {
fn as_fixed_bytes(&self) -> &[u8] {
self.to_slice()
}
}
impl AsFixedBytes for ::secp::pedersen::RangeProof {
fn as_fixed_bytes(&self) -> &[u8] {
&self.bytes()
}
}
|
read_vec
|
wkt_step_duration.rs
|
use crate::fields::FieldContent;
use serde::Serialize;
#[derive(Clone, Copy, Debug, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum WktStepDuration {
Calories,
Distance,
HrGreaterThan,
HrLessThan,
Open,
Power10SGreaterThan,
Power10SLessThan,
Power30SGreaterThan,
Power30SLessThan,
Power3SGreaterThan,
Power3SLessThan,
PowerGreaterThan,
PowerLapGreaterThan,
PowerLapLessThan,
PowerLessThan,
RepeatUntilCalories,
RepeatUntilDistance,
RepeatUntilHrGreaterThan,
RepeatUntilHrLessThan,
RepeatUntilMaxPowerLastLapLessThan,
RepeatUntilPowerGreaterThan,
RepeatUntilPowerLastLapLessThan,
RepeatUntilPowerLessThan,
RepeatUntilStepsCmplt,
RepeatUntilTime,
RepeatUntilTrainingPeaksTss,
RepetitionTime,
Reps,
Time,
TrainingPeaksTss,
UnknownValue(u64),
}
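// Maps the raw enum value from the FIT field to the typed variant; values
// this crate doesn't recognize are preserved as UnknownValue.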
impl From<FieldContent> for WktStepDuration {
fn from(field: FieldContent) -> Self {
if let FieldContent::Enum(enum_value) = field {
match enum_value {
|
3 => WktStepDuration::HrGreaterThan,
4 => WktStepDuration::Calories,
5 => WktStepDuration::Open,
6 => WktStepDuration::RepeatUntilStepsCmplt,
7 => WktStepDuration::RepeatUntilTime,
8 => WktStepDuration::RepeatUntilDistance,
9 => WktStepDuration::RepeatUntilCalories,
10 => WktStepDuration::RepeatUntilHrLessThan,
11 => WktStepDuration::RepeatUntilHrGreaterThan,
12 => WktStepDuration::RepeatUntilPowerLessThan,
13 => WktStepDuration::RepeatUntilPowerGreaterThan,
14 => WktStepDuration::PowerLessThan,
15 => WktStepDuration::PowerGreaterThan,
16 => WktStepDuration::TrainingPeaksTss,
17 => WktStepDuration::RepeatUntilPowerLastLapLessThan,
18 => WktStepDuration::RepeatUntilMaxPowerLastLapLessThan,
19 => WktStepDuration::Power3SLessThan,
20 => WktStepDuration::Power10SLessThan,
21 => WktStepDuration::Power30SLessThan,
22 => WktStepDuration::Power3SGreaterThan,
23 => WktStepDuration::Power10SGreaterThan,
24 => WktStepDuration::Power30SGreaterThan,
25 => WktStepDuration::PowerLapLessThan,
26 => WktStepDuration::PowerLapGreaterThan,
27 => WktStepDuration::RepeatUntilTrainingPeaksTss,
28 => WktStepDuration::RepetitionTime,
29 => WktStepDuration::Reps,
n => WktStepDuration::UnknownValue(n as u64),
}
} else {
panic!("can't convert WktStepDuration to {:?}", field);
}
}
}
|
0 => WktStepDuration::Time,
1 => WktStepDuration::Distance,
2 => WktStepDuration::HrLessThan,
|
assetfilters.go
|
package media
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// AssetFiltersClient is the client for the AssetFilters methods of the Media service.
type AssetFiltersClient struct {
BaseClient
}
// NewAssetFiltersClient creates an instance of the AssetFiltersClient client.
func NewAssetFiltersClient(subscriptionID string) AssetFiltersClient {
return NewAssetFiltersClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewAssetFiltersClientWithBaseURI creates an instance of the AssetFiltersClient client.
func NewAssetFiltersClientWithBaseURI(baseURI string, subscriptionID string) AssetFiltersClient {
return AssetFiltersClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates an Asset Filter associated with the specified Asset.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// assetName - the Asset name.
// filterName - the Asset Filter name
// parameters - the request parameters
func (client AssetFiltersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string, parameters AssetFilter) (result AssetFilter, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.FilterProperties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.FilterProperties.PresentationTimeRange", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.FilterProperties.PresentationTimeRange.StartTimestamp", Name: validation.Null, Rule: true, Chain: nil},
{Target: "parameters.FilterProperties.PresentationTimeRange.EndTimestamp", Name: validation.Null, Rule: true, Chain: nil},
{Target: "parameters.FilterProperties.PresentationTimeRange.PresentationWindowDuration", Name: validation.Null, Rule: true, Chain: nil},
{Target: "parameters.FilterProperties.PresentationTimeRange.LiveBackoffDuration", Name: validation.Null, Rule: true, Chain: nil},
{Target: "parameters.FilterProperties.PresentationTimeRange.Timescale", Name: validation.Null, Rule: true, Chain: nil},
{Target: "parameters.FilterProperties.PresentationTimeRange.ForceEndTimestamp", Name: validation.Null, Rule: true, Chain: nil},
}},
{Target: "parameters.FilterProperties.FirstQuality", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.FilterProperties.FirstQuality.Bitrate", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("media.AssetFiltersClient", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, assetName, filterName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "CreateOrUpdate", resp, "Failure sending request")
return
}
result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "CreateOrUpdate", resp, "Failure responding to request")
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client AssetFiltersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string, parameters AssetFilter) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"assetName": autorest.Encode("path", assetName),
"filterName": autorest.Encode("path", filterName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-07-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client AssetFiltersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client AssetFiltersClient) CreateOrUpdateResponder(resp *http.Response) (result AssetFilter, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes an Asset Filter associated with the specified Asset.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// assetName - the Asset name.
// filterName - the Asset Filter name
func (client AssetFiltersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.Delete")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, assetName, filterName)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Delete", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Delete", resp, "Failure sending request")
return
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Delete", resp, "Failure responding to request")
}
return
}
// DeletePreparer prepares the Delete request.
func (client AssetFiltersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"assetName": autorest.Encode("path", assetName),
"filterName": autorest.Encode("path", filterName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-07-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client AssetFiltersClient) DeleteSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client AssetFiltersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get get the details of an Asset Filter associated with the specified Asset.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// assetName - the Asset name.
// filterName - the Asset Filter name
func (client AssetFiltersClient) Get(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string) (result AssetFilter, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, accountName, assetName, filterName)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client AssetFiltersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"assetName": autorest.Encode("path", assetName),
"filterName": autorest.Encode("path", filterName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-07-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client AssetFiltersClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client AssetFiltersClient) GetResponder(resp *http.Response) (result AssetFilter, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List list Asset Filters associated with the specified Asset.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// assetName - the Asset name.
func (client AssetFiltersClient) List(ctx context.Context, resourceGroupName string, accountName string, assetName string) (result AssetFilterCollectionPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.List")
defer func() {
sc := -1
if result.afc.Response.Response != nil {
sc = result.afc.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName, accountName, assetName)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.afc.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "List", resp, "Failure sending request")
return
}
result.afc, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client AssetFiltersClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"assetName": autorest.Encode("path", assetName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-07-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client AssetFiltersClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client AssetFiltersClient) ListResponder(resp *http.Response) (result AssetFilterCollection, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client AssetFiltersClient) listNextResults(ctx context.Context, lastResults AssetFilterCollection) (result AssetFilterCollection, err error) {
req, err := lastResults.assetFilterCollectionPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "media.AssetFiltersClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "media.AssetFiltersClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client AssetFiltersClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, assetName string) (result AssetFilterCollectionIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx, resourceGroupName, accountName, assetName)
return
}
// Update updates an existing Asset Filter associated with the specified Asset.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// assetName - the Asset name.
// filterName - the Asset Filter name
|
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.Update")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, assetName, filterName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Update", nil, "Failure preparing request")
return
}
resp, err := client.UpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Update", resp, "Failure sending request")
return
}
result, err = client.UpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Update", resp, "Failure responding to request")
}
return
}
// UpdatePreparer prepares the Update request.
func (client AssetFiltersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string, parameters AssetFilter) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"assetName": autorest.Encode("path", assetName),
"filterName": autorest.Encode("path", filterName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-07-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client AssetFiltersClient) UpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client AssetFiltersClient) UpdateResponder(resp *http.Response) (result AssetFilter, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
|
// parameters - the request parameters
func (client AssetFiltersClient) Update(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string, parameters AssetFilter) (result AssetFilter, err error) {
|
users.controller.ts
|
import {
Body,
Controller,
Post,
Get,
Patch,
Param,
Delete,
NotFoundException,
Session,
UseGuards,
} from '@nestjs/common';
import { CreateUserDto } from './dtos/create-user.dto';
import { UpdateUserDto } from './dtos/update-user.dto';
import { Serialize } from '../interceptors/serialize.interceptor';
import { UsersService } from './users.service';
import { UserDto } from './dtos/user.dto';
import { AuthService } from './auth.service';
import { User } from './user.entity';
import { CurrentUser } from './decorators/current-user.decorator';
import { CurrentUserGuard } from './guards/auth.gaurd';
@Controller('auth')
@Serialize(UserDto)
export class
|
{
constructor(
private userService: UsersService,
private authService: AuthService,
) {}
@Get('/')
findAllUser() {
return this.userService.findAllUser();
}
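  // Creates the user and stores its id in the cookie-based session so that
  // subsequent requests are treated as authenticated.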
@Post('/signup')
async createUser(@Body() body: CreateUserDto, @Session() session: any) {
const user = await this.authService.signup(body.email, body.password);
session.userId = user.id;
return user;
// return this.authService.signup(body.email, body.password)
}
@Post('/signin')
async signin(@Body() body: CreateUserDto, @Session() session: any) {
const user = await this.authService.signin(body.email, body.password);
session.userId = user.id;
return user;
}
// @Get('/whoami')
// whoami(@Session() session: any, @CurrentUser() user: any) {
// return this.userService.findOneUser(session.userId);
// }
@Get('/whoami')
whoami(@CurrentUser() user: User) {
return user;
}
@Get('/:id')
async findOneUser(@Param('id') id: string) {
const user = await this.userService.findOneUser(parseInt(id));
if (!user) {
throw new NotFoundException('User does not exist!');
}
return user;
}
@Patch('/:id')
@UseGuards(CurrentUserGuard)
updateUser(@Param('id') id: string, @Body() body: Partial<UpdateUserDto>) {
return this.userService.updateUser(parseInt(id), body);
}
@Delete('/:id')
removeUser(@Param('id') id: string) {
return this.userService.removeUser(parseInt(id));
}
@Post('/signout')
signout(@Session() session: any) {
session.userId = null;
return { message: 'You are signed out!' };
}
}
|
UsersController
|
main.ts
|
import {AmmoPhysics, ExtendedObject3D, PhysicsLoader} from "@enable3d/ammo-physics/dist";
import { OrbitControls } from "three/examples/jsm/controls/OrbitControls"
import { Scene, Color, PerspectiveCamera, WebGLRenderer, HemisphereLight, AmbientLight, DirectionalLight, BoxBufferGeometry, MeshLambertMaterial, Mesh, SphereBufferGeometry, Clock, Vector3, MeshBasicMaterial } from "three";
import {Ai} from "./ai";
import {Population} from "./population";
import {Creature} from "./creature";
import Chart = require("chart.js");
console.log('hello world')
const MainScene = () => {
const scene = new Scene()
scene.background = new Color(0xf0f0f0)
document.getElementsByTagName("body")[0].setAttribute("style", "margin: 0")
// camera
const camera = new PerspectiveCamera(50, window.innerWidth / (window.innerHeight /2), 0.1, 1000)
camera.position.set(10, 10, 10)
// renderer
const renderer = new WebGLRenderer({antialias: true})
renderer.setSize(window.innerWidth, window.innerHeight /2)
renderer.shadowMap.enabled = true;
document.body.appendChild(renderer.domElement)
const chartCanvas = document.createElement("canvas");
const chartCTX = chartCanvas.getContext("2d");
document.body.appendChild(chartCanvas)
const chartStorage = localStorage.getItem('chart');
let chartData;
if (chartStorage) {
chartData = JSON.parse(chartStorage);
} else {
chartData = {
labels: [],
data: []
}
}
Chart.defaults.global.elements.point.radius = chartData.labels.length > 50 ? 0 : 3;
Chart.defaults.global.elements.line.tension = chartData.labels.length > 50 ? 0 : 0.4;
var chart = new Chart(chartCTX, {
// The type of chart we want to create
type: 'line',
// The data for our dataset
data: {
labels: chartData.labels,
datasets: [{
label: 'score',
backgroundColor: 'rgb(255, 99, 132)',
borderColor: 'rgb(255, 99, 132)',
data: chartData.data
}]
},
// Configuration options go here
options: {
aspectRatio: window.innerWidth / (window.innerHeight /2)
}
});
// dpr
const DPR = window.devicePixelRatio
renderer.setPixelRatio(Math.min(2, DPR))
// orbit controls
const controls = new OrbitControls(camera, renderer.domElement)
controls.target = new Vector3(0, 3,0)
controls.update();
// light
scene.add(new HemisphereLight(0xffffbb, 0x080820, 0.5))
scene.add(new AmbientLight(0x666666))
const light = new DirectionalLight(0xdfebff, 0.5)
light.position.set(50, 200, 100)
light.position.multiplyScalar(1.3)
light.castShadow = true;
//light.shadow.bias = 0.00001
scene.add(light)
// physics
const physics = new AmmoPhysics(scene)
//physics.debug.enable()
//physics.debug.mode(4097)
// extract the object factory from physics
// the factory will make/add object without physics
const { factory } = physics
// static ground
physics.add.ground(
{ width: 200, height: 200, name: 'ground', y: -0.1},
{lambert: {color: '#566573'}}
);
const geo = new BoxBufferGeometry(200, 0.91, 200, 50, undefined, 50 )
const mat = new MeshBasicMaterial({wireframe: true, wireframeLinewidth: 4, wireframeLinecap: 'round', wireframeLinejoin: "round", transparent: true, opacity: 0.5, color: 0xFFEEEEEE});
scene.add(new Mesh(geo, mat));
const dna = localStorage.getItem('dna');
const creature = new Creature(factory, physics, scene);
const population = new Population(10, creature.bodies.length * 12 +3, creature.hinges.length, dna ? JSON.parse(dna) : undefined);
const clock = new Clock();
const deadline = 5000;
const delta = 1/60 * 1000;
let currentCreatureTime = 0;
let currentSpecies = population.species[0];
let currentSpeciesIndex = 0;
|
let ticks = 0
creature.torso.body.on.collision((o, e) => {
if (o.name === 'ground' && e === 'start') {
//currentSpecies.reward = -4;
//currentCreatureTime += deadline * 10;
}
})
setInterval(() => {
//const delta = clock.getDelta() * 1000;
currentCreatureTime += delta;
physics.update(delta);
// update net
let j = 0;
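        // each rigid body contributes 12 network inputs: position, rotation,
        // velocity and angular velocity (x/y/z each), matching the input size
        // of bodies.length * 12 + 3 used when building the population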
for(const body of creature.bodies) {
currentSpecies.ai.input[j++] = body.position.x;
currentSpecies.ai.input[j++] = body.position.y;
currentSpecies.ai.input[j++] = body.position.z;
currentSpecies.ai.input[j++] = body.rotation.x;
currentSpecies.ai.input[j++] = body.rotation.y;
currentSpecies.ai.input[j++] = body.rotation.z;
currentSpecies.ai.input[j++] = body.velocity.x;
currentSpecies.ai.input[j++] = body.velocity.y;
currentSpecies.ai.input[j++] = body.velocity.z;
currentSpecies.ai.input[j++] = body.angularVelocity.x;
currentSpecies.ai.input[j++] = body.angularVelocity.y;
currentSpecies.ai.input[j++] = body.angularVelocity.z;
}
currentSpecies.ai.input[j++] = creature.torso.position.y;
currentSpecies.ai.input[j++] = creature.torso.position.x;
currentSpecies.ai.input[j++] = currentCreatureTime % 1000;
currentSpecies.ai.update();
let i = 0;
for (const hinge of creature.hinges) {
hinge.enableAngularMotor(true, currentSpecies.ai.output[i], 5)
//hinge.setAngularOnly(currentSpecies.ai.output[i])
i++;
}
ticks++;
accumulatedVelocity += creature.torso.body.velocity.z
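        // a creature's run ends once its elapsed time exceeds the deadline,
        // which grows with how far forward (+z) the torso has travelled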
if (currentCreatureTime > deadline + creature.torso.position.z * 1000) {
// life is ended here
currentSpecies.reward = accumulatedVelocity / ticks + (creature.head.body.position.y - 4);
currentSpeciesIndex++;
if (currentSpeciesIndex >= population.species.length) {
currentSpeciesIndex = 0;
population.populate();
if (highScore < population.species[0].reward) {
highScore = population.species[0].reward;
console.log(highScore, "HIGHSCORE");
}
gen++;
console.log(population.species[0].reward, "Gen", gen);
chartData.labels.push(gen)
chartData.data.push(population.species[0].reward);
localStorage.setItem('chart', JSON.stringify(chartData));
localStorage.setItem('dna', JSON.stringify(population.species[0].ai.dna));
chart.update();
}
currentSpecies = population.species[currentSpeciesIndex];
//currentSpecies.reward = 0;
currentCreatureTime = 0;
ticks = 0;
accumulatedVelocity = 0;
creature.reset();
}
}, delta)
const animate = () => {
renderer.render(scene, camera)
requestAnimationFrame(animate)
}
requestAnimationFrame(animate)
}
PhysicsLoader('lib', () => MainScene())
|
let highScore = 0;
let gen = chartData.labels.length;
let accumulatedVelocity = 0;
|
search_aggs_terms_test.go
|
package elastic
import (
"encoding/json"
"testing"
)
func TestTermsAggregation(t *testing.T) {
agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc()
data, err := json.Marshal(agg.Source())
if err != nil {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
expected := `{"terms":{"field":"gender","order":{"_term":"desc"},"size":10}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
}
func TestTermsAggregationWithSubAggregation(t *testing.T) {
subAgg := NewAvgAggregation().Field("height")
agg := NewTermsAggregation().Field("gender").Size(10).
OrderByAggregation("avg_height", false)
agg = agg.SubAggregation("avg_height", subAgg)
data, err := json.Marshal(agg.Source())
if err != nil {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
}
func TestTermsAggregationWithMultipleSubAggregation(t *testing.T) {
subAgg1 := NewAvgAggregation().Field("height")
subAgg2 := NewAvgAggregation().Field("width")
agg := NewTermsAggregation().Field("gender").Size(10).
|
agg = agg.SubAggregation("avg_width", subAgg2)
data, err := json.Marshal(agg.Source())
if err != nil {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}},"avg_width":{"avg":{"field":"width"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
}
|
OrderByAggregation("avg_height", false)
agg = agg.SubAggregation("avg_height", subAgg1)
|
CustomModalDialog.js
|
/**
* CustomModalDialog creates custom ReactBootstrap ModalDialog
* https://github.com/react-bootstrap/react-bootstrap/blob/master/src/ModalDialog.js
*
* This extends ModalDialog and adds contentClassName prop for setting
* `modal-content` div's class
*/
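// A minimal usage sketch (assuming react-bootstrap's 0.3x-era Modal, whose
// `dialogComponentClass` prop accepts a custom dialog component):
//
//   <Modal show dialogComponentClass={CustomModalDialog}
//          contentClassName="app-modal-content">
//     ...
//   </Modal>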
import classNames from 'classnames';
import React from 'react';
import PropTypes from 'prop-types';
import { utils } from 'react-bootstrap';
const { bsClass } = utils.bootstrapUtils;
const { bsSizes } = utils.bootstrapUtils;
const { getClassSet } = utils.bootstrapUtils;
const { prefix } = utils.bootstrapUtils;
const { splitBsProps } = utils.bootstrapUtils;
// React Bootstrap utils/StyleConfig Size is currently not exported
const Size = {
LARGE: 'large',
SMALL: 'small'
};
// eslint-disable-next-line react/prefer-stateless-function
class
|
extends React.Component {
render() {
const {
dialogClassName,
contentClassName,
className,
style,
children,
...props
} = this.props;
const [bsProps, elementProps] = splitBsProps(props);
const bsClassName = prefix(bsProps);
const modalStyle = { display: 'block', ...style };
const dialogClasses = {
...getClassSet(bsProps),
[bsClassName]: false,
[prefix(bsProps, 'dialog')]: true
};
return (
<div
{...elementProps}
tabIndex="-1"
role="dialog"
style={modalStyle}
className={classNames(className, bsClassName)}
>
<div className={classNames(dialogClassName, dialogClasses)}>
<div
className={classNames(prefix(bsProps, 'content'), contentClassName)}
role="document"
>
{children}
</div>
</div>
</div>
);
}
}
CustomModalDialog.propTypes = {
/** A css class to apply to the Modal dialog DOM node. */
dialogClassName: PropTypes.string,
/** custom modal-content class added to the content DOM node */
contentClassName: PropTypes.string,
/** base modal class name */
className: PropTypes.string,
/** additional modal styles */
style: PropTypes.object,
/** Children nodes */
children: PropTypes.node
};
CustomModalDialog.defaultProps = {
dialogClassName: '',
contentClassName: '',
className: '',
style: {},
children: null
};
export default bsClass(
'modal',
bsSizes([Size.LARGE, Size.SMALL], CustomModalDialog)
);
|
CustomModalDialog
|
main.rs
|
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32
}
struct Foo(String);
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
|
fn main() {
let rec = Rectangle { width: 30, height: 50 };
println!("rectangle are is {}", rec.area());
let a = Foo(String::from("a"));
let b = Foo(String::from("b"));
let Foo(ref A) = a;
let B = b.0;
}
|
}
}
|
loadtowns.py
|
import os
from django.contrib.gis.utils import LayerMapping
from liveapp.models import Town
town_mapping = {
'town_name': 'Town_Name',
'town_type': 'Town_Type',
'geom': 'MULTIPOINT',
}
town_shp = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data/subcounty', 'towns.shp'),)
def run(verbose=True):
|
transform=False, encoding='iso-8859-1',
)
lm.save(strict=True, verbose=verbose)
|
lm = LayerMapping(
Town, town_shp, town_mapping,
|
example_test.go
|
package example_test
import (
"fmt"
"github.com/zzerjae/sejong"
)
func ExampleT()
|
{
sejong.Locale = "en-GB"
message, err := sejong.T("message.welcome", "nickname", "John")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
message, err = sejong.T("message.farewell", "nickname", "John", "time", "5")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
sejong.Locale = "ko"
message, err = sejong.T("message.welcome", "nickname", "길동")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
ko, err := sejong.New("ko")
if err != nil {
fmt.Println(err)
return
}
gb, err := sejong.New("en-GB")
if err != nil {
fmt.Println(err)
return
}
message, err = ko.T("message.welcome", "nickname", "길동")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
message, err = gb.T("message.welcome", "nickname", "John")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
message, err = gb.T("message.friend", "count", "0")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
message, err = gb.T("message.friend", "count", "1")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
message, err = gb.T("message.friend", "count", "2")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
message, err = ko.T("message.friend", "count", "0")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
message, err = ko.T("message.friend", "count", "1")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
message, err = ko.T("message.friend", "count", "2")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(message)
// Output:
// Hello, John!
// It's 5 o'clock. Good bye, John!
// 안녕, 길동!
// 안녕, 길동!
// Hello, John!
// I have no friend.
// I have a friend.
// I have 2 friends.
// 저는 친구가 없어요.
// 저는 1명의 친구가 있어요.
// 저는 2명의 친구가 있어요.
}
|
|
providers.go
|
// Copyright 2022 VMware Tanzu Community Edition contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package providers
|
// Provider represents a provider of cluster / node infrastructure
// This is an alpha-grade internal API
type Provider interface {
}
| |
widgets.go
|
package progress
import (
"fmt"
"runtime"
"sort"
"strings"
"time"
"github.com/urionz/goutil/fmtutil"
)
var builtinWidgets = map[string]WidgetFunc{
"elapsed": func(p *Progress) string { // 消耗时间
// fmt.Sprintf("%.3f", time.Since(startTime).Seconds()*1000)
elapsed := time.Since(p.StartedAt()).Seconds()
return fmtutil.HowLongAgo(int64(elapsed))
},
"remaining": func(p *Progress) string { // 剩余时间
step := p.Progress() // current progress
// not set max steps OR current progress is 0
if p.MaxSteps == 0 || step == 0 {
return "unknown"
}
// get elapsed time
elapsed := int64(time.Since(p.StartedAt()).Seconds())
// calc remaining time
remaining := uint(elapsed) / step * (p.MaxSteps - step)
return fmtutil.HowLongAgo(int64(remaining))
},
"estimated": func(p *Progress) string { // 计算总的预计时间
step := p.Progress() // current progress
// not set max steps OR current progress is 0
if p.MaxSteps == 0 || step == 0 {
return "unknown"
}
// get elapsed time
elapsed := int64(time.Since(p.StartedAt()).Seconds())
// calc estimated time
estimated := float32(elapsed) / float32(step) * float32(p.MaxSteps)
return fmtutil.HowLongAgo(int64(estimated))
},
"memory": func(p *Progress) string { // Memory consumption
mem := new(runtime.MemStats)
runtime.ReadMemStats(mem)
return fmtutil.DataSize(mem.Sys)
},
"max": func(p *Progress) string {
return fmt.Sprint(p.MaxSteps)
},
"current": func(p *Progress) string {
step := fmt.Sprint(p.Progress())
width := fmt.Sprint(p.StepWidth)
diff := len(width) - len(step)
if diff <= 0 {
return step
}
return strings.Repeat(" ", diff) + step
},
"percent": func(p *Progress) string {
return fmt.Sprintf("%.1f", p.Percent()*100)
},
}
// DynamicTextWidget creates a dynamic text message widget for the progress bar.
// For the messages param: the int key is a percent in the range 0 - 100; the value is the message string.
// See the example for usage.
func DynamicTextWidget(messages map[int]string) WidgetFunc {
var numbers []int
for val := range messages {
numbers = append(numbers, val)
}
// sort
sort.Ints(numbers)
return func(p *Progress) string {
percent := int(p.Percent() * 100)
for _, val := range numbers {
if percent <= val {
return messages[val]
}
}
return " Handling ..." // Should never happen
}
}
// LoadingWidget create a loading progress widget
func LoadingWidget(chars []rune) WidgetFunc {
builder := loadingCharBuilder
|
und-trip widget for progress bar.
//
// Output like `[ ==== ]`
func RoundTripWidget(char rune, charNum, boxWidth int) WidgetFunc {
builder := roundTripTextBuilder(char, charNum, boxWidth)
return func(_ *Progress) string {
return builder()
}
}
// BarWidget create a progress bar widget.
//
// Output like `[==============>-------------]`
func BarWidget(width int, cs BarChars) WidgetFunc {
if width < 1 {
width = BarWidth
}
if cs.Completed == 0 {
cs.Completed = CharWell
}
return func(p *Progress) string {
var completeLen float32
if p.MaxSteps > 0 { // MaxSteps is valid
completeLen = p.percent * float32(width)
} else { // not set MaxSteps
completeLen = float32(p.step % uint(width))
}
bar := string(repeatRune(cs.Completed, int(completeLen)))
if diff := width - int(completeLen); diff > 0 {
bar += string(cs.Processing) + string(repeatRune(cs.Remaining, diff-1))
}
return bar
}
}
func loadingCharBuilder(chars []rune) func() string {
if len(chars) == 0 {
chars = RandomCharsTheme()
}
index := 0
length := len(chars)
return func() string {
char := string(chars[index])
if index+1 == length { // reset
index = 0
} else {
index++
}
return char
}
}
func roundTripTextBuilder(char rune, charNum, boxWidth int) func() string {
if char == 0 {
char = CharEqual
}
if charNum < 1 {
charNum = 4
}
if boxWidth < 1 {
boxWidth = 12
}
cursor := string(repeatRune(char, charNum))
// control direction. False: -> True: <->
direction := false
// record cursor position
position := 0
return func() string {
var bar string
if position > 0 {
bar += strings.Repeat(" ", position)
}
bar += cursor + strings.Repeat(" ", boxWidth-position-charNum)
if direction { // left <-
if position <= 0 { // begin ->
direction = false
} else {
position--
}
} else { // -> right
if position+charNum >= boxWidth { // begin <-
direction = true
} else {
position++
}
}
return bar
}
}
|
(chars)
return func(_ *Progress) string {
return builder()
}
}
// RoundTripWidget create a ro
|
config_v1.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_radarcfg_v1.ipynb (unless otherwise specified).
__all__ = ['logger', 'read_radar_params', 'parse_commands', 'dict_to_list', 'channelStr_to_dict', 'profileStr_to_dict',
'chirp_to_dict', 'power_to_dict', 'frameStr_to_dict', 'adcStr_to_dict', 'command_handlers']
# Cell
import logging
logger = logging.getLogger()
# Cell
def read_radar_params(filename):
"""Reads a text file containing serial commands and returns parsed config as a dictionary"""
with open(filename) as cfg:
iwr_cmds = cfg.readlines()
iwr_cmds = [x.strip() for x in iwr_cmds]
radar_cfg = parse_commands(iwr_cmds)
logger.debug(radar_cfg)
return radar_cfg
def parse_commands(commands):
"""Calls the corresponding parser for each command in commands list"""
cfg = None
for line in commands:
try:
cmd = line.split()[0]
args = line.split()[1:]
cfg = command_handlers[cmd](args, cfg)
except KeyError:
logger.debug(f'{cmd} is not handled')
except IndexError:
logger.debug(f'line is empty "{line}"')
return cfg
def dict_to_list(cfg):
"""Generates commands from config dictionary"""
cfg_list = ['flushCfg','dfeDataOutputMode 1']
# rx antennas/lanes for channel config
rx_bool = [cfg['rx4'], cfg['rx3'], cfg['rx2'], cfg['rx1']]
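    # build a bitmask where each enabled antenna sets its corresponding bit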
rx_mask = sum(2 ** i for i, v in enumerate(reversed(rx_bool)) if v)
# number of tx antennas for channel config
tx_bool = [cfg['tx3'], cfg['tx2'], cfg['tx1']]
tx_mask = sum(2 ** i for i, v in enumerate(reversed(tx_bool)) if v)
#print('[NOTE] Azimuth angle can be determined from channel config.') if cfg['tx2'] is True and (cfg['tx1'] or cfg['tx3']) is False else 0
#print('[NOTE] Azimuth angle can be determined from channel config.') if cfg['tx2'] is False and (cfg['tx1'] or cfg['tx3']) is True else 0
#print('[NOTE] Elevation and Azimuth angle can be determined from channel config.') if cfg['tx2'] is True and (cfg['tx1'] or cfg['tx3']) else 0
cfg_list.append('channelCfg %s %s 0' % (rx_mask, tx_mask)) # rx and tx mask
# adc config
if cfg['isComplex'] and cfg['image_band']:
outputFmt = 2
#print('[NOTE] Complex 2x mode, both Imaginary and Real IF spectrum is filtered and sent to ADC, so\n'
# ' if Sampling rate is X, ADC data would include frequency spectrum from -X/2 to X/2.')
    elif cfg['isComplex'] and not cfg['image_band']:
outputFmt = 1
#print('[NOTE] Complex 1x mode, Only Real IF Spectrum is filtered and sent to ADC, so if Sampling rate\n'
# ' is X, ADC data would include frequency spectrum from 0 to X.')
else: raise ValueError("Real Data Type Not Supported")
cfg_list.append('adcCfg 2 %s' % outputFmt) # 16 bits (mandatory), complex 1x or 2x
# adc power
if cfg['adcPower'] =='low':
power_mode = 1
#print('[NOTE] The Low power ADC mode limits the sampling rate to half the max value.')
elif cfg['adcPower'] =='regular': power_mode = 0
else: raise ValueError("ADC power level Not Supported")
cfg_list.append('lowPower 0 %s' % power_mode) # power mode
# profile configs
for profile_ii in cfg['profiles']:
cfg_list.append('profileCfg %s %s %s %s %s %s %s %s %s %s %s %s %s %s'
% (profile_ii['id'],
float(profile_ii['start_frequency']/1e9),
float(profile_ii['idle']/1e-6),
float(profile_ii['adcStartTime']/1e-6),
float(profile_ii['rampEndTime']/1e-6),
int(profile_ii['txPower']),
int(profile_ii['txPhaseShift']),
float(profile_ii['freqSlopeConst']/1e12),
float(profile_ii['txStartTime']/1e-6),
int(profile_ii['adcSamples']),
int(profile_ii['adcSampleRate']/1e3),
int(profile_ii['hpfCornerFreq1']),
int(profile_ii['hpfCornerFreq2']),
int(profile_ii['rxGain'])))
# chirp configs
for chirp_ii in cfg['chirps']:
# Check if chirp is referring to valid profile config
profile_valid = False
for profile_ii in cfg['profiles']:
if chirp_ii['profileID'] == profile_ii['id']: profile_valid = True
if profile_valid is False: raise ValueError("The following profile id used in chirp "
"is invalid: %i" % chirp_ii['profileID'])
###############################################################################################################
'''
# check if tx values are valid
if hamming([chirp_ii['chirptx3'],chirp_ii['chirptx2'],chirp_ii['chirptx1']],
[cfg['tx3'], cfg['tx2'], cfg['tx1']])*3 > 1:
raise ValueError("Chirp should have at most one different Tx than channel cfg")
'''
###############################################################################################################
if chirp_ii['chirpStartIndex'] > chirp_ii['chirpStopIndex']: raise ValueError("Particular chirp start index after chirp stop index")
tx_bool = [chirp_ii['chirptx3'],chirp_ii['chirptx2'],chirp_ii['chirptx1']]
tx_mask = sum(2 ** i for i, v in enumerate(reversed(tx_bool)) if v)
cfg_list.append('chirpCfg %s %s %s %s %s %s %s %s'
% (chirp_ii['chirpStartIndex'],
chirp_ii['chirpStopIndex'],
chirp_ii['profileID'],
chirp_ii['startFreqVariation'],
chirp_ii['slopeVariation'],
chirp_ii['idleVariation'],
chirp_ii['adcStartVariation'],
tx_mask))
# frame config
chirpStop = 0
chirpStart = 511 # max value for chirp start index
for chirp_ii in cfg['chirps']:
chirpStop = max(chirpStop, chirp_ii['chirpStopIndex'])
chirpStart = min(chirpStart,chirp_ii['chirpStartIndex'])
chirps_len = chirpStop + 1
numLoops = cfg['numChirps']/chirps_len
if chirpStart > chirpStop: raise ValueError("Chirp(s) start index is after chirp stop index")
if numLoops % 1 != 0: raise ValueError("Number of loops is not integer")
if numLoops > 255 or numLoops < 1: raise ValueError("Number of loops must be int in [1,255]")
numFrames = cfg['numFrames'] if 'numFrames' in cfg.keys() else 0 # if zero => inf
cfg_list.append('frameCfg %s %s %s %s %s 1 0'
% (chirpStart, chirpStop, int(numLoops), numFrames, 1000/cfg['fps']))
cfg_list.append('testFmkCfg 0 0 0 1')
cfg_list.append('setProfileCfg disable ADC disable')
return cfg_list
def channelStr_to_dict(args, curr_cfg=None):
"""Handler for `channelcfg`"""
if curr_cfg:
cfg = curr_cfg
else:
cfg = {}
# This is the number of receivers which is equivalent to the number of lanes in the source code
# Later, may include the result from the number of transmitters
rx_bin = bin(int(args[0]))[2:].zfill(4)
cfg['numLanes'] = len([ones for ones in rx_bin if ones == '1'])
(cfg['rx4'],cfg['rx3'],cfg['rx2'],cfg['rx1']) = [bool(int(ones)) for ones in rx_bin]
# This is the number of transmitters
tx_bin = bin(int(args[1]))[2:].zfill(3)
cfg['numTx'] = len([ones for ones in tx_bin if ones == '1'])
(cfg['tx3'], cfg['tx2'], cfg['tx1']) = [bool(int(ones)) for ones in tx_bin]
#print('[NOTE] Azimuth angle can be determined from channel config.') if cfg['tx2'] is True and (cfg['tx1'] or cfg['tx3']) is False else 0
#print('[NOTE] Azimuth angle can be determined from channel config.') if cfg['tx2'] is False and (cfg['tx1'] or cfg['tx3']) is True else 0
#print('[NOTE] Elevation and Azimuth angle can be determined from channel config.') if cfg['tx2'] is True and (cfg['tx1'] or cfg['tx3']) else 0
return cfg
def profileStr_to_dict(args, curr_cfg=None):
"""Handler for `profileCfg`"""
normalizer = [None, 1e9, 1e-6, 1e-6, 1e-6, None, None, 1e12, 1e-6, None, 1e3, None, None, None]
dtype = [int, float, float, float, float, float, float, float, float, int, float, int, int, float]
keys = ['id',
'start_frequency',
'idle',
'adcStartTime',
'rampEndTime',
'txPower',
'txPhaseShift',
'freqSlopeConst',
'txStartTime',
'adcSamples',
'adcSampleRate',
'hpfCornerFreq1',
'hpfCornerFreq2',
'rxGain',
]
# Check if the main dictionary exists
if curr_cfg:
cfg = curr_cfg
if 'profiles' not in cfg.keys():
cfg['profiles']=[]
else:
cfg = {'profiles': []}
profile_dict = {}
for k, v, n, d in zip(keys, args, normalizer, dtype):
profile_dict[k] = d(float(v) * n if n else v)
cfg['profiles'].append(profile_dict)
return cfg
def chirp_to_dict(args,curr_cfg=None):
"""Handler for `chirpCfg`"""
if curr_cfg:
cfg = curr_cfg
if 'chirps' not in cfg.keys():
cfg['chirps'] = []
else:
cfg = {'chirps': []}
chirp_dict = {}
chirp_dict['chirpStartIndex'] = int(args[0])
chirp_dict['chirpStopIndex'] = int(args[1])
chirp_dict['profileID'] = int(args[2])
chirp_dict['startFreqVariation'] = float(args[3])
chirp_dict['slopeVariation'] = float(args[4])
chirp_dict['idleVariation'] = float(args[5])
chirp_dict['adcStartVariation'] = float(args[6])
tx_bin = bin(int(args[7]))[2:].zfill(3)
(chirp_dict['chirptx3'], chirp_dict['chirptx2'], chirp_dict['chirptx1']) = [bool(int(ones)) for ones in tx_bin]
cfg['chirps'].append(chirp_dict)
return cfg
def power_to_dict(args,curr_cfg=None):
"""handler for `lowPower`"""
if curr_cfg:
cfg = curr_cfg
else:
cfg = {}
if int(args[1]) ==1:
cfg['adcPower'] = 'low'
#print('[NOTE] The Low power ADC mode limits the sampling rate to half the max value.')
elif int(args[1]) ==0:
cfg['adcPower'] = 'regular'
else:
raise ValueError ("Invalid Power Level")
return cfg
def
|
(args, cfg):
"""Handler for `frameCfg`"""
# Number of chirps
if 'chirps' not in cfg.keys():
raise ValueError("Need to define chirps before frame")
chirpStop =0
for ii in range(len(cfg['chirps'])):
chirpStop = max(chirpStop,cfg['chirps'][ii]['chirpStopIndex'])
chirps_len = chirpStop + 1
cfg['numChirps'] = int(args[2]) * chirps_len # num loops * len(chirps)
if int(args[3]) != 0: cfg['numFrames'] = int(args[3])
# args[4] is the time in milliseconds of each frame
cfg['fps'] = 1000/float(args[4])
return cfg
def adcStr_to_dict(args, curr_cfg=None):
"""Handler for `adcCfg`"""
if curr_cfg:
cfg = curr_cfg
else:
cfg = {}
if int(args[1]) == 1:
cfg['isComplex'] = True
cfg['image_band'] = False
#print('[NOTE] Complex 1x mode, Only Real IF Spectrum is filtered and sent to ADC, so if Sampling rate\n'
# ' is X, ADC data would include frequency spectrum from 0 to X.')
elif int(args[1]) == 2:
cfg['isComplex'] = True
cfg['image_band'] = True
#print('[NOTE] Complex 2x mode, both Imaginary and Real IF spectrum is filtered and sent to ADC, so\n'
# ' if Sampling rate is X, ADC data would include frequency spectrum from -X/2 to X/2.')
else:
raise ValueError("Real Data Type Not Supported")
return cfg
#Mapping of serial command to command handler
command_handlers = {
'channelCfg': channelStr_to_dict,
'profileCfg': profileStr_to_dict,
'chirpCfg': chirp_to_dict,
'frameCfg': frameStr_to_dict,
'adcCfg': adcStr_to_dict,
'lowPower': power_to_dict,
}
|
frameStr_to_dict
|
main.go
|
package main
import (
"context"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"regexp"
"time"
"crypto/rand"
"github.com/puppetlabs/kreamlet/bootstrap/kubelet"
pb "github.com/puppetlabs/kreamlet/bootstrap/messaging"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
const (
port = ":50091"
pathToCredsFile = "/etc/kubernetes/admin.conf"
)
func main() {
//in a background go routine start a tcp listener for grpc connections
go startListening()
// Time to wait for the kubelet container to start
time.Sleep(5 * time.Second)
taskRoot := random()
runCmd(taskRoot, []string{"ls"}, true)
err := initKubeAdm(taskRoot)
if err != nil {
//if initialising kube admin doesn't succeed, there is nothing we can do here, so just exit
log.Fatalf("kube admin initialisation failed %+v", err)
}
//for now keep the main thread alive whilst we wait for a tcp connection
	//(we should make a listener channel and wait for it to complete?)
	for {
fmt.Println("Listening.....")
time.Sleep(time.Minute)
}
}
func runCmd(taskRoot string, cmd []string, captureOutput bool)
|
func initKubeAdm(taskRoot string) error {
var output string
var err error
output, err = kubelet.Run("services.linuxkit", nextExecID(taskRoot), "kubelet", []string{"kubeadm-init.sh"}, true)
if err != nil {
fmt.Printf("initKubeAdm::Error occured running kubeadm-init.sh - %v", err)
os.Exit(1)
}
fmt.Printf("initKubeAdm::output is [%v]\n\n\n", output)
return err
}
func getJoinToken(taskRoot string) (string, error) {
var output, joinToken string
var err error
output, err = kubelet.Run("services.linuxkit", nextExecID(taskRoot), "kubelet", []string{"kubeadm", "token", "create"}, true)
fmt.Printf("getJoinToken::the output is [%v] err is %v\n", output, err)
if err == nil {
joinToken, err = extractJoinToken(output)
fmt.Printf("getJoinToken::jt is [%v] err is %v\n", joinToken, err)
}
fmt.Printf("getJoinToken::returning: \n output [%v], \n jt [%v], \n err [%v].", output, joinToken, err)
return joinToken, err
}
// server is used to implement AdminCredsServer
type server struct{}
// GetJoinToken implements AdminCredsServer.GetJoinToken
func (s *server) GetJoinToken(ctx context.Context, in *pb.JoinTokenRequest) (*pb.JoinTokenResponse, error) {
jt, err := getJoinToken(nextExecID(random()))
r := &pb.JoinTokenResponse{}
r.JoinToken = jt
return r, err
}
// GetAdminCreds implements AdminCredsServer.GetAdminCreds
func (s *server) GetAdminCreds(ctx context.Context, in *pb.AdminCredsRequest) (*pb.AdminCredsResponse, error) {
r := &pb.AdminCredsResponse{}
content, err := ioutil.ReadFile(pathToCredsFile)
if err == nil {
r.Content = content
r.StatusCode = pb.StatusCode_Ok
} else {
r.StatusCode = pb.StatusCode_Failed
r.Message = err.Error()
}
fmt.Printf("GetAdminCreds::Returning %v and err of %v\n", r, err)
return r, err
}
func startListening() {
fmt.Printf("startListening::Entered startListening about to listen on port %v\n", port)
lis, err := net.Listen("tcp", port)
if err != nil {
log.Fatalf("startListening::failed to listen: %v", err)
}
s := grpc.NewServer()
pb.RegisterAdminCredsServer(s, &server{})
reflection.Register(s)
fmt.Printf("startListening::About to listen on port %v\n", port)
if err := s.Serve(lis); err != nil {
log.Fatalf("startListening::failed to serve: %v", err)
}
fmt.Printf("startListening::Listening on port %v\n", port)
}
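// extractJoinToken normalizes the `kubeadm token create` output by stripping
// newline characters, leaving only the token string.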
func extractJoinToken(output string) (string, error) {
re := regexp.MustCompile(`\r?\n`)
return re.ReplaceAllString(output, ""), nil
}
func random() string {
n := 3
b := make([]byte, n)
if _, err := rand.Read(b); err != nil {
panic(err)
}
return fmt.Sprintf("%X", b)
}
func nextExecID(taskRoot string) string {
	execIDCounter++
return fmt.Sprintf("%v_%v", taskRoot, execIDCounter)
}
var execIDCounter = 0
|
{
output, err := kubelet.Run("services.linuxkit", nextExecID(taskRoot), "kubelet", cmd, captureOutput)
if err != nil {
fmt.Printf("runCmd::Ran: %v with captureOutput [%v] and output: %v\nErr: %v\n\n\n\n\n", cmd, captureOutput, output, err)
}
}
|
simulate.py
|
# imports
import numpy as np
import os, sys, pickle
file_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(file_dir)
# custom import
from theory import depth
from viz import get_colours
from numpy_simulation import *
from utils import load_experiment
from theory import critical_point
def perform_experiment(experiments):
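    """Run a noisy signal propagation simulation for each experiment config."""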
for i, experiment in enumerate(experiments):
dist = experiment['dist']
noise = experiment['noise']
act = experiment['act']
init = experiment['init']
# run simulations for scenario
noisy_signal_prop_simulations(dist, noise, act, init, seed=i)
def variance():
experiments = [
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"underflow"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"overflow"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"underflow"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"overflow"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"crit"}
]
perform_experiment(experiments)
def correlation():
# Compute experimental data
experiments = [
{"dist": "none", "noise": (None, None), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.8), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 2), "act":"relu", "init":"crit"}
]
perform_experiment(experiments)
def fixed_point():
# Compute experimental data
|
if __name__ == "__main__":
# results directory
results_dir = os.path.join(file_dir, "../results")
# variance()
# correlation()
fixed_point()
|
experiments = [
{"dist": "bern", "noise": ('prob_1', 0.1), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.2), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.3), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.4), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.5), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.7), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.8), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.9), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.1), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.4), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.55), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.7), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.85), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.0), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.15), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.3), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.45), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.6), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.75), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.9), "act":"relu", "init":"crit"}
]
perform_experiment(experiments)
|
8106.go
|
package main
import (
"command"
"context"
"errors"
"flag"
"fmt"
"github.com/fatih/color"
"log"
"net"
"regexp"
"rut"
"strconv"
"strings"
)
const (
IPV4 = 1
IPV6 = 2
)
type Threshold struct {
MaxNexthopIndex int64
MaxIPv4ALPMIndex int64
MaxIPv6ALPM64Index int64
MaxIPv6ALPM128Index int64
}
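// threshold holds the index limits of the relevant ASIC tables; entries that
// reference indices at or beyond these limits are reported as invalid.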
var threshold = Threshold{
MaxNexthopIndex: 49152,
MaxIPv4ALPMIndex: 393216,
MaxIPv6ALPM64Index: 262144,
MaxIPv6ALPM128Index: 131072,
}
type ALPMBucket struct {
}
type RouteEntry struct {
Index int64
Valid string
RPE string
ReservedECMPPTR string
Reserved0 string
PRI string
NextHopIndex int64
Length int64
Key net.IP
Hit bool
EvenParity string
EntryOnly string
ECMPPTR int64
ECMP bool
DstDiscard bool
SrcDiscard bool
DefaultRoute bool
Data string
Class_ID string
NH Nexthop
EG ECMPGroup
AF int
IsValid bool
}
type HostEntry struct {
Index int64
Valid string
RPE string
PRI string
NextHopIndex int64
IP net.IP
Hit bool
Length int64
ECMPPTR int64
ECMP bool
DstDiscard bool
Data string
KeyType int64
NH Nexthop
EG ECMPGroup
AF int
IsValid bool
}
var Dev *rut.RUT
type IF struct {
Index int64
Vid int64
MAC string
}
type Nexthop struct {
Index int64
OIF IF
DstPort int64
DstMac string
Drop bool
CopyToCPU bool
MTU int64
TGID int64
VLAN int64
}
type ECMPGroup struct {
Index int64
MemberCount int64
Member []*Nexthop
ECMPBasePTR int64
}
var CTX = context.Background()
func (eg *ECMPGroup) ParseNexthop() {
nhs := make([]*Nexthop, 0, eg.MemberCount)
var i int64
for i = 0; i < eg.MemberCount; i++ {
nh, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_ECMP %d %d", eg.ECMPBasePTR+i, eg.ECMPBasePTR+i),
})
if err != nil {
panic(err)
}
if res, err := match(nh, getNextHopIndex); err != nil {
panic(err)
} else {
nhi, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
} else {
nh, err := ParseNexthopByIndex(nhi)
if err != nil {
panic(err)
}
nhs = append(nhs, nh)
}
}
}
eg.Member = nhs
}
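// DumpECMPGroupByIndex reads a single L3_ECMP_GROUP entry and resolves all of
// its member nexthops. The hardware COUNT field is zero-based, hence the
// stored MemberCount is COUNT+1.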
func DumpECMPGroupByIndex(index int64) (ECMPGroup, error) {
var EG = ECMPGroup{}
ecmpg, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_ECMP_GROUP %d %d", index, index),
})
if err != nil {
panic(err)
}
if res, err := match(ecmpg, getECMPMemberCount); err != nil {
panic(err)
} else {
mcount, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
EG.MemberCount = mcount + 1
}
if res, err := match(ecmpg, getECMPBasePTR); err != nil {
panic(err)
} else {
base, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
EG.ECMPBasePTR = base
}
EG.ParseNexthop()
return EG, nil
}
func ParseNexthopByIndex(index int64) (*Nexthop, error) {
if index < 0 || index > threshold.MaxNexthopIndex {
panic("Invalid nexthop index")
}
nh := &Nexthop{
Index: index,
}
nh.GetINGNexthopInfo()
nh.GetEGRNexthopInfo()
nh.ParseOIF()
return nh, nil
}
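// GetINGNexthopInfo fills the ingress nexthop fields (copy-to-CPU, drop,
// trunk group, destination port and VLAN) from the ING_L3_NEXT_HOP table.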
func (nh *Nexthop) GetINGNexthopInfo() {
inh, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 ING_L3_NEXT_HOP %d %d", nh.Index, nh.Index),
})
if err != nil {
panic(err)
}
if res, err := match(inh, getINGNexthopCopyToCPU); err != nil {
panic(err)
} else {
if res == "0" {
nh.CopyToCPU = false
} else {
nh.CopyToCPU = true
}
}
if res, err := match(inh, getINGNexthopDrop); err != nil {
panic(err)
} else {
if res == "0" {
nh.Drop = false
} else {
nh.Drop = true
}
}
if res, err := match(inh, getINGNexthopTGID); err != nil {
panic(err)
} else {
tgid, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
nh.TGID = tgid
}
}
if res, err := match(inh, getINGNexthopPNUM); err != nil {
panic(err)
} else {
pnum, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
nh.DstPort = pnum
}
}
if res, err := match(inh, getINGNexthopVLAN); err != nil {
panic(err)
} else {
vlan, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
nh.VLAN = vlan
}
}
/*
if res, err := match(inh, getINGNexthopOIF); err != nil {
panic(err)
} else {
oif, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
nh.OIF = oif
}
}
*/
//fmt.Println(inh, err)
}
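// GetEGRNexthopInfo fills the destination MAC and the outgoing interface
// index from the EGR_L3_NEXT_HOP table.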
func (nh *Nexthop) GetEGRNexthopInfo() {
enh, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 EGR_L3_NEXT_HOP %d %d", nh.Index, nh.Index),
})
if err != nil {
panic(err)
}
if res, err := match(enh, getEGRNexthopMACAddress); err != nil {
panic(err)
} else {
nh.DstMac = FixMACAddress(res)
}
if res, err := match(enh, getEGRIfNum); err != nil {
panic(err)
} else {
index, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
}
nh.OIF.Index = index
}
}
func (re *RouteEntry) String() string {
var Out *color.Color
if re.Hit {
Out = color.New(color.FgGreen)
} else {
Out = color.New(color.FgWhite)
}
if re.AF == IPV4 {
if re.Length > 32 || re.NextHopIndex > threshold.MaxNexthopIndex {
if re.Length > 32 {
return Out.Sprintf("[%6d]: %39s/%-3d >> %20s", re.Index, re.Key, re.Length, "is not a valid IPv4 Address")
} else {
return Out.Sprintf("[%6d]: %39s/%-3d >> has a invalid nexthop index: %d", re.Index, re.Key, re.Length, re.NextHopIndex)
}
} else if !re.ECMP {
return Out.Sprintf("[%6d]: %39s/%-3d (%5t|%5t) >> NH[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", re.Index, re.Key, re.Length, re.DstDiscard, re.SrcDiscard, re.NextHopIndex, re.NH.OIF.MAC, re.NH.DstMac, re.NH.OIF.Index, re.NH.OIF.Vid, re.NH.DstPort)
} else {
base := Out.Sprintf("[%6d]: %39s/%-3d (%5t|%5t) >> is ECMP Route, ECMP_PTR: %5d, BASE_PTR: %5d, MemberCount: %2d", re.Index, re.Key, re.Length, re.DstDiscard, re.SrcDiscard, re.ECMPPTR, re.EG.ECMPBasePTR, re.EG.MemberCount)
for i := 0; i < len(re.EG.Member); i++ {
base += "\n"
base += Out.Sprintf("%73s[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", "NH", re.EG.Member[i].Index, re.EG.Member[i].OIF.MAC, re.EG.Member[i].DstMac, re.EG.Member[i].OIF.Index, re.EG.Member[i].OIF.Vid, re.EG.Member[i].DstPort)
}
return base
}
} else if re.AF == IPV6 {
if re.Length > 128 || re.NextHopIndex > threshold.MaxNexthopIndex {
if re.Length > 128 {
return Out.Sprintf("[%6d]: %39s/%-3d >> %20s", re.Index, re.Key, re.Length, "is not a valid IPv6 Address")
} else {
return Out.Sprintf("[%6d]: %39s/%-3d >> has a invalid nexthop index: %d", re.Index, re.Key, re.Length, re.NextHopIndex)
}
} else if !re.ECMP {
return Out.Sprintf("[%6d]: %39s/%-3d (%5t|%5t) >> NH[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", re.Index, re.Key, re.Length, re.DstDiscard, re.SrcDiscard, re.NextHopIndex, re.NH.OIF.MAC, re.NH.DstMac, re.NH.OIF.Index, re.NH.OIF.Vid, re.NH.DstPort)
} else {
base := Out.Sprintf("[%6d]: %39s/%-3d (%5t|%5t) >> is ECMP Route, ECMP_PTR: %5d, BASE_PTR: %5d, MemberCount: %2d", re.Index, re.Key, re.Length, re.DstDiscard, re.SrcDiscard, re.ECMPPTR, re.EG.ECMPBasePTR, re.EG.MemberCount)
for i := 0; i < len(re.EG.Member); i++ {
base += "\n"
base += Out.Sprintf("%73s[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", "NH", re.EG.Member[i].Index, re.EG.Member[i].OIF.MAC, re.EG.Member[i].DstMac, re.EG.Member[i].OIF.Index, re.EG.Member[i].OIF.Vid, re.EG.Member[i].DstPort)
}
return base
}
}
return fmt.Sprintf("Invalid route entry: %s/%d\n", re.Key, re.Length)
}
func (he *HostEntry) String() string {
var Out *color.Color
if he.Hit {
Out = color.New(color.FgGreen)
} else {
Out = color.New(color.FgWhite)
}
if he.AF == IPV4 && he.KeyType == 0 {
if he.NextHopIndex > threshold.MaxNexthopIndex {
return Out.Sprintf("[%6d]: %39s/%-3d >> has a invalid nexthop index: %d", he.Index, he.IP, he.Length, he.NextHopIndex)
} else if !he.ECMP {
return Out.Sprintf("[%6d]: %39s/%-3d (%5t|%2d) >> NH[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", he.Index, he.IP, he.Length, he.DstDiscard, he.KeyType, he.NextHopIndex, he.NH.OIF.MAC, he.NH.DstMac, he.NH.OIF.Index, he.NH.OIF.Vid, he.NH.DstPort)
} else {
base := Out.Sprintf("[%6d]: %39s/%-3d (%5t|%2d) >> is ECMP Route, ECMP_PTR: %5d, BASE_PTR: %5d, MemberCount: %2d", he.Index, he.IP, he.Length, he.DstDiscard, he.KeyType, he.ECMPPTR, he.EG.ECMPBasePTR, he.EG.MemberCount)
for i := 0; i < len(he.EG.Member); i++ {
base += "\n"
base += Out.Sprintf("%73s[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", "NH", he.EG.Member[i].Index, he.EG.Member[i].OIF.MAC, he.EG.Member[i].DstMac, he.EG.Member[i].OIF.Index, he.EG.Member[i].OIF.Vid, he.EG.Member[i].DstPort)
}
return base
}
} else if he.AF == IPV6 && he.KeyType == 2 {
if he.NextHopIndex > threshold.MaxNexthopIndex {
return Out.Sprintf("[%6d]: %39s/%-3d >> has a invalid nexthop index: %d", he.Index, he.IP, he.Length, he.NextHopIndex)
} else if !he.ECMP {
return Out.Sprintf("[%6d]: %39s/%-3d (%5t|%2d) >> NH[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", he.Index, he.IP, he.Length, he.DstDiscard, he.KeyType, he.NextHopIndex, he.NH.OIF.MAC, he.NH.DstMac, he.NH.OIF.Index, he.NH.OIF.Vid, he.NH.DstPort)
} else {
base := Out.Sprintf("[%6d]: %39s/%-3d (%5t|%2d) >> is ECMP Route, ECMP_PTR: %5d, BASE_PTR: %5d, MemberCount: %2d", he.Index, he.IP, he.Length, he.DstDiscard, he.KeyType, he.ECMPPTR, he.EG.ECMPBasePTR, he.EG.MemberCount)
for i := 0; i < len(he.EG.Member); i++ {
base += "\n"
base += Out.Sprintf("%73s[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", "NH", he.EG.Member[i].Index, he.EG.Member[i].OIF.MAC, he.EG.Member[i].DstMac, he.EG.Member[i].OIF.Index, he.EG.Member[i].OIF.Vid, he.EG.Member[i].DstPort)
}
return base
}
}
//Currently just skip the invalid entry.
return ""
}
var getEntryIndex = regexp.MustCompile(`[[:word:]_]+\.\*\[(?P<index>[0-9]+)\]`)
var getValidBit = regexp.MustCompile(`VALID=(?P<valid>[0-9]+)`)
var getNextHopIndex = regexp.MustCompile(`,NEXT_HOP_INDEX=(?P<nhi>[0x]?[[:alnum:]]+)`)
var getLength = regexp.MustCompile(`,LENGTH=(?P<len>[0x]?[[:alnum:]]+)`)
var getKey = regexp.MustCompile(`,KEY=(?P<key>[0x]?[[:alnum:]]+)`)
var getHitBit = regexp.MustCompile(`,HIT=(?P<hit>[0-9]+)`)
var getECMPPTR = regexp.MustCompile(`,ECMP_PTR=(?P<ecmpptr>[0x]?[[:alnum:]]+)`)
var getECMP = regexp.MustCompile(`,ECMP=(?P<ecmpptr>[0x]?[[:alnum:]]+)`)
var getSrcDiscard = regexp.MustCompile(`,SRC_DISCARD=(?P<srcdis>[0-9]+)`)
var getDstDiscard = regexp.MustCompile(`,DST_DISCARD=(?P<dstdis>[0-9]+)`)
var getDefaultRoute = regexp.MustCompile(`,DEFAULTROUTE=(?P<default>[0-9]+)`)
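// Note: "[0x]?" in these patterns is a character class matching one optional
// '0' or 'x', not the literal prefix "0x"; it is harmless in practice because
// [[:alnum:]]+ already consumes the whole token and strconv.ParseInt is
// called with base 0, which accepts a 0x prefix.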
//EGR_L3_NEXT_HOP
var getEGRNexthopMACAddress = regexp.MustCompile(`L3:MAC_ADDRESS=(?P<nmac>[0x]?[[:alnum:]]+)`)
var getEGRIfNum = regexp.MustCompile(`L3:INTF_NUM=(?P<ifn>[0x]?[[:alnum:]]+)`)
var getEGRIVID = regexp.MustCompile(`L3:IVID=(?P<ivid>[0x]?[[:alnum:]]+)`)
var getEGROVID = regexp.MustCompile(`L3:OVID=(?P<ovid>[0x]?[[:alnum:]]+)`)
var getEGRNexthopEntryType = regexp.MustCompile(`,ENTRY_TYPE=(?P<type>[0x]?[[:alnum:]]+)`)
//ING_L3_NEXT_HOP
var getINGNexthopCopyToCPU = regexp.MustCompile(`COPY_TO_CPU=(?P<ctc>[0x]?[[:alnum:]]+)`)
var getINGNexthopDrop = regexp.MustCompile(`DROP=(?P<ctc>[0x]?[[:alnum:]]+)`)
var getINGNexthopTGID = regexp.MustCompile(`TGID=(?P<tgid>[0x]?[[:alnum:]]+)`)
var getINGNexthopPNUM = regexp.MustCompile(`,PORT_NUM=(?P<pnum>[0x]?[[:alnum:]]+)`)
var getINGNexthopVLAN = regexp.MustCompile(`VLAN_ID=(?P<vid>[0x]?[[:alnum:]]+)`)
var getINGNexthopOIF = regexp.MustCompile(`,L3_OIF=(?P<oif>[0x]?[[:alnum:]]+)`)
var getINGNexthopEntryType = regexp.MustCompile(`,ENTRY_TYPE=(?P<type>[0x]?[[:alnum:]]+)`)
var getINGNexthopMTU = regexp.MustCompile(`MTU_SIZE=(?P<mtu>[0x]?[[:alnum:]]+)`)
// Validate marks the entry invalid when its table index or nexthop index is
// out of range. (The original version set IsValid unconditionally at the end,
// which overwrote the failure cases.)
func (re *RouteEntry) Validate() {
	re.IsValid = true
	if re.AF == IPV4 && re.Index >= threshold.MaxIPv4ALPMIndex {
		re.IsValid = false
	}
	if re.AF == IPV6 && re.Index >= threshold.MaxIPv6ALPM64Index {
		re.IsValid = false
	}
	if re.NextHopIndex >= threshold.MaxNexthopIndex {
		re.IsValid = false
	}
}
func (re *RouteEntry) ParseINGNexthopInfo() {
inh, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 ING_L3_NEXT_HOP %d %d", re.NextHopIndex, re.NextHopIndex),
})
if err != nil {
panic(err)
}
if res, err := match(inh, getINGNexthopCopyToCPU); err != nil {
panic(err)
} else {
if res == "0" {
re.NH.CopyToCPU = false
} else {
re.NH.CopyToCPU = true
}
}
if res, err := match(inh, getINGNexthopDrop); err != nil {
panic(err)
} else {
if res == "0" {
re.NH.Drop = false
} else {
re.NH.Drop = true
}
}
if res, err := match(inh, getINGNexthopTGID); err != nil {
panic(err)
} else {
tgid, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
re.NH.TGID = tgid
}
}
if res, err := match(inh, getINGNexthopPNUM); err != nil {
panic(err)
} else {
pnum, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
re.NH.DstPort = pnum
}
}
if res, err := match(inh, getINGNexthopVLAN); err != nil {
panic(err)
} else {
vlan, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
re.NH.VLAN = vlan
}
}
/*
if res, err := match(inh, getINGNexthopOIF); err != nil {
panic(err)
} else {
oif, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
re.NH.OIF = oif
}
}
*/
//fmt.Println(inh, err)
}
func (he *HostEntry) ParseINGNexthopInfo() {
inh, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 ING_L3_NEXT_HOP %d %d", he.NextHopIndex, he.NextHopIndex),
})
if err != nil {
panic(err)
}
if res, err := match(inh, getINGNexthopCopyToCPU); err != nil {
panic(err)
} else {
if res == "0" {
he.NH.CopyToCPU = false
} else {
he.NH.CopyToCPU = true
}
}
if res, err := match(inh, getINGNexthopDrop); err != nil {
panic(err)
} else {
if res == "0" {
he.NH.Drop = false
} else {
he.NH.Drop = true
}
}
if res, err := match(inh, getINGNexthopTGID); err != nil {
panic(err)
} else {
tgid, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
he.NH.TGID = tgid
}
}
if res, err := match(inh, getINGNexthopPNUM); err != nil {
panic(err)
} else {
pnum, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
he.NH.DstPort = pnum
}
}
if res, err := match(inh, getINGNexthopVLAN); err != nil {
panic(err)
} else {
vlan, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
he.NH.VLAN = vlan
}
}
/*
if res, err := match(inh, getINGNexthopOIF); err != nil {
panic(err)
} else {
oif, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
re.NH.OIF = oif
}
}
*/
//fmt.Println(inh, err)
}
func (re *RouteEntry) ParseEgrNexthopInfo() {
enh, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 EGR_L3_NEXT_HOP %d %d", re.NextHopIndex, re.NextHopIndex),
})
if err != nil {
panic(err)
}
if res, err := match(enh, getEGRNexthopMACAddress); err != nil {
panic(err)
} else {
re.NH.DstMac = FixMACAddress(res)
}
if res, err := match(enh, getEGRIfNum); err != nil
|
else {
index, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
}
re.NH.OIF.Index = index
}
//fmt.Println(enh, err)
}
func (he *HostEntry) ParseEgrNexthopInfo() {
enh, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 EGR_L3_NEXT_HOP %d %d", he.NextHopIndex, he.NextHopIndex),
})
if err != nil {
panic(err)
}
if res, err := match(enh, getEGRNexthopMACAddress); err != nil {
panic(err)
} else {
he.NH.DstMac = FixMACAddress(res)
}
if res, err := match(enh, getEGRIfNum); err != nil {
panic(err)
} else {
index, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
}
he.NH.OIF.Index = index
}
//fmt.Println(enh, err)
}
var getOIFMACAddress = regexp.MustCompile(`,MAC_ADDRESS=(?P<nmac>[0x]?[[:alnum:]]+)`)
var getOIFVID = regexp.MustCompile(`,VID=(?P<vid>[0x]?[[:alnum:]]+)`)
func (nh *Nexthop) ParseOIF() {
oif, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 EGR_L3_INTF %d %d", nh.OIF.Index, nh.OIF.Index),
})
if err != nil {
panic(err)
}
if res, err := match(oif, getOIFMACAddress); err != nil {
panic(err)
} else {
nh.OIF.MAC = FixMACAddress(res)
}
if res, err := match(oif, getOIFVID); err != nil {
panic(err)
} else {
vlan, err := strconv.ParseInt(res, 0, 32)
if err != nil {
panic(err)
} else {
nh.OIF.Vid = vlan
}
}
}
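// ParseNexthopInfo resolves the forwarding information for the entry: either
// a single nexthop, or the whole ECMP group when the ECMP bit is set.
// Entries with an out-of-range nexthop index are skipped.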
func (re *RouteEntry) ParseNexthopInfo() {
if re.NextHopIndex >= threshold.MaxNexthopIndex {
//fmt.Printf("Skip the nexthop parse for %s due to Invalid nexthop index!", re)
return
}
if !re.ECMP {
re.NH.Index = re.NextHopIndex
re.ParseINGNexthopInfo()
re.ParseEgrNexthopInfo()
re.NH.ParseOIF()
} else {
re.EG.Index = re.ECMPPTR
re.ParseECMPGroup()
}
}
func (he *HostEntry) ParseNexthopInfo() {
if he.NextHopIndex >= threshold.MaxNexthopIndex {
//fmt.Printf("Skip the nexthop parse for %s due to Invalid nexthop index!", re)
return
}
if !he.ECMP {
he.NH.Index = he.NextHopIndex
he.ParseINGNexthopInfo()
he.ParseEgrNexthopInfo()
he.NH.ParseOIF()
} else {
he.EG.Index = he.ECMPPTR
he.ParseECMPGroup()
}
}
//L3_ECMP_GROUP.*[256]: <URPF_COUNT=1,RSVD_COUNT=0,RSVD_BASE_PTR=0,RH_FLOW_SET_SIZE=0,RH_FLOW_SET_BASE=0,RESERVED_0=0,L3_OIF_7_TYPE=0,L3_OIF_7=0,L3_OIF_6_TYPE=0,L3_OIF_6=0,L3_OIF_5_TYPE=0,L3_OIF_5=0,L3_OIF_4_TYPE=0,L3_OIF_4=0,L3_OIF_3_TYPE=0,L3_OIF_3=0,L3_OIF_2_TYPE=0,L3_OIF_2=0,L3_OIF_1_TYPE=0,L3_OIF_1=0x46,L3_OIF_0_TYPE=0,L3_OIF_0=0x50,EVEN_PARITY_1=0,EVEN_PARITY_0=0,ENHANCED_HASHING_ENABLE=0,ECMP_GT8=0,COUNT=1,BASE_PTR=0x1000>
var getECMPMemberCount = regexp.MustCompile(`\,COUNT=(?P<count>[0x]?[[:alnum:]]+)`)
var getECMPBasePTR = regexp.MustCompile(`,BASE_PTR=(?P<baseptr>[0x]?[[:alnum:]]+)`)
func (re *RouteEntry) ParseECMPGroup() {
	if !re.ECMP {
		log.Println("Cannot parse ECMP group for a non-ECMP entry")
		return
	}
ecmpg, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_ECMP_GROUP %d %d", re.ECMPPTR, re.ECMPPTR),
})
if err != nil {
panic(err)
}
//fmt.Println(ecmpg)
if res, err := match(ecmpg, getECMPMemberCount); err != nil {
panic(err)
} else {
mcount, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
re.EG.MemberCount = mcount + 1
}
if res, err := match(ecmpg, getECMPBasePTR); err != nil {
panic(err)
} else {
base, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
re.EG.ECMPBasePTR = base
}
re.EG.ParseNexthop()
}
func (he *HostEntry) ParseECMPGroup() {
	if !he.ECMP {
		log.Println("Cannot parse ECMP group for a non-ECMP entry")
		return
	}
ecmpg, err := Dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf("scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_ECMP_GROUP %d %d", he.ECMPPTR, he.ECMPPTR),
})
if err != nil {
panic(err)
}
//fmt.Println(ecmpg)
if res, err := match(ecmpg, getECMPMemberCount); err != nil {
panic(err)
} else {
mcount, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
he.EG.MemberCount = mcount + 1
}
if res, err := match(ecmpg, getECMPBasePTR); err != nil {
panic(err)
} else {
base, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
he.EG.ECMPBasePTR = base
}
he.EG.ParseNexthop()
}
type FIB struct {
DB map[string]*RouteEntry
}
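// IsValid reports whether a dump line looks like a complete table entry
// rather than command echo or truncated output.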
func IsValid(es string) bool {
if es == "" || strings.Contains(es, "scontrol") {
return false
}
if !strings.Contains(es, "VALID") ||
!strings.Contains(es, "DST_DISCARD") ||
!strings.Contains(es, "NEXT_HOP_INDEX") ||
!strings.Contains(es, "KEY") ||
!strings.Contains(es, "ECMP") ||
!strings.Contains(es, "ECMP_PTR") {
return false
}
return true
}
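// FixMACAddress converts a raw hex token from the dump (with or without a
// leading "0x") into a colon-separated MAC address string.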
func FixMACAddress(s string) string {
	s = strings.TrimPrefix(s, "0x")
	// Left-pad to the full 12 hex digits of a MAC address.
	if len(s) < 12 {
		s = strings.Repeat("0", 12-len(s)) + s
	}
	f1, _ := strconv.ParseInt("0x"+s[:2], 0, 32)
	f2, _ := strconv.ParseInt("0x"+s[2:4], 0, 32)
	f3, _ := strconv.ParseInt("0x"+s[4:6], 0, 32)
	f4, _ := strconv.ParseInt("0x"+s[6:8], 0, 32)
	f5, _ := strconv.ParseInt("0x"+s[8:10], 0, 32)
	f6, _ := strconv.ParseInt("0x"+s[10:12], 0, 32)
	return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", f1, f2, f3, f4, f5, f6)
}
func FixIPv4Address(s string) net.IP {
	s = strings.TrimPrefix(s, "0x")
	// Left-pad to the full 8 hex digits of an IPv4 address.
	if len(s) < 8 {
		s = strings.Repeat("0", 8-len(s)) + s
	}
	f1, _ := strconv.ParseInt("0x"+s[:2], 0, 32)
	f2, _ := strconv.ParseInt("0x"+s[2:4], 0, 32)
	f3, _ := strconv.ParseInt("0x"+s[4:6], 0, 32)
	f4, _ := strconv.ParseInt("0x"+s[6:8], 0, 32)
	return net.IPv4(byte(f1), byte(f2), byte(f3), byte(f4))
}
func FixIPv4NetMask(s string) net.IPMask {
	s = strings.TrimPrefix(s, "0x")
	// Left-pad to the full 8 hex digits of an IPv4 netmask.
	if len(s) < 8 {
		s = strings.Repeat("0", 8-len(s)) + s
	}
	f1, _ := strconv.ParseInt("0x"+s[:2], 0, 32)
	f2, _ := strconv.ParseInt("0x"+s[2:4], 0, 32)
	f3, _ := strconv.ParseInt("0x"+s[4:6], 0, 32)
	f4, _ := strconv.ParseInt("0x"+s[6:8], 0, 32)
	return net.IPv4Mask(byte(f1), byte(f2), byte(f3), byte(f4))
}
func FixIPv6AddressDEFIP(s string) net.IP {
	s = strings.TrimPrefix(s, "0x")
	switch {
	case len(s) >= 17 && len(s) <= 31:
		// A full 128-bit value: left-pad to 32 hex digits.
		s = strings.Repeat("0", 32-len(s)) + s
	case len(s) >= 1 && len(s) <= 16:
		// Only the upper 64 bits are present: right-pad them to 16 digits
		// and append an all-zero lower half. (Note this right-pads, unlike
		// FixIPv6Address below, which left-pads the upper half.)
		s = s + strings.Repeat("0", 16-len(s)) + strings.Repeat("0", 16)
	}
	if len(s) != 32 {
		panic("Invalid IPv6 address to parse")
	}
	return net.ParseIP(s[:4] + ":" + s[4:8] + ":" + s[8:12] + ":" + s[12:16] + ":" + s[16:20] + ":" + s[20:24] + ":" + s[24:28] + ":" + s[28:32])
}
func FixIPv6Address(s string) net.IP {
	s = strings.TrimPrefix(s, "0x")
	switch {
	case len(s) >= 17 && len(s) <= 31:
		// A full 128-bit value: left-pad to 32 hex digits.
		s = strings.Repeat("0", 32-len(s)) + s
	case len(s) >= 1 && len(s) <= 16:
		// Only the upper 64 bits are present: left-pad them to 16 digits
		// and append an all-zero lower half.
		s = strings.Repeat("0", 16-len(s)) + s + strings.Repeat("0", 16)
	}
	if len(s) != 32 {
		panic("Invalid IPv6 address to parse")
	}
	return net.ParseIP(s[:4] + ":" + s[4:8] + ":" + s[8:12] + ":" + s[12:16] + ":" + s[16:20] + ":" + s[20:24] + ":" + s[24:28] + ":" + s[28:32])
}
//L3_DEFIP_ALPM_IPV4.*[360456]: <VALID=1,SRC_DISCARD=0,RPE=0,RESERVED_ECMP_PTR=0,RESERVED_0=0,PRI=0,NEXT_HOP_INDEX=1,LENGTH=0x18,KEY=0x46000000,HIT=0,EVEN_PARITY=0,ENTRY_ONLY=0x26118000002,ECMP_PTR=1,ECMP=0,DST_DISCARD=0,DEFAULTROUTE=0,DATA=2,CLASS_ID=0>
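// ParseRouteEntryString decodes one L3_DEFIP_ALPM dump line (see the sample
// above) into a RouteEntry; af selects IPv4 or IPv6 key decoding.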
func ParseRouteEntryString(es string, af int) (*RouteEntry, error) {
if !IsValid(es) {
return nil, errors.New("Invalid input string: " + es)
}
var Entry RouteEntry
if res, err := match(es, getEntryIndex); err != nil {
panic(err)
} else {
index, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
Entry.Index = index
}
if res, err := match(es, getValidBit); err != nil {
panic(err)
} else {
Entry.Valid = res
}
if res, err := match(es, getNextHopIndex); err != nil {
panic(err)
} else {
nhi, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
} else {
Entry.NextHopIndex = nhi
}
}
if res, err := match(es, getLength); err != nil {
panic(err)
} else {
length, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
Entry.Length = length
}
if res, err := match(es, getKey); err != nil {
panic(err)
} else {
if af == IPV4 {
Entry.AF = af
Entry.Key = FixIPv4Address(res)
} else if af == IPV6 {
Entry.AF = af
Entry.Key = FixIPv6Address(res)
} else {
panic("Unknown Address family")
}
}
if res, err := match(es, getHitBit); err != nil {
panic(err)
} else {
if res == "1" {
Entry.Hit = true
} else {
Entry.Hit = false
}
}
if res, err := match(es, getECMPPTR); err != nil {
panic(err)
} else {
ptr, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
Entry.ECMPPTR = ptr
}
if res, err := match(es, getECMP); err != nil {
panic(err)
} else {
if res == "0" {
Entry.ECMP = false
} else {
Entry.ECMP = true
}
}
if res, err := match(es, getSrcDiscard); err != nil {
panic(err)
} else {
if res == "0" {
Entry.SrcDiscard = false
} else {
Entry.SrcDiscard = true
}
}
if res, err := match(es, getDstDiscard); err != nil {
panic(err)
} else {
if res == "0" {
Entry.DstDiscard = false
} else {
Entry.DstDiscard = true
}
}
if res, err := match(es, getDefaultRoute); err != nil {
panic(err)
} else {
if res == "0" {
Entry.DefaultRoute = false
} else {
Entry.DefaultRoute = true
}
}
Entry.Validate()
return &Entry, nil
}
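// match extracts the single named capture group of r from s, returning an
// error when the pattern does not match.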
func match(s string, r *regexp.Regexp) (string, error) {
matches := r.FindStringSubmatch(s)
if len(matches) == 2 {
return matches[1], nil
}
return "", errors.New("Cannot match for string: " + s + " Re: " + r.String())
}
var DEFIPModeValueToString = map[int64]string{
0: "IPv4",
1: "IPv6(64)",
3: "IPv6(128)",
}
var IP = flag.String("ip", "10.71.20.115", "IP address of the remote device")
var Host = flag.String("hostname", "V8500", "Host name of the remote device")
var User = flag.String("username", "admin", "Username of the remote device")
var Password = flag.String("password", "", "Password of the remote device")
var SFU = flag.String("sfu", "A", "SFU (A/B)")
var Table = flag.String("table", "all", "Address family to dump (v4/v6/all/arp/nd/alpm/alpm4/alpm6/defip)")
func main() {
flag.Parse()
ip := net.ParseIP(*IP)
if ip == nil {
fmt.Printf("Invalid IP address: %s\n", *IP)
return
}
if *Host == "" {
fmt.Println("Invalid Host name")
return
}
if *User == "" {
fmt.Println("Invalidusername")
return
}
if *SFU != "A" && *SFU != "B" {
fmt.Printf("Invalid SFU: %s\n", *SFU)
return
}
if *Table != "v4" && *Table != "v6" && *Table != "all" && *Table != "arp" && *Table != "nd" && *Table != "alpm" && *Table != "alpm4" && *Table != "alpm6" && *Table != "defip" {
fmt.Println("Valid table is: v4/v6/all/arp/nd/alpm/alpm4/alpm6/defip")
return
}
dev, err := prepare(&rut.RUT{
Name: "DUT1",
Device: "V8",
IP: *IP,
Port: "23",
Username: *User,
Hostname: *Host,
Password: *Password,
})
if err != nil {
fmt.Printf("Cannot connect to :%s with error: %s", *IP, err.Error())
return
}
if *Table == "v4" {
DumpIPv4Entry(dev)
} else if *Table == "v6" {
DumpIPv6Entry(dev)
} else if *Table == "all" {
DumpIPv4Entry(dev)
DumpIPv6Entry(dev)
} else if *Table == "arp" {
DumpIPv4HostEntry(dev)
} else if *Table == "nd" {
DumpIPv6HostEntry(dev)
} else if *Table == "defip" {
DumpL3DEFIPEntry(dev)
} else if *Table == "alpm" {
DumpL3DEFIPALPMEntry(dev)
} else if *Table == "alpm4" {
DumpL3IPv4DEFIPEntry(dev)
} else if *Table == "alpm6" {
DumpL3IPv664DEFIPEntry(dev)
} else {
fmt.Println("Invalid table: ", *Table, " to dump")
}
}
//L3_ENTRY_IPV4_UNICAST.*[3456]: <VRF_ID=0,VALID=1,TRILL:TREE_ID=0,TRILL:RESERVED_104_43=0x800000,TRILL:KEY=0x20,TRILL:INGRESS_RBRIDGE_NICKNAME=1,TRILL:HASH_LSB=1,TRILL:EXPECTED_TGID=0x60,TRILL:EXPECTED_T=0,TRILL:EXPECTED_PORT_NUM=0x60,TRILL:EXPECTED_MODULE_ID=8,TRILL:DATA=0x460,RSVD_VRF_ID=0,RSVD_NEXT_HOP_INDEX=0,RPE=0,RMEP:RESERVED_104_83=0,RMEP:HASH_LSB=0,RESERVED_104_82=0,PRI=0,NEXT_HOP_INDEX=8,LOCAL_ADDRESS=0,LMEP:HASH_LSB=0,KEY_TYPE=0,KEY=0x8c0000020,IP_ADDR=0x46000001,IPV4UC:VRF_ID=0,IPV4UC:RSVD_VRF_ID=0,IPV4UC:RSVD_NEXT_HOP_INDEX=0,IPV4UC:RPE=0,IPV4UC:RESERVED_104_82=0,IPV4UC:PRI=0,IPV4UC:NEXT_HOP_INDEX=8,IPV4UC:LOCAL_ADDRESS=0,IPV4UC:KEY=0x8c0000020,IPV4UC:IP_ADDR=0x46000001,IPV4UC:HASH_LSB=1,IPV4UC:ECMP_PTR=8,IPV4UC:ECMP=0,IPV4UC:DST_DISCARD=0,IPV4UC:DATA=0x10000,IPV4UC:CLASS_ID=0,IPV4UC:BFD_ENABLE=0,HIT=1,HASH_LSB=1,FCOE:VRF_ID=0x46,FCOE:S_ID=1,FCOE:RSVD_VRF_ID=0,FCOE:RSVD_NEXT_HOP_INDEX=0,FCOE:RPE=0,FCOE:RESERVED_ECMP_PTR0=2,FCOE:RESERVED_104_73=0,FCOE:PRI=0,FCOE:NEXT_HOP_INDEX=0x800,FCOE:MASKED_D_ID=1,FCOE:LOCAL_ADDRESS=0,FCOE:KEY=0x8c0000020,FCOE:HASH_LSB=1,FCOE:ECMP_PTR0=0,FCOE:ECMP=0,FCOE:D_ID=1,FCOE:DST_DISCARD=0,FCOE:DATA=0x1000000,FCOE:CLASS_ID=0,EVEN_PARITY=0,ECMP_PTR=8,ECMP=0,DUMMY_V6=0,DUMMY_IPMC=0,DUMMY=0,DST_DISCARD=0,DATA=0x10000,CLASS_ID=0,BFD_ENABLE=0>
var getHostEntryNextHopIndex = regexp.MustCompile(`,IPV4UC:NEXT_HOP_INDEX=(?P<nhi>[0x]?[[:alnum:]]+)`)
var getIPv6HostEntryNextHopIndex = regexp.MustCompile(`,IPV6UC:NEXT_HOP_INDEX=(?P<nhi>[0x]?[[:alnum:]]+)`)
var getHostEntryIPv4Address = regexp.MustCompile(`,IPV4UC:IP_ADDR=(?P<key>[0x]?[[:alnum:]]+)`)
var getIPv6HostEntryAddressUpper = regexp.MustCompile(`,IPV6UC:IP_ADDR_UPR_64=(?P<key>[0x]?[[:alnum:]]+)`)
var getIPv6HostEntryAddressLower = regexp.MustCompile(`,IPV6UC:IP_ADDR_LWR_64=(?P<key>[0x]?[[:alnum:]]+)`)
var getHostEntryECMPPTR = regexp.MustCompile(`,IPV4UC:ECMP_PTR=(?P<ecmpptr>[0x]?[[:alnum:]]+)`)
var getIPv6HostEntryECMPPTR = regexp.MustCompile(`,IPV6UC:ECMP_PTR=(?P<ecmpptr>[0x]?[[:alnum:]]+)`)
var getHostEntryECMP = regexp.MustCompile(`,IPV4UC:ECMP=(?P<ecmpptr>[0x]?[[:alnum:]]+)`)
var getIPv6HostEntryECMP = regexp.MustCompile(`,IPV6UC:ECMP=(?P<ecmpptr>[0x]?[[:alnum:]]+)`)
var getHostEntryDstDiscard = regexp.MustCompile(`,IPV4UC:DST_DISCARD=(?P<dstdis>[0-9]+)`)
var getIPv6HostEntryDstDiscard = regexp.MustCompile(`,IPV6UC:DST_DISCARD=(?P<dstdis>[0-9]+)`)
var getHostEntryKeyType = regexp.MustCompile(`,KEY_TYPE=(?P<keytype>[0-9]+)`)
var getIPv6HostEntryKeyType = regexp.MustCompile(`,KEY_TYPE_1=(?P<keytype>[0-9]+)`)
var getIPv6HostEntryValidBit = regexp.MustCompile(`VALID_1=(?P<valid>[0-9]+)`)
var getIPv6HostEntryHitBit = regexp.MustCompile(`,HIT_1=(?P<hit>[0-9]+)`)
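// ParseHostEntryString decodes an L3_ENTRY_IPV4_UNICAST dump line into a
// HostEntry; host routes always carry a full-length prefix (/32 or /128).
// The IPv6 host table is handled separately by ParseIPv6HostEntryString.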
func ParseHostEntryString(es string, af int) (*HostEntry, error) {
if !IsValid(es) {
return nil, errors.New("Invalid input string: " + es)
}
var Entry HostEntry
if af == IPV4 {
Entry.Length = 32
} else {
Entry.Length = 128
}
if res, err := match(es, getEntryIndex); err != nil {
panic(err)
} else {
index, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
Entry.Index = index
}
if res, err := match(es, getValidBit); err != nil {
panic(err)
} else {
Entry.Valid = res
}
if res, err := match(es, getHostEntryNextHopIndex); err != nil {
panic(err)
} else {
nhi, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
} else {
Entry.NextHopIndex = nhi
}
}
if res, err := match(es, getHostEntryKeyType); err != nil {
panic(err)
} else {
kt, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
} else {
Entry.KeyType = kt
}
}
if res, err := match(es, getHostEntryIPv4Address); err != nil {
panic(err)
} else {
if af == IPV4 {
Entry.AF = af
Entry.IP = FixIPv4Address(res)
} else if af == IPV6 {
Entry.AF = af
Entry.IP = FixIPv6Address(res)
} else {
panic("Unknown Address family")
}
}
if res, err := match(es, getHitBit); err != nil {
panic(err)
} else {
if res == "1" {
Entry.Hit = true
} else {
Entry.Hit = false
}
}
if res, err := match(es, getHostEntryECMPPTR); err != nil {
panic(err)
} else {
ptr, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
Entry.ECMPPTR = ptr
}
if res, err := match(es, getHostEntryECMP); err != nil {
panic(err)
} else {
if res == "0" {
Entry.ECMP = false
} else {
Entry.ECMP = true
}
}
if res, err := match(es, getHostEntryDstDiscard); err != nil {
panic(err)
} else {
if res == "0" {
Entry.DstDiscard = false
} else {
Entry.DstDiscard = true
}
}
return &Entry, nil
}
func ParseIPv6HostEntryString(es string, af int) (*HostEntry, error) {
if !IsValid(es) {
return nil, errors.New("Invalid input string: " + es)
}
var Entry HostEntry
if af == IPV4 {
Entry.Length = 32
} else {
Entry.Length = 128
}
if res, err := match(es, getEntryIndex); err != nil {
panic(err)
} else {
index, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
Entry.Index = index
}
if res, err := match(es, getIPv6HostEntryValidBit); err != nil {
panic(err)
} else {
Entry.Valid = res
}
if res, err := match(es, getIPv6HostEntryNextHopIndex); err != nil {
panic(err)
} else {
nhi, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
} else {
Entry.NextHopIndex = nhi
}
}
if res, err := match(es, getIPv6HostEntryKeyType); err != nil {
panic(err)
} else {
kt, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
} else {
Entry.KeyType = kt
}
}
if upper, err := match(es, getIPv6HostEntryAddressUpper); err != nil {
panic(err)
} else {
if lower, err := match(es, getIPv6HostEntryAddressLower); err != nil {
panic(err)
} else {
if af == IPV6 {
Entry.AF = af
Entry.IP = FixIPv6Address(MakeIPv6Address(upper, lower))
} else {
panic("Unknown Address family")
}
}
}
if res, err := match(es, getIPv6HostEntryHitBit); err != nil {
panic(err)
} else {
if res == "1" {
Entry.Hit = true
} else {
Entry.Hit = false
}
}
if res, err := match(es, getIPv6HostEntryECMPPTR); err != nil {
panic(err)
} else {
ptr, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
Entry.ECMPPTR = ptr
}
if res, err := match(es, getIPv6HostEntryECMP); err != nil {
panic(err)
} else {
if res == "0" {
Entry.ECMP = false
} else {
Entry.ECMP = true
}
}
if res, err := match(es, getIPv6HostEntryDstDiscard); err != nil {
panic(err)
} else {
if res == "0" {
Entry.DstDiscard = false
} else {
Entry.DstDiscard = true
}
}
return &Entry, nil
}
func MakeIPv6Address(upper, lower string) string {
	lower = strings.TrimPrefix(lower, "0x")
	// Left-pad the lower 64 bits to exactly 16 hex digits so the
	// concatenation keeps the upper/lower halves aligned. (The original
	// if/else chain mis-padded lengths 8-10 to 15 digits and had a
	// duplicate len==8 branch.)
	if len(lower) < 16 {
		lower = strings.Repeat("0", 16-len(lower)) + lower
	}
	return upper + lower
}
func DumpIPv4HostEntry(dev *rut.RUT) {
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_ENTRY_IPV4_UNICAST 0 81919 | grep VALID=1",
})
if err != nil {
panic(err)
}
for _, l := range strings.Split(res, "\n") {
h, _ := ParseHostEntryString(l, IPV4)
if h != nil {
h.ParseNexthopInfo()
}
if h != nil && h.AF == IPV4 && h.KeyType == 0 {
fmt.Println(h)
}
}
}
func DumpIPv6HostEntry(dev *rut.RUT) {
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_ENTRY_IPV6_UNICAST 0 8191 | grep VALID_1=1",
})
if err != nil {
panic(err)
}
for _, l := range strings.Split(res, "\n") {
h, _ := ParseIPv6HostEntryString(l, IPV6)
if h != nil {
h.ParseNexthopInfo()
}
fmt.Println(h)
}
}
func DumpIPv4Entry(dev *rut.RUT) {
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_DEFIP_ALPM_IPV4 0 393215 | grep VALID=1",
})
if err != nil {
panic(err)
}
for _, l := range strings.Split(res, "\n") {
r, _ := ParseRouteEntryString(l, IPV4)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
}
}
func DumpIPv4EntryByIndex(dev *rut.RUT, index int) {
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: fmt.Sprintf(" scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_DEFIP_ALPM_IPV4 %d %d", index, index),
})
if err != nil {
panic(err)
}
if !strings.Contains(res, "VALID=1") {
fmt.Printf("Entry %d is invalid\n", index)
return
}
for _, l := range strings.Split(res, "\n") {
r, _ := ParseRouteEntryString(l, IPV4)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
}
}
func DumpIPv6Entry(dev *rut.RUT) {
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_DEFIP_ALPM_IPV6_64 0 262143 | grep VALID=1",
})
if err != nil {
panic(err)
}
for _, l := range strings.Split(res, "\n") {
r, _ := ParseRouteEntryString(l, IPV6)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
}
res, err = dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_DEFIP_ALPM_IPV6_128 0 131071 | grep VALID=1",
})
if err != nil {
panic(err)
}
for _, l := range strings.Split(res, "\n") {
r, _ := ParseRouteEntryString(l, IPV6)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
}
}
type L3DEFIPHalfEntry struct {
AF int
Index int64
VRF int64
Valid bool
SrcDiscard bool
DstDiscard bool
RPE int64
ReservedECMPPtr int64
Priority int64
NexthopIndex int64
NH Nexthop
EG ECMPGroup
Mode int64
//Mask int64
//Key int64
IPAddrMask net.IPMask
IPAddrMaskLen int
IPAddr net.IP
Hit bool
GlobalRoute bool
EntryType int64
ECMPPtr int64
ECMP bool
DefaultMiss bool
DefaultRoute bool
ALGHitIndex int64
ALGBktPtr int64
}
func (re *L3DEFIPHalfEntry) String() string {
var Out *color.Color
if re.Hit {
Out = color.New(color.FgGreen)
} else {
Out = color.New(color.FgRed)
}
if re.AF == IPV4 {
if re.IPAddrMaskLen > 32 || re.NexthopIndex > threshold.MaxNexthopIndex {
if re.IPAddrMaskLen > 32 {
return Out.Sprintf("[%6d]: %39s/%-3d >> %20s", re.Index, re.IPAddr, re.IPAddrMaskLen, "is not a valid IPv4 Address")
} else {
return Out.Sprintf("[%6d]: %39s/%-3d >> has a invalid nexthop index: %d", re.Index, re.IPAddr, re.IPAddrMaskLen, re.NexthopIndex)
}
} else if !re.ECMP {
return Out.Sprintf("[%6d]: %39s/%-3d (%5t|%5t) >> NH[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", re.Index, re.IPAddr, re.IPAddrMaskLen, re.DstDiscard, re.SrcDiscard, re.NexthopIndex, re.NH.OIF.MAC, re.NH.DstMac, re.NH.OIF.Index, re.NH.OIF.Vid, re.NH.DstPort)
} else {
base := Out.Sprintf("[%6d]: %39s/%-3d (%5t|%5t) >> is ECMP Route, ECMP_PTR: %5d, BASE_PTR: %5d, MemberCount: %2d", re.Index, re.IPAddr, re.IPAddrMaskLen, re.DstDiscard, re.SrcDiscard, re.ECMPPtr, re.EG.ECMPBasePTR, re.EG.MemberCount)
for i := 0; i < len(re.EG.Member); i++ {
base += "\n"
base += Out.Sprintf("%73s[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", "NH", re.EG.Member[i].Index, re.EG.Member[i].OIF.MAC, re.EG.Member[i].DstMac, re.EG.Member[i].OIF.Index, re.EG.Member[i].OIF.Vid, re.EG.Member[i].DstPort)
}
return base
}
} else if re.AF == IPV6 {
if re.IPAddrMaskLen > 128 || re.NexthopIndex > threshold.MaxNexthopIndex {
if re.IPAddrMaskLen > 128 {
return Out.Sprintf("[%6d]: %39s/%-3d >> %20s", re.Index, re.IPAddr, re.IPAddrMaskLen, "is not a valid IPv6 Address")
} else {
return Out.Sprintf("[%6d]: %39s/%-3d >> has a invalid nexthop index: %d", re.Index, re.IPAddr, re.IPAddrMaskLen, re.NexthopIndex)
}
} else if !re.ECMP {
return Out.Sprintf("[%6d]: %39s/%-3d (%5t|%5t) >> NH[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", re.Index, re.IPAddr, re.IPAddrMaskLen, re.DstDiscard, re.SrcDiscard, re.NexthopIndex, re.NH.OIF.MAC, re.NH.DstMac, re.NH.OIF.Index, re.NH.OIF.Vid, re.NH.DstPort)
} else {
base := Out.Sprintf("[%6d]: %39s/%-3d (%5t|%5t) >> is ECMP Route, ECMP_PTR: %5d, BASE_PTR: %5d, MemberCount: %2d", re.Index, re.IPAddr, re.IPAddrMaskLen, re.DstDiscard, re.SrcDiscard, re.ECMPPtr, re.EG.ECMPBasePTR, re.EG.MemberCount)
for i := 0; i < len(re.EG.Member); i++ {
base += "\n"
base += Out.Sprintf("%73s[%5d]:{SMAC: %17s DMAC: %17s OIF: %4d VID: %4d DPORT: %3d}", "NH", re.EG.Member[i].Index, re.EG.Member[i].OIF.MAC, re.EG.Member[i].DstMac, re.EG.Member[i].OIF.Index, re.EG.Member[i].OIF.Vid, re.EG.Member[i].DstPort)
}
return base
}
}
return fmt.Sprintf("Invalid route entry: %s/%d\n", re.IPAddr, re.IPAddrMaskLen)
// return ""
}
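// HalfEntryFields lists the per-half field names matched against a DEFIP dump
// line; the leading comma on ",MODE" and ",ECMP_PTR" presumably keeps them
// from also matching longer names that merely end in those strings
// (e.g. RESERVED_ECMP_PTR).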
var HalfEntryFields = []string{
"Index",
"VRF_ID_",
"VALID",
"SRC_DISCARD",
"DST_DISCARD",
"RPE",
"PRI",
"RESERVED_ECMP_PTR",
"NEXT_HOP_INDEX",
",MODE",
//"MASK",
//"KEY",
"IP_ADDR_MASK",
"IP_ADDR",
"HIT",
"GLOBAL_ROUTE",
"ENTRY_TYPE",
",ECMP_PTR",
"ECMP",
"DEFAULT_MISS",
"DEFAULT_ROUTE",
"ALG_HIT_IDX",
"ALG_BKT_PTR",
}
var ip1r = regexp.MustCompile("IP_ADDR1=(?P<f>[0x]?[[:alnum:]]+)")
var ip0r = regexp.MustCompile("IP_ADDR0=(?P<f>[0x]?[[:alnum:]]+)")
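// ParseIPv6BestPrefixRouteFromDEFIP rebuilds the IPv6 prefix stored across
// the IP_ADDR1 (upper) and IP_ADDR0 (lower) halves of a DEFIP entry.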
func ParseIPv6BestPrefixRouteFromDEFIP(entry string, index int) (net.IP, error) {
var ip1 string
var ip0 string
if matches := ip1r.FindStringSubmatch(entry); len(matches) == 2 {
ip1 = matches[1]
if strings.HasPrefix(ip1, "0x") {
ip1 = ip1[2:]
}
}
if matches := ip0r.FindStringSubmatch(entry); len(matches) == 2 {
ip0 = matches[1]
if strings.HasPrefix(ip0, "0x") {
ip0 = ip0[2:]
}
}
ip := ip1 + ip0
nip := FixIPv6AddressDEFIP(ip)
return nip, nil
}
var ipmr0 = regexp.MustCompile("IP_ADDR_MASK0=(?P<f>[0x]?[[:alnum:]]+)")
var ipmr1 = regexp.MustCompile("IP_ADDR_MASK1=(?P<f>[0x]?[[:alnum:]]+)")
var HexToBin = map[rune]string{
'0': "0000",
'1': "0001",
'2': "0010",
'3': "0011",
'4': "0100",
'5': "0101",
'6': "0110",
'7': "0111",
'8': "1000",
'9': "1001",
'a': "1010",
'b': "1011",
'c': "1100",
'd': "1101",
'e': "1110",
'f': "1111",
}
func GetLeadingOnes(entry string) int {
var res string
for _, c := range entry {
if c != 'f' && c != 'e' && c != 'c' && c != '8' && c != '0' {
panic("Invalid mask")
}
res += HexToBin[c]
}
var count int
for _, c := range res {
if c == '1' {
count++
}
}
return count
}
func ParseIPv6BestPrefixRouteMaskLengthFromDEFIP(entry string, index int) (int, error) {
var ipm1 string
var ipm0 string
if matches := ipmr1.FindStringSubmatch(entry); len(matches) == 2 {
ipm1 = matches[1]
if strings.HasPrefix(ipm1, "0x") {
ipm1 = ipm1[2:]
}
}
if matches := ipmr0.FindStringSubmatch(entry); len(matches) == 2 {
ipm0 = matches[1]
if strings.HasPrefix(ipm0, "0x") {
ipm0 = ipm0[2:]
}
}
ipm := ipm1 + ipm0
return GetLeadingOnes(ipm), nil
}
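// DumpL3DEFIPHalfEntry parses one half (index 0 or 1) of a double-wide
// L3_DEFIP TCAM entry. For IPv6 modes the prefix spans both halves, so
// several index-1 lookups fall back to the *0 register names.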
func DumpL3DEFIPHalfEntry(entry string, index int) (*L3DEFIPHalfEntry, error) {
fieldsReg := make(map[string]*regexp.Regexp, len(HalfEntryFields))
for _, f := range HalfEntryFields {
fieldsReg[f] = regexp.MustCompile(fmt.Sprintf("%s%d=(?P<f>[0x]?[[:alnum:]]+)", f, index))
}
/*
var getHalfEntryVRF = regexp.MustCompile(fmt.Sprintf("VRF_ID_%d=[0x]*(?P<vrf>[0-9]+)", index))
var getHalfEntryValid = regexp.MustCompile(fmt.Sprintf("VALID%d=(?P<valid>[0-9]+)", index))
var getHalfEntrySrcDiscard = regexp.MustCompile(fmt.Sprintf("SRC_DISCARD%d=(?P<sd>[0-9]+)", index))
var getHalfEntryDstDiscard = regexp.MustCompile(fmt.Sprintf("DST_DISCARD%d=(?P<dd>[0-9]+)", index))
var getHalfEntryRPE = regexp.MustCompile(fmt.Sprintf("RPE%d=(?P<rpe>[[:alnum:]]+)", index))
var getHalfEntryReservedECMPPtr = regexp.MustCompile(fmt.Sprintf("RESERVED_ECMP_PTR%d=[0x]*(?P<rep>[[:alnum:]]+)", index))
var getHalfEntryPriority = regexp.MustCompile(fmt.Sprintf("PRI%d=[0x]*(?P<pri>[0-9]+)", index))
var getHalfEntryNexthopIndex = regexp.MustCompile(fmt.Sprintf("NEXT_HOP_INDEX%d=[0x]?(?P<nhi>[[:alnum:]]+)", index))
var getHalfEntryMode = regexp.MustCompile(fmt.Sprintf("MODE%d=[0x]?(?P<mode>([[:alnum:]]+)", index))
var getHalfEntryMask = regexp.MustCompile(fmt.Sprintf("MASK%d=[0x]?(?P<mask>([[:alnum:]]+)", index))
var getHalfEntryKey = regexp.MustCompile(fmt.Sprintf("KEY%d=[0x]?(?P<key>[[:alnum:]]+)", index))
var getHalfEntryIPAddrMask = regexp.MustCompile(fmt.Sprintf("IP_ADDR_MASK%d=[0x]?(?P<mask>([[:alnum:]]+)", index))
var getHalfEntryIPAddr = regexp.MustCompile(fmt.Sprintf("IP_ADDR%d=[0x]?(?P<ip>([[:alnum:]]+)", index))
var getHalfEntryHit = regexp.MustCompile(fmt.Sprintf("HIT%d=(?P<hit>([[:alnum:]]+)", index))
var getHalfEntryGlobalRoute = regexp.MustCompile(fmt.Sprintf("GLOBAL_ROUTE%d=(?P<gr>([[:alnum:]]+)", index))
var getHalfEntryEntryType = regexp.MustCompile(fmt.Sprintf("ENTRY_TYPE%d=[0x]?(?P<et>([[:alnum:]]+)", index))
var getHalfEntryECMPPtr = regexp.MustCompile(fmt.Sprintf("ECMP_PTR%d=[0x]?(?P<ept>([[:alnum:]]+)", index))
var getHalfEntryECMP = regexp.MustCompile(fmt.Sprintf("ECMP%d=(?P<ecmp>([[:alnum:]]+)", index))
var getHalfEntryDefaultMiss = regexp.MustCompile(fmt.Sprintf("DEFAULT_MISS%d=(?P<dfm>([[:alnum:]]+)", index))
var getHalfEntryDefaultRoute = regexp.MustCompile(fmt.Sprintf("DEFAULT_ROUTE%d=(?P<dfr>([[:alnum:]]+)", index))
var getHalfEntryALGHitIndex = regexp.MustCompile(fmt.Sprintf("ALG_HIT_IDX%d=[0x]?(?P<ahi>([[:alnum:]]+)", index))
var getHalfEntryALGBktPtr = regexp.MustCompile(fmt.Sprintf("ALG_BKT_PTR%d=[0x]?(?P<abp>([[:alnum:]]+)", index))
*/
en := &L3DEFIPHalfEntry{}
if res, err := match(entry, getEntryIndex); err != nil {
panic(err)
} else {
index, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
en.Index = index
}
for k, v := range fieldsReg {
if matches := v.FindStringSubmatch(entry); len(matches) == 2 {
//fmt.Printf("%20s : %10s\n", k, matches[1])
switch k {
case "VRF_ID_":
vrf, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
en.VRF = vrf
case "VALID":
valid, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
if valid == 1 {
en.Valid = true
} else {
en.Valid = false
}
case "SRC_DISCARD":
sd, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
if sd == 1 {
en.SrcDiscard = true
} else {
en.SrcDiscard = false
}
case "DST_DISCARD":
dd, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
if dd == 1 {
en.DstDiscard = true
} else {
en.DstDiscard = false
}
case "RPE":
rpe, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
en.RPE = rpe
case "PRI":
pri, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
en.Priority = pri
case "RESERVED_ECMP_PTR":
rep, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
en.ReservedECMPPtr = rep
/*
case "NEXT_HOP_INDEX":
nhi, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
en.NexthopIndex = nhi
nh, err := ParseNexthopByIndex(en.NexthopIndex)
if err != nil {
panic(err)
}
en.NH = *nh
*/
case ",MODE":
mode, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
en.Mode = mode
var ipr *regexp.Regexp
if index == 0 {
ipr = regexp.MustCompile("IP_ADDR0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
ipr = regexp.MustCompile("IP_ADDR1=(?P<f>[0x]?[[:alnum:]]+)")
}
if ms := ipr.FindStringSubmatch(entry); len(ms) == 2 {
if en.Mode == 0 {
en.AF = IPV4
en.IPAddr = FixIPv4Address(ms[1])
} else {
en.AF = IPV6
en.IPAddr, _ = ParseIPv6BestPrefixRouteFromDEFIP(entry, index)
}
}
//ip mask
var ipmr *regexp.Regexp
if index == 0 {
ipmr = regexp.MustCompile("IP_ADDR_MASK0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
ipmr = regexp.MustCompile("IP_ADDR_MASK1=(?P<f>[0x]?[[:alnum:]]+)")
}
if ms := ipmr.FindStringSubmatch(entry); len(ms) == 2 {
if en.Mode == 0 {
en.IPAddrMask = FixIPv4NetMask(ms[1])
en.IPAddrMaskLen, _ = en.IPAddrMask.Size()
} else {
en.IPAddrMaskLen, _ = ParseIPv6BestPrefixRouteMaskLengthFromDEFIP(entry, index)
}
}
//ALG_BKT_PTR
var abpr *regexp.Regexp
if index == 0 {
abpr = regexp.MustCompile("ALG_BKT_PTR0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
if en.Mode == 1 || en.Mode == 3 { //IPv6 64 && IPv6 128
abpr = regexp.MustCompile("ALG_BKT_PTR0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
abpr = regexp.MustCompile("ALG_BKT_PTR1=(?P<f>[0x]?[[:alnum:]]+)")
}
}
if ms := abpr.FindStringSubmatch(entry); len(ms) == 2 {
abp, err := strconv.ParseInt(ms[1], 0, 64)
if err != nil {
panic(err)
}
en.ALGBktPtr = abp
}
//Nexthop.
var nhpr *regexp.Regexp
if index == 0 {
nhpr = regexp.MustCompile("NEXT_HOP_INDEX0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
if en.Mode == 1 || en.Mode == 3 {
nhpr = regexp.MustCompile("NEXT_HOP_INDEX0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
nhpr = regexp.MustCompile("NEXT_HOP_INDEX1=(?P<f>[0x]?[[:alnum:]]+)")
}
}
if ms := nhpr.FindStringSubmatch(entry); len(ms) == 2 {
nhi, err := strconv.ParseInt(ms[1], 0, 64)
if err != nil {
panic(err)
}
en.NexthopIndex = nhi
nh, err := ParseNexthopByIndex(en.NexthopIndex)
if err != nil {
panic(err)
}
en.NH = *nh
}
//ecmp
var ecmpr *regexp.Regexp
if index == 0 {
ecmpr = regexp.MustCompile("ECMP0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
if en.Mode == 1 || en.Mode == 3 {
ecmpr = regexp.MustCompile("ECMP0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
ecmpr = regexp.MustCompile("ECMP1=(?P<f>[0x]?[[:alnum:]]+)")
}
}
if ms := ecmpr.FindStringSubmatch(entry); len(ms) == 2 {
ecmp, err := strconv.ParseInt(ms[1], 0, 64)
if err != nil {
panic(err)
}
if ecmp == 1 {
en.ECMP = true
} else {
en.ECMP = false
}
}
//ecmp_ptr
var ecmpptrr *regexp.Regexp
if index == 0 {
ecmpptrr = regexp.MustCompile(",ECMP_PTR0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
if en.Mode == 1 || en.Mode == 3 {
ecmpptrr = regexp.MustCompile(",ECMP_PTR0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
ecmpptrr = regexp.MustCompile(",ECMP_PTR1=(?P<f>[0x]?[[:alnum:]]+)")
}
}
if ms := ecmpptrr.FindStringSubmatch(entry); len(ms) == 2 {
ep, err := strconv.ParseInt(ms[1], 0, 64)
if err != nil {
panic(err)
}
en.ECMPPtr = ep
en.EG, _ = DumpECMPGroupByIndex(en.ECMPPtr)
}
//GLOBAL route
var grr *regexp.Regexp
if index == 0 {
grr = regexp.MustCompile("GLOBAL_ROUTE0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
if en.Mode == 1 || en.Mode == 3 {
grr = regexp.MustCompile("GLOBAL_ROUTE0=(?P<f>[0x]?[[:alnum:]]+)")
} else {
grr = regexp.MustCompile("GLOBAL_ROUTE1=(?P<f>[0x]?[[:alnum:]]+)")
}
}
if ms := grr.FindStringSubmatch(entry); len(ms) == 2 {
gr, err := strconv.ParseInt(ms[1], 0, 64)
if err != nil {
panic(err)
}
if gr == 1 {
en.GlobalRoute = true
} else {
en.GlobalRoute = false
}
}
//ecmp_ptr
/*
case "IP_ADDR_MASK":
en.IPAddrMask = FixIPv4NetMask(matches[1])
en.IPAddrMaskLen, _ = en.IPAddrMask.Size()
case "IP_ADDR":
fmt.Println("[", en.Mode, "]", entry)
if en.Mode == 0 {
en.AF = IPV4
en.IPAddr = FixIPv4Address(matches[1])
} else {
en.AF = IPV6
en.IPAddr, _ = ParseIPv6BestPrefixRouteFromDEFIP(entry, index)
}
*/
case "HIT":
hit, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
if hit == 1 {
en.Hit = true
} else {
en.Hit = false
}
/*
case "GLOBAL_ROUTE":
gr, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
if gr == 1 {
en.GlobalRoute = true
} else {
en.GlobalRoute = false
}
*/
case "ENTRY_TYPE":
et, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
en.EntryType = et
/*
case ",ECMP_PTR":
ep, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
en.ECMPPtr = ep
en.EG, _ = DumpECMPGroupByIndex(en.ECMPPtr)
case "ECMP":
ecmp, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
if ecmp == 1 {
en.ECMP = true
} else {
en.ECMP = false
}
*/
case "DEFAULT_MISS":
dm, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
if dm == 1 {
en.DefaultMiss = true
} else {
en.DefaultMiss = false
}
case "DEFAULT_ROUTE":
dr, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
if dr == 1 {
en.DefaultRoute = true
} else {
en.DefaultRoute = false
}
case "ALG_HIT_IDX":
ahi, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
en.ALGHitIndex = ahi
/*
case "ALG_BKT_PTR":
abp, err := strconv.ParseInt(matches[1], 0, 64)
if err != nil {
panic(err)
}
en.ALGBktPtr = abp
*/
}
}
}
//fmt.Printf("%s", en)
return en, nil
}
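// ALPM caches: valid dump lines keyed by hardware entry index, filled by the
// Dump*DB helpers below and consulted while walking L3_DEFIP buckets.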
var ALPMIPv4 map[int64]string
var ALPMIPv664 map[int64]string
var ALPMIPv6128 map[int64]string
func DumpALPMIPv4DB(dev *rut.RUT) {
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_DEFIP_ALPM_IPV4 0 393215 | grep VALID=1",
})
if err != nil {
panic(err)
}
ALPMIPv4 = make(map[int64]string, 393215)
for _, l := range strings.Split(res, "\n") {
if res, err := match(l, getEntryIndex); err != nil {
//fmt.Println("Cannot get entry index of: ", l)
continue
} else {
index, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
ALPMIPv4[index] = l
}
}
}
func DumpALPMIPv664DB(dev *rut.RUT) {
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_DEFIP_ALPM_IPV6_64 0 262143 | grep VALID=1",
})
if err != nil {
panic(err)
}
ALPMIPv664 = make(map[int64]string, 262144)
for _, l := range strings.Split(res, "\n") {
if res, err := match(l, getEntryIndex); err != nil {
//fmt.Println("Cannot get entry index of: ", l)
continue
} else {
index, err := strconv.ParseInt(res, 0, 64)
if err != nil {
panic(err)
}
ALPMIPv664[index] = l
}
}
}
func DumpL3DEFIPEntry(dev *rut.RUT) {
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_DEFIP 0 8191 | grep -E 'VALID(0|1)=1'",
})
if err != nil {
panic(err)
}
for _, l := range strings.Split(res, "\n") {
if strings.Contains(l, "VALID0=1") {
en, _ := DumpL3DEFIPHalfEntry(l, 0)
//Currently just dump ipv4 entry for this half entry
if en.Mode == 0 {
fmt.Printf("%s\n", en)
}
}
if strings.Contains(l, "VALID1=1") {
en, _ := DumpL3DEFIPHalfEntry(l, 1)
/* if en.Mode != 0 {
//Currently just dump IPv4 entry.
continue
}
*/
if en.Mode == 0 {
fmt.Printf("%s\n", en)
}
}
if !strings.Contains(l, "VALID1=1") && !strings.Contains(l, "VALID0=1") {
//fmt.Println("Invalid result: ", l)
}
/*
r, _ := ParseRouteEntryString(l, IPV6)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
*/
}
}
func DumpL3DEFIPALPMEntry(dev *rut.RUT) {
DumpALPMIPv4DB(dev)
DumpALPMIPv664DB(dev)
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_DEFIP 0 8191 | grep -E 'VALID(0|1)=1'",
})
if err != nil {
panic(err)
}
for _, l := range strings.Split(res, "\n") {
if strings.Contains(l, "VALID0=1") {
en, _ := DumpL3DEFIPHalfEntry(l, 0)
//Currently just dump ipv4 entry for this half entry
if en.Mode == 0 {
fmt.Printf("<<<Root[%d(0)(%6s)]: %s/%d (0x%-4x) Global: %5t -> VRF : %2d)>>>: \n", en.Index, DEFIPModeValueToString[en.Mode], en.IPAddr, en.IPAddrMaskLen, en.ALGBktPtr, en.GlobalRoute, en.VRF)
fmt.Printf("%s\n", en)
for bank := 0; bank < 4; bank++ {
for eindex := 0; eindex < 6; eindex++ {
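// The ALPM entry index appears to pack the in-bucket entry number into
// bits 16 and up, the bucket pointer into bits 2-15 and the bank into
// the low two bits.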
alpmidx := eindex<<16 | int(en.ALGBktPtr<<2) | bank
//DumpIPv4EntryByIndex(dev, alpmidx)
if e, ok := ALPMIPv4[int64(alpmidx)]; !ok {
//Do not display invalid entries.
//fmt.Printf("Entry %d does not exist\n", alpmidx)
} else {
if !strings.Contains(e, "VALID=1") {
fmt.Printf("Entry %d is invalid\n", alpmidx)
continue
}
r, _ := ParseRouteEntryString(e, IPV4)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
}
}
}
}
}
if strings.Contains(l, "VALID1=1") {
en, _ := DumpL3DEFIPHalfEntry(l, 1)
/* if en.Mode != 0 {
//Currently just dump IPv4 entry.
continue
}
*/
fmt.Printf("<<<Root[%d(1)(%6s)]: %s/%d (0x%-4x) Global: %5t -> VRF : %2d)>>>: \n", en.Index, DEFIPModeValueToString[en.Mode], en.IPAddr, en.IPAddrMaskLen, en.ALGBktPtr, en.GlobalRoute, en.VRF)
fmt.Printf("%s\n", en)
if en.Mode == 0 {
for bank := 0; bank < 4; bank++ {
for eindex := 0; eindex < 6; eindex++ {
alpmidx := eindex<<16 | int(en.ALGBktPtr<<2) | bank
//DumpIPv4EntryByIndex(dev, alpmidx)
if e, ok := ALPMIPv4[int64(alpmidx)]; !ok {
//Do not display invalid entries.
//fmt.Printf("Entry %d does not exist\n", alpmidx)
} else {
if !strings.Contains(e, "VALID=1") {
fmt.Printf("Entry %d is invalid\n", alpmidx)
continue
}
r, _ := ParseRouteEntryString(e, IPV4)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
}
}
}
} else if en.Mode == 1 {
for bank := 0; bank < 4; bank++ {
for eindex := 0; eindex < 4; eindex++ {
alpmidx := eindex<<16 | int(en.ALGBktPtr<<2) | bank
//DumpIPv4EntryByIndex(dev, alpmidx)
if e, ok := ALPMIPv664[int64(alpmidx)]; !ok {
//Do not display invalid entries.
//fmt.Printf("Entry %d does not exist\n", alpmidx)
} else {
if !strings.Contains(e, "VALID=1") {
fmt.Printf("Entry %d is invalid\n", alpmidx)
continue
}
//fmt.Println(e)
r, _ := ParseRouteEntryString(e, IPV6)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
}
}
}
}
}
if !strings.Contains(l, "VALID1=1") && !strings.Contains(l, "VALID0=1") {
//fmt.Println("Invalid result: ", l)
}
/*
r, _ := ParseRouteEntryString(l, IPV6)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
*/
}
}
func DumpL3IPv4DEFIPEntry(dev *rut.RUT) {
DumpALPMIPv4DB(dev)
DumpALPMIPv664DB(dev)
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_DEFIP 0 8191 | grep -E 'VALID(0|1)=1'",
})
if err != nil {
panic(err)
}
for _, l := range strings.Split(res, "\n") {
if strings.Contains(l, "VALID0=1") {
en, _ := DumpL3DEFIPHalfEntry(l, 0)
//Currently just dump the IPv4 entry for this half entry
if en.Mode == 0 {
fmt.Printf("<<<Root[%d(0)(%6s)]: %32s/%2d (0x%-4x) Global: %5t -> VRF : %2d)>>>: \n", en.Index, DEFIPModeValueToString[en.Mode], en.IPAddr, en.IPAddrMaskLen, en.ALGBktPtr, en.GlobalRoute, en.VRF)
fmt.Printf("%s\n", en)
for bank := 0; bank < 4; bank++ {
for eindex := 0; eindex < 6; eindex++ {
alpmidx := eindex<<16 | int(en.ALGBktPtr<<2) | bank
//DumpIPv4EntryByIndex(dev, alpmidx)
if e, ok := ALPMIPv4[int64(alpmidx)]; !ok {
//Do not display invalid entries.
//fmt.Printf("Entry %d does not exist\n", alpmidx)
} else {
if !strings.Contains(e, "VALID=1") {
fmt.Printf("Entry %d is invalid\n", alpmidx)
continue
}
r, _ := ParseRouteEntryString(e, IPV4)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
}
}
}
}
}
if strings.Contains(l, "VALID1=1") {
en, _ := DumpL3DEFIPHalfEntry(l, 1)
/* if en.Mode != 0 {
//Currently just dump IPv4 entry.
continue
}
*/
if en.Mode == 0 {
fmt.Printf("<<<Root[%d(1)(%6s)]: %32s/%2d (0x%-4x) Global: %5t -> VRF : %2d)>>>: \n", en.Index, DEFIPModeValueToString[en.Mode], en.IPAddr, en.IPAddrMaskLen, en.ALGBktPtr, en.GlobalRoute, en.VRF)
fmt.Printf("%s\n", en)
for bank := 0; bank < 4; bank++ {
for eindex := 0; eindex < 6; eindex++ {
alpmidx := eindex<<16 | int(en.ALGBktPtr<<2) | bank
//DumpIPv4EntryByIndex(dev, alpmidx)
if e, ok := ALPMIPv4[int64(alpmidx)]; !ok {
//Do not display invalid entries.
//fmt.Printf("Entry %d does not exist\n", alpmidx)
} else {
if !strings.Contains(e, "VALID=1") {
fmt.Printf("Entry %d is invalid\n", alpmidx)
continue
}
r, _ := ParseRouteEntryString(e, IPV4)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
}
}
}
}
}
if !strings.Contains(l, "VALID1=1") && !strings.Contains(l, "VALID0=1") {
//fmt.Println("Invalid result: ", l)
}
/*
r, _ := ParseRouteEntryString(l, IPV6)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
*/
}
}
func DumpL3IPv664DEFIPEntry(dev *rut.RUT) {
DumpALPMIPv4DB(dev)
DumpALPMIPv664DB(dev)
res, err := dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: " scontrol -f /proc/switch/ASIC/ctrl dump table 0 L3_DEFIP 0 8191 | grep -E 'VALID(0|1)=1'",
})
if err != nil {
panic(err)
}
for _, l := range strings.Split(res, "\n") {
if strings.Contains(l, "VALID1=1") {
en, _ := DumpL3DEFIPHalfEntry(l, 1)
/* if en.Mode != 0 {
//Currently just dump IPv4 entry.
continue
}
*/
if en.Mode == 1 {
fmt.Printf("<<<Root[%d(1)(%6s)]: %24s/%2d (0x%-4x) Global: %5t -> VRF : %2d)>>>: \n", en.Index, DEFIPModeValueToString[en.Mode], en.IPAddr, en.IPAddrMaskLen, en.ALGBktPtr, en.GlobalRoute, en.VRF)
fmt.Printf("%s\n", en)
for bank := 0; bank < 4; bank++ {
for eindex := 0; eindex < 4; eindex++ {
alpmidx := eindex<<16 | int(en.ALGBktPtr<<2) | bank
//DumpIPv4EntryByIndex(dev, alpmidx)
if e, ok := ALPMIPv664[int64(alpmidx)]; !ok {
//Do not display invalid entries.
//fmt.Printf("Entry %d does not exist\n", alpmidx)
} else {
if !strings.Contains(e, "VALID=1") {
fmt.Printf("Entry %d is invalid\n", alpmidx)
continue
}
//fmt.Println(e)
r, _ := ParseRouteEntryString(e, IPV6)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
}
}
}
}
}
if !strings.Contains(l, "VALID1=1") && !strings.Contains(l, "VALID0=1") {
//fmt.Println("Invalid result: ", l)
}
/*
r, _ := ParseRouteEntryString(l, IPV6)
if r != nil {
r.ParseNexthopInfo()
}
fmt.Println(r)
*/
}
}
func prepare(r *rut.RUT) (*rut.RUT, error) {
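//Session setup: create and init the device, disable paging, enter config mode, drop to a Linux shell, then sanity-check with ls.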
dev, err := rut.New(r)
if err != nil {
return nil, err
}
err = dev.Init()
if err != nil {
return nil, err
}
Dev = dev
_, err = dev.RunCommand(CTX, &command.Command{
Mode: "normal",
CMD: "terminal length 0",
})
if err != nil {
return nil, err
}
_, err = dev.RunCommand(CTX, &command.Command{
Mode: "normal",
CMD: "configure terminal",
})
if err != nil {
return nil, err
}
_, err = dev.RunCommand(CTX, &command.Command{
Mode: "config",
CMD: "do q sh -l",
})
if err != nil {
return nil, err
}
_, err = dev.RunCommand(CTX, &command.Command{
Mode: "shell",
CMD: "ls -al",
})
if err != nil {
return nil, err
}
return dev, nil
}
|
{
panic(err)
}
|
json-sample-polyline.js
|
polyline = {
"vertices" : {
[0,0,0],
|
[0,100,0],
[100,100,0],
[100,0,0]
],
"numberFloors": "none",
"buildingType": "office"
}
| |
greater_than_exercise.py
|
def greater_than(x, y):
if x > y:
return True
else:
|
a = 2
b = 3
result = greater_than(a, b)
print("{} is greater than {}: {}".format(a, b, result))
|
return False
|
types.rs
|
use serde::{Deserialize, Serialize};
use std::fmt::Debug;
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Quote {
pub quote_id: String,
pub description: String,
pub ln_invoice: String,
pub expiration: String,
pub expiration_in_sec: i64,
pub source_amount: SourceAmount,
pub target_amount: TargetAmount,
pub conversion_rate: ConversionRate,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SourceAmount {
pub amount: String,
pub currency: String,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TargetAmount {
pub amount: String,
pub currency: String,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ConversionRate {
pub amount: String,
pub source_currency: String,
pub target_currency: String,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Invoices {
pub items: Vec<Invoice>,
pub count: i64,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Invoice {
pub invoice_id: String,
pub amount: Amount,
pub state: String,
pub created: String,
pub description: String,
pub issuer_id: String,
pub receiver_id: String,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Amount {
pub currency: String,
pub amount: String,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Subscription {
pub id: String,
pub webhook_url: String,
pub webhook_version: String,
pub enabled: bool,
pub created: String,
#[serde(skip_deserializing)]
pub secret: String,
pub event_types: Vec<Event>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum
|
{
#[serde(rename = "invoice.created")]
InvoiceCreated,
#[serde(rename = "invoice.updated")]
InvoiceUpdated,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Rate {
pub amount: String,
pub source_currency: String,
pub target_currency: String,
}
fn none_string() -> String {
String::from("None")
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Account {
pub handle: String,
#[serde(default = "none_string")]
pub avatar_url: String,
#[serde(default = "none_string")]
pub description: String,
pub can_receive: bool,
pub currencies: Vec<Currency>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Currency {
pub currency: String,
pub is_default_currency: bool,
pub is_available: bool,
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
#[test]
fn test_deserialize_subscription() {
let subscription: Subscription = serde_json::from_str(
r#"{
"id": "4d0081e2-5355-411b-b0e4-ee5ff1b691d1",
"webhookUrl": "https://kramerica_industries.com/webhook",
"webhookVersion": "v1",
"enabled": true,
"created": "2022-02-23T18:29:18.773+00:00",
"eventTypes": [
"invoice.created",
"invoice.updated"
]
}"#,
)
.unwrap();
assert_eq!(
subscription,
Subscription {
id: "4d0081e2-5355-411b-b0e4-ee5ff1b691d1".to_string(),
webhook_url: "https://kramerica_industries.com/webhook".to_string(),
webhook_version: "v1".to_string(),
enabled: true,
secret: "".to_string(),
created: "2022-02-23T18:29:18.773+00:00".to_string(),
event_types: vec![Event::InvoiceCreated, Event::InvoiceUpdated],
}
);
}
}
|
Event
|
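A brief, hedged illustration of what the #[serde(rename = "...")] attributes on the completed Event enum provide (assumes serde_json, as already used in the test module above): unit variants round-trip as the dotted API strings.
let e: Event = serde_json::from_str(r#""invoice.created""#).unwrap();
assert_eq!(e, Event::InvoiceCreated);
assert_eq!(serde_json::to_string(&e).unwrap(), r#""invoice.created""#);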
send_email_file.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 18:15:59 2018
@author: Chat
"""
import smtplib
def sendemail(from_addr, to_addr_list, cc_addr_list, subject, message, login, password, smtpserver='smtp.gmail.com:587'):
|
header = 'From: %s\n' % from_addr
header += 'To: %s\n' % ','.join(to_addr_list)
header += 'Cc: %s\n' % ','.join(cc_addr_list)
header += 'Subject: %s\n\n' % subject
message = header + message
server = smtplib.SMTP(smtpserver)
server.starttls()
server.login(login,password)
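# Include Cc addresses in the envelope recipients; the Cc header alone doesn't deliver mail.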
problems = server.sendmail(from_addr, to_addr_list + cc_addr_list, message)
server.quit()
return problems
|
|
clkctrl.rs
|
#[doc = "Reader of register CLKCTRL%s"]
pub type R = crate::R<u32, super::CLKCTRL>;
#[doc = "Writer for register CLKCTRL%s"]
pub type W = crate::W<u32, super::CLKCTRL>;
#[doc = "Register CLKCTRL%s `reset()`'s with value 0"]
impl crate::ResetValue for super::CLKCTRL {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Slot Size\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum SLOTSIZE_A {
#[doc = "0: 8-bit Slot for Clock Unit n"]
_8 = 0,
#[doc = "1: 16-bit Slot for Clock Unit n"]
_16 = 1,
#[doc = "2: 24-bit Slot for Clock Unit n"]
_24 = 2,
#[doc = "3: 32-bit Slot for Clock Unit n"]
_32 = 3,
}
impl From<SLOTSIZE_A> for u8 {
#[inline(always)]
fn from(variant: SLOTSIZE_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `SLOTSIZE`"]
pub type SLOTSIZE_R = crate::R<u8, SLOTSIZE_A>;
impl SLOTSIZE_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> SLOTSIZE_A {
match self.bits {
0 => SLOTSIZE_A::_8,
1 => SLOTSIZE_A::_16,
2 => SLOTSIZE_A::_24,
3 => SLOTSIZE_A::_32,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `_8`"]
#[inline(always)]
pub fn is_8(&self) -> bool {
*self == SLOTSIZE_A::_8
}
#[doc = "Checks if the value of the field is `_16`"]
#[inline(always)]
pub fn is_16(&self) -> bool {
*self == SLOTSIZE_A::_16
}
#[doc = "Checks if the value of the field is `_24`"]
#[inline(always)]
pub fn is_24(&self) -> bool {
*self == SLOTSIZE_A::_24
}
#[doc = "Checks if the value of the field is `_32`"]
#[inline(always)]
pub fn is_32(&self) -> bool {
*self == SLOTSIZE_A::_32
}
}
#[doc = "Write proxy for field `SLOTSIZE`"]
pub struct SLOTSIZE_W<'a> {
w: &'a mut W,
}
impl<'a> SLOTSIZE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: SLOTSIZE_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "8-bit Slot for Clock Unit n"]
#[inline(always)]
pub fn _8(self) -> &'a mut W {
self.variant(SLOTSIZE_A::_8)
}
#[doc = "16-bit Slot for Clock Unit n"]
#[inline(always)]
pub fn _16(self) -> &'a mut W {
self.variant(SLOTSIZE_A::_16)
}
#[doc = "24-bit Slot for Clock Unit n"]
#[inline(always)]
pub fn _24(self) -> &'a mut W {
self.variant(SLOTSIZE_A::_24)
}
#[doc = "32-bit Slot for Clock Unit n"]
#[inline(always)]
pub fn _32(self) -> &'a mut W {
self.variant(SLOTSIZE_A::_32)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
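// Read-modify-write: clear the two SLOTSIZE bits, then OR in the new value masked to the field width.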
self.w.bits = (self.w.bits & !0x03) | ((value as u32) & 0x03);
self.w
}
}
#[doc = "Reader of field `NBSLOTS`"]
pub type NBSLOTS_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `NBSLOTS`"]
pub struct NBSLOTS_W<'a> {
w: &'a mut W,
}
impl<'a> NBSLOTS_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 2)) | (((value as u32) & 0x07) << 2);
self.w
}
}
#[doc = "Frame Sync Width\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum FSWIDTH_A {
#[doc = "0: Frame Sync Pulse is 1 Slot wide (default for I2S protocol)"]
SLOT = 0,
#[doc = "1: Frame Sync Pulse is half a Frame wide"]
HALF = 1,
#[doc = "2: Frame Sync Pulse is 1 Bit wide"]
BIT = 2,
#[doc = "3: Clock Unit n operates in Burst mode, with a 1-bit wide Frame Sync pulse per Data sample, only when Data transfer is requested"]
BURST = 3,
}
impl From<FSWIDTH_A> for u8 {
#[inline(always)]
fn from(variant: FSWIDTH_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `FSWIDTH`"]
pub type FSWIDTH_R = crate::R<u8, FSWIDTH_A>;
impl FSWIDTH_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> FSWIDTH_A {
match self.bits {
0 => FSWIDTH_A::SLOT,
1 => FSWIDTH_A::HALF,
2 => FSWIDTH_A::BIT,
3 => FSWIDTH_A::BURST,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `SLOT`"]
#[inline(always)]
pub fn is_slot(&self) -> bool {
*self == FSWIDTH_A::SLOT
}
#[doc = "Checks if the value of the field is `HALF`"]
#[inline(always)]
pub fn is_half(&self) -> bool {
*self == FSWIDTH_A::HALF
}
#[doc = "Checks if the value of the field is `BIT`"]
#[inline(always)]
pub fn is_bit_(&self) -> bool {
*self == FSWIDTH_A::BIT
}
#[doc = "Checks if the value of the field is `BURST`"]
#[inline(always)]
pub fn is_burst(&self) -> bool {
*self == FSWIDTH_A::BURST
}
}
#[doc = "Write proxy for field `FSWIDTH`"]
pub struct FSWIDTH_W<'a> {
w: &'a mut W,
}
impl<'a> FSWIDTH_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: FSWIDTH_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Frame Sync Pulse is 1 Slot wide (default for I2S protocol)"]
#[inline(always)]
pub fn slot(self) -> &'a mut W {
self.variant(FSWIDTH_A::SLOT)
}
#[doc = "Frame Sync Pulse is half a Frame wide"]
#[inline(always)]
pub fn half(self) -> &'a mut W {
self.variant(FSWIDTH_A::HALF)
}
#[doc = "Frame Sync Pulse is 1 Bit wide"]
#[inline(always)]
pub fn bit_(self) -> &'a mut W {
self.variant(FSWIDTH_A::BIT)
}
#[doc = "Clock Unit n operates in Burst mode, with a 1-bit wide Frame Sync pulse per Data sample, only when Data transfer is requested"]
#[inline(always)]
pub fn burst(self) -> &'a mut W {
self.variant(FSWIDTH_A::BURST)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 5)) | (((value as u32) & 0x03) << 5);
self.w
}
}
#[doc = "Data Delay from Frame Sync\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BITDELAY_A {
#[doc = "0: Left Justified (0 Bit Delay)"]
LJ = 0,
#[doc = "1: I2S (1 Bit Delay)"]
I2S = 1,
}
impl From<BITDELAY_A> for bool {
#[inline(always)]
fn from(variant: BITDELAY_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `BITDELAY`"]
pub type BITDELAY_R = crate::R<bool, BITDELAY_A>;
impl BITDELAY_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> BITDELAY_A {
match self.bits {
false => BITDELAY_A::LJ,
true => BITDELAY_A::I2S,
}
}
#[doc = "Checks if the value of the field is `LJ`"]
#[inline(always)]
pub fn is_lj(&self) -> bool {
*self == BITDELAY_A::LJ
}
#[doc = "Checks if the value of the field is `I2S`"]
#[inline(always)]
pub fn is_i2s(&self) -> bool {
*self == BITDELAY_A::I2S
}
}
#[doc = "Write proxy for field `BITDELAY`"]
pub struct BITDELAY_W<'a> {
w: &'a mut W,
}
impl<'a> BITDELAY_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: BITDELAY_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Left Justified (0 Bit Delay)"]
#[inline(always)]
pub fn lj(self) -> &'a mut W {
self.variant(BITDELAY_A::LJ)
}
#[doc = "I2S (1 Bit Delay)"]
#[inline(always)]
pub fn i2s(self) -> &'a mut W {
self.variant(BITDELAY_A::I2S)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "Frame Sync Select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FSSEL_A {
#[doc = "0: Divided Serial Clock n is used as Frame Sync n source"]
SCKDIV = 0,
#[doc = "1: FSn input pin is used as Frame Sync n source"]
FSPIN = 1,
}
impl From<FSSEL_A> for bool {
#[inline(always)]
fn from(variant: FSSEL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `FSSEL`"]
pub type FSSEL_R = crate::R<bool, FSSEL_A>;
impl FSSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> FSSEL_A {
match self.bits {
false => FSSEL_A::SCKDIV,
true => FSSEL_A::FSPIN,
}
}
#[doc = "Checks if the value of the field is `SCKDIV`"]
#[inline(always)]
pub fn is_sckdiv(&self) -> bool {
*self == FSSEL_A::SCKDIV
}
#[doc = "Checks if the value of the field is `FSPIN`"]
#[inline(always)]
pub fn is_fspin(&self) -> bool {
*self == FSSEL_A::FSPIN
}
}
#[doc = "Write proxy for field `FSSEL`"]
pub struct FSSEL_W<'a> {
w: &'a mut W,
}
impl<'a> FSSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: FSSEL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Divided Serial Clock n is used as Frame Sync n source"]
#[inline(always)]
pub fn sckdiv(self) -> &'a mut W {
self.variant(FSSEL_A::SCKDIV)
}
#[doc = "FSn input pin is used as Frame Sync n source"]
#[inline(always)]
pub fn fspin(self) -> &'a mut W {
self.variant(FSSEL_A::FSPIN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Reader of field `FSINV`"]
pub type FSINV_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FSINV`"]
pub struct FSINV_W<'a> {
w: &'a mut W,
}
impl<'a> FSINV_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
self.w
}
}
#[doc = "Serial Clock Select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SCKSEL_A {
#[doc = "0: Divided Master Clock n is used as Serial Clock n source"]
MCKDIV = 0,
#[doc = "1: SCKn input pin is used as Serial Clock n source"]
SCKPIN = 1,
}
impl From<SCKSEL_A> for bool {
#[inline(always)]
fn from(variant: SCKSEL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `SCKSEL`"]
pub type SCKSEL_R = crate::R<bool, SCKSEL_A>;
impl SCKSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> SCKSEL_A {
match self.bits {
false => SCKSEL_A::MCKDIV,
true => SCKSEL_A::SCKPIN,
}
}
#[doc = "Checks if the value of the field is `MCKDIV`"]
#[inline(always)]
pub fn is_mckdiv(&self) -> bool {
*self == SCKSEL_A::MCKDIV
}
#[doc = "Checks if the value of the field is `SCKPIN`"]
#[inline(always)]
pub fn is_sckpin(&self) -> bool {
*self == SCKSEL_A::SCKPIN
}
}
#[doc = "Write proxy for field `SCKSEL`"]
pub struct SCKSEL_W<'a> {
w: &'a mut W,
}
impl<'a> SCKSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: SCKSEL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Divided Master Clock n is used as Serial Clock n source"]
#[inline(always)]
pub fn mckdiv(self) -> &'a mut W {
self.variant(SCKSEL_A::MCKDIV)
}
#[doc = "SCKn input pin is used as Serial Clock n source"]
#[inline(always)]
pub fn sckpin(self) -> &'a mut W {
self.variant(SCKSEL_A::SCKPIN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
self.w
}
}
#[doc = "Master Clock Select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MCKSEL_A {
#[doc = "0: GCLK_I2S_n is used as Master Clock n source"]
GCLK = 0,
#[doc = "1: MCKn input pin is used as Master Clock n source"]
MCKPIN = 1,
}
|
fn from(variant: MCKSEL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `MCKSEL`"]
pub type MCKSEL_R = crate::R<bool, MCKSEL_A>;
impl MCKSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> MCKSEL_A {
match self.bits {
false => MCKSEL_A::GCLK,
true => MCKSEL_A::MCKPIN,
}
}
#[doc = "Checks if the value of the field is `GCLK`"]
#[inline(always)]
pub fn is_gclk(&self) -> bool {
*self == MCKSEL_A::GCLK
}
#[doc = "Checks if the value of the field is `MCKPIN`"]
#[inline(always)]
pub fn is_mckpin(&self) -> bool {
*self == MCKSEL_A::MCKPIN
}
}
#[doc = "Write proxy for field `MCKSEL`"]
pub struct MCKSEL_W<'a> {
w: &'a mut W,
}
impl<'a> MCKSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: MCKSEL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "GCLK_I2S_n is used as Master Clock n source"]
#[inline(always)]
pub fn gclk(self) -> &'a mut W {
self.variant(MCKSEL_A::GCLK)
}
#[doc = "MCKn input pin is used as Master Clock n source"]
#[inline(always)]
pub fn mckpin(self) -> &'a mut W {
self.variant(MCKSEL_A::MCKPIN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Reader of field `MCKEN`"]
pub type MCKEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `MCKEN`"]
pub struct MCKEN_W<'a> {
w: &'a mut W,
}
impl<'a> MCKEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
self.w
}
}
#[doc = "Reader of field `MCKDIV`"]
pub type MCKDIV_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `MCKDIV`"]
pub struct MCKDIV_W<'a> {
w: &'a mut W,
}
impl<'a> MCKDIV_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x1f << 19)) | (((value as u32) & 0x1f) << 19);
self.w
}
}
#[doc = "Reader of field `MCKOUTDIV`"]
pub type MCKOUTDIV_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `MCKOUTDIV`"]
pub struct MCKOUTDIV_W<'a> {
w: &'a mut W,
}
impl<'a> MCKOUTDIV_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x1f << 24)) | (((value as u32) & 0x1f) << 24);
self.w
}
}
#[doc = "Reader of field `FSOUTINV`"]
pub type FSOUTINV_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FSOUTINV`"]
pub struct FSOUTINV_W<'a> {
w: &'a mut W,
}
impl<'a> FSOUTINV_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 29)) | (((value as u32) & 0x01) << 29);
self.w
}
}
#[doc = "Reader of field `SCKOUTINV`"]
pub type SCKOUTINV_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SCKOUTINV`"]
pub struct SCKOUTINV_W<'a> {
w: &'a mut W,
}
impl<'a> SCKOUTINV_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30);
self.w
}
}
#[doc = "Reader of field `MCKOUTINV`"]
pub type MCKOUTINV_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `MCKOUTINV`"]
pub struct MCKOUTINV_W<'a> {
w: &'a mut W,
}
impl<'a> MCKOUTINV_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31);
self.w
}
}
impl R {
#[doc = "Bits 0:1 - Slot Size"]
#[inline(always)]
pub fn slotsize(&self) -> SLOTSIZE_R {
SLOTSIZE_R::new((self.bits & 0x03) as u8)
}
#[doc = "Bits 2:4 - Number of Slots in Frame"]
#[inline(always)]
pub fn nbslots(&self) -> NBSLOTS_R {
NBSLOTS_R::new(((self.bits >> 2) & 0x07) as u8)
}
#[doc = "Bits 5:6 - Frame Sync Width"]
#[inline(always)]
pub fn fswidth(&self) -> FSWIDTH_R {
FSWIDTH_R::new(((self.bits >> 5) & 0x03) as u8)
}
#[doc = "Bit 7 - Data Delay from Frame Sync"]
#[inline(always)]
pub fn bitdelay(&self) -> BITDELAY_R {
BITDELAY_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bit 8 - Frame Sync Select"]
#[inline(always)]
pub fn fssel(&self) -> FSSEL_R {
FSSEL_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bit 11 - Frame Sync Invert"]
#[inline(always)]
pub fn fsinv(&self) -> FSINV_R {
FSINV_R::new(((self.bits >> 11) & 0x01) != 0)
}
#[doc = "Bit 12 - Serial Clock Select"]
#[inline(always)]
pub fn scksel(&self) -> SCKSEL_R {
SCKSEL_R::new(((self.bits >> 12) & 0x01) != 0)
}
#[doc = "Bit 16 - Master Clock Select"]
#[inline(always)]
pub fn mcksel(&self) -> MCKSEL_R {
MCKSEL_R::new(((self.bits >> 16) & 0x01) != 0)
}
#[doc = "Bit 18 - Master Clock Enable"]
#[inline(always)]
pub fn mcken(&self) -> MCKEN_R {
MCKEN_R::new(((self.bits >> 18) & 0x01) != 0)
}
#[doc = "Bits 19:23 - Master Clock Division Factor"]
#[inline(always)]
pub fn mckdiv(&self) -> MCKDIV_R {
MCKDIV_R::new(((self.bits >> 19) & 0x1f) as u8)
}
#[doc = "Bits 24:28 - Master Clock Output Division Factor"]
#[inline(always)]
pub fn mckoutdiv(&self) -> MCKOUTDIV_R {
MCKOUTDIV_R::new(((self.bits >> 24) & 0x1f) as u8)
}
#[doc = "Bit 29 - Frame Sync Output Invert"]
#[inline(always)]
pub fn fsoutinv(&self) -> FSOUTINV_R {
FSOUTINV_R::new(((self.bits >> 29) & 0x01) != 0)
}
#[doc = "Bit 30 - Serial Clock Output Invert"]
#[inline(always)]
pub fn sckoutinv(&self) -> SCKOUTINV_R {
SCKOUTINV_R::new(((self.bits >> 30) & 0x01) != 0)
}
#[doc = "Bit 31 - Master Clock Output Invert"]
#[inline(always)]
pub fn mckoutinv(&self) -> MCKOUTINV_R {
MCKOUTINV_R::new(((self.bits >> 31) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:1 - Slot Size"]
#[inline(always)]
pub fn slotsize(&mut self) -> SLOTSIZE_W {
SLOTSIZE_W { w: self }
}
#[doc = "Bits 2:4 - Number of Slots in Frame"]
#[inline(always)]
pub fn nbslots(&mut self) -> NBSLOTS_W {
NBSLOTS_W { w: self }
}
#[doc = "Bits 5:6 - Frame Sync Width"]
#[inline(always)]
pub fn fswidth(&mut self) -> FSWIDTH_W {
FSWIDTH_W { w: self }
}
#[doc = "Bit 7 - Data Delay from Frame Sync"]
#[inline(always)]
pub fn bitdelay(&mut self) -> BITDELAY_W {
BITDELAY_W { w: self }
}
#[doc = "Bit 8 - Frame Sync Select"]
#[inline(always)]
pub fn fssel(&mut self) -> FSSEL_W {
FSSEL_W { w: self }
}
#[doc = "Bit 11 - Frame Sync Invert"]
#[inline(always)]
pub fn fsinv(&mut self) -> FSINV_W {
FSINV_W { w: self }
}
#[doc = "Bit 12 - Serial Clock Select"]
#[inline(always)]
pub fn scksel(&mut self) -> SCKSEL_W {
SCKSEL_W { w: self }
}
#[doc = "Bit 16 - Master Clock Select"]
#[inline(always)]
pub fn mcksel(&mut self) -> MCKSEL_W {
MCKSEL_W { w: self }
}
#[doc = "Bit 18 - Master Clock Enable"]
#[inline(always)]
pub fn mcken(&mut self) -> MCKEN_W {
MCKEN_W { w: self }
}
#[doc = "Bits 19:23 - Master Clock Division Factor"]
#[inline(always)]
pub fn mckdiv(&mut self) -> MCKDIV_W {
MCKDIV_W { w: self }
}
#[doc = "Bits 24:28 - Master Clock Output Division Factor"]
#[inline(always)]
pub fn mckoutdiv(&mut self) -> MCKOUTDIV_W {
MCKOUTDIV_W { w: self }
}
#[doc = "Bit 29 - Frame Sync Output Invert"]
#[inline(always)]
pub fn fsoutinv(&mut self) -> FSOUTINV_W {
FSOUTINV_W { w: self }
}
#[doc = "Bit 30 - Serial Clock Output Invert"]
#[inline(always)]
pub fn sckoutinv(&mut self) -> SCKOUTINV_W {
SCKOUTINV_W { w: self }
}
#[doc = "Bit 31 - Master Clock Output Invert"]
#[inline(always)]
pub fn mckoutinv(&mut self) -> MCKOUTINV_W {
MCKOUTINV_W { w: self }
}
}
|
impl From<MCKSEL_A> for bool {
#[inline(always)]
|
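A minimal usage sketch for this svd2rust-style writer API, assuming a peripherals handle dp with an I2S block exposing the CLKCTRL register array (the handle and register path are assumptions; the field methods are the ones defined above):
// Configure Clock Unit 0 for standard I2S: 32-bit slots, 1-slot-wide
// frame sync, 1-bit data delay, divided serial clock as frame sync source.
dp.I2S.clkctrl[0].write(|w| {
    w.slotsize()._32()
        .fswidth().slot()
        .bitdelay().i2s()
        .fssel().sckdiv()
});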
05-regex-tasks.js
|
'use strict';
/********************************************************************************************
* *
 *                    Please read the following tutorial before implementing tasks:                   *
* https://developer.mozilla.org/en/docs/Web/JavaScript/Guide/Regular_Expressions *
* *
********************************************************************************************/
/**
* Returns the regexp that matches a GUID string representation
* '{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}',
 * where X is a hexadecimal digit (0-9, A-F, a-f)
*
* See more details: https://en.wikipedia.org/wiki/Globally_unique_identifier
*
* Match :
* '{3F2504E0-4F89-41D3-9A0C-0305E82C3301}'
* '{21EC2020-3AEA-4069-A2DD-08002B30309D}'
* '{0c74f13f-fa83-4c48-9b33-68921dd72463}'
*
* Do not match:
* '{D44EF4F4-280B47E5-91C7-261222A59621}'
* '{D1A5279D-B27D-4CD4-A05E-EFDH53D08E8D}'
* '{5EDEB36C-9006-467A8D04-AFB6F62CD7D2}'
* '677E2553DD4D43B09DA77414DB1EB8EA'
* '0c74f13f-fa83-4c48-9b33-68921dd72463'
* 'The roof, the roof, the roof is on fire'
*
* @return {RegExp}
*/
function getRegexForGuid(str) {
return /{[0-9a-fA-F\-]{36}}/;
}
/**
 * Returns the regexp that matches all the strings from the first column
 * but none of them from the second
*
* Match : Do not match:
* ----------- --------------
* 'pit' ' pt'
* 'spot' 'Pot'
* 'spate' 'peat'
* 'slap two' 'part'
* 'respite'
*
 * NOTE : the regex length should be < 13
*
* @return {RegExp}
*
*/
function getRegexForPitSpot() {
return /p[aoi\s]t/;
}
/**
* Returns the regexp that matches all IPv4 strings in
* 'XX.XX.XX.XX' dotted format where XX is number 0 to 255
*
* Valid IPv4: Invalid IPv4
* --------------- -----------------
* '0.0.0.0' '300.0.0.0'
* '127.0.0.1' '127.0.0.-1'
* '10.10.1.1' '23.24.25.26.27'
* '46.61.155.237' 'Set dns to 8.8.8.8'
* '010.234.015.001'
*
* @return {RegExp}
*/
function getRegexForIPv4() {
return /^[0-2]?[0-9]{1,2}\.[0-2]?[0-9]{1,2}\.[0-2]?[0-9]{1,2}\.[0-2]?[0-9]{1,2}$/;
}
/**
* Returns the regexp that matches all SSN (Social Security Number) codes in
* 'XXX-XX-XXXX' format where X is digit, where each group can't be all zeros
* https://en.wikipedia.org/wiki/Social_Security_number
*
* Valid SSN: Invalid SSN
* --------------- -----------------
* '123-45-6789' '123456789'
* '234-56-2349' '000-56-2349'
* '875-43-0298' '875-00-0298'
* '034-01-0008' '034-01-0000'
* '0S4-H1-HACK'
* @return {RegExp}
*/
function getRegexForSSN() {
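// Negative lookaheads (?!000), (?!00), (?!0000) reject all-zero groups before the digits are matched.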
return /^(?!000)[0-9]{3}\-(?!00)[0-9]{2}\-(?!0000)[0-9]{4}$/;
}
|
/**
* Returns the password validator regex.
 * Regex will validate a password to make sure it meets the following criteria:
 * - At least the specified number of characters long (argument minLength)
* - Contains a lowercase letter
* - Contains an uppercase letter
* - Contains a number
* - Valid passwords will only be alphanumeric characters.
*
* @param {number} minLength
* @return {Regex}
*
* @example
* let validator = getPasswordValidator(6);
* 'password'.match(validator) => false
* 'Pa55Word'.match(validator) => true
* 'PASSw0rd'.match(validator) => true
* 'PASSW0RD'.match(validator) => false
* 'Pa55'.match(validator) => false
*/
function getPasswordValidator(minLength) {
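// Each (?=...) lookahead requires one character class somewhere in the string;
// the final [a-zA-Z0-9]{minLength,} enforces alphanumeric-only input and the minimum length.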
return new RegExp('^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])[a-zA-Z0-9]{' + minLength + ',}$');
}
module.exports = {
getRegexForGuid: getRegexForGuid,
getRegexForPitSpot: getRegexForPitSpot,
getRegexForIPv4: getRegexForIPv4,
getRegexForSSN: getRegexForSSN,
getPasswordValidator: getPasswordValidator
};
| |
validator_operator.rs
|
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use libra_global_constants::OPERATOR_KEY;
use libra_management::{
config::ConfigPath, constants, error::Error, secure_backend::SharedBackend,
};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
pub struct ValidatorOperator {
#[structopt(flatten)]
config: ConfigPath,
#[structopt(long)]
operator_name: String,
#[structopt(flatten)]
shared_backend: SharedBackend,
}
impl ValidatorOperator {
pub fn execute(self) -> Result<String, Error>
|
}
|
{
let config = self
.config
.load()?
.override_shared_backend(&self.shared_backend.shared_backend)?;
let operator_name = self.operator_name;
// Verify the operator exists in the shared storage
let operator_storage = config.shared_backend_with_namespace(operator_name.clone());
let _ = operator_storage.ed25519_key(OPERATOR_KEY)?;
// Upload the operator name to shared storage
let mut shared_storage = config.shared_backend();
shared_storage.set(constants::VALIDATOR_OPERATOR, operator_name.clone())?;
Ok(operator_name)
}
|
register_ldtk_objects.rs
|
use crate::{app::ldtk_entity::*, app::ldtk_int_cell::*};
use bevy::prelude::*;
/// Provides functions to register [Bundle]s to bevy's [App] for particular LDtk layer identifiers,
/// entity identifiers, and IntGrid values.
///
/// After being registered, [Entity]s will be spawned with these bundles when some IntGrid tile or
/// entity meets the criteria you specify.
///
/// Not necessarily intended for custom implementations on your own types.
pub trait RegisterLdtkObjects {
/// Used internally by all the other LDtk entity registration functions.
///
/// Similar to [RegisterLdtkObjects::register_ldtk_entity_for_layer], except it provides
/// defaulting functionality:
/// - Setting `layer_identifier` to [None] will make the registration apply to any Entity layer.
/// - Setting `entity_identifier` to [None] will make the registration apply to any LDtk entity.
///
/// This defaulting functionality means that a particular instance of an LDtk entity may match
/// multiple registrations.
/// In these cases, registrations are prioritized in order of most to least specific:
/// 1. `layer_identifier` and `entity_identifier` are specified
/// 2. Just `entity_identifier` is specified
/// 3. Just `layer_identifier` is specified
/// 4. Neither `entity_identifier` nor `layer_identifier` are specified
fn register_ldtk_entity_for_layer_optional<B: LdtkEntity + Bundle>(
&mut self,
layer_identifier: Option<String>,
entity_identifier: Option<String>,
) -> &mut Self;
/// Registers [LdtkEntity] types to be spawned for a given Entity identifier and layer
/// identifier in an LDtk file.
///
/// This example lets the plugin know that it should spawn a MyBundle when it encounters a
/// "my_entity_identifier" entity on a "MyLayerIdentifier" layer.
/// ```no_run
/// use bevy::prelude::*;
/// use bevy_ecs_ldtk::prelude::*;
///
/// fn main() {
/// App::empty()
/// .add_plugin(LdtkPlugin)
/// .register_ldtk_entity_for_layer::<MyBundle>("MyLayerIdentifier", "my_entity_identifier")
/// // add other systems, plugins, resources...
/// .run();
/// }
///
/// # #[derive(Component, Default)]
/// # struct ComponentA;
/// # #[derive(Component, Default)]
/// # struct ComponentB;
/// # #[derive(Component, Default)]
/// # struct ComponentC;
/// #[derive(Bundle, LdtkEntity)]
/// pub struct MyBundle {
/// a: ComponentA,
/// b: ComponentB,
/// c: ComponentC,
/// }
/// ```
///
/// You can find more details on the `#[derive(LdtkEntity)]` macro at [LdtkEntity].
fn register_ldtk_entity_for_layer<B: LdtkEntity + Bundle>(
&mut self,
layer_identifier: &str,
entity_identifier: &str,
) -> &mut Self {
self.register_ldtk_entity_for_layer_optional::<B>(
Some(layer_identifier.to_string()),
Some(entity_identifier.to_string()),
)
}
/// Similar to [RegisterLdtkObjects::register_ldtk_entity_for_layer], except it applies the
/// registration to all layers.
fn
|
<B: LdtkEntity + Bundle>(
&mut self,
entity_identifier: &str,
) -> &mut Self {
self.register_ldtk_entity_for_layer_optional::<B>(None, Some(entity_identifier.to_string()))
}
/// Similar to [RegisterLdtkObjects::register_ldtk_entity_for_layer], except it applies the
/// registration to all entities on the given layer.
fn register_default_ldtk_entity_for_layer<B: LdtkEntity + Bundle>(
&mut self,
layer_identifier: &str,
) -> &mut Self {
self.register_ldtk_entity_for_layer_optional::<B>(Some(layer_identifier.to_string()), None)
}
/// Similar to [RegisterLdtkObjects::register_ldtk_entity_for_layer], except it applies the
/// registration to any entity and any layer.
fn register_default_ldtk_entity<B: LdtkEntity + Bundle>(&mut self) -> &mut Self {
self.register_ldtk_entity_for_layer_optional::<B>(None, None)
}
/// Used internally by all the other LDtk int cell registration functions.
///
/// Similar to [RegisterLdtkObjects::register_ldtk_int_cell_for_layer], except it provides
/// defaulting functionality:
/// - Setting `layer_identifier` to [None] will make the registration apply to any IntGrid layer.
/// - Setting `value` to [None] will make the registration apply to any IntGrid tile.
///
/// This defaulting functionality means that a particular LDtk IntGrid tile may match multiple
/// registrations.
/// In these cases, registrations are prioritized in order of most to least specific:
/// 1. `layer_identifier` and `value` are specified
/// 2. Just `value` is specified
/// 3. Just `layer_identifier` is specified
/// 4. Neither `value` nor `layer_identifier` are specified
fn register_ldtk_int_cell_for_layer_optional<B: LdtkIntCell + Bundle>(
&mut self,
layer_identifier: Option<String>,
value: Option<i32>,
) -> &mut Self;
/// Registers [LdtkIntCell] types to be inserted for a given IntGrid value and layer identifier
/// in an LDtk file.
///
/// This example lets the plugin know that it should spawn a MyBundle when it encounters an
/// IntGrid tile whose value is `1` on a "MyLayerIdentifier" layer.
/// ```no_run
/// use bevy::prelude::*;
/// use bevy_ecs_ldtk::prelude::*;
///
/// fn main() {
/// App::empty()
/// .add_plugin(LdtkPlugin)
/// .register_ldtk_int_cell_for_layer::<MyBundle>("MyLayerIdentifier", 1)
/// // add other systems, plugins, resources...
/// .run();
/// }
///
/// # #[derive(Component, Default)]
/// # struct ComponentA;
/// # #[derive(Component, Default)]
/// # struct ComponentB;
/// # #[derive(Component, Default)]
/// # struct ComponentC;
/// #[derive(Bundle, LdtkIntCell)]
/// pub struct MyBundle {
/// a: ComponentA,
/// b: ComponentB,
/// c: ComponentC,
/// }
/// ```
///
/// You can find more details on the `#[derive(LdtkIntCell)]` macro at [LdtkIntCell].
fn register_ldtk_int_cell_for_layer<B: LdtkIntCell + Bundle>(
&mut self,
layer_identifier: &str,
value: i32,
) -> &mut Self {
self.register_ldtk_int_cell_for_layer_optional::<B>(
Some(layer_identifier.to_string()),
Some(value),
)
}
/// Similar to [RegisterLdtkObjects::register_ldtk_int_cell_for_layer], except it applies the
/// registration to all layers.
fn register_ldtk_int_cell<B: LdtkIntCell + Bundle>(&mut self, value: i32) -> &mut Self {
self.register_ldtk_int_cell_for_layer_optional::<B>(None, Some(value))
}
/// Similar to [RegisterLdtkObjects::register_ldtk_int_cell_for_layer], except it applies the
/// registration to all tiles on the given layer.
fn register_default_ldtk_int_cell_for_layer<B: LdtkIntCell + Bundle>(
&mut self,
layer_identifier: &str,
) -> &mut Self {
self.register_ldtk_int_cell_for_layer_optional::<B>(
Some(layer_identifier.to_string()),
None,
)
}
/// Similar to [RegisterLdtkObjects::register_ldtk_int_cell_for_layer], except it applies the
/// registration to any tile and any layer.
fn register_default_ldtk_int_cell<B: LdtkIntCell + Bundle>(&mut self) -> &mut Self {
self.register_ldtk_int_cell_for_layer_optional::<B>(None, None)
}
}
impl RegisterLdtkObjects for App {
fn register_ldtk_entity_for_layer_optional<B: LdtkEntity + Bundle>(
&mut self,
layer_identifier: Option<String>,
entity_identifier: Option<String>,
) -> &mut Self {
let new_entry = Box::new(PhantomLdtkEntity::<B>::new());
match self.world.get_non_send_resource_mut::<LdtkEntityMap>() {
Some(mut entries) => {
entries.insert((layer_identifier, entity_identifier), new_entry);
}
None => {
let mut bundle_map = LdtkEntityMap::new();
bundle_map.insert((layer_identifier, entity_identifier), new_entry);
self.world.insert_non_send::<LdtkEntityMap>(bundle_map);
}
}
self
}
fn register_ldtk_int_cell_for_layer_optional<B: LdtkIntCell + Bundle>(
&mut self,
layer_identifier: Option<String>,
value: Option<i32>,
) -> &mut Self {
let new_entry = Box::new(PhantomLdtkIntCell::<B>::new());
match self.world.get_non_send_resource_mut::<LdtkIntCellMap>() {
Some(mut entries) => {
entries.insert((layer_identifier, value), new_entry);
}
None => {
let mut bundle_map = LdtkIntCellMap::new();
bundle_map.insert((layer_identifier, value), new_entry);
self.world.insert_non_send::<LdtkIntCellMap>(bundle_map);
}
}
self
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
components::{EntityInstance, IntGridCell},
ldtk::{LayerInstance, TilesetDefinition},
};
#[derive(Default, Component, Debug)]
struct ComponentA;
#[derive(Default, Component, Debug)]
struct ComponentB;
#[derive(Default, Bundle, Debug)]
struct LdtkEntityBundle {
a: ComponentA,
b: ComponentB,
}
impl LdtkEntity for LdtkEntityBundle {
fn bundle_entity(
_: &EntityInstance,
_: &LayerInstance,
_: Option<&Handle<Image>>,
_: Option<&TilesetDefinition>,
_: &AssetServer,
_: &mut Assets<TextureAtlas>,
) -> LdtkEntityBundle {
LdtkEntityBundle::default()
}
}
#[derive(Default, Bundle)]
struct LdtkIntCellBundle {
a: ComponentA,
b: ComponentB,
}
impl LdtkIntCell for LdtkIntCellBundle {
fn bundle_int_cell(_: IntGridCell, _: &LayerInstance) -> LdtkIntCellBundle {
LdtkIntCellBundle::default()
}
}
#[test]
fn test_ldtk_entity_registrations() {
let mut app = App::new();
app.register_ldtk_entity_for_layer::<LdtkEntityBundle>("layer", "entity_for_layer")
.register_ldtk_entity::<LdtkEntityBundle>("entity")
.register_default_ldtk_entity_for_layer::<LdtkEntityBundle>("default_entity_for_layer")
.register_default_ldtk_entity::<LdtkEntityBundle>();
let ldtk_entity_map = app.world.get_non_send_resource::<LdtkEntityMap>().unwrap();
assert!(ldtk_entity_map.contains_key(&(
Some("layer".to_string()),
Some("entity_for_layer".to_string())
)));
assert!(ldtk_entity_map.contains_key(&(None, Some("entity".to_string()))));
assert!(ldtk_entity_map.contains_key(&(Some("default_entity_for_layer".to_string()), None)));
assert!(ldtk_entity_map.contains_key(&(None, None)));
}
#[test]
fn test_ldtk_int_cell_registrations() {
let mut app = App::new();
app.register_ldtk_int_cell_for_layer::<LdtkIntCellBundle>("layer", 1)
.register_ldtk_int_cell::<LdtkIntCellBundle>(2)
.register_default_ldtk_int_cell_for_layer::<LdtkIntCellBundle>(
"default_int_cell_for_layer",
)
.register_default_ldtk_int_cell::<LdtkIntCellBundle>();
let ldtk_int_cell_map = app.world.get_non_send_resource::<LdtkIntCellMap>().unwrap();
assert!(ldtk_int_cell_map.contains_key(&(Some("layer".to_string()), Some(1))));
assert!(ldtk_int_cell_map.contains_key(&(None, Some(2))));
assert!(
ldtk_int_cell_map.contains_key(&(Some("default_int_cell_for_layer".to_string()), None))
);
assert!(ldtk_int_cell_map.contains_key(&(None, None)));
}
}
|
register_ldtk_entity
|
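A short, hedged sketch of the priority rules described in the trait docs above (SpecialBundle and FallbackBundle are hypothetical bundle types):
// Rule 1 (layer + entity) beats rule 4 (neither): a "chest" on "Interactables"
// spawns SpecialBundle; every other LDtk entity falls back to FallbackBundle.
app.register_ldtk_entity_for_layer::<SpecialBundle>("Interactables", "chest")
    .register_default_ldtk_entity::<FallbackBundle>();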
crypt.service.ts
|
import { Injectable } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import { compare, hash } from 'bcrypt';
import { SecurityConfig } from 'src/config/config.types';
@Injectable()
export class
|
{
get bcryptSaltRounds(): string | number {
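// bcrypt's hash() accepts either a round count (number) or a pre-generated salt (string);
// coerce numeric config strings like "10" into numbers so they are treated as rounds.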
const securityConfig = this.configService.get<SecurityConfig>('security');
const saltOrRounds = securityConfig.bcryptSaltOrRound;
return Number.isInteger(Number(saltOrRounds)) ? Number(saltOrRounds) : saltOrRounds;
}
constructor(private configService: ConfigService) {}
hashPassword(password: string): Promise<string> {
return hash(password, this.bcryptSaltRounds);
}
validatePassword(password: string, hashedPassword: string): Promise<boolean> {
return compare(password, hashedPassword);
}
}
|
CryptService
|
test_bots.py
|
import filecmp
import os
from typing import Any, Dict, List, Mapping, Optional
from unittest.mock import MagicMock, patch
import orjson
from django.core import mail
from django.test import override_settings
from zulip_bots.custom_exceptions import ConfigValidationError
from zerver.lib.actions import (
do_change_stream_invite_only,
do_deactivate_user,
do_set_realm_property,
)
from zerver.lib.bot_config import ConfigError, get_bot_config
from zerver.lib.bot_lib import get_bot_handler
from zerver.lib.integrations import EMBEDDED_BOTS, WebhookIntegration
from zerver.lib.test_classes import UploadSerializeMixin, ZulipTestCase
from zerver.lib.test_helpers import (
avatar_disk_path,
get_test_image_file,
queries_captured,
tornado_redirected_to_list,
)
from zerver.models import (
Realm,
Service,
UserProfile,
get_bot_services,
get_realm,
get_stream,
get_user,
is_cross_realm_bot_email,
)
# A test validator
def _check_string(var_name: str, val: object) -> Optional[str]:
if str(val).startswith("_"):
return f'{var_name} starts with a "_" and is hence invalid.'
return None
stripe_sample_config_options = [
WebhookIntegration(
"stripe",
["financial"],
display_name="Stripe",
config_options=[("Stripe API Key", "stripe_api_key", _check_string)],
),
]
class BotTest(ZulipTestCase, UploadSerializeMixin):
def get_bot_user(self, email: str) -> UserProfile:
realm = get_realm("zulip")
bot = get_user(email, realm)
return bot
def assert_num_bots_equal(self, count: int) -> None:
result = self.client_get("/json/bots")
self.assert_json_success(result)
self.assertEqual(count, len(result.json()["bots"]))
def create_bot(self, **extras: Any) -> Dict[str, Any]:
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"bot_type": "1",
}
bot_info.update(extras)
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
return result.json()
def test_bot_domain(self) -> None:
self.login("hamlet")
self.create_bot()
self.assertTrue(UserProfile.objects.filter(email="[email protected]").exists())
# The other cases are hard to test directly, since we don't allow creating bots from
# the wrong subdomain, and because 'testserver.example.com' is not a valid domain for the bot's email.
# So we just test the Realm.get_bot_domain function.
realm = get_realm("zulip")
self.assertEqual(realm.get_bot_domain(), "zulip.testserver")
def deactivate_bot(self) -> None:
email = "[email protected]"
result = self.client_delete(f"/json/bots/{self.get_bot_user(email).id}")
self.assert_json_success(result)
def test_add_bot_with_bad_username(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
# Invalid username
bot_info = dict(
full_name="My bot name",
short_name="@",
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Bad name or username")
self.assert_num_bots_equal(0)
# Empty username
bot_info = dict(
full_name="My bot name",
short_name="",
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Bad name or username")
self.assert_num_bots_equal(0)
@override_settings(FAKE_EMAIL_DOMAIN="invaliddomain", REALM_HOSTS={"zulip": "127.0.0.1"})
def test_add_bot_with_invalid_fake_email_domain(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"bot_type": "1",
}
result = self.client_post("/json/bots", bot_info)
error_message = (
"Can't create bots until FAKE_EMAIL_DOMAIN is correctly configured.\n"
+ "Please contact your server administrator."
)
self.assert_json_error(result, error_message)
self.assert_num_bots_equal(0)
def test_add_bot_with_no_name(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
bot_info = dict(
full_name="a",
short_name="bot",
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Name too short!")
self.assert_num_bots_equal(0)
def test_json_users_with_bots(self) -> None:
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
self.assert_num_bots_equal(0)
num_bots = 3
for i in range(num_bots):
full_name = f"Bot {i}"
short_name = f"bot-{i}"
bot_info = dict(
full_name=full_name,
short_name=short_name,
bot_type=1,
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
self.assert_num_bots_equal(num_bots)
with queries_captured() as queries:
users_result = self.client_get("/json/users")
self.assert_json_success(users_result)
self.assert_length(queries, 3)
def test_add_bot(self) -> None:
hamlet = self.example_user("hamlet")
self.login("hamlet")
self.assert_num_bots_equal(0)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.create_bot()
self.assert_num_bots_equal(1)
email = "[email protected]"
bot = self.get_bot_user(email)
(event,) = [e for e in events if e["event"]["type"] == "realm_bot"]
self.assertEqual(result["user_id"], bot.id)
self.assertEqual(
dict(
type="realm_bot",
op="add",
bot=dict(
email="[email protected]",
user_id=bot.id,
bot_type=bot.bot_type,
full_name="The Bot of Hamlet",
is_active=True,
api_key=result["api_key"],
avatar_url=result["avatar_url"],
default_sending_stream=None,
default_events_register_stream=None,
default_all_public_streams=False,
services=[],
owner_id=hamlet.id,
),
),
event["event"],
)
users_result = self.client_get("/json/users")
members = orjson.loads(users_result.content)["members"]
[bot] = [m for m in members if m["email"] == "[email protected]"]
self.assertEqual(bot["bot_owner_id"], self.example_user("hamlet").id)
self.assertEqual(bot["user_id"], self.get_bot_user(email).id)
@override_settings(FAKE_EMAIL_DOMAIN="fakedomain.com", REALM_HOSTS={"zulip": "127.0.0.1"})
def test_add_bot_with_fake_email_domain(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
email = "[email protected]"
self.get_bot_user(email)
@override_settings(EXTERNAL_HOST="example.com")
def test_add_bot_verify_subdomain_in_email_address(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
email = "[email protected]"
self.get_bot_user(email)
@override_settings(
FAKE_EMAIL_DOMAIN="fakedomain.com", REALM_HOSTS={"zulip": "zulip.example.com"}
)
def test_add_bot_host_used_as_domain_if_valid(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
email = "[email protected]"
self.get_bot_user(email)
def test_add_bot_with_username_in_use(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.create_bot()
self.assert_num_bots_equal(1)
# The short_name is used in the email, which we call
# "Username" for legacy reasons.
bot_info = dict(
full_name="whatever",
short_name="hambot",
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Username already in use")
dup_full_name = "The Bot of Hamlet"
bot_info = dict(
full_name=dup_full_name,
short_name="whatever",
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Name is already in use!")
def test_add_bot_with_user_avatar(self) -> None:
email = "[email protected]"
realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
with get_test_image_file("img.png") as fp:
self.create_bot(file=fp)
profile = get_user(email, realm)
# Make sure that the avatar image we uploaded is the same as the avatar image on the server
self.assertTrue(
filecmp.cmp(fp.name, os.path.splitext(avatar_disk_path(profile))[0] + ".original")
)
self.assert_num_bots_equal(1)
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
self.assertTrue(os.path.exists(avatar_disk_path(profile)))
def test_add_bot_with_too_many_files(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
with get_test_image_file("img.png") as fp1, get_test_image_file("img.gif") as fp2:
bot_info = dict(
full_name="whatever",
short_name="whatever",
file1=fp1,
file2=fp2,
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "You may only upload one file at a time")
self.assert_num_bots_equal(0)
def test_add_bot_with_default_sending_stream(self) -> None:
email = "[email protected]"
realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream="Denmark")
self.assert_num_bots_equal(1)
self.assertEqual(result["default_sending_stream"], "Denmark")
profile = get_user(email, realm)
assert profile.default_sending_stream is not None
self.assertEqual(profile.default_sending_stream.name, "Denmark")
def test_add_bot_with_default_sending_stream_not_subscribed(self) -> None:
email = "[email protected]"
realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream="Rome")
self.assert_num_bots_equal(1)
self.assertEqual(result["default_sending_stream"], "Rome")
profile = get_user(email, realm)
assert profile.default_sending_stream is not None
self.assertEqual(profile.default_sending_stream.name, "Rome")
def test_add_bot_email_address_visibility(self) -> None:
# Test that we don't mangle the email field with
# email_address_visibility limited to admins
user = self.example_user("hamlet")
do_set_realm_property(
user.realm,
"email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
acting_user=None,
)
user.refresh_from_db()
self.login_user(user)
self.assert_num_bots_equal(0)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.create_bot()
self.assert_num_bots_equal(1)
email = "[email protected]"
bot = self.get_bot_user(email)
(event,) = [e for e in events if e["event"]["type"] == "realm_bot"]
self.assertEqual(
dict(
type="realm_bot",
op="add",
bot=dict(
email="[email protected]",
user_id=bot.id,
bot_type=bot.bot_type,
full_name="The Bot of Hamlet",
is_active=True,
api_key=result["api_key"],
avatar_url=result["avatar_url"],
default_sending_stream=None,
default_events_register_stream=None,
default_all_public_streams=False,
services=[],
owner_id=user.id,
),
),
event["event"],
)
users_result = self.client_get("/json/users")
members = orjson.loads(users_result.content)["members"]
[bot] = [m for m in members if m["email"] == "[email protected]"]
self.assertEqual(bot["bot_owner_id"], user.id)
self.assertEqual(bot["user_id"], self.get_bot_user(email).id)
def test_bot_add_subscription(self) -> None:
"""
Calling POST /json/users/me/subscriptions should successfully add
a stream to the list of subscriptions, and the right number of
events should be generated.
When 'principals' contains a bot, no notification message event or
invitation email is sent when add_subscriptions_backend is called by
the above API call.
"""
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
self.login_user(hamlet)
# A normal user, i.e. not a bot.
request_data = {
"principals": '["' + iago.email + '"]',
}
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.common_subscribe_to_streams(hamlet, ["Rome"], request_data)
self.assert_json_success(result)
msg_event = [e for e in events if e["event"]["type"] == "message"]
self.assert_length(msg_event, 1) # Notification message event is sent.
# Create a bot.
self.assert_num_bots_equal(0)
result = self.create_bot()
self.assert_num_bots_equal(1)
# Now subscribe a bot via 'principals'.
bot_request_data = {
"principals": '["[email protected]"]',
}
events_bot: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events_bot):
result = self.common_subscribe_to_streams(hamlet, ["Rome"], bot_request_data)
self.assert_json_success(result)
# No notification message event or invitation email is sent because the principal is a bot.
msg_event = [e for e in events_bot if e["event"]["type"] == "message"]
self.assert_length(msg_event, 0)
self.assertEqual(len(events_bot), len(events) - 1)
# Test runner automatically redirects all sent email to a dummy 'outbox'.
self.assertEqual(len(mail.outbox), 0)
def test_add_bot_with_default_sending_stream_private_allowed(self) -> None:
self.login("hamlet")
user_profile = self.example_user("hamlet")
stream = get_stream("Denmark", user_profile.realm)
self.subscribe(user_profile, stream.name)
do_change_stream_invite_only(stream, True)
self.assert_num_bots_equal(0)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.create_bot(default_sending_stream="Denmark")
self.assert_num_bots_equal(1)
self.assertEqual(result["default_sending_stream"], "Denmark")
email = "[email protected]"
realm = get_realm("zulip")
profile = get_user(email, realm)
assert profile.default_sending_stream is not None
self.assertEqual(profile.default_sending_stream.name, "Denmark")
(event,) = [e for e in events if e["event"]["type"] == "realm_bot"]
self.assertEqual(
dict(
type="realm_bot",
op="add",
bot=dict(
email="[email protected]",
user_id=profile.id,
full_name="The Bot of Hamlet",
bot_type=profile.bot_type,
is_active=True,
api_key=result["api_key"],
avatar_url=result["avatar_url"],
default_sending_stream="Denmark",
default_events_register_stream=None,
default_all_public_streams=False,
services=[],
owner_id=user_profile.id,
),
),
event["event"],
)
self.assertEqual(event["users"], [user_profile.id])
def test_add_bot_with_default_sending_stream_private_denied(self) -> None:
self.login("hamlet")
realm = self.example_user("hamlet").realm
stream = get_stream("Denmark", realm)
self.unsubscribe(self.example_user("hamlet"), "Denmark")
do_change_stream_invite_only(stream, True)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"default_sending_stream": "Denmark",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Invalid stream name 'Denmark'")
def test_add_bot_with_default_events_register_stream(self) -> None:
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.create_bot(default_events_register_stream="Denmark")
self.assert_num_bots_equal(1)
self.assertEqual(result["default_events_register_stream"], "Denmark")
profile = get_user(bot_email, bot_realm)
assert profile.default_events_register_stream is not None
self.assertEqual(profile.default_events_register_stream.name, "Denmark")
def test_add_bot_with_default_events_register_stream_private_allowed(self) -> None:
self.login("hamlet")
user_profile = self.example_user("hamlet")
stream = self.subscribe(user_profile, "Denmark")
do_change_stream_invite_only(stream, True)
self.assert_num_bots_equal(0)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.create_bot(default_events_register_stream="Denmark")
self.assert_num_bots_equal(1)
self.assertEqual(result["default_events_register_stream"], "Denmark")
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
bot_profile = get_user(bot_email, bot_realm)
assert bot_profile.default_events_register_stream is not None
self.assertEqual(bot_profile.default_events_register_stream.name, "Denmark")
(event,) = [e for e in events if e["event"]["type"] == "realm_bot"]
self.assertEqual(
dict(
type="realm_bot",
op="add",
bot=dict(
email="[email protected]",
full_name="The Bot of Hamlet",
user_id=bot_profile.id,
bot_type=bot_profile.bot_type,
is_active=True,
api_key=result["api_key"],
avatar_url=result["avatar_url"],
default_sending_stream=None,
default_events_register_stream="Denmark",
default_all_public_streams=False,
services=[],
owner_id=user_profile.id,
),
),
event["event"],
)
self.assertEqual(event["users"], [user_profile.id])
def test_add_bot_with_default_events_register_stream_private_denied(self) -> None:
self.login("hamlet")
realm = self.example_user("hamlet").realm
stream = get_stream("Denmark", realm)
self.unsubscribe(self.example_user("hamlet"), "Denmark")
do_change_stream_invite_only(stream, True)
self.assert_num_bots_equal(0)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"default_events_register_stream": "Denmark",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Invalid stream name 'Denmark'")
def test_add_bot_with_default_all_public_streams(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
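# Boolean parameters are passed JSON-encoded, since the test client sends string values.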
result = self.create_bot(default_all_public_streams=orjson.dumps(True).decode())
self.assert_num_bots_equal(1)
self.assertTrue(result["default_all_public_streams"])
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.default_all_public_streams, True)
def test_deactivate_bot(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
self.deactivate_bot()
# You can deactivate the same bot twice.
self.deactivate_bot()
self.assert_num_bots_equal(0)
def test_deactivate_bogus_bot(self) -> None:
"""Deleting a bogus bot will succeed silently."""
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
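# An ID assumed not to belong to any user in the test database.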
invalid_user_id = 1000
result = self.client_delete(f"/json/bots/{invalid_user_id}")
self.assert_json_error(result, "No such bot")
self.assert_num_bots_equal(1)
def test_deactivate_bot_with_owner_deactivation(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"full_name": "The Another Bot of Hamlet",
"short_name": "hambot-another",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
self.assertEqual(
UserProfile.objects.filter(is_bot=True, bot_owner=user, is_active=True).count(), 2
)
result = self.client_delete("/json/users/me")
self.assert_json_success(result)
user = self.example_user("hamlet")
self.assertFalse(user.is_active)
self.login("iago")
self.assertFalse(
UserProfile.objects.filter(is_bot=True, bot_owner=user, is_active=True).exists()
)
def test_cannot_deactivate_other_realm_bot(self) -> None:
user = self.mit_user("starnine")
self.login_user(user)
bot_info = {
"full_name": "The Bot in zephyr",
"short_name": "starn-bot",
"bot_type": "1",
}
result = self.client_post("/json/bots", bot_info, subdomain="zephyr")
self.assert_json_success(result)
result = self.client_get("/json/bots", subdomain="zephyr")
bot_email = result.json()["bots"][0]["username"]
bot = get_user(bot_email, user.realm)
self.login("iago")
result = self.client_delete(f"/json/bots/{bot.id}")
self.assert_json_error(result, "No such bot")
def test_bot_deactivation_attacks(self) -> None:
"""You cannot deactivate somebody else's bot."""
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
# Have Othello try to deactivate both Hamlet and
# Hamlet's bot.
self.login("othello")
# Cannot deactivate a user via the bot endpoint.
result = self.client_delete("/json/bots/{}".format(self.example_user("hamlet").id))
self.assert_json_error(result, "No such bot")
email = "[email protected]"
result = self.client_delete(f"/json/bots/{self.get_bot_user(email).id}")
self.assert_json_error(result, "Insufficient permission")
# But we don't actually deactivate the other person's bot.
self.login("hamlet")
self.assert_num_bots_equal(1)
# Cannot deactivate a bot via the user endpoint.
result = self.client_delete(f"/json/users/{self.get_bot_user(email).id}")
self.assert_json_error(result, "No such user")
self.assert_num_bots_equal(1)
def test_bot_permissions(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
# Have Othello try to mess with Hamlet's bots.
self.login("othello")
email = "[email protected]"
result = self.client_post(f"/json/bots/{self.get_bot_user(email).id}/api_key/regenerate")
self.assert_json_error(result, "Insufficient permission")
bot_info = {
"full_name": "Fred",
}
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Insufficient permission")
def get_bot(self) -> Dict[str, Any]:
result = self.client_get("/json/bots")
bots = result.json()["bots"]
return bots[0]
def test_update_api_key(self) -> None:
self.login("hamlet")
self.create_bot()
bot = self.get_bot()
old_api_key = bot["api_key"]
email = "[email protected]"
result = self.client_post(f"/json/bots/{self.get_bot_user(email).id}/api_key/regenerate")
self.assert_json_success(result)
new_api_key = result.json()["api_key"]
self.assertNotEqual(old_api_key, new_api_key)
bot = self.get_bot()
self.assertEqual(new_api_key, bot["api_key"])
def test_update_api_key_for_invalid_user(self) -> None:
self.login("hamlet")
invalid_user_id = 1000
result = self.client_post(f"/json/bots/{invalid_user_id}/api_key/regenerate")
self.assert_json_error(result, "No such bot")
def test_add_bot_with_bot_type_default(self) -> None:
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot(bot_type=UserProfile.DEFAULT_BOT)
self.assert_num_bots_equal(1)
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.bot_type, UserProfile.DEFAULT_BOT)
def test_add_bot_with_bot_type_incoming_webhook(self) -> None:
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot(bot_type=UserProfile.INCOMING_WEBHOOK_BOT)
self.assert_num_bots_equal(1)
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.bot_type, UserProfile.INCOMING_WEBHOOK_BOT)
def test_add_bot_with_bot_type_invalid(self) -> None:
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"bot_type": 7,
}
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.client_post("/json/bots", bot_info)
self.assert_num_bots_equal(0)
self.assert_json_error(result, "Invalid bot type")
def test_no_generic_bots_allowed_for_non_admins(self) -> None:
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"bot_type": 1,
}
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
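# Restrict generic (default) bot creation to admins; webhook bots stay open to everyone.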
bot_realm.bot_creation_policy = Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
bot_realm.save(update_fields=["bot_creation_policy"])
# A regular user cannot create a generic bot
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.client_post("/json/bots", bot_info)
self.assert_num_bots_equal(0)
self.assert_json_error(result, "Must be an organization administrator")
# But can create an incoming webhook
self.assert_num_bots_equal(0)
self.create_bot(bot_type=UserProfile.INCOMING_WEBHOOK_BOT)
self.assert_num_bots_equal(1)
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.bot_type, UserProfile.INCOMING_WEBHOOK_BOT)
def test_no_generic_bot_reactivation_allowed_for_non_admins(self) -> None:
self.login("hamlet")
self.create_bot(bot_type=UserProfile.DEFAULT_BOT)
bot_realm = get_realm("zulip")
bot_realm.bot_creation_policy = Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
bot_realm.save(update_fields=["bot_creation_policy"])
bot_email = "[email protected]"
bot_user = get_user(bot_email, bot_realm)
do_deactivate_user(bot_user, acting_user=None)
# A regular user cannot reactivate a generic bot
self.assert_num_bots_equal(0)
result = self.client_post(f"/json/users/{bot_user.id}/reactivate")
self.assert_json_error(result, "Must be an organization administrator")
self.assert_num_bots_equal(0)
def test_no_generic_bots_allowed_for_admins(self) -> None:
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
bot_realm.bot_creation_policy = Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
bot_realm.save(update_fields=["bot_creation_policy"])
# An administrator can create any type of bot
self.login("iago")
self.assert_num_bots_equal(0)
self.create_bot(bot_type=UserProfile.DEFAULT_BOT)
self.assert_num_bots_equal(1)
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.bot_type, UserProfile.DEFAULT_BOT)
def test_no_bots_allowed_for_non_admins(self) -> None:
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"bot_type": 1,
}
bot_realm = get_realm("zulip")
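# With this policy, only organization administrators may create bots of any type.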
bot_realm.bot_creation_policy = Realm.BOT_CREATION_ADMINS_ONLY
bot_realm.save(update_fields=["bot_creation_policy"])
# A regular user cannot create a generic bot
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.client_post("/json/bots", bot_info)
self.assert_num_bots_equal(0)
self.assert_json_error(result, "Must be an organization administrator")
# Also, a regular user cannot create an incoming webhook bot
bot_info["bot_type"] = 2
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.client_post("/json/bots", bot_info)
self.assert_num_bots_equal(0)
self.assert_json_error(result, "Must be an organization administrator")
def test_no_bots_allowed_for_admins(self) -> None:
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
bot_realm.bot_creation_policy = Realm.BOT_CREATION_ADMINS_ONLY
bot_realm.save(update_fields=["bot_creation_policy"])
# An administrator can create any type of bot
self.login("iago")
self.assert_num_bots_equal(0)
self.create_bot(bot_type=UserProfile.DEFAULT_BOT)
self.assert_num_bots_equal(1)
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.bot_type, UserProfile.DEFAULT_BOT)
def test_patch_bot_full_name(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"full_name": "Fred",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Fred", result.json()["full_name"])
bot = self.get_bot()
self.assertEqual("Fred", bot["full_name"])
def test_patch_bot_full_name_in_use(self) -> None:
self.login("hamlet")
original_name = "The Bot of Hamlet"
bot_info = {
"full_name": original_name,
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_email = "[email protected]"
bot = self.get_bot_user(bot_email)
url = f"/json/bots/{bot.id}"
# It doesn't matter whether a name is taken by a human
# or a bot; either way, we can't use it.
already_taken_name = self.example_user("cordelia").full_name
bot_info = {
"full_name": already_taken_name,
}
result = self.client_patch(url, bot_info)
self.assert_json_error(result, "Name is already in use!")
# We can use our own name (with extra whitespace), and the
# server should silently do nothing.
original_name_with_padding = " " + original_name + " "
bot_info = {
"full_name": original_name_with_padding,
}
result = self.client_patch(url, bot_info)
self.assert_json_success(result)
bot = self.get_bot_user(bot_email)
self.assertEqual(bot.full_name, original_name)
# And let's do a sanity check with an actual name change
# after our various attempts that either failed or did
# nothing.
bot_info = {
"full_name": "Hal",
}
result = self.client_patch(url, bot_info)
self.assert_json_success(result)
bot = self.get_bot_user(bot_email)
self.assertEqual(bot.full_name, "Hal")
def test_patch_bot_full_name_non_bot(self) -> None:
self.login("iago")
bot_info = {
"full_name": "Fred",
}
result = self.client_patch("/json/bots/{}".format(self.example_user("hamlet").id), bot_info)
self.assert_json_error(result, "No such bot")
def test_patch_bot_owner(self) -> None:
self.login("hamlet")
othello = self.example_user("othello")
bot_info: Dict[str, object] = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"bot_owner_id": othello.id,
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
# Test that the bot's owner has been changed successfully.
self.assertEqual(result.json()["bot_owner"], othello.email)
self.login("othello")
bot = self.get_bot()
self.assertEqual("The Bot of Hamlet", bot["full_name"])
def test_patch_bot_owner_bad_user_id(self) -> None:
self.login("hamlet")
self.create_bot()
self.assert_num_bots_equal(1)
email = "[email protected]"
profile = get_user("[email protected]", get_realm("zulip"))
bad_bot_owner_id = 999999
bot_info = {
"bot_owner_id": bad_bot_owner_id,
}
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Failed to change owner, no such user")
profile = get_user("[email protected]", get_realm("zulip"))
self.assertEqual(profile.bot_owner, self.example_user("hamlet"))
def test_patch_bot_owner_deactivated(self) -> None:
self.login("hamlet")
self.create_bot()
self.assert_num_bots_equal(1)
target_user_profile = self.example_user("othello")
do_deactivate_user(target_user_profile, acting_user=None)
target_user_profile = self.example_user("othello")
self.assertFalse(target_user_profile.is_active)
bot_info = {
"bot_owner_id": self.example_user("othello").id,
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Failed to change owner, user is deactivated")
profile = self.get_bot_user(email)
self.assertEqual(profile.bot_owner, self.example_user("hamlet"))
def test_patch_bot_owner_must_be_in_same_realm(self) -> None:
self.login("hamlet")
self.create_bot()
self.assert_num_bots_equal(1)
bot_info = {
"bot_owner_id": self.mit_user("starnine").id,
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Failed to change owner, no such user")
profile = self.get_bot_user(email)
self.assertEqual(profile.bot_owner, self.example_user("hamlet"))
def test_patch_bot_owner_noop(self) -> None:
self.login("hamlet")
self.create_bot()
self.assert_num_bots_equal(1)
bot_info = {
"bot_owner_id": self.example_user("hamlet").id,
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
# Check that we're still the owner
self.assert_json_success(result)
profile = self.get_bot_user(email)
self.assertEqual(profile.bot_owner, self.example_user("hamlet"))
def test_patch_bot_owner_a_bot(self) -> None:
self.login("hamlet")
self.create_bot()
self.assert_num_bots_equal(1)
bot_info: Dict[str, object] = {
"full_name": "Another Bot of Hamlet",
"short_name": "hamelbot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"bot_owner_id": self.get_bot_user("[email protected]").id,
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Failed to change owner, bots can't own other bots")
profile = get_user(email, get_realm("zulip"))
self.assertEqual(profile.bot_owner, self.example_user("hamlet"))
def test_patch_bot_avatar(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_GRAVATAR)
email = "[email protected]"
# Try error case first (too many files):
with get_test_image_file("img.png") as fp1, get_test_image_file("img.gif") as fp2:
result = self.client_patch_multipart(
f"/json/bots/{self.get_bot_user(email).id}", dict(file1=fp1, file2=fp2)
)
self.assert_json_error(result, "You may only upload one file at a time")
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.avatar_version, 1)
# Happy path: upload a single valid image.
with get_test_image_file("img.png") as fp:
result = self.client_patch_multipart(
f"/json/bots/{self.get_bot_user(email).id}", dict(file=fp)
)
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.avatar_version, 2)
# Make sure the avatar image we've uploaded matches the avatar image stored on the server.
self.assertTrue(
filecmp.cmp(fp.name, os.path.splitext(avatar_disk_path(profile))[0] + ".original")
)
self.assert_json_success(result)
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
self.assertTrue(os.path.exists(avatar_disk_path(profile)))
def test_patch_bot_to_stream(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "Denmark",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Denmark", result.json()["default_sending_stream"])
bot = self.get_bot()
self.assertEqual("Denmark", bot["default_sending_stream"])
def test_patch_bot_to_stream_not_subscribed(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "Rome",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Rome", result.json()["default_sending_stream"])
bot = self.get_bot()
self.assertEqual("Rome", bot["default_sending_stream"])
def test_patch_bot_to_stream_none(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
default_sending_stream = get_user(bot_email, bot_realm).default_sending_stream
self.assertEqual(None, default_sending_stream)
bot = self.get_bot()
self.assertEqual(None, bot["default_sending_stream"])
def test_patch_bot_to_stream_private_allowed(self) -> None:
self.login("hamlet")
user_profile = self.example_user("hamlet")
stream = self.subscribe(user_profile, "Denmark")
do_change_stream_invite_only(stream, True)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "Denmark",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Denmark", result.json()["default_sending_stream"])
bot = self.get_bot()
self.assertEqual("Denmark", bot["default_sending_stream"])
def test_patch_bot_to_stream_private_denied(self) -> None:
self.login("hamlet")
realm = self.example_user("hamlet").realm
stream = get_stream("Denmark", realm)
self.unsubscribe(self.example_user("hamlet"), "Denmark")
do_change_stream_invite_only(stream, True)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "Denmark",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Invalid stream name 'Denmark'")
def test_patch_bot_to_stream_not_found(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "missing",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Invalid stream name 'missing'")
def test_patch_bot_events_register_stream(self) -> None:
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
email = "[email protected]"
bot_user = self.get_bot_user(email)
url = f"/json/bots/{bot_user.id}"
# Successfully give the bot a default stream.
stream_name = "Denmark"
bot_info = dict(default_events_register_stream=stream_name)
result = self.client_patch(url, bot_info)
self.assert_json_success(result)
self.assertEqual(stream_name, result.json()["default_events_register_stream"])
bot = self.get_bot()
self.assertEqual(stream_name, bot["default_events_register_stream"])
# Make sure we are locked out of an unsubscribed private stream.
# We'll subscribe the bot but not the owner (since the check is
# on the owner).
stream_name = "private_stream"
self.make_stream(stream_name, hamlet.realm, invite_only=True)
self.subscribe(bot_user, stream_name)
bot_info = dict(default_events_register_stream=stream_name)
result = self.client_patch(url, bot_info)
self.assert_json_error_contains(result, "Invalid stream name")
# Subscribing the owner allows us to patch the stream.
self.subscribe(hamlet, stream_name)
bot_info = dict(default_events_register_stream=stream_name)
result = self.client_patch(url, bot_info)
self.assert_json_success(result)
# Make sure the bot cannot set its own default stream.
url = f"/api/v1/bots/{bot_user.id}"
result = self.api_patch(bot_user, url, bot_info)
self.assert_json_error_contains(result, "endpoint does not accept")
def test_patch_bot_events_register_stream_allowed(self) -> None:
self.login("hamlet")
user_profile = self.example_user("hamlet")
stream = self.subscribe(user_profile, "Denmark")
do_change_stream_invite_only(stream, True)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_events_register_stream": "Denmark",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Denmark", result.json()["default_events_register_stream"])
bot = self.get_bot()
self.assertEqual("Denmark", bot["default_events_register_stream"])
def test_patch_bot_events_register_stream_denied(self) -> None:
self.login("hamlet")
realm = self.example_user("hamlet").realm
stream = get_stream("Denmark", realm)
self.unsubscribe(self.example_user("hamlet"), "Denmark")
do_change_stream_invite_only(stream, True)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_events_register_stream": "Denmark",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Invalid stream name 'Denmark'")
def test_patch_bot_events_register_stream_none(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_events_register_stream": "",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
default_events_register_stream = get_user(
bot_email, bot_realm
).default_events_register_stream
self.assertEqual(None, default_events_register_stream)
bot = self.get_bot()
self.assertEqual(None, bot["default_events_register_stream"])
def test_patch_bot_events_register_stream_not_found(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_events_register_stream": "missing",
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Invalid stream name 'missing'")
def test_patch_bot_default_all_public_streams_true(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_all_public_streams": orjson.dumps(True).decode(),
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual(result.json()["default_all_public_streams"], True)
bot = self.get_bot()
self.assertEqual(bot["default_all_public_streams"], True)
def test_patch_bot_default_all_public_streams_false(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_all_public_streams": orjson.dumps(False).decode(),
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual(result.json()["default_all_public_streams"], False)
bot = self.get_bot()
self.assertEqual(bot["default_all_public_streams"], False)
def test_patch_bot_via_post(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"full_name": "Fred",
"method": "PATCH",
}
email = "[email protected]"
# Important: We intentionally use the wrong method, POST, here.
result = self.client_post(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Fred", result.json()["full_name"])
bot = self.get_bot()
self.assertEqual("Fred", bot["full_name"])
def test_patch_bogus_bot(self) -> None:
"""Deleting a bogus bot will succeed silently."""
self.login("hamlet")
self.create_bot()
bot_info = {
"full_name": "Fred",
}
invalid_user_id = 1000
result = self.client_patch(f"/json/bots/{invalid_user_id}", bot_info)
self.assert_json_error(result, "No such bot")
self.assert_num_bots_equal(1)
def test_patch_outgoing_webhook_bot(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"bot_type": UserProfile.OUTGOING_WEBHOOK_BOT,
"payload_url": orjson.dumps("http://foo.bar.com").decode(),
"service_interface": Service.GENERIC,
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"service_payload_url": orjson.dumps("http://foo.bar2.com").decode(),
"service_interface": Service.SLACK,
}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
service_interface = orjson.loads(result.content)["service_interface"]
self.assertEqual(service_interface, Service.SLACK)
service_payload_url = orjson.loads(result.content)["service_payload_url"]
self.assertEqual(service_payload_url, "http://foo.bar2.com")
@patch("zulip_bots.bots.giphy.giphy.GiphyHandler.validate_config")
def test_patch_bot_config_data(self, mock_validate_config: MagicMock) -> None:
self.create_test_bot(
"test",
self.example_user("hamlet"),
full_name="Bot with config data",
bot_type=UserProfile.EMBEDDED_BOT,
service_name="giphy",
config_data=orjson.dumps({"key": "12345678"}).decode(),
)
bot_info = {"config_data": orjson.dumps({"key": "87654321"}).decode()}
email = "[email protected]"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
config_data = orjson.loads(result.content)["config_data"]
self.assertEqual(config_data, orjson.loads(bot_info["config_data"]))
def test_outgoing_webhook_invalid_interface(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "Outgoing Webhook test bot",
"short_name": "outgoingservicebot",
"bot_type": UserProfile.OUTGOING_WEBHOOK_BOT,
"payload_url": orjson.dumps("http://127.0.0.1:5002").decode(),
"interface_type": -1,
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Invalid interface type")
bot_info["interface_type"] = Service.GENERIC
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
def test_create_outgoing_webhook_bot(self, **extras: Any) -> None:
self.login("hamlet")
bot_info = {
"full_name": "Outgoing Webhook test bot",
"short_name": "outgoingservicebot",
"bot_type": UserProfile.OUTGOING_WEBHOOK_BOT,
"payload_url": orjson.dumps("http://127.0.0.1:5002").decode(),
}
bot_info.update(extras)
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
bot = get_user(bot_email, bot_realm)
[service] = get_bot_services(bot.id)
self.assertEqual(service.name, "outgoingservicebot")
self.assertEqual(service.base_url, "http://127.0.0.1:5002")
self.assertEqual(service.user_profile, bot)
# Invalid URL test case.
bot_info["payload_url"] = orjson.dumps("http://127.0.0.:5002").decode()
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "payload_url is not a URL")
def test_get_bot_handler(self) -> None:
# Test for valid service.
test_service_name = "converter"
test_bot_handler = get_bot_handler(test_service_name)
self.assertEqual(
str(type(test_bot_handler)),
"<class 'zulip_bots.bots.converter.converter.ConverterHandler'>",
)
# Test for invalid service.
test_service_name = "incorrect_bot_service_foo"
test_bot_handler = get_bot_handler(test_service_name)
self.assertEqual(test_bot_handler, None)
def test_if_each_embedded_bot_service_exists(self) -> None:
for embedded_bot in EMBEDDED_BOTS:
self.assertIsNotNone(get_bot_handler(embedded_bot.name))
def test_outgoing_webhook_interface_type(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "Outgoing Webhook test bot",
"short_name": "outgoingservicebot",
"bot_type": UserProfile.OUTGOING_WEBHOOK_BOT,
"payload_url": orjson.dumps("http://127.0.0.1:5002").decode(),
"interface_type": -1,
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Invalid interface type")
bot_info["interface_type"] = Service.GENERIC
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
def
|
(self, **extras: Any) -> None:
with self.settings(EMBEDDED_BOTS_ENABLED=False):
self.fail_to_create_test_bot(
short_name="embeddedservicebot",
user_profile=self.example_user("hamlet"),
bot_type=UserProfile.EMBEDDED_BOT,
service_name="followup",
config_data=orjson.dumps({"key": "value"}).decode(),
assert_json_error_msg="Embedded bots are not enabled.",
**extras,
)
def test_create_embedded_bot(self, **extras: Any) -> None:
bot_config_info = {"key": "value"}
self.create_test_bot(
short_name="embeddedservicebot",
user_profile=self.example_user("hamlet"),
bot_type=UserProfile.EMBEDDED_BOT,
service_name="followup",
config_data=orjson.dumps(bot_config_info).decode(),
**extras,
)
bot_email = "[email protected]"
bot_realm = get_realm("zulip")
bot = get_user(bot_email, bot_realm)
[service] = get_bot_services(bot.id)
bot_config = get_bot_config(bot)
self.assertEqual(bot_config, bot_config_info)
self.assertEqual(service.name, "followup")
self.assertEqual(service.user_profile, bot)
def test_create_embedded_bot_with_incorrect_service_name(self, **extras: Any) -> None:
self.fail_to_create_test_bot(
short_name="embeddedservicebot",
user_profile=self.example_user("hamlet"),
bot_type=UserProfile.EMBEDDED_BOT,
service_name="not_existing_service",
assert_json_error_msg="Invalid embedded bot name.",
**extras,
)
def test_create_embedded_bot_with_invalid_config_value(self, **extras: Any) -> None:
self.fail_to_create_test_bot(
short_name="embeddedservicebot",
user_profile=self.example_user("hamlet"),
service_name="followup",
config_data=orjson.dumps({"invalid": ["config", "value"]}).decode(),
assert_json_error_msg="config_data contains a value that is not a string",
**extras,
)
# Test creating an embedded bot with an incorrect config value
incorrect_bot_config_info = {"key": "incorrect key"}
bot_info = {
"full_name": "Embedded test bot",
"short_name": "embeddedservicebot3",
"bot_type": UserProfile.EMBEDDED_BOT,
"service_name": "giphy",
"config_data": orjson.dumps(incorrect_bot_config_info).decode(),
}
bot_info.update(extras)
with patch(
"zulip_bots.bots.giphy.giphy.GiphyHandler.validate_config",
side_effect=ConfigValidationError,
):
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Invalid configuration data!")
def test_is_cross_realm_bot_email(self) -> None:
self.assertTrue(is_cross_realm_bot_email("[email protected]"))
self.assertTrue(is_cross_realm_bot_email("[email protected]"))
self.assertFalse(is_cross_realm_bot_email("[email protected]"))
with self.settings(CROSS_REALM_BOT_EMAILS={"[email protected]"}):
self.assertTrue(is_cross_realm_bot_email("[email protected]"))
self.assertFalse(is_cross_realm_bot_email("[email protected]"))
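# The tests below patch WEBHOOK_INTEGRATIONS with a sample Stripe config so
# that incoming webhook bots can be created with service_name="stripe".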
@patch("zerver.lib.integrations.WEBHOOK_INTEGRATIONS", stripe_sample_config_options)
def test_create_incoming_webhook_bot_with_service_name_and_with_keys(self) -> None:
self.login("hamlet")
bot_metadata = {
"full_name": "My Stripe Bot",
"short_name": "my-stripe",
"bot_type": UserProfile.INCOMING_WEBHOOK_BOT,
"service_name": "stripe",
"config_data": orjson.dumps({"stripe_api_key": "sample-api-key"}).decode(),
}
self.create_bot(**bot_metadata)
new_bot = UserProfile.objects.get(full_name="My Stripe Bot")
config_data = get_bot_config(new_bot)
self.assertEqual(
config_data, {"integration_id": "stripe", "stripe_api_key": "sample-api-key"}
)
@patch("zerver.lib.integrations.WEBHOOK_INTEGRATIONS", stripe_sample_config_options)
def test_create_incoming_webhook_bot_with_service_name_incorrect_keys(self) -> None:
self.login("hamlet")
bot_metadata = {
"full_name": "My Stripe Bot",
"short_name": "my-stripe",
"bot_type": UserProfile.INCOMING_WEBHOOK_BOT,
"service_name": "stripe",
"config_data": orjson.dumps({"stripe_api_key": "_invalid_key"}).decode(),
}
response = self.client_post("/json/bots", bot_metadata)
self.assertEqual(response.status_code, 400)
expected_error_message = 'Invalid stripe_api_key value _invalid_key (stripe_api_key starts with a "_" and is hence invalid.)'
self.assertEqual(orjson.loads(response.content)["msg"], expected_error_message)
with self.assertRaises(UserProfile.DoesNotExist):
UserProfile.objects.get(full_name="My Stripe Bot")
@patch("zerver.lib.integrations.WEBHOOK_INTEGRATIONS", stripe_sample_config_options)
def test_create_incoming_webhook_bot_with_service_name_without_keys(self) -> None:
self.login("hamlet")
bot_metadata = {
"full_name": "My Stripe Bot",
"short_name": "my-stripe",
"bot_type": UserProfile.INCOMING_WEBHOOK_BOT,
"service_name": "stripe",
}
response = self.client_post("/json/bots", bot_metadata)
self.assertEqual(response.status_code, 400)
expected_error_message = "Missing configuration parameters: {'stripe_api_key'}"
self.assertEqual(orjson.loads(response.content)["msg"], expected_error_message)
with self.assertRaises(UserProfile.DoesNotExist):
UserProfile.objects.get(full_name="My Stripe Bot")
@patch("zerver.lib.integrations.WEBHOOK_INTEGRATIONS", stripe_sample_config_options)
def test_create_incoming_webhook_bot_without_service_name(self) -> None:
self.login("hamlet")
bot_metadata = {
"full_name": "My Stripe Bot",
"short_name": "my-stripe",
"bot_type": UserProfile.INCOMING_WEBHOOK_BOT,
}
self.create_bot(**bot_metadata)
new_bot = UserProfile.objects.get(full_name="My Stripe Bot")
with self.assertRaises(ConfigError):
get_bot_config(new_bot)
@patch("zerver.lib.integrations.WEBHOOK_INTEGRATIONS", stripe_sample_config_options)
def test_create_incoming_webhook_bot_with_incorrect_service_name(self) -> None:
self.login("hamlet")
bot_metadata = {
"full_name": "My Stripe Bot",
"short_name": "my-stripe",
"bot_type": UserProfile.INCOMING_WEBHOOK_BOT,
"service_name": "stripes",
}
response = self.client_post("/json/bots", bot_metadata)
self.assertEqual(response.status_code, 400)
expected_error_message = "Invalid integration 'stripes'."
self.assertEqual(orjson.loads(response.content)["msg"], expected_error_message)
with self.assertRaises(UserProfile.DoesNotExist):
UserProfile.objects.get(full_name="My Stripe Bot")
|
test_create_embedded_bot_with_disabled_embedded_bots
|
setup.py
|
# coding=utf-8
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
setup(
name='pubplot',
version='0.2.4',
description='Seamless LaTeX and Matplotlib integration for publication plots',
long_description=readme,
packages=find_packages(),
url='',
download_url='https://github.com/hsadok/pubplot',
license='ISC',
author='Hugo Sadok',
author_email='[email protected]',
keywords=['matplotlib', 'latex', 'pgf'],
include_package_data=True,
install_requires=[
'matplotlib',
'pylatex',
'numpy'
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Visualization',
],
|
)
|
|
not-found-error.spec.ts
|
import { NotFoundError } from './not-found-error';
|
expect(new NotFoundError()).toBeTruthy();
});
});
|
describe('NotFoundError', () => {
it('should create an instance', () => {
|
PickFolder.go
|
package main
import (
"github.com/leaanthony/go-common-file-dialog/cfd"
"log"
)
|
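// Configure the folder picker; Role gives the dialog a persistent identity
// that Windows uses to remember per-dialog state such as the last folder.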
pickFolderDialog, err := cfd.NewSelectFolderDialog(cfd.DialogConfig{
Title: "Pick Folder",
Role: "PickFolderExample",
})
if err != nil {
log.Fatal(err)
}
if err := pickFolderDialog.Show(); err != nil {
log.Fatal(err)
}
result, err := pickFolderDialog.GetResult()
if err == cfd.ErrorCancelled {
log.Fatal("Dialog was cancelled by the user.")
} else if err != nil {
log.Fatal(err)
}
log.Printf("Chosen folder: %s\n", result)
}
|
func main() {
|
test_views.py
|
import datetime
import itertools
import os
import re
from importlib import import_module
from unittest import mock
from urllib.parse import quote
from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import (
BACKEND_SESSION_KEY, REDIRECT_FIELD_NAME, SESSION_KEY,
)
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import Permission, User
from django.contrib.auth.views import (
INTERNAL_RESET_SESSION_TOKEN, LoginView, logout_then_login,
redirect_to_login,
)
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.db import connection
from django.http import HttpRequest
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.test import Client, TestCase, override_settings
from django.test.client import RedirectCycleError
from django.urls import NoReverseMatch, reverse, reverse_lazy
from django.utils.http import urlsafe_base64_encode
from .client import PasswordResetConfirmClient
from .models import CustomUser, UUIDUser
from .settings import AUTH_TEMPLATES
@override_settings(
LANGUAGES=[('en', 'English')],
LANGUAGE_CODE='en',
TEMPLATES=AUTH_TEMPLATES,
ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the following test cases.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password', email='[email protected]')
cls.u3 = User.objects.create_user(username='staff', password='password', email='[email protected]')
def login(self, username='testclient', password='password'):
response = self.client.post('/login/', {
'username': username,
'password': password,
})
self.assertIn(SESSION_KEY, self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertNotIn(SESSION_KEY, self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(str(error), form_errors)
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
with self.subTest(name=name):
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
class PasswordResetTest(AuthViewsTestCase):
def setUp(self):
self.client = PasswordResetConfirmClient()
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# An optional multipart text/html email has been added. Make sure the
# original, plain-text-only default behavior is 100% the same.
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_extra_email_context(self):
"""
extra_email_context should be available in the email template context.
"""
response = self.client.post(
'/password_reset_extra_email_context/',
{'email': '[email protected]'},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Email email context: "Hello!"', mail.outbox[0].body)
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertNotIn('<html>', message.get_payload(0).get_payload())
self.assertIn('<html>', message.get_payload(1).get_payload())
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("[email protected]", mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
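# For example, in "www.example:[email protected]" everything before the
# "@" is treated as userinfo, making "dev.example.com" the host.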
with self.assertLogs('django.security.DisallowedHost', 'ERROR'):
response = self.client.post(
'/password_reset/',
{'email': '[email protected]'},
HTTP_HOST='www.example:[email protected]'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with self.assertLogs('django.security.DisallowedHost', 'ERROR'):
response = self.client.post(
'/admin_password_reset/',
{'email': '[email protected]'},
HTTP_HOST='www.example:[email protected]'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
def _test_confirm_start(self):
# Start by creating the email
self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
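# Pull the full reset URL and its path component out of the email body.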
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_user(self):
# A nonexistent user returns a 200 response, not a 404.
response = self.client.get('/reset/123456/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_overflow_user(self):
# A base36 user id that overflows int returns a 200 response.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0" * 4) + path[-1]
self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': ' anewpassword',
})
# Check the password has not been changed
u = User.objects.get(email='[email protected]')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_invalid_hash(self):
"""A POST with an invalid token is rejected."""
u = User.objects.get(email='[email protected]')
original_password = u.password
url, path = self._test_confirm_start()
path_parts = path.split('-')
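# Replace the token's hash segment with zeros so validation fails.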
path_parts[-1] = ("0") * 20 + '/'
path = '-'.join(path_parts)
response = self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': 'anewpassword',
})
self.assertIs(response.context['validlink'], False)
u.refresh_from_db()
self.assertEqual(original_password, u.password) # password hasn't changed
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
# Check the password has been changed
u = User.objects.get(email='[email protected]')
self.assertTrue(u.check_password("anewpassword"))
# The reset token is deleted from the session.
self.assertNotIn(INTERNAL_RESET_SESSION_TOKEN, self.client.session)
# Check we can't use the link again
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'x'})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_reset_redirect_default(self):
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertRedirects(response, '/password_reset/done/', fetch_redirect_response=False)
def test_reset_custom_redirect(self):
response = self.client.post('/password_reset/custom_redirect/', {'email': '[email protected]'})
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
def test_reset_custom_redirect_named(self):
response = self.client.post('/password_reset/custom_redirect/named/', {'email': '[email protected]'})
self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False)
def test_confirm_redirect_default(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False)
def test_confirm_redirect_custom(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/')
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
def test_confirm_redirect_custom_named(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/named/')
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False)
def test_confirm_login_post_reset(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/post_reset_login/')
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False)
self.assertIn(SESSION_KEY, self.client.session)
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'django.contrib.auth.backends.AllowAllUsersModelBackend',
]
)
def test_confirm_login_post_reset_custom_backend(self):
# This backend is specified in the URL pattern.
backend = 'django.contrib.auth.backends.AllowAllUsersModelBackend'
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/post_reset_login_custom_backend/')
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False)
self.assertIn(SESSION_KEY, self.client.session)
self.assertEqual(self.client.session[BACKEND_SESSION_KEY], backend)
def test_confirm_login_post_reset_already_logged_in(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/post_reset_login/')
self.login()
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False)
self.assertIn(SESSION_KEY, self.client.session)
def test_confirm_display_user_from_form(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# The password_reset_confirm() view passes the user object to the
        # ``SetPasswordForm``, even on GET requests (#16919). For this test,
        # ``{{ form.user }}`` is rendered in the template
# registration/password_reset_confirm.html.
username = User.objects.get(email='[email protected]').username
self.assertContains(response, "Hello, %s." % username)
# However, the view should NOT pass any user object on a form if the
# password reset link was invalid.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "Hello, .")
def test_confirm_link_redirects_to_set_password_page(self):
url, path = self._test_confirm_start()
# Don't use PasswordResetConfirmClient (self.client) here which
# automatically fetches the redirect page.
client = Client()
response = client.get(path)
token = response.resolver_match.kwargs['token']
uuidb64 = response.resolver_match.kwargs['uidb64']
self.assertRedirects(response, '/reset/%s/set-password/' % uuidb64)
self.assertEqual(client.session['_password_reset_token'], token)
def test_invalid_link_if_going_directly_to_the_final_reset_password_url(self):
url, path = self._test_confirm_start()
_, uuidb64, _ = path.strip('/').split('/')
response = Client().get('/reset/%s/set-password/' % uuidb64)
self.assertContains(response, 'The password reset link was invalid')
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
user_email = '[email protected]'
@classmethod
def setUpTestData(cls):
cls.u1 = CustomUser.custom_objects.create(
email='[email protected]',
date_of_birth=datetime.date(1976, 11, 8),
)
cls.u1.set_password('password')
cls.u1.save()
def setUp(self):
self.client = PasswordResetConfirmClient()
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': self.user_email})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid_custom_user(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
# then submit a new password
response = self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': 'anewpassword',
})
self.assertRedirects(response, '/reset/done/')
@override_settings(AUTH_USER_MODEL='auth_tests.UUIDUser')
class UUIDUserPasswordResetTest(CustomUserPasswordResetTest):
def _test_confirm_start(self):
# instead of fixture
UUIDUser.objects.create_user(
email=self.user_email,
username='foo',
password='foo',
)
return super()._test_confirm_start()
def test_confirm_invalid_uuid(self):
"""A uidb64 that decodes to a non-UUID doesn't crash."""
_, path = self._test_confirm_start()
invalid_uidb64 = urlsafe_base64_encode('INVALID_UUID'.encode())
first, _uuidb64_, second = path.strip('/').split('/')
response = self.client.get('/' + '/'.join((first, invalid_uidb64, second)) + '/')
self.assertContains(response, 'The password reset link was invalid')
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self):
response = self.client.post('/login/', {
'username': 'testclient',
'password': 'password',
})
self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})
def logout(self):
self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_password_change_succeeds(self):
self.login()
self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.fail_login()
self.login(password='password1')
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertRedirects(response, '/password_change/done/', fetch_redirect_response=False)
@override_settings(LOGIN_URL='/login/')
def test_password_change_done_fails(self):
response = self.client.get('/password_change/done/')
self.assertRedirects(response, '/login/?next=/password_change/done/', fetch_redirect_response=False)
def test_password_change_redirect_default(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertRedirects(response, '/password_change/done/', fetch_redirect_response=False)
def test_password_change_redirect_custom(self):
self.login()
response = self.client.post('/password_change/custom/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
def test_password_change_redirect_custom_named(self):
self.login()
response = self.client.post('/password_change/custom/named/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False)
class SessionAuthenticationTests(AuthViewsTestCase):
def test_user_password_change_updates_session(self):
"""
#21649 - Ensure contrib.auth.views.password_change updates the user's
session auth hash after a password change so the session isn't logged out.
"""
self.login()
original_session_key = self.client.session.session_key
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
# if the hash isn't updated, retrieving the redirection page will fail.
self.assertRedirects(response, '/password_change/done/')
# The session key is rotated.
self.assertNotEqual(original_session_key, self.client.session.session_key)
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
if apps.is_installed('django.contrib.sites'):
Site = apps.get_model('sites.Site')
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertIsInstance(response.context['form'], AuthenticationForm)
def test_security_check(self):
login_url = reverse('login')
# These URLs should not pass the security check.
bad_urls = (
'http://example.com',
'http:///example.com',
'https://example.com',
'ftp://example.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")',
)
for bad_url in bad_urls:
with self.subTest(bad_url=bad_url):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': quote(bad_url),
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': 'password',
})
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url, '%s should be blocked' % bad_url)
# These URLs should pass the security check.
good_urls = (
'/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://example.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/',
)
for good_url in good_urls:
with self.subTest(good_url=good_url):
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': quote(good_url),
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': 'password',
})
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, '%s should be allowed' % good_url)
def test_security_check_https(self):
login_url = reverse('login')
non_https_next_url = 'http://testserver/path'
not_secured_url = '%(url)s?%(next)s=%(next_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'next_url': quote(non_https_next_url),
}
post_data = {
'username': 'testclient',
'password': 'password',
}
response = self.client.post(not_secured_url, post_data, secure=True)
self.assertEqual(response.status_code, 302)
self.assertNotEqual(response.url, non_https_next_url)
self.assertEqual(response.url, settings.LOGIN_REDIRECT_URL)
def test_login_form_contains_request(self):
# The custom authentication form for this login requires a request to
# initialize it.
response = self.client.post('/custom_request_auth_login/', {
'username': 'testclient',
'password': 'password',
})
# The login was successful.
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False)
def test_login_csrf_rotate(self):
"""
Makes sure that a login rotates the currently-used CSRF token.
"""
# Do a GET to establish a CSRF token
# The test client isn't used here as it's a test for middleware.
req = HttpRequest()
CsrfViewMiddleware().process_view(req, LoginView.as_view(), (), {})
# get_token() triggers CSRF token inclusion in the response
get_token(req)
resp = LoginView.as_view()(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token1 = csrf_cookie.coded_value
# Prepare the POST request
req = HttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
req.method = "POST"
req.POST = {'username': 'testclient', 'password': 'password', 'csrfmiddlewaretoken': token1}
# Use POST request to log in
SessionMiddleware().process_request(req)
CsrfViewMiddleware().process_view(req, LoginView.as_view(), (), {})
req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view
req.META["SERVER_PORT"] = 80
resp = LoginView.as_view()(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token2 = csrf_cookie.coded_value
# Check the CSRF token switched
self.assertNotEqual(token1, token2)
def test_session_key_flushed_on_login(self):
"""
To avoid reusing another user's session, ensure a new, empty session is
created if the existing session corresponds to a different authenticated
user.
"""
self.login()
original_session_key = self.client.session.session_key
self.login(username='staff')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_session_key_flushed_on_login_after_password_change(self):
"""
As above, but same user logging in after a password change.
"""
self.login()
original_session_key = self.client.session.session_key
# If no password change, session key should not be flushed.
self.login()
self.assertEqual(original_session_key, self.client.session.session_key)
user = User.objects.get(username='testclient')
user.set_password('foobar')
user.save()
self.login(password='foobar')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_login_session_without_hash_session_key(self):
"""
Session without django.contrib.auth.HASH_SESSION_KEY should login
without an exception.
"""
user = User.objects.get(username='testclient')
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[SESSION_KEY] = user.id
session.save()
original_session_key = session.session_key
self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
self.login()
self.assertNotEqual(original_session_key, self.client.session.session_key)
class LoginURLSettings(AuthViewsTestCase):
"""Tests for settings.LOGIN_URL."""
def assertLoginURLEquals(self, url):
response = self.client.get('/login_required/')
self.assertRedirects(response, url, fetch_redirect_response=False)
@override_settings(LOGIN_URL='/login/')
def test_standard_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='login')
def test_named_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='http://remote.example.com/login')
def test_remote_login_url(self):
quoted_next = quote('http://testserver/login_required/')
expected = 'http://remote.example.com/login?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='https:///login/')
def test_https_login_url(self):
quoted_next = quote('http://testserver/login_required/')
expected = 'https:///login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='/login/?pretty=1')
def test_login_url_with_querystring(self):
self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/')
@override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
def test_remote_login_url_with_next_querystring(self):
quoted_next = quote('http://testserver/login_required/')
expected = 'http://remote.example.com/login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_lazy_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
class LoginRedirectUrlTest(AuthViewsTestCase):
"""Tests for settings.LOGIN_REDIRECT_URL."""
def assertLoginRedirectURLEqual(self, url):
response = self.login()
self.assertRedirects(response, url, fetch_redirect_response=False)
def test_default(self):
self.assertLoginRedirectURLEqual('/accounts/profile/')
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_custom(self):
self.assertLoginRedirectURLEqual('/custom/')
@override_settings(LOGIN_REDIRECT_URL='password_reset')
def test_named(self):
self.assertLoginRedirectURLEqual('/password_reset/')
@override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
def test_remote(self):
self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
class RedirectToLoginTests(AuthViewsTestCase):
"""Tests for the redirect_to_login view"""
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy(self):
login_redirect_response = redirect_to_login(next='/else/where/')
expected = '/login/?next=/else/where/'
self.assertEqual(expected, login_redirect_response.url)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy_and_unicode(self):
login_redirect_response = redirect_to_login(next='/else/where/झ/')
expected = '/login/?next=/else/where/%E0%A4%9D/'
self.assertEqual(expected, login_redirect_response.url)
class LogoutThenLoginTests(AuthViewsTestCase):
"""Tests for the logout_then_login view"""
def confirm_logged_out(self):
self.assertNotIn(SESSION_KEY, self.client.session)
@override_settings(LOGIN_URL='/login/')
def test_default_logout_then_login(self):
self.login()
req = HttpRequest()
req.method = 'GET'
req.session = self.client.session
response = logout_then_login(req)
self.confirm_logged_out()
self.assertRedirects(response, '/login/', fetch_redirect_response=False)
def test_logout_then_login_with_custom_login(self):
self.login()
req = HttpRequest()
req.method = 'GET'
req.session = self.client.session
response = logout_then_login(req, login_url='/custom/')
self.confirm_logged_out()
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
class LoginRedirectAuthenticatedUser(AuthViewsTestCase):
dont_redirect_url = '/login/redirect_authenticated_user_default/'
do_redirect_url = '/login/redirect_authenticated_user/'
def test_default(self):
"""Stay on the login page by default."""
self.login()
response = self.client.get(self.dont_redirect_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['next'], '')
def test_guest(self):
"""If not logged in, stay on the same page."""
response = self.client.get(self.do_redirect_url)
self.assertEqual(response.status_code, 200)
def test_redirect(self):
"""If logged in, go to default redirected URL."""
self.login()
response = self.client.get(self.do_redirect_url)
self.assertRedirects(response, '/accounts/profile/', fetch_redirect_response=False)
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_redirect_url(self):
"""If logged in, go to custom redirected URL."""
self.login()
response = self.client.get(self.do_redirect_url)
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
def test_redirect_param(self):
"""If next is specified as a GET parameter, go there."""
self.login()
url = self.do_redirect_url + '?next=/custom_next/'
response = self.client.get(url)
self.assertRedirects(response, '/custom_next/', fetch_redirect_response=False)
def test_redirect_loop(self):
"""
Detect a redirect loop if LOGIN_REDIRECT_URL is not correctly set,
with and without custom parameters.
"""
self.login()
msg = (
"Redirection loop for authenticated user detected. Check that "
"your LOGIN_REDIRECT_URL doesn't point to a login page"
)
with self.settings(LOGIN_REDIRECT_URL=self.do_redirect_url):
with self.assertRaisesMessage(ValueError, msg):
self.client.get(self.do_redirect_url)
url = self.do_redirect_url + '?bla=2'
with self.assertRaisesMessage(ValueError, msg):
self.client.get(url)
def test_permission_required_not_logged_in(self):
# Not logged in ...
with self.settings(LOGIN_URL=self.do_redirect_url):
# redirected to login.
response = self.client.get('/permission_required_redirect/', follow=True)
self.assertEqual(response.status_code, 200)
# exception raised.
response = self.client.get('/permission_required_exception/', follow=True)
self.assertEqual(response.status_code, 403)
# redirected to login.
response = self.client.get('/login_and_permission_required_exception/', follow=True)
self.assertEqual(response.status_code, 200)
def test_permission_required_logged_in(self):
self.login()
# Already logged in...
with self.settings(LOGIN_URL=self.do_redirect_url):
# redirect loop encountered.
with self.assertRaisesMessage(RedirectCycleError, 'Redirect loop detected.'):
self.client.get('/permission_required_redirect/', follow=True)
# exception raised.
response = self.client.get('/permission_required_exception/', follow=True)
self.assertEqual(response.status_code, 403)
# exception raised.
response = self.client.get('/login_and_permission_required_exception/', follow=True)
self.assertEqual(response.status_code, 403)
class LoginSuccessURLAllowedHostsTest(AuthViewsTestCase):
def test_success_url_allowed_hosts_same_host(self):
response = self.client.post('/login/allowed_hosts/', {
'username': 'testclient',
'password': 'password',
'next': 'https://testserver/home',
})
self.assertIn(SESSION_KEY, self.client.session)
self.assertRedirects(response, 'https://testserver/home', fetch_redirect_response=False)
def test_success_url_allowed_hosts_safe_host(self):
response = self.client.post('/login/allowed_hosts/', {
'username': 'testclient',
'password': 'password',
'next': 'https://otherserver/home',
})
self.assertIn(SESSION_KEY, self.client.session)
self.assertRedirects(response, 'https://otherserver/home', fetch_redirect_response=False)
def test_success_url_allowed_hosts_unsafe_host(self):
response = self.client.post('/login/allowed_hosts/', {
'username': 'testclient',
'password': 'password',
'next': 'https://evil/home',
})
self.assertIn(SESSION_KEY, self.client.session)
self.assertRedirects(response, '/accounts/profile/', fetch_redirect_response=False)
class LogoutTest(AuthViewsTestCase):
def confirm_logged_out(self):
self.assertNotIn(SESSION_KEY, self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_logout_with_post(self):
self.login()
response = self.client.post('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertIn('site', response.context)
def test_logout_doesnt_cache(self):
"""
The logout() view should send "no-cache" headers for reasons described
in #25490.
"""
response = self.client.get('/logout/')
self.assertIn('no-store', response['Cache-Control'])
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertRedirects(response, '/somewhere/', fetch_redirect_response=False)
response = self.client.get('/logout/next_page/?next=/login/')
self.assertRedirects(response, '/login/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertRedirects(response, '/somewhere/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertRedirects(response, '/login/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertRedirects(response, '/somewhere/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_logout_with_named_redirect(self):
"Logout resolves names or URLs passed as next_page."
self.login()
response = self.client.get('/logout/next_page/named/')
self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_success_url_allowed_hosts_same_host(self):
self.login()
response = self.client.get('/logout/allowed_hosts/?next=https://testserver/')
self.assertRedirects(response, 'https://testserver/', fetch_redirect_response=False)
self.confirm_logged_out()
def te
|
elf):
self.login()
response = self.client.get('/logout/allowed_hosts/?next=https://otherserver/')
self.assertRedirects(response, 'https://otherserver/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_success_url_allowed_hosts_unsafe_host(self):
self.login()
response = self.client.get('/logout/allowed_hosts/?next=https://evil/')
self.assertRedirects(response, '/logout/allowed_hosts/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_security_check(self):
logout_url = reverse('logout')
# These URLs should not pass the security check.
bad_urls = (
'http://example.com',
'http:///example.com',
'https://example.com',
'ftp://example.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")',
)
for bad_url in bad_urls:
with self.subTest(bad_url=bad_url):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': quote(bad_url),
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url, '%s should be blocked' % bad_url)
self.confirm_logged_out()
# These URLs should pass the security check.
good_urls = (
'/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://example.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/',
)
for good_url in good_urls:
with self.subTest(good_url=good_url):
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': quote(good_url),
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, '%s should be allowed' % good_url)
self.confirm_logged_out()
def test_security_check_https(self):
logout_url = reverse('logout')
non_https_next_url = 'http://testserver/'
url = '%(url)s?%(next)s=%(next_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'next_url': quote(non_https_next_url),
}
self.login()
response = self.client.get(url, secure=True)
self.assertRedirects(response, logout_url, fetch_redirect_response=False)
self.confirm_logged_out()
def test_logout_preserve_language(self):
"""Language is preserved after logout."""
self.login()
self.client.post('/setlang/', {'language': 'pl'})
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, 'pl')
self.client.get('/logout/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, 'pl')
@override_settings(LOGOUT_REDIRECT_URL='/custom/')
def test_logout_redirect_url_setting(self):
self.login()
response = self.client.get('/logout/')
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
@override_settings(LOGOUT_REDIRECT_URL='logout')
def test_logout_redirect_url_named_setting(self):
self.login()
response = self.client.get('/logout/')
self.assertRedirects(response, '/logout/', fetch_redirect_response=False)
def get_perm(Model, perm):
ct = ContentType.objects.get_for_model(Model)
return Permission.objects.get(content_type=ct, codename=perm)
# Redirect in test_user_change_password will fail if session auth hash
# isn't updated after password change (#21649)
@override_settings(ROOT_URLCONF='auth_tests.urls_admin')
class ChangelistTests(AuthViewsTestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
def setUp(self):
self.login()
# Get the latest last_login value.
self.admin = User.objects.get(pk=self.u1.pk)
def get_user_data(self, user):
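        # Assemble a complete POST payload for the admin change form; the
        # split datetime widgets submit date and time as separate _0/_1 keys,
        # and the initial-* values let the form detect unchanged fields.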
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with self.assertLogs('django.security.DisallowedModelAdminLookup', 'ERROR'):
response = self.client.get(reverse('auth_test_admin:auth_user_changelist') + '?password__startswith=sha1$')
self.assertEqual(response.status_code, 400)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
data
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.get_change_message(), 'Changed email.')
def test_user_not_change(self):
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
self.get_user_data(self.admin)
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.get_change_message(), 'No fields changed.')
def test_user_change_password(self):
user_change_url = reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,))
password_change_url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,))
response = self.client.get(user_change_url)
# Test the link inside password field help_text.
rel_link = re.search(
r'you can change the password using <a href="([^"]*)">this form</a>',
response.content.decode()
).groups()[0]
self.assertEqual(
os.path.normpath(user_change_url + rel_link),
os.path.normpath(password_change_url)
)
response = self.client.post(
password_change_url,
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
self.assertEqual(row.get_change_message(), 'Changed password.')
self.logout()
self.login(password='password1')
def test_user_change_different_user_password(self):
u = User.objects.get(email='[email protected]')
response = self.client.post(
reverse('auth_test_admin:auth_user_password_change', args=(u.pk,)),
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_change', args=(u.pk,)))
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, self.admin.pk)
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.get_change_message(), 'Changed password.')
def test_password_change_bad_url(self):
response = self.client.get(reverse('auth_test_admin:auth_user_password_change', args=('foobar',)))
self.assertEqual(response.status_code, 404)
@mock.patch('django.contrib.auth.admin.UserAdmin.has_change_permission')
def test_user_change_password_passes_user_to_has_change_permission(self, has_change_permission):
url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,))
self.client.post(url, {'password1': 'password1', 'password2': 'password1'})
(_request, user), _kwargs = has_change_permission.call_args
self.assertEqual(user.pk, self.admin.pk)
def test_view_user_password_is_readonly(self):
u = User.objects.get(username='testclient')
u.is_superuser = False
u.save()
original_password = u.password
u.user_permissions.add(get_perm(User, 'view_user'))
        response = self.client.get(reverse('auth_test_admin:auth_user_change', args=(u.pk,)))
        algo, salt, hash_string = u.password.split('$')
self.assertContains(response, '<div class="readonly">testclient</div>')
# ReadOnlyPasswordHashWidget is used to render the field.
self.assertContains(
response,
'<strong>algorithm</strong>: %s\n\n'
'<strong>salt</strong>: %s**********\n\n'
'<strong>hash</strong>: %s**************************\n\n' % (
algo, salt[:2], hash_string[:6],
),
html=True,
)
# Value in POST data is ignored.
data = self.get_user_data(u)
data['password'] = 'shouldnotchange'
change_url = reverse('auth_test_admin:auth_user_change', args=(u.pk,))
response = self.client.post(change_url, data)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
u.refresh_from_db()
self.assertEqual(u.password, original_password)
@override_settings(
AUTH_USER_MODEL='auth_tests.UUIDUser',
ROOT_URLCONF='auth_tests.urls_custom_user_admin',
)
class UUIDUserTests(TestCase):
def test_admin_password_change(self):
u = UUIDUser.objects.create_superuser(username='uuid', email='[email protected]', password='test')
self.assertTrue(self.client.login(username='uuid', password='test'))
user_change_url = reverse('custom_user_admin:auth_tests_uuiduser_change', args=(u.pk,))
response = self.client.get(user_change_url)
self.assertEqual(response.status_code, 200)
password_change_url = reverse('custom_user_admin:auth_user_password_change', args=(u.pk,))
response = self.client.get(password_change_url)
self.assertEqual(response.status_code, 200)
# A LogEntry is created with pk=1 which breaks a FK constraint on MySQL
with connection.constraint_checks_disabled():
response = self.client.post(password_change_url, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, 1) # hardcoded in CustomUserAdmin.log_change()
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.get_change_message(), 'Changed password.')
# The LogEntry.user column isn't altered to a UUID type so it's set to
# an integer manually in CustomUserAdmin to avoid an error. To avoid a
# constraint error, delete the entry before constraints are checked
# after the test.
row.delete()
|
st_success_url_allowed_hosts_safe_host(s
|
lab02.part2.main.js
|
/*
Import the ip-cidr npm package.
See https://www.npmjs.com/package/ip-cidr
The ip-cidr package exports a class.
Assign the class definition to variable IPCIDR.
*/
const IPCIDR = require('ip-cidr');
/**
* Calculate and return the first host IP address from a CIDR subnet.
* @param {string} cidrStr - The IPv4 subnet expressed
* in CIDR format.
* @param {callback} callback - A callback function.
 * @return {object} (ipv4,ipv6) - An object having IPv4 and IPv6 addresses.
*/
function getFirstIpAddress(cidrStr, callback) {
// Initialize return arguments for callback
  let ipv4v6 = {
    ipv4: null,
    ipv6: null
  };
let firstIpAddress = null;
let callbackError = null;
// Instantiate an object from the imported class and assign the instance to variable cidr.
const cidr = new IPCIDR(cidrStr);
// Initialize options for the toArray() method.
// We want an offset of one and a limit of one.
// This returns an array with a single element, the first host address from the subnet.
const options = {
from: 1,
limit: 1
};
// Use the object's isValid() method to verify the passed CIDR.
if (!cidr.isValid()) {
// If the passed CIDR is invalid, set an error message.
callbackError = 'Error: Invalid CIDR passed to getFirstIpAddress.';
} else {
// If the passed CIDR is valid, call the object's toArray() method.
    // Notice the destructuring assignment syntax to get the value of the first array's element.
[firstIpAddress] = cidr.toArray(options);
ipv4v6.ipv4 = firstIpAddress;
ipv4v6.ipv6 = getIpv4MappedIpv6Address(firstIpAddress);
}
// Call the passed callback function.
// Node.js convention is to pass error data as the first argument to a callback.
// The IAP convention is to pass returned data as the first argument and error
// data as the second argument to the callback function.
return callback(ipv4v6, callbackError);
}
/**
* Calculates an IPv4-mapped IPv6 address.
* @param {string} ipv4 - An IPv4 address in dotted-quad format.
* @return {*} (ipv6Address) - An IPv6 address string or null if a run-time problem was detected.
*/
function getIpv4MappedIpv6Address(ipv4) {
// Initialize return argument
let ipv6Address = null;
// Prepare to derive a Hex version of the dotted-quad decimal IPv4 address.
// Split the IPv4 address into its four parts.
let ipv4Quads = ipv4.split('.');
// Count the number of parts found.
let numIpv4Segments = ipv4Quads.length;
// Verify IPv4 had four parts.
if (numIpv4Segments === 4) {
let validQuads = true;
// Iterate over the IPv4 address parts and verify each segment was a number between 0 and 255.
for(let i=0; i < numIpv4Segments; i++) {
if( isNaN(Number(ipv4Quads[i])) || ipv4Quads[i] < 0 || ipv4Quads[i] > 255 ) {
validQuads = false;
}
}
// Passed IPv4 is valid. Now to derive an IPv4-mapped IPv6 address.
if (validQuads) {
// Hardcode the prefix. During refactor, we might want to make the prefix a const.
ipv6Address = "0:0:0:0:0:ffff:";
// Iterate over the IPv4 parts
for(let i=0; i < numIpv4Segments; i++) {
// Convert part to an integer, then convert to a hex string using method toString()
// with a base 16 (hex) encoding.
let hexString = parseInt(ipv4Quads[i]).toString(16);
|
if (hexString.length % 2)
hexString = '0' + hexString;
// Append hex part to evolving variable ipv6Address.
ipv6Address = ipv6Address + hexString;
// Add a colon to split the encoded address and match the IPv6 format.
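        // (each IPv6 group is 16 bits, i.e. two IPv4 octets, so the colon
        // belongs after the second octet: 172.16.10.1 -> 0:0:0:0:0:ffff:ac10:0a01)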
if(i===1) {
ipv6Address = ipv6Address + ':';
}
}
}
}
return ipv6Address;
}
/*
This section is used to test function and log any errors.
We will make several positive and negative tests.
*/
function main() {
// Create some test data for getFirstIpAddress(), both valid and invalid.
let sampleCidrs = ['172.16.10.0/24', '172.16.10.0 255.255.255.0', '172.16.10.128/25', '192.168.1.216/30'];
let sampleCidrsLen = sampleCidrs.length;
// Create some test data for getIpv4MappedIpv6Address, both valid and invalid.
let sampleIpv4s = [ '172.16.10.1', '172.16.10.0/24', '172.16.10.0 255.255.255.0', '172.16.256.1', '1.1.1.-1'];
let sampleIpv4sLen = sampleIpv4s.length;
// Iterate over sampleCidrs and pass the element's value to getFirstIpAddress().
for (let i = 0; i < sampleCidrsLen; i++) {
console.log(`\n--- Test Number ${i + 1} getFirstIpAddress(${sampleCidrs[i]}) ---`);
// Call getFirstIpAddress and pass the test subnet and an anonymous callback function.
// The callback is using the fat arrow operator: () => { }
getFirstIpAddress(sampleCidrs[i], (data, error) => {
// Now we are inside the callback function.
// Display the results on the console.
if (error) {
        console.error(`  Error returned from getFirstIpAddress: ${error}`);
      }
      console.log(`  Response returned from getFirstIpAddress: {ipv4: ${data.ipv4}, ipv6: ${data.ipv6}}`);
});
}
// Iterate over sampleIpv4s and pass the element's value to getIpv4MappedIpv6Address().
for (let i = 0; i < sampleIpv4sLen; i++) {
console.log(`\n--- Test Number ${i + 1} getIpv4MappedIpv6Address(${sampleIpv4s[i]}) ---`);
// Assign the function results to a variable so we can check if a string or null was returned.
let mappedAddress = getIpv4MappedIpv6Address(sampleIpv4s[i]);
if( mappedAddress ) {
console.log(` IPv4 ${sampleIpv4s[i]} mapped to IPv6 Address: ${mappedAddress}`);
} else {
console.error(` Problem converting IPv4 ${sampleIpv4s[i]} into a mapped IPv6 address.`);
}
}
}
/*
Call main to run it.
*/
main();
|
// If hex is odd (single digit), prepend a '0'. This is why we wanted to work with a string.
|
transform_first_upper.go
|
package main
import "fmt"
type FirstUpperCaseValue int
const (
male FirstUpperCaseValue = iota
female
unknown
)
func main()
|
func ck(value FirstUpperCaseValue, str string) {
if fmt.Sprint(value) != str {
panic("transform_first_upper.go: " + str)
}
}
|
{
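	// These checks exercise a String() method generated elsewhere for
	// FirstUpperCaseValue (e.g. by a stringer-style tool); without it,
	// fmt.Sprint would render the raw integer values instead of the
	// expected names.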
ck(male, "M")
ck(female, "F")
ck(unknown, "U")
ck(-127, "FirstUpperCaseValue(-127)")
ck(127, "FirstUpperCaseValue(127)")
}
|
9.palindrome-number.rs
|
/*
* @lc app=leetcode id=9 lang=rust
*
* [9] Palindrome Number
*/
// @lc code=start
impl Solution {
pub fn is_palindrome(x: i32) -> bool
|
}
// @lc code=end
|
{
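        // c = floor(log10(x)) is the index of the most significant digit;
        // `s` then yields x's digits from least to most significant. A
        // palindrome reads the same both ways, so the digit sequence must
        // equal its own reverse. The `x >= 0` guard rejects negatives (the
        // leading '-' can never match).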
let c = (x as f64).log10() as u32;
let s = (0..=c).map(|b| x / 10i32.pow(b) % 10);
x >= 0 && s.clone().rev().eq(s)
}
|
loader.ts
|
import { Entry } from './entry';
|
export async function loadEntry(): Promise<Entry> {
const feature = await import(/* webpackChunkName: "createDao" */ './entry');
return feature.entry;
}
|
|
development.py
|
"""
This file contains all the settings that defines the development server.
SECURITY WARNING: don't run with debug turned on in production!
"""
import logging
from typing import List
from server.settings.components import config
from server.settings.components.common import INSTALLED_APPS, MIDDLEWARE
# Setting the development status:
DEBUG = True
ALLOWED_HOSTS = [
config('DOMAIN_NAME'),
'localhost',
'0.0.0.0', # noqa: S104
'127.0.0.1',
'[::1]',
]
# Installed apps for development only:
INSTALLED_APPS += (
'debug_toolbar',
'nplusone.ext.django',
'django_migration_linter',
'django_test_migrations.contrib.django_checks.AutoNames',
'django_test_migrations.contrib.django_checks.DatabaseConfiguration',
'extra_checks',
)
# Static files:
# https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS: List[str] = []
# Django debug toolbar:
# https://django-debug-toolbar.readthedocs.io
MIDDLEWARE += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
# https://github.com/bradmontgomery/django-querycount
# Prints how many queries were executed, useful for the APIs.
'querycount.middleware.QueryCountMiddleware',
)
def _custom_show_toolbar(request):
|
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK':
'server.settings.environments.development._custom_show_toolbar',
}
# This makes the debug toolbar work with django-csp,
# since `ddt` loads some scripts from `ajax.googleapis.com`:
CSP_SCRIPT_SRC = ("'self'", 'ajax.googleapis.com')
CSP_IMG_SRC = ("'self'", 'data:')
CSP_CONNECT_SRC = ("'self'",)
# nplusone
# https://github.com/jmcarp/nplusone
# Should be the first in line:
MIDDLEWARE = ( # noqa: WPS440
'nplusone.ext.django.NPlusOneMiddleware',
) + MIDDLEWARE
# Logging N+1 requests:
NPLUSONE_RAISE = True # comment out if you want to allow N+1 requests
NPLUSONE_LOGGER = logging.getLogger('django')
NPLUSONE_LOG_LEVEL = logging.WARN
NPLUSONE_WHITELIST = [
{'model': 'admin.*'},
]
# django-test-migrations
# https://github.com/wemake-services/django-test-migrations
# Set of badly named migrations to ignore:
DTM_IGNORED_MIGRATIONS = frozenset((
('axes', '*'),
))
# django-extra-checks
# https://github.com/kalekseev/django-extra-checks
EXTRA_CHECKS = {
'checks': [
# Forbid `unique_together`:
'no-unique-together',
# Use the indexes option instead:
'no-index-together',
# Each model must be registered in admin:
'model-admin',
# FileField/ImageField must have non empty `upload_to` argument:
'field-file-upload-to',
# Text fields shouldn't use `null=True`:
'field-text-null',
# Prefer using BooleanField(null=True) instead of NullBooleanField:
'field-boolean-null',
# Don't pass `null=False` to model fields (this is django default)
'field-null',
# ForeignKey fields must specify db_index explicitly if used in
# other indexes:
{'id': 'field-foreign-key-db-index', 'when': 'indexes'},
# If field nullable `(null=True)`,
# then default=None argument is redundant and should be removed:
'field-default-null',
# Fields with choices must have companion CheckConstraint
# to enforce choices on database level
'field-choices-constraint',
],
}
|
"""Only show the debug toolbar to users with the superuser flag."""
return DEBUG and request.user.is_superuser
|
cli.test.ts
|
import assert from 'assert';
import { execSync } from 'child_process';
import { mkdtempSync, writeFileSync } from 'fs';
import { tmpdir } from 'os';
import path from 'path';
function
|
(cmdline: string) {
// want to be able to redirect stdin, so no execFile for us
return execSync(`${require.resolve('../cli.js')} ${cmdline}`, {
encoding: 'utf8',
});
}
describe('cli', () => {
let specPath: string;
before(() => {
const tmpDir = mkdtempSync(path.join(tmpdir(), 'cli-test-'));
specPath = path.join(tmpDir, 'spec.json');
writeFileSync(
specPath,
JSON.stringify({
openapi: '3.0.2',
paths: { '/': { get: { operationId: 'getSlash' } } },
})
);
});
it('shows help', () => {
assert.ok(runCLI('--help').includes('"ES2019", "ES2020"'));
});
it('generates some typescript from stdin', () => {
const tsSrc = runCLI(`-c Foo < ${specPath}`);
assert.ok(tsSrc.includes('export class Foo extends Gofer'));
assert.ok(tsSrc.includes('getSlash()'));
});
it('generates some typescript from a file', () => {
const tsSrc = runCLI(
`--class Foo --extends=bar-baz --default-export ${specPath}`
);
assert.ok(tsSrc.includes('export default class Foo extends BarBaz'));
});
});
|
runCLI
|
Index.py
|
#!/usr/bin/env python
"""
C.11.5 Index and Glossary (p211)
"""
import string, os
from plasTeX.Tokenizer import Token, EscapeSequence
from plasTeX import Command, Environment
from plasTeX.Logging import getLogger
from Sectioning import SectionUtils
try:
from pyuca import Collator
collator = Collator(os.path.join(os.path.dirname(__file__), 'allkeys.txt')).sort_key
except ImportError:
collator = lambda x: x.lower()
class IndexUtils(object):
""" Helper functions for generating indexes """
linkType = 'index'
level = Command.CHAPTER_LEVEL
class Index(Command):
"""
Utility class used to surface the index entries to the renderer
"""
def __init__(self, *args, **kwargs):
Command.__init__(self, *args, **kwargs)
self.pages = []
self.key = []
self.sortkey = ''
@property
def totallen(self):
""" Return the total number of entries generated by this entry """
total = 1
for item in self:
total += item.totallen
return total
def __repr__(self):
return '%s%s --> %s' % (''.join([x.source for x in self.key]),
', '.join([str(x) for x in self.pages]),
Command.__repr__(self))
class IndexGroup(list):
title = None
def invoke(self, tex):
if isinstance(self, Environment):
Environment.invoke(self, tex)
else:
Command.invoke(self, tex)
self.attributes['title'] = self.ownerDocument.createElement('indexname').expand(tex)
@property
def groups(self):
"""
Group index entries into batches according to the first letter
"""
batches = []
current = ''
for item in self:
try:
label = title = item.sortkey[0].upper()
if title in string.letters:
pass
elif title == '_':
title = '_ (Underscore)'
else:
label = title = 'Symbols'
except IndexError:
label = title = 'Symbols'
if current != title:
newgroup = self.IndexGroup()
newgroup.title = title
newgroup.id = label
batches.append(newgroup)
current = title
batches[-1].append(item)
for item in batches:
item[:] = self.splitColumns(item,
self.ownerDocument.config['document']['index-columns'])
return batches
def splitColumns(self, items, cols):
"""
Divide the index entries into the specified number of columns
Required Arguments:
items -- list of column entries
cols -- number of columns to create
Returns:
list of length `cols' containing groups of column entries
"""
entries = [(0,0)]
# Find the total number of entries
grandtotal = 0
for item in items:
entries.append((item.totallen, item))
grandtotal += entries[-1][0]
entries.pop(0)
entries.reverse()
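        # Columns are packed back-to-front; the reversals below restore
        # document order, so any overflow beyond `cols` columns lands in
        # the first column rather than the last.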
# Get total number of entries per column
coltotal = int(grandtotal / cols)
# Group entries into columns
current = 0
output = [[]]
for num, item in entries:
current += num
if len(output) >= cols:
output[-1].append(item)
elif current > coltotal:
output.append([item])
current = num
elif current == coltotal:
output[-1].append(item)
output.append([])
current = 0
else:
output[-1].append(item)
output.reverse()
for item in output:
item.reverse()
# Get rid of empty columns
output = [x for x in output if x]
# Pad to the correct number of columns
for i in range(cols-len(output)):
output.append([])
return output
def digest(self, tokens):
""" Sort and group index entries """
if isinstance(self, Environment):
Environment.digest(self, tokens)
if self.macroMode == self.MODE_END:
return
# Throw it all away, we don't need it. We'll be generating
# our own index entries below.
while self.childNodes:
self.pop()
else:
Command.digest(self, tokens)
doc = self.ownerDocument
current = self
entries = sorted(self.ownerDocument.userdata.get('index', []))
prev = IndexEntry([], None)
for item in entries:
# See how many levels we need to add/subtract between this one
# and the previous
common = 0
for prevkey, itemkey in zip(zip(prev.sortkey, prev.key),
zip(item.sortkey, item.key)):
if prevkey == itemkey:
common += 1
continue
break
# print
# print item
# print (prev.key, prev.sortkey), (item.key, item.sortkey), common
# Pop out to the common level
i = common
while i < len(prev.key):
# print 'POP'
current = current.parentNode
i += 1
# Add the appropriate number of levels
i = common
while i < len(item.key):
# print 'ADD', item.sortkey[i]
newidx = self.Index()
newidx.key = item.key[i]
newidx.sortkey = item.sortkey[i]
newidx.parentNode = current
current.append(newidx)
current = newidx
i += 1
# Add the current page and format it
current.pages.append(IndexDestination(item.type, item.node))
if item.format is not None:
text = doc.createTextNode(str(len(current.pages)))
ipn = item.format.getElementsByTagName('index-page-number')
if ipn:
ipn = ipn[0]
ipn.parentNode.replaceChild(text, ipn)
item.node.append(item.format)
else:
text = doc.createTextNode(str(len(current.pages)))
item.node.append(text)
prev = item
class
|
(object):
def __init__(self, type, node):
self._cr_type = type
self._cr_node = node
@property
def see(self):
return self._cr_type == IndexEntry.TYPE_SEE
@property
def seealso(self):
return self._cr_type == IndexEntry.TYPE_SEEALSO
@property
def normal(self):
return not(self.see) and not(self.seealso)
def __getattribute__(self, name):
if name.startswith('_cr_') or name in ['see', 'seealso', 'normal']:
return object.__getattribute__(self, name)
if self._cr_type and name in ['url']:
return None
return getattr(self._cr_node, name)
class theindex(IndexUtils, Environment, SectionUtils):
blockType = True
level = Environment.CHAPTER_LEVEL
counter = 'chapter'
class printindex(IndexUtils, Command, SectionUtils):
blockType = True
level = Command.CHAPTER_LEVEL
counter = 'chapter'
class makeindex(Command):
pass
class makeglossary(Command):
pass
class glossary(Command):
args = 'entry:nox'
class index(Command):
args = 'entry:nox'
@property
def textContent(self):
return ''
def invoke(self, tex):
result = Command.invoke(self, tex)
sortkey, key, format = [], [], []
entry = iter(self.attributes['entry'])
current = []
alphanumeric = [Token.CC_OTHER, Token.CC_LETTER, Token.CC_SPACE]
# Parse the index tokens
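        # Standard makeindex entry syntax handled below:
        #   key          - plain entry
        #   main!sub     - '!' separates entry levels
        #   sort@key     - '@' supplies an explicit sort key
        #   key|format   - '|' introduces a page-number format
        #                  (e.g. textbf, see, seealso)
        #   "x           - '"' escapes the next character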
for tok in entry:
if tok.catcode in alphanumeric:
# Escape character
if tok == '"':
for tok in entry:
current.append(tok)
break
# Entry separator
elif tok == '!':
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
current = []
# Sort key separator
elif tok == '@':
sortkey.append(current)
current = []
# Format separator
elif tok == '|':
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
current = format
else:
current.append(tok)
continue
# Everything else
current.append(tok)
# Make sure to get the stuff at the end
if not format:
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
# Convert the sort keys to strings
for i, item in enumerate(sortkey):
sortkey[i] = tex.expandTokens(item).textContent
# Expand the key tokens
for i, item in enumerate(key):
key[i] = tex.expandTokens(item)
# Get the format element
type = IndexEntry.TYPE_NORMAL
if not format:
format = None
else:
macro = []
while format and format[0].catcode == Token.CC_LETTER:
macro.append(format.pop(0))
if macro:
macro = ''.join(macro)
format.insert(0, EscapeSequence(macro))
if macro == 'see':
type = IndexEntry.TYPE_SEE
elif macro == 'seealso':
type = IndexEntry.TYPE_SEEALSO
format.append(EscapeSequence('index-page-number'))
format = tex.expandTokens(format)
# Store the index information in the document
userdata = self.ownerDocument.userdata
if 'index' not in userdata:
userdata['index'] = []
userdata['index'].append(IndexEntry(key, self, sortkey, format, type))
return result
class IndexEntry(object):
"""
Utility class used to assist in the sorting of index entries
"""
TYPE_NORMAL = 0
TYPE_SEE = 1
TYPE_SEEALSO = 2
def __init__(self, key, node, sortkey=None, format=None, type=0):
"""
Required Arguments:
key -- a list of keys for the index entry
node -- the node of the document that the index entry is
associated with
sortkey -- a list of sort keys, one per key, to be used for
sorting instead of the key values
format -- formatting that should be used to format the
destination of the index entry
type -- the type of entry that this is: TYPE_NORMAL, TYPE_SEE,
or TYPE_SEEALSO
"""
self.key = key
if not sortkey:
self.sortkey = key
else:
self.sortkey = []
for i, sk in enumerate(sortkey):
if sk is None:
self.sortkey.append(key[i].textContent)
else:
self.sortkey.append(sk)
self.format = format
self.node = node
self.type = type
@property
def see(self):
return self.type == type(self).TYPE_SEE
@property
def seealso(self):
return self.type == type(self).TYPE_SEEALSO
@property
def normal(self):
return not(self.see) and not(self.seealso)
def __cmp__(self, other):
result = cmp(zip([collator(x) for x in self.sortkey if isinstance(x, basestring)],
[collator(x.textContent) for x in self.key],
self.key),
zip([collator(x) for x in other.sortkey if isinstance(x, basestring)],
[collator(x.textContent) for x in other.key],
other.key))
if result == 0 and len(self.key) != len(other.key):
return cmp(len(self.key), len(other.key))
return result
def __repr__(self):
if self.format is None:
return ' '.join(['@'.join(self.sortkey),
'!'.join([x.source for x in self.key])])
else:
return ' '.join(['@'.join(self.sortkey),
'!'.join([x.source for x in self.key]),
' '.join([x.source for x in self.format])])
def __str__(self):
return repr(self)
class IndexPageNumber(Command):
macroName = 'index-page-number'
|
IndexDestination
|
admin.py
|
from django.contrib import admin
from .models import *
# Register your models here.
# admin.site.register(Role)
admin.site.register(Profile)
|
# admin.site.register(UserAuthorization)
| |
streamtest.go
|
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package streamtest
import (
"context"
"errors"
"io"
"sync"
"testing"
"time"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/swarm"
)
var (
ErrRecordsNotFound = errors.New("records not found")
ErrStreamNotSupported = errors.New("stream not supported")
ErrStreamClosed = errors.New("stream closed")
ErrStreamFullcloseTimeout = errors.New("fullclose timeout")
fullCloseTimeout = fullCloseTimeoutDefault // timeout of fullclose
fullCloseTimeoutDefault = 5 * time.Second // default timeout used for helper function to reset timeout when changed
noopMiddleware = func(f p2p.HandlerFunc) p2p.HandlerFunc {
return f
}
)
type Recorder struct {
base swarm.Address
records map[string][]*Record
recordsMu sync.Mutex
protocols []p2p.ProtocolSpec
middlewares []p2p.HandlerMiddleware
streamErr func(swarm.Address, string, string, string) error
protocolsWithPeers map[string]p2p.ProtocolSpec
}
func WithProtocols(protocols ...p2p.ProtocolSpec) Option {
return optionFunc(func(r *Recorder) {
r.protocols = append(r.protocols, protocols...)
})
}
func WithPeerProtocols(protocolsWithPeers map[string]p2p.ProtocolSpec) Option {
return optionFunc(func(r *Recorder) {
r.protocolsWithPeers = protocolsWithPeers
})
}
func WithMiddlewares(middlewares ...p2p.HandlerMiddleware) Option {
return optionFunc(func(r *Recorder) {
r.middlewares = append(r.middlewares, middlewares...)
})
}
func WithBaseAddr(a swarm.Address) Option
|
func WithStreamError(streamErr func(swarm.Address, string, string, string) error) Option {
return optionFunc(func(r *Recorder) {
r.streamErr = streamErr
})
}
func New(opts ...Option) *Recorder {
r := &Recorder{
records: make(map[string][]*Record),
}
r.middlewares = append(r.middlewares, noopMiddleware)
for _, o := range opts {
o.apply(r)
}
return r
}
func (r *Recorder) SetProtocols(protocols ...p2p.ProtocolSpec) {
r.protocols = append(r.protocols, protocols...)
}
func (r *Recorder) NewStream(ctx context.Context, addr swarm.Address, h p2p.Headers, protocolName, protocolVersion, streamName string) (p2p.Stream, error) {
if r.streamErr != nil {
err := r.streamErr(addr, protocolName, protocolVersion, streamName)
if err != nil {
return nil, err
}
}
recordIn := newRecord()
recordOut := newRecord()
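	// Cross-wire the two endpoints: bytes written by the client end
	// (streamOut) land in recordIn, where the handler's end (streamIn)
	// reads them, and vice versa through recordOut.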
streamOut := newStream(recordIn, recordOut)
streamIn := newStream(recordOut, recordIn)
var handler p2p.HandlerFunc
var headler p2p.HeadlerFunc
peerHandlers, ok := r.protocolsWithPeers[addr.String()]
if !ok {
for _, p := range r.protocols {
if p.Name == protocolName && p.Version == protocolVersion {
peerHandlers = p
}
}
}
for _, s := range peerHandlers.StreamSpecs {
if s.Name == streamName {
handler = s.Handler
headler = s.Headler
}
}
if handler == nil {
return nil, ErrStreamNotSupported
}
for i := len(r.middlewares) - 1; i >= 0; i-- {
handler = r.middlewares[i](handler)
}
if headler != nil {
streamOut.headers = headler(h, addr)
}
record := &Record{in: recordIn, out: recordOut, done: make(chan struct{})}
go func() {
defer close(record.done)
// pass a new context to handler,
// do not cancel it with the client stream context
err := handler(context.Background(), p2p.Peer{Address: r.base}, streamIn)
if err != nil && err != io.EOF {
record.setErr(err)
}
}()
id := addr.String() + p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName)
r.recordsMu.Lock()
defer r.recordsMu.Unlock()
r.records[id] = append(r.records[id], record)
return streamOut, nil
}
func (r *Recorder) Records(addr swarm.Address, protocolName, protocolVersion, streamName string) ([]*Record, error) {
	id := addr.String() + p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName)
r.recordsMu.Lock()
defer r.recordsMu.Unlock()
records, ok := r.records[id]
if !ok {
return nil, ErrRecordsNotFound
}
// wait for all records goroutines to terminate
for _, r := range records {
<-r.done
}
return records, nil
}
// WaitRecords waits up to timeoutSec seconds for msgs records to arrive in the recorder. If msgs is 0,
// the full timeoutSec period is waited to verify that _no_ messages arrive during that time.
func (r *Recorder) WaitRecords(t *testing.T, addr swarm.Address, proto, version, stream string, msgs, timeoutSec int) []*Record {
t.Helper()
wait := 10 * time.Millisecond
iters := int((time.Duration(timeoutSec) * time.Second) / wait)
for i := 0; i < iters; i++ {
recs, _ := r.Records(addr, proto, version, stream)
if l := len(recs); l > msgs {
t.Fatalf("too many records. want %d got %d", msgs, l)
} else if msgs > 0 && l == msgs {
return recs
}
// we can be here if msgs == 0 && l == 0,
// or msgs == x && l < x; both cases are fine
// and we should continue waiting
time.Sleep(wait)
}
if msgs > 0 {
t.Fatal("timed out while waiting for records")
}
return nil
}
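// A minimal usage sketch (not part of the original file): one way a test
// might drive the recorder end to end. The peer address and protocol spec
// are assumed to be supplied by the surrounding test.
func exampleWaitRecords(t *testing.T, peer swarm.Address, spec p2p.ProtocolSpec) {
	r := New(WithProtocols(spec))
	// Opening a client-side stream dispatches the registered handler in a
	// goroutine and records the exchange under the stream's id.
	stream, err := r.NewStream(context.Background(), peer, nil, spec.Name, spec.Version, spec.StreamSpecs[0].Name)
	if err != nil {
		t.Fatal(err)
	}
	defer stream.Close()
	// Wait up to 2 seconds for exactly one recorded exchange.
	_ = r.WaitRecords(t, peer, spec.Name, spec.Version, spec.StreamSpecs[0].Name, 1, 2)
}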
type Record struct {
in *record
out *record
err error
errMu sync.Mutex
done chan struct{}
}
func (r *Record) In() []byte {
return r.in.bytes()
}
func (r *Record) Out() []byte {
return r.out.bytes()
}
func (r *Record) Err() error {
r.errMu.Lock()
defer r.errMu.Unlock()
return r.err
}
func (r *Record) setErr(err error) {
r.errMu.Lock()
defer r.errMu.Unlock()
r.err = err
}
type stream struct {
in *record
out *record
headers p2p.Headers
responseHeaders p2p.Headers
}
func newStream(in, out *record) *stream {
return &stream{in: in, out: out}
}
func (s *stream) Read(p []byte) (int, error) {
return s.out.Read(p)
}
func (s *stream) Write(p []byte) (int, error) {
return s.in.Write(p)
}
func (s *stream) Headers() p2p.Headers {
return s.headers
}
func (s *stream) ResponseHeaders() p2p.Headers {
return s.responseHeaders
}
func (s *stream) Close() error {
return s.in.Close()
}
func (s *stream) FullClose() error {
if err := s.Close(); err != nil {
_ = s.Reset()
return err
}
waitStart := time.Now()
for {
if s.out.Closed() {
return nil
}
if time.Since(waitStart) >= fullCloseTimeout {
return ErrStreamFullcloseTimeout
}
time.Sleep(10 * time.Millisecond)
}
}
func (s *stream) Reset() (err error) {
if err := s.in.Close(); err != nil {
_ = s.out.Close()
return err
}
return s.out.Close()
}
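// record is one direction of an in-memory pipe: an append-only byte buffer
// guarded by a condition variable. Write appends and signals; Read blocks
// until new bytes arrive or the record is closed.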
type record struct {
b []byte
c int
closed bool
closeMu sync.RWMutex
cond *sync.Cond
}
func newRecord() *record {
return &record{
cond: sync.NewCond(new(sync.Mutex)),
}
}
func (r *record) Read(p []byte) (n int, err error) {
r.cond.L.Lock()
defer r.cond.L.Unlock()
for r.c == len(r.b) && !r.Closed() {
r.cond.Wait()
}
end := r.c + len(p)
if end > len(r.b) {
end = len(r.b)
}
n = copy(p, r.b[r.c:end])
r.c += n
if r.Closed() {
err = io.EOF
}
return n, err
}
func (r *record) Write(p []byte) (int, error) {
r.cond.L.Lock()
defer r.cond.L.Unlock()
if r.Closed() {
return 0, ErrStreamClosed
}
defer r.cond.Signal()
r.b = append(r.b, p...)
return len(p), nil
}
func (r *record) Close() error {
r.cond.L.Lock()
defer r.cond.L.Unlock()
defer r.cond.Broadcast()
r.closeMu.Lock()
r.closed = true
r.closeMu.Unlock()
return nil
}
func (r *record) Closed() bool {
r.closeMu.RLock()
defer r.closeMu.RUnlock()
return r.closed
}
func (r *record) bytes() []byte {
r.cond.L.Lock()
defer r.cond.L.Unlock()
return r.b
}
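// Option configures a Recorder using the functional options pattern; the
// With* constructors above wrap configuration closures in optionFunc.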
type Option interface {
apply(*Recorder)
}
type optionFunc func(*Recorder)
func (f optionFunc) apply(r *Recorder) { f(r) }
var _ p2p.StreamerDisconnecter = (*RecorderDisconnecter)(nil)
type RecorderDisconnecter struct {
*Recorder
disconnected map[string]struct{}
blocklisted map[string]time.Duration
mu sync.RWMutex
}
func NewRecorderDisconnecter(r *Recorder) *RecorderDisconnecter {
return &RecorderDisconnecter{
Recorder: r,
disconnected: make(map[string]struct{}),
blocklisted: make(map[string]time.Duration),
}
}
func (r *RecorderDisconnecter) Disconnect(overlay swarm.Address) error {
r.mu.Lock()
defer r.mu.Unlock()
r.disconnected[overlay.String()] = struct{}{}
return nil
}
func (r *RecorderDisconnecter) Blocklist(overlay swarm.Address, d time.Duration) error {
r.mu.Lock()
defer r.mu.Unlock()
r.blocklisted[overlay.String()] = d
return nil
}
func (r *RecorderDisconnecter) IsDisconnected(overlay swarm.Address) bool {
r.mu.RLock()
defer r.mu.RUnlock()
_, yes := r.disconnected[overlay.String()]
return yes
}
func (r *RecorderDisconnecter) IsBlocklisted(overlay swarm.Address) (bool, time.Duration) {
r.mu.RLock()
defer r.mu.RUnlock()
d, yes := r.blocklisted[overlay.String()]
return yes, d
}
|
{
return optionFunc(func(r *Recorder) {
r.base = a
})
}
|
TopicHeader.tsx
|
import * as React from 'react';
import { HTMLProps, SFC } from 'react';
const componentStyle = {
|
base: {
margin: '30px 0 20px',
},
};
export interface TopicHeaderProps extends HTMLProps<HTMLHeadingElement> {}
export const TopicHeader: SFC<TopicHeaderProps> = ({ children }) => (
<h3 style={componentStyle.base}>{children}</h3>
);
TopicHeader.displayName = 'TopicHeader';
| |
source_attributes.rs
|
extern crate snafu;
use snafu::Snafu;
#[derive(Debug, Snafu)]
enum InnerError {
Boom,
}
fn
|
() -> Result<(), InnerError> {
Ok(())
}
mod enabling {
use super::*;
use snafu::{ResultExt, Snafu};
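// Each variant below exercises one form of the #[snafu(source)] attribute:
// bare `source` (marks a field not named `source` as the source), explicit
// `source(true)`, `source(from(...))` which implies true, and `source(false)`
// which treats a field named `source` as ordinary data.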
#[derive(Debug, Snafu)]
enum Error {
NoArgument {
#[snafu(source)]
cause: InnerError,
},
ExplicitTrue {
#[snafu(source(true))]
cause: InnerError,
},
FromImpliesTrue {
#[snafu(source(from(InnerError, Box::new)))]
cause: Box<InnerError>,
},
ExplicitFalse {
#[snafu(source(false))]
source: i32,
},
}
fn example() -> Result<(), Error> {
inner().context(NoArgument)?;
inner().context(ExplicitTrue)?;
inner().context(FromImpliesTrue)?;
ExplicitFalse { source: 42 }.fail()?;
Ok(())
}
#[test]
fn implements_error() {
fn check<T: std::error::Error>() {}
check::<Error>();
example().unwrap_err();
}
}
mod transformation {
use super::*;
use snafu::{ResultExt, Snafu};
use std::io;
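// Each variant converts InnerError into a different source type when the
// context is applied: via a closure, via a named function, and via Box::new
// into a boxed trait object.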
#[derive(Debug, Snafu)]
enum Error {
TransformationViaClosure {
#[snafu(source(from(InnerError, |e| io::Error::new(io::ErrorKind::InvalidData, e))))]
source: io::Error,
},
TransformationViaFunction {
#[snafu(source(from(InnerError, into_io)))]
source: io::Error,
},
TransformationToTraitObject {
#[snafu(source(from(InnerError, Box::new)))]
source: Box<dyn std::error::Error>,
},
}
fn into_io(e: InnerError) -> io::Error {
io::Error::new(io::ErrorKind::InvalidData, e)
}
fn example() -> Result<(), Error> {
inner().context(TransformationViaClosure)?;
inner().context(TransformationViaFunction)?;
inner().context(TransformationToTraitObject)?;
Ok(())
}
#[test]
fn implements_error() {
fn check<T: std::error::Error>() {}
check::<Error>();
example().unwrap();
}
#[derive(Debug, Snafu)]
#[snafu(source(from(Error, Box::new)))]
struct ApiError(Box<Error>);
fn api_example() -> Result<(), ApiError> {
example()?;
Ok(())
}
#[test]
fn api_implements_error() {
fn check<T: std::error::Error>() {}
check::<ApiError>();
api_example().unwrap();
}
}
|
inner
|
top_pod_test.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bytes"
"net/http"
"strings"
"testing"
"time"
"net/url"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/client-go/rest/fake"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
v1 "k8s.io/kubernetes/pkg/api/v1"
cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing"
)
const (
topPathPrefix = baseMetricsAddress + "/" + metricsApiVersion
)
func TestTopPod(t *testing.T) {
testNS := "testns"
testCases := []struct {
name string
namespace string
flags map[string]string
args []string
expectedPath string
expectedQuery string
namespaces []string
containers bool
listsNamespaces bool
}{
{
name: "all namespaces",
flags: map[string]string{"all-namespaces": "true"},
expectedPath: topPathPrefix + "/pods",
namespaces: []string{testNS, "secondtestns", "thirdtestns"},
listsNamespaces: true,
},
{
name: "all in namespace",
expectedPath: topPathPrefix + "/namespaces/" + testNS + "/pods",
namespaces: []string{testNS, testNS},
},
{
name: "pod with name",
args: []string{"pod1"},
expectedPath: topPathPrefix + "/namespaces/" + testNS + "/pods/pod1",
namespaces: []string{testNS},
},
{
name: "pod with label selector",
flags: map[string]string{"selector": "key=value"},
expectedPath: topPathPrefix + "/namespaces/" + testNS + "/pods",
expectedQuery: "labelSelector=" + url.QueryEscape("key=value"),
namespaces: []string{testNS, testNS},
},
{
name: "pod with container metrics",
flags: map[string]string{"containers": "true"},
args: []string{"pod1"},
expectedPath: topPathPrefix + "/namespaces/" + testNS + "/pods/pod1",
namespaces: []string{testNS},
containers: true,
},
}
initTestErrorHandler(t)
for _, testCase := range testCases {
t.Logf("Running test case: %s", testCase.name)
metricsList := testPodMetricsData()
var expectedMetrics []metricsapi.PodMetrics
var expectedContainerNames, nonExpectedMetricsNames []string
for n, m := range metricsList {
if n < len(testCase.namespaces) {
m.Namespace = testCase.namespaces[n]
expectedMetrics = append(expectedMetrics, m)
for _, c := range m.Containers {
expectedContainerNames = append(expectedContainerNames, c.Name)
}
} else {
nonExpectedMetricsNames = append(nonExpectedMetricsNames, m.Name)
}
}
var response interface{}
if len(expectedMetrics) == 1 {
response = expectedMetrics[0]
} else {
response = metricsapi.PodMetricsList{
ListMeta: unversioned.ListMeta{
ResourceVersion: "2",
},
Items: expectedMetrics,
}
}
f, tf, _, ns := cmdtesting.NewAPIFactory()
tf.Printer = &testPrinter{}
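// Stub the REST transport: a GET on the expected path (and query, when one
// is expected) returns the canned metrics response; any other request fails
// the test immediately.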
tf.Client = &fake.RESTClient{
APIRegistry: api.Registry,
NegotiatedSerializer: ns,
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
switch p, m, q := req.URL.Path, req.Method, req.URL.RawQuery; {
case p == testCase.expectedPath && m == "GET" && (testCase.expectedQuery == "" || q == testCase.expectedQuery):
body, err := marshallBody(response)
if err != nil {
t.Errorf("%s: unexpected error: %v", testCase.name, err)
}
return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: body}, nil
default:
t.Fatalf("%s: unexpected request: %#v\nGot URL: %#v\nExpected path: %#v\nExpected query: %#v",
testCase.name, req, req.URL, testCase.expectedPath, testCase.expectedQuery)
return nil, nil
}
}),
}
tf.Namespace = testNS
tf.ClientConfig = defaultClientConfig()
buf := bytes.NewBuffer([]byte{})
cmd := NewCmdTopPod(f, buf)
for name, value := range testCase.flags {
cmd.Flags().Set(name, value)
}
cmd.Run(cmd, testCase.args)
// Check for the presence of pod names, namespaces, and container names in the output.
result := buf.String()
if testCase.containers
|
for _, m := range expectedMetrics {
if !strings.Contains(result, m.Name) {
t.Errorf("%s: missing metrics for %s: \n%s", testCase.name, m.Name, result)
}
if testCase.listsNamespaces && !strings.Contains(result, m.Namespace) {
t.Errorf("%s: missing metrics for %s/%s: \n%s", testCase.name, m.Namespace, m.Name, result)
}
}
for _, name := range nonExpectedMetricsNames {
if strings.Contains(result, name) {
t.Errorf("%s: unexpected metrics for %s: \n%s", testCase.name, name, result)
}
}
}
}
func testPodMetricsData() []metricsapi.PodMetrics {
return []metricsapi.PodMetrics{
{
ObjectMeta: v1.ObjectMeta{Name: "pod1", Namespace: "test", ResourceVersion: "10"},
Window: unversioned.Duration{Duration: time.Minute},
Containers: []metricsapi.ContainerMetrics{
{
Name: "container1-1",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
v1.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
},
},
{
Name: "container1-2",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(4, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(5*(1024*1024), resource.DecimalSI),
v1.ResourceStorage: *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
},
},
},
},
{
ObjectMeta: v1.ObjectMeta{Name: "pod2", Namespace: "test", ResourceVersion: "11"},
Window: unversioned.Duration{Duration: time.Minute},
Containers: []metricsapi.ContainerMetrics{
{
Name: "container2-1",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(7, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(8*(1024*1024), resource.DecimalSI),
v1.ResourceStorage: *resource.NewQuantity(9*(1024*1024), resource.DecimalSI),
},
},
{
Name: "container2-2",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(11*(1024*1024), resource.DecimalSI),
v1.ResourceStorage: *resource.NewQuantity(12*(1024*1024), resource.DecimalSI),
},
},
{
Name: "container2-3",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(13, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(14*(1024*1024), resource.DecimalSI),
v1.ResourceStorage: *resource.NewQuantity(15*(1024*1024), resource.DecimalSI),
},
},
},
},
{
ObjectMeta: v1.ObjectMeta{Name: "pod3", Namespace: "test", ResourceVersion: "12"},
Window: unversioned.Duration{Duration: time.Minute},
Containers: []metricsapi.ContainerMetrics{
{
Name: "container3-1",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(7, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(8*(1024*1024), resource.DecimalSI),
v1.ResourceStorage: *resource.NewQuantity(9*(1024*1024), resource.DecimalSI),
},
},
},
},
}
}
|
{
for _, containerName := range expectedContainerNames {
if !strings.Contains(result, containerName) {
t.Errorf("%s: missing metrics for container %s: \n%s", testCase.name, containerName, result)
}
}
}
|