file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
test_bb_scopes_updates.py | #!/usr/bin/env python3
'''Test config updates '''
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
import subprocess
import os
import json
import time
import datetime
import requests
import pytest
# ------------------------------------------------------------------------------
# Constants
# ------------------------------------------------------------------------------
G_TEST_HOST = 'http://127.0.0.1:12345'
# ------------------------------------------------------------------------------
# run_command
# ------------------------------------------------------------------------------
def run_command(command):
    """Execute *command* through the shell and wait for it to finish.

    :param command: shell command line (string).
    :return: tuple ``(returncode, stdout_bytes, stderr_bytes)``.
    """
    l_proc = subprocess.Popen(command,
                              shell=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
    l_out, l_err = l_proc.communicate()
    return (l_proc.returncode, l_out, l_err)
# ------------------------------------------------------------------------------
# setup scopez server in action mode
# ------------------------------------------------------------------------------
@pytest.fixture()
def setup_scopez_server_action():
    """Launch scopez_server in "action" (enforcement) mode for one test.

    All config paths (GeoIP databases, WAF conf/ruleset dirs, scopes dir,
    AN scope list, bot-challenge file) are resolved relative to this test
    file; the server binary is taken from the build tree.  The server is
    assumed to listen at G_TEST_HOST -- TODO confirm the 1s startup sleep
    is always sufficient.
    """
    # ------------------------------------------------------
    # setup
    # ------------------------------------------------------
    l_file_path = os.path.dirname(os.path.abspath(__file__))
    l_geoip2city_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/db/GeoLite2-City.mmdb'))
    l_geoip2ISP_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/db/GeoLite2-ASN.mmdb'))
    l_conf_dir = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf'))
    l_ruleset_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/ruleset'))
    l_scopez_dir = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/scopes'))
    l_an_list = os.path.realpath(os.path.join(l_file_path, '../../data/an/an-scopes.json'))
    l_scopez_server_path = os.path.abspath(os.path.join(l_file_path, '../../../build/util/scopez_server/scopez_server'))
    l_bot_challenge = os.path.realpath(os.path.join(l_file_path, '../../data/bot/bot-challenges.json'))
    # '-a' selects action mode (requests are actually blocked, not audited)
    l_subproc = subprocess.Popen([l_scopez_server_path,
                                  '-d', l_conf_dir,
                                  '-S', l_scopez_dir,
                                  '-l', l_an_list,
                                  '-r', l_ruleset_path,
                                  '-g', l_geoip2city_path,
                                  '-i', l_geoip2ISP_path,
                                  '-c', l_bot_challenge,
                                  '-a'
                                  ])
    # echo the exact command line for debugging failed runs
    print('cmd: {}'.format(' '.join([l_scopez_server_path,
                                     '-d', l_conf_dir,
                                     '-S', l_scopez_dir,
                                     '-l', l_an_list,
                                     '-r', l_ruleset_path,
                                     '-g', l_geoip2city_path,
                                     '-i', l_geoip2ISP_path,
                                     '-c', l_bot_challenge,
                                     '-a'])))
    # '-b'])))
    time.sleep(1)
    # ------------------------------------------------------
    # yield...
    # ------------------------------------------------------
    yield setup_scopez_server_action
    # ------------------------------------------------------
    # tear down
    # ------------------------------------------------------
    # NOTE(review): SIGKILL gives the server no chance to clean up;
    # l_subproc.kill() would be the idiomatic equivalent of this shell-out.
    _, _, _ = run_command('kill -9 %d'%(l_subproc.pid))
    time.sleep(0.5)
def test_acl_config_update(setup_scopez_server_action):
    '''
    update acl config 0050-ZrLf2KkQ - remove gizoogle from
    user agent black list and test if request returns 200
    '''
    # ------------------------------------------------------
    # test an 0050 with user-agent acl 'gizoogle' in the
    # request - expect the ACL block (403 + custom response)
    # ------------------------------------------------------
    l_uri = G_TEST_HOST
    l_headers = {'host': 'monkeez.com',
                 'user-agent': 'gizoogle',
                 'waf-scopes-id': '0050'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is acl custom response\n'
    # ------------------------------------------------------
    # load acl config and remove gizoogle from blacklist
    # ------------------------------------------------------
    l_conf = {}
    l_file_path = os.path.dirname(os.path.abspath(__file__))
    l_acl_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/acl/0050-ZrLf2KkQ.acl.json'))
    try:
        with open(l_acl_conf_path) as l_f:
            l_conf = json.load(l_f)
    except Exception as l_e:
        print('error opening config file: %s. Reason: %s error: %s, doc: %s' % (
            l_acl_conf_path, type(l_e), l_e, l_e.__doc__))
        assert False
    l_conf['user_agent']['blacklist'] = []
    # bump last_modified_date, otherwise the server treats the config
    # as unchanged and skips the reload
    l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    # ------------------------------------------------------
    # post/update acl conf
    # ------------------------------------------------------
    l_url = '%s/update_acl' % (G_TEST_HOST)
    l_headers = {'Content-Type': 'application/json',
                 'waf-scopes-id': '0050'}
    l_r = requests.post(l_url,
                        headers=l_headers,
                        data=json.dumps(l_conf))
    assert l_r.status_code == 200
    # ------------------------------------------------------
    # blacklist should have been updated and should get 200
    # ------------------------------------------------------
    l_uri = G_TEST_HOST
    l_headers = {'host': 'monkeez.com',
                 'user-agent': 'gizoogle',
                 'waf-scopes-id': '0050'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
def test_rules_config_update(setup_scopez_server_action):
    '''
    update rules config 0050-ZrLf3KkQ.rules.json - change
    user agent to Donkeez from Monkeez
    '''
    # ------------------------------------------------------
    # test an 0050 with user-agent 'Monkeez' in the
    # request - expect the custom-rule block
    # ------------------------------------------------------
    l_uri = G_TEST_HOST
    l_headers = {'host': 'monkeez.com',
                 'user-agent': 'monkeez',
                 'waf-scopes-id': '0050'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is rules custom response\n'
    # ------------------------------------------------------
    # load rules config and change monkeez to donkeez in
    # custom rules
    # ------------------------------------------------------
    l_conf = {}
    l_file_path = os.path.dirname(os.path.abspath(__file__))
    l_rules_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/rules/0050-ZrLf3KkQ.rules.json'))
    try:
        with open(l_rules_conf_path) as l_f:
            l_conf = json.load(l_f)
    except Exception as l_e:
        # fix: report the rules config path (was printing the directory)
        print('error opening config file: %s. Reason: %s error: %s, doc: %s' % (
            l_rules_conf_path, type(l_e), l_e, l_e.__doc__))
        assert False
    l_conf['directive'][1]['sec_rule']['operator']['value'] = 'donkeez'
    # bump last_modified_date so the server accepts the update
    l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    # ------------------------------------------------------
    # post/update rules conf
    # ------------------------------------------------------
    l_url = '%s/update_rules' % (G_TEST_HOST)
    l_headers = {'Content-Type': 'application/json',
                 'waf-scopes-id': '0050'}
    l_r = requests.post(l_url,
                        headers=l_headers,
                        data=json.dumps(l_conf))
    assert l_r.status_code == 200
    # ------------------------------------------------------
    # test again with user-agent 'Monkeez' in the
    # request. It should pass
    # ------------------------------------------------------
    l_uri = G_TEST_HOST
    l_headers = {'host': 'monkeez.com',
                 'user-agent': 'monkeez',
                 'waf-scopes-id': '0050'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    # ------------------------------------------------------
    # test with user-agent 'donkeez' in the
    # request. should be blocked
    # ------------------------------------------------------
    l_uri = G_TEST_HOST
    l_headers = {'host': 'monkeez.com',
                 'user-agent': 'donkeez',
                 'waf-scopes-id': '0050'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is rules custom response\n'
def test_profile_config_update(setup_scopez_server_action):
    '''
    update profile config 0050-YrLf3KkQ.wafprof.json - change
    ignore_query_args to test from ignore
    '''
    # ------------------------------------------------------
    # test an 0050 with sql injection - expect profile block
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/profile.html?a=%27select%20*%20from%20testing%27'
    l_headers = {'host': 'monkeez.com',
                 'waf-scopes-id': '0050'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is profile custom response\n'
    # ------------------------------------------------------
    # test an 0050 with sql injection and query_args "ignore"
    # (currently in ignore_query_args, so the request passes)
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/profile.html?ignore=%27select%20*%20from%20testing%27'
    l_headers = {'host': 'monkeez.com',
                 'waf-scopes-id': '0050'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    # ------------------------------------------------------
    # load profile config and change "ignore_query_args"
    # to "test"
    # ------------------------------------------------------
    l_conf = {}
    l_file_path = os.path.dirname(os.path.abspath(__file__))
    l_profile_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/profile/0050-YrLf3KkQ.wafprof.json'))
    try:
        with open(l_profile_conf_path) as l_f:
            l_conf = json.load(l_f)
    except Exception as l_e:
        print('error opening config file: %s. Reason: %s error: %s, doc: %s' % (
            l_profile_conf_path, type(l_e), l_e, l_e.__doc__))
        assert False
    l_conf["general_settings"]["ignore_query_args"] = ["test"]
    # bump last_modified_date so the server accepts the update
    l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    # ------------------------------------------------------
    # post/update profile conf
    # ------------------------------------------------------
    l_url = '%s/update_profile'%(G_TEST_HOST)
    l_headers = {'Content-Type': 'application/json',
                 'waf-scopes-id': '0050'}
    l_r = requests.post(l_url,
                        headers=l_headers,
                        data=json.dumps(l_conf))
    assert l_r.status_code == 200
    # ------------------------------------------------------
    # test an 0050 with sql injection and query_args "ignore"
    # should get 403 now that "ignore" is no longer ignored
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/profile.html?ignore=%27select%20*%20from%20testing%27'
    l_headers = {'host': 'monkeez.com',
                 'waf-scopes-id': '0050'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is profile custom response\n'
    # ------------------------------------------------------
    # test an 0050 with sql injection and query_args "test"
    # sql injection should be ignored and get 200
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/profile.html?test=%27select%20*%20from%20testing%27'
    l_headers = {'host': 'monkeez.com',
                 'waf-scopes-id': '0050'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
def test_limit_config_update(setup_scopez_server_action):
    '''
    update limit config 0050-MjMhNXMR.limit.json - raise the request
    threshold 'num' to 3 and verify both affected scopes (limit.com and
    test.limit.com) pick up the new limit.
    '''
    # ------------------------------------------------------
    # Make 3 request in 2 sec for 3rd and
    # 4th scope. Third request should get rate limited
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'limit.com',
                 'waf-scopes-id': '0050'}
    for _ in range(2):
        l_r = requests.get(l_uri, headers=l_headers)
        assert l_r.status_code == 200
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is ddos custom response\n'
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'test.limit.com',
                 'waf-scopes-id': '0050'}
    for _ in range(2):
        l_r = requests.get(l_uri, headers=l_headers)
        assert l_r.status_code == 200
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'custom response for limits from limit_id_2\n'
    # ------------------------------------------------------
    # sleep for 2 seconds. Enforcements should expire
    # ------------------------------------------------------
    time.sleep(2)
    # ------------------------------------------------------
    # load limit config and change the request threshold
    # 'num' from 2 to 3 (the original comment said duration_sec,
    # but it is the count that is updated)
    # ------------------------------------------------------
    l_conf = {}
    l_file_path = os.path.dirname(os.path.abspath(__file__))
    l_limit_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/limit/0050-MjMhNXMR.limit.json'))
    try:
        with open(l_limit_conf_path) as l_f:
            l_conf = json.load(l_f)
    except Exception as l_e:
        print('error opening config file: %s. Reason: %s error: %s, doc: %s' % (
            l_limit_conf_path, type(l_e), l_e, l_e.__doc__))
        assert False
    l_conf["num"] = 3
    # bump last_modified_date so the server accepts the update
    l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    # ------------------------------------------------------
    # POST conf
    # ------------------------------------------------------
    l_url = '%s/update_limit'%(G_TEST_HOST)
    l_headers = {'Content-Type': 'application/json',
                 'waf-scopes-id': '0050'}
    l_r = requests.post(l_url,
                        headers=l_headers,
                        data=json.dumps(l_conf))
    assert l_r.status_code == 200
    # ------------------------------------------------------
    # Make 4 request in 2 sec. fourth request should get
    # rate limited. Third request shouldn't be blocked
    # because of the update
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'limit.com',
                 'waf-scopes-id': '0050'}
    for _ in range(3):
        l_r = requests.get(l_uri, headers=l_headers)
        assert l_r.status_code == 200
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is ddos custom response\n'
    # ------------------------------------------------------
    # Make 4 request in 2 sec for fourth scope.
    # verify if 4th scope was also updated
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'test.limit.com',
                 'waf-scopes-id': '0050'}
    for _ in range(3):
        l_r = requests.get(l_uri, headers=l_headers)
        assert l_r.status_code == 200
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'custom response for limits from limit_id_2\n'
def test_scopes_update(setup_scopez_server_action):
    '''
    update scopes config 0051.scopes.json - repoint the RX (regex)
    scope's path at .*/test.html and verify the old path now falls
    through to the GLOB scope.
    '''
    # ------------------------------------------------------
    # check second scope for AN 0051 working correctly
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/path.html'
    l_headers = {'host': 'www.regexhost.com',
                 'waf-scopes-id':'0051',
                 'User-Agent': 'bananas'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is from RX scope\n'
    # ------------------------------------------------------
    # change the 'path' value for scope and update.
    # check if update was successful
    # ------------------------------------------------------
    l_conf = {}
    l_file_path = os.path.dirname(os.path.abspath(__file__))
    l_scopes_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/scopes/0051.scopes.json'))
    try:
        with open(l_scopes_conf_path) as l_f:
            l_conf = json.load(l_f)
    except Exception as l_e:
        print('error opening config file: %s. Reason: %s error: %s, doc: %s' % (
            l_scopes_conf_path, type(l_e), l_e, l_e.__doc__))
        assert False
    # scopes[1] is the RX scope exercised above -- TODO confirm ordering
    l_conf['scopes'][1]['path']['value'] = ".*/test.html"
    # bump last_modified_date so the server accepts the update
    l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    # ------------------------------------------------------
    # POST conf
    # ------------------------------------------------------
    l_url = '%s/update_scopes'%(G_TEST_HOST)
    l_headers = {'Content-Type': 'application/json'}
    l_r = requests.post(l_url,
                        headers=l_headers,
                        data=json.dumps(l_conf))
    assert l_r.status_code == 200
    # ------------------------------------------------------
    # make a request with same path '/path.html',
    # should match GLOB scope
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/path.html'
    l_headers = {'host': 'www.regexhost.com',
                 'waf-scopes-id':'0051',
                 'User-Agent': 'bananas'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is from GLOB scope\n'
    # ------------------------------------------------------
    # make a request with updated path '/test.html',
    # should get 403 with custom response
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'www.regexhost.com',
                 'waf-scopes-id':'0051',
                 'User-Agent': 'bananas'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is from RX scope\n'
def test_scopes_linkage_update(setup_scopez_server_action):
    """
    Test linkage update. Update rules config in second scope
    (0050-scopes.json) to 0050-0gG8osWJ.rules.json from
    0050-ZrLf3KkQ.rules.json check if update worked
    """
    # ------------------------------------------------------
    # check second scope for AN 0050 working correctly
    # (monkeez UA blocked by the currently-linked rules file)
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/path.html'
    l_headers = {'host': 'test.com',
                 'waf-scopes-id':'0050',
                 'User-Agent': 'monkeez'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is rules custom response\n'
    # ------------------------------------------------------
    # change the 'rules_prod_id' value for second scope
    # and update.
    # check if update was successful
    # ------------------------------------------------------
    l_conf = {}
    l_file_path = os.path.dirname(os.path.abspath(__file__))
    l_scopes_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/scopes/0050.scopes.json'))
    try:
        with open(l_scopes_conf_path) as l_f:
            l_conf = json.load(l_f)
    except Exception as l_e:
        print('error opening config file: %s. Reason: %s error: %s, doc: %s' % (
            l_scopes_conf_path, type(l_e), l_e, l_e.__doc__))
        assert False
    # relink scope #2 to the 0gG8osWJ rules file (blocks 'bananas' UA)
    l_conf['scopes'][1]['rules_prod_id'] = "0gG8osWJ"
    # bump last_modified_date so the server accepts the update
    l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    # ------------------------------------------------------
    # POST conf
    # ------------------------------------------------------
    l_url = '%s/update_scopes'%(G_TEST_HOST)
    l_headers = {'Content-Type': 'application/json'}
    l_r = requests.post(l_url,
                        headers=l_headers,
                        data=json.dumps(l_conf))
    assert l_r.status_code == 200
    # ------------------------------------------------------
    # make the same request. should get 200 because the old
    # rules file is no longer linked
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/path.html'
    l_headers = {'host': 'test.com',
                 'waf-scopes-id':'0050',
                 'User-Agent': 'monkeez'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    #assert l_r.text == 'This is from GLOB scope\n'
    # ------------------------------------------------------
    # make a request with user-agent bananas - blocked by the
    # newly linked rules file
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/path.html'
    l_headers = {'host': 'test.com',
                 'waf-scopes-id':'0050',
                 'User-Agent': 'bananas'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is rules custom response\n'
# ------------------------------------------------------------------------------
# test /update_bots endpoint
# ------------------------------------------------------------------------------
def test_update_bots_endpoint(setup_scopez_server_action):
    '''
    POST a modified bot config (0052-wHyMHxV7) to /update_bots: the bot
    challenge should follow the new user-agent rule, and a payload with
    a missing customer_id should be rejected with 500.
    '''
    l_url = G_TEST_HOST + '/update_bots'
    l_file_path = os.path.dirname(os.path.abspath(__file__))
    l_test_file = os.path.realpath(os.path.join(l_file_path,
                                                '../../data/waf/conf/bots/0052-wHyMHxV7.bots.json'))
    l_test_payload = ''
    # ------------------------------------------------------
    # check setup
    # ------------------------------------------------------
    assert os.path.exists(l_test_file), 'test file not found!'
    # ------------------------------------------------------
    # slurp test file
    # ------------------------------------------------------
    with open(l_test_file) as l_tf:
        l_test_payload = l_tf.read()
    # ------------------------------------------------------
    # check setup
    # ------------------------------------------------------
    assert l_test_payload, 'payload is empty!'
    l_json_payload = json.loads(l_test_payload)
    # ------------------------------------------------------
    # Check that challenge works (401 = bot challenge issued)
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 401
    # ------------------------------------------------------
    # Update the bot config: challenge 'chowdah' UA instead
    # ------------------------------------------------------
    l_json_payload['directive'][0]['sec_rule']['operator']['value'] = 'chowdah'
    # ------------------------------------------------------
    # update the timestamp, else it will silently do nothing and return 200
    # ref: scopes.cc:load_bots (compare time)
    # ------------------------------------------------------
    l_json_payload['last_modified_date'] = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
    l_result = requests.post(l_url, timeout=3, json=l_json_payload)
    assert l_result.status_code == 200
    assert l_result.json()['status'] == 'success'
    # ------------------------------------------------------
    # Old UA no longer challenged: expect 200
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200,\
        "expecting 200, got {resp_code} since user-agent changed to chowdah".format(resp_code=l_r.status_code)
    # ------------------------------------------------------
    # Expect 401 due to new UA
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'chowdah',
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 401,\
        "expecting 401, got {resp_code} since user-agent changed to chowdah".format(resp_code=l_r.status_code)
    # ------------------------------------------------------
    # check negative test - missing customer_id field
    # ------------------------------------------------------
    l_cust_id = l_json_payload.pop('customer_id')
    l_n2_result = requests.post(l_url, json=l_json_payload)
    assert l_n2_result.status_code == 500,\
        'expected 500 since customer_id {} is removed'.format(l_cust_id)
| test_acl_config_update |
translator_tests.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use bytecode_source_map::source_map::SourceMap;
use bytecode_to_boogie::translator::BoogieTranslator;
use bytecode_verifier::VerifiedModule;
use ir_to_bytecode::{compiler::compile_module, parser::ast::Loc, parser::parse_module};
use libra_types::account_address::AccountAddress;
use std::fs;
use stdlib::{stdlib_modules, stdlib_source_map};
// mod translator;
/// Compile and verify each `.mvir` file in `file_names`, in order, against
/// the stdlib modules plus every module compiled so far.
///
/// Returns the accumulated verified modules and their source maps
/// (stdlib entries first, then one entry per input file).
///
/// Panics if any file fails to read, parse, compile, or verify.
fn compile_files(file_names: Vec<String>) -> (Vec<VerifiedModule>, SourceMap<Loc>) {
    let mut verified_modules = stdlib_modules().to_vec();
    let mut source_maps = stdlib_source_map().to_vec();
    let address = AccountAddress::default();
    // fix: the previous `&file_names[0..file_names.len()]` slice covered the
    // whole vector anyway; iterate it directly (all files are modules here).
    for file_name in &file_names {
        let code = fs::read_to_string(file_name).unwrap();
        let module = parse_module(&code).unwrap();
        let (compiled_module, source_map) =
            compile_module(address, module, &verified_modules).expect("module failed to compile");
        match VerifiedModule::new(compiled_module) {
            Err(e) => {
                panic!("{:?}", e);
            }
            Ok(verified_module) => {
                verified_modules.push(verified_module);
                source_maps.push(source_map);
            }
        }
    }
    (verified_modules, source_maps)
}
#[test]
// fix: restore the test's name, lost to a formatting hole in this copy.
// Translates test_mvir/test3.mvir and prepends the handwritten Boogie prelude;
// asserts only that translation completes (the golden-file check is disabled).
fn test3() {
    let mut file_names = vec![];
    let name = "test_mvir/test3.mvir".to_string();
    file_names.push(name);
    let (modules, source_maps) = compile_files(file_names.to_vec());
    let mut ts = BoogieTranslator::new(&modules, &source_maps);
    let mut res = String::new();
    // handwritten boogie code
    let written_code = fs::read_to_string("src/bytecode_instrs.bpl").unwrap();
    res.push_str(&written_code);
    res.push_str(&ts.translate());
    // This is probably too sensitive to minor changes in libra; commenting for now.
    // let expected_code = fs::read_to_string("test_mvir/test3.bpl.expect").unwrap();
    // assert_eq!(res, expected_code);
}
#[test]
// Translate the arithmetic test module; passes if translation does not panic.
fn test_arithmetic() {
    let file_names = vec!["test_mvir/test-arithmetic.mvir".to_string()];
    let (modules, source_maps) = compile_files(file_names.to_vec());
    let mut translator = BoogieTranslator::new(&modules, &source_maps);
    // handwritten boogie code
    let prelude = fs::read_to_string("src/bytecode_instrs.bpl").unwrap();
    let mut output = String::new();
    output.push_str(&prelude);
    output.push_str(&translator.translate());
}
#[test]
// Translate the control-flow test module; passes if translation does not panic.
fn test_control_flow() {
    let file_names = vec!["test_mvir/test-control-flow.mvir".to_string()];
    let (modules, source_maps) = compile_files(file_names.to_vec());
    let mut translator = BoogieTranslator::new(&modules, &source_maps);
    // handwritten boogie code
    let prelude = fs::read_to_string("src/bytecode_instrs.bpl").unwrap();
    let mut output = String::new();
    output.push_str(&prelude);
    output.push_str(&translator.translate());
}
#[test]
// Translate the function-call test module; passes if translation does not panic.
fn test_func_call() {
    let file_names = vec!["test_mvir/test-func-call.mvir".to_string()];
    let (modules, source_maps) = compile_files(file_names.to_vec());
    let mut translator = BoogieTranslator::new(&modules, &source_maps);
    // handwritten boogie code
    let prelude = fs::read_to_string("src/bytecode_instrs.bpl").unwrap();
    let mut output = String::new();
    output.push_str(&prelude);
    output.push_str(&translator.translate());
}
#[test]
// Translate the reference test module; passes if translation does not panic.
fn test_reference() {
    let file_names = vec!["test_mvir/test-reference.mvir".to_string()];
    let (modules, source_maps) = compile_files(file_names.to_vec());
    let mut translator = BoogieTranslator::new(&modules, &source_maps);
    // handwritten boogie code
    let prelude = fs::read_to_string("src/bytecode_instrs.bpl").unwrap();
    let mut output = String::new();
    output.push_str(&prelude);
    output.push_str(&translator.translate());
}
#[test]
// Translate the special-instruction test module; passes if translation does not panic.
fn test_special_instr() {
    let file_names = vec!["test_mvir/test-special-instr.mvir".to_string()];
    let (modules, source_maps) = compile_files(file_names.to_vec());
    let mut translator = BoogieTranslator::new(&modules, &source_maps);
    // handwritten boogie code
    let prelude = fs::read_to_string("src/bytecode_instrs.bpl").unwrap();
    let mut output = String::new();
    output.push_str(&prelude);
    output.push_str(&translator.translate());
}
#[test]
// Translate the struct test module; passes if translation does not panic.
fn test_struct() {
    let file_names = vec!["test_mvir/test-struct.mvir".to_string()];
    let (modules, source_maps) = compile_files(file_names.to_vec());
    let mut translator = BoogieTranslator::new(&modules, &source_maps);
    // handwritten boogie code
    let prelude = fs::read_to_string("src/bytecode_instrs.bpl").unwrap();
    let mut output = String::new();
    output.push_str(&prelude);
    output.push_str(&translator.translate());
}
| test3 |
init.rs | use crate::init;
use std::env;
use structopt::StructOpt;
#[derive(StructOpt, Debug)]
pub struct | {
/// Agree to all prompts. Useful for non-interactive uses
#[structopt(long = "force-yes", short = "y")]
force_yes: bool,
}
/// Run project initialization in the current working directory,
/// honoring the `--force-yes` flag from `opt`.
pub fn init(opt: InitOpt) -> anyhow::Result<()> {
    let current_directory = env::current_dir()?;
    init::init(current_directory, opt.force_yes)
}
#[cfg(feature = "integration_tests")]
impl InitOpt {
    /// Construct options programmatically (integration tests only).
    pub fn new(force_yes: bool) -> Self {
        InitOpt { force_yes }
    }
}
| InitOpt |
calendar.js | import { setData } from '@progress/kendo-angular-intl';
setData({
name: "en-FI",
likelySubtags: {
en: "en-Latn-US"
},
identity: {
language: "en",
territory: "FI"
},
territory: "FI",
calendar: {
patterns: {
d: "dd/MM/y",
D: "EEEE, d MMMM y",
m: "d MMM",
M: "d MMMM",
y: "MMM y",
Y: "MMMM y",
F: "EEEE, d MMMM y H.mm.ss",
g: "dd/MM/y H.mm",
G: "dd/MM/y H.mm.ss",
t: "H.mm",
T: "H.mm.ss",
s: "yyyy'-'MM'-'dd'T'HH':'mm':'ss",
u: "yyyy'-'MM'-'dd HH':'mm':'ss'Z'"
},
dateTimeFormats: {
full: "{1} 'at' {0}",
long: "{1} 'at' {0}",
medium: "{1}, {0}",
short: "{1}, {0}",
availableFormats: {
Bh: "h B",
Bhm: "h:mm B",
Bhms: "h:mm:ss B",
d: "d",
E: "ccc",
EBhm: "E h:mm B",
EBhms: "E h:mm:ss B",
Ed: "E d",
Ehm: "E h.mm a",
EHm: "E H.mm",
Ehms: "E h.mm.ss a",
EHms: "E H.mm.ss",
Gy: "y G",
GyMMM: "MMM y G",
GyMMMd: "d MMM y G",
GyMMMEd: "E, d MMM y G",
h: "h a",
H: "HH",
hm: "h.mm a",
Hm: "H.mm",
hms: "h.mm.ss a",
Hms: "H.mm.ss",
hmsv: "h.mm.ss a v",
Hmsv: "H.mm.ss v",
hmv: "h.mm a v",
Hmv: "H.mm v",
M: "L",
Md: "dd/MM",
MEd: "E, dd/MM",
MMdd: "dd/MM",
MMM: "LLL",
MMMd: "d MMM",
MMMEd: "E, d MMM",
MMMMd: "d MMMM",
"MMMMW-count-one": "'week' W 'of' MMMM",
"MMMMW-count-other": "'week' W 'of' MMMM",
ms: "mm.ss",
y: "y",
yM: "MM/y",
yMd: "dd/MM/y",
yMEd: "E, dd/MM/y",
yMMM: "MMM y",
yMMMd: "d MMM y",
yMMMEd: "E, d MMM y",
yMMMM: "MMMM y",
yQQQ: "QQQ y",
yQQQQ: "QQQQ y",
"yw-count-one": "'week' w 'of' Y",
"yw-count-other": "'week' w 'of' Y"
}
},
timeFormats: {
full: "H.mm.ss zzzz",
long: "H.mm.ss z",
medium: "H.mm.ss",
short: "H.mm"
},
dateFormats: {
full: "EEEE, d MMMM y",
long: "d MMMM y",
medium: "d MMM y",
short: "dd/MM/y"
},
days: {
format: {
abbreviated: [
"Sun",
"Mon",
"Tue",
"Wed",
"Thu",
"Fri",
"Sat"
],
narrow: [
"S",
"M",
"T",
"W",
"T",
"F",
"S"
],
short: [
"Su",
"Mo",
"Tu",
"We",
"Th",
"Fr",
"Sa"
],
wide: [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
]
},
"stand-alone": {
abbreviated: [
"Sun",
"Mon",
"Tue",
"Wed",
"Thu",
"Fri",
"Sat"
],
narrow: [
"S",
"M",
"T",
"W",
"T",
"F",
"S"
],
short: [
"Su",
"Mo",
"Tu",
"We",
"Th",
"Fr",
"Sa"
],
wide: [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
]
}
},
months: {
format: {
abbreviated: [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec"
],
narrow: [
"J",
"F",
"M",
"A",
"M",
"J",
"J",
"A",
"S",
"O",
"N",
"D"
],
wide: [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
]
},
"stand-alone": {
abbreviated: [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec"
],
narrow: [
"J",
"F",
"M",
"A",
"M",
"J",
"J",
"A",
"S",
"O",
"N",
"D"
],
wide: [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
]
}
},
quarters: {
format: {
abbreviated: [
"Q1",
"Q2",
"Q3",
"Q4"
],
narrow: [
"1",
"2",
"3",
"4"
],
wide: [
"1st quarter",
"2nd quarter",
"3rd quarter",
"4th quarter"
]
},
"stand-alone": {
abbreviated: [
"Q1",
"Q2",
"Q3",
"Q4"
],
narrow: [
"1",
"2",
"3",
"4"
],
wide: [
"1st quarter",
"2nd quarter",
"3rd quarter",
"4th quarter"
]
}
},
dayPeriods: {
format: {
abbreviated: {
midnight: "midnight",
am: "AM",
"am-alt-variant": "am",
noon: "noon",
pm: "PM",
"pm-alt-variant": "pm",
morning1: "in the morning",
afternoon1: "in the afternoon",
evening1: "in the evening",
night1: "at night"
},
narrow: {
midnight: "mi",
am: "a",
"am-alt-variant": "am",
noon: "n",
pm: "p",
"pm-alt-variant": "pm",
morning1: "in the morning",
afternoon1: "in the afternoon",
evening1: "in the evening",
night1: "at night"
},
wide: {
midnight: "midnight",
am: "AM",
"am-alt-variant": "am",
noon: "noon",
pm: "PM",
"pm-alt-variant": "pm",
morning1: "in the morning",
afternoon1: "in the afternoon",
evening1: "in the evening",
night1: "at night"
}
},
"stand-alone": {
abbreviated: {
midnight: "midnight",
am: "AM",
"am-alt-variant": "am",
noon: "noon",
pm: "PM",
"pm-alt-variant": "pm",
morning1: "morning",
afternoon1: "afternoon",
evening1: "evening",
night1: "night"
},
narrow: {
midnight: "midnight",
am: "AM",
"am-alt-variant": "am",
noon: "noon",
pm: "PM",
"pm-alt-variant": "pm",
morning1: "morning",
afternoon1: "afternoon",
evening1: "evening",
night1: "night"
},
wide: {
midnight: "midnight",
am: "AM",
"am-alt-variant": "am",
noon: "noon",
pm: "PM",
"pm-alt-variant": "pm",
morning1: "morning",
afternoon1: "afternoon",
evening1: "evening",
night1: "night"
}
}
},
eras: {
format: {
wide: {
0: "Before Christ",
1: "Anno Domini",
"0-alt-variant": "Before Common Era",
"1-alt-variant": "Common Era"
},
abbreviated: {
0: "BC",
1: "AD",
"0-alt-variant": "BCE",
"1-alt-variant": "CE"
},
narrow: {
0: "B",
1: "A",
"0-alt-variant": "BCE",
"1-alt-variant": "CE"
}
}
},
gmtFormat: "GMT{0}",
gmtZeroFormat: "GMT",
dateFields: {
era: {
wide: "era",
short: "era",
narrow: "era"
},
year: {
wide: "year",
short: "yr",
narrow: "yr"
},
quarter: {
wide: "quarter",
short: "qtr",
narrow: "qtr"
},
month: {
wide: "month",
short: "mo",
narrow: "mo"
},
week: {
wide: "week",
short: "wk",
narrow: "wk"
},
weekOfMonth: {
wide: "week of month",
short: "wk. of mo.",
narrow: "wk. of mo."
},
day: {
wide: "day",
short: "day",
narrow: "day"
},
dayOfYear: {
wide: "day of year",
short: "day of yr.",
narrow: "day of yr."
},
weekday: {
wide: "day of the week",
short: "day of wk.",
narrow: "day of wk."
},
weekdayOfMonth: {
wide: "weekday of the month",
short: "wkday. of mo.",
narrow: "wkday. of mo."
},
dayperiod: {
short: "AM/PM",
wide: "am/pm",
narrow: "AM/PM"
},
hour: {
wide: "hour",
short: "hr",
narrow: "hr"
},
minute: {
wide: "minute",
short: "min",
narrow: "min"
},
second: {
wide: "second",
short: "sec",
narrow: "sec"
},
zone: {
wide: "time zone",
short: "zone",
narrow: "zone"
}
}
},
firstDay: 1
});
test_sparse_emb_demo_model_multi_worker.py | """
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import sys, os
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), r"../../../")))
import sparse_operation_kit as sok
import tensorflow as tf
import numpy as np
import os, json
import pickle
import utils
from test_sparse_emb_demo_model_single_worker import SOKDemo, test_tf_demo, check_saved_embedding_variables
def test_sok_demo(args, init_tensors, *random_samples):
port = 12345
os.environ["TF_CONFIG"] = json.dumps({
'cluster': {"worker": [args.ips[i] + ":" + str(port + i) for i in range(args.worker_num)] },
'task': {"type": 'worker', "index": args.task_id}
})
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
result = sok.Init(global_batch_size=args.global_batch_size)
plugin_demo = SOKDemo(combiner=args.combiner,
max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu,
slot_num=args.slot_num, max_nnz=args.max_nnz,
embedding_vec_size=args.embedding_vec_size)
emb_opt = utils.get_embedding_optimizer(args.optimizer)(learning_rate=0.1)
dense_opt = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)
plugin_saver = sok.Saver()
if (1 == args.restore_params):
filepath = r"./embedding_variables"
plugin_saver.restore_from_file(plugin_demo.embedding_layer.embedding_variable, filepath)
else:
status = plugin_saver.load_embedding_values(plugin_demo.embedding_layer.embedding_variable, init_tensors)
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
def _replica_loss(labels, logits):
loss = loss_fn(labels, logits)
return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)
@tf.function
def _train_step(inputs, labels):
with tf.GradientTape() as tape:
logit, embedding_vector = plugin_demo(inputs, training=True)
loss = _replica_loss(labels, logit)
embedding_variables, other_variable = sok.split_embedding_variable_from_others(plugin_demo.trainable_variables)
grads, emb_grads = tape.gradient(loss, [other_variable, embedding_variables])
if "plugin" not in args.optimizer:
with sok.OptimizerScope(embedding_variables):
emb_opt.apply_gradients(zip(emb_grads, embedding_variables),
experimental_aggregate_gradients=False)
else:
emb_opt.apply_gradients(zip(emb_grads, embedding_variables),
experimental_aggregate_gradients=False)
dense_opt.apply_gradients(zip(grads, other_variable))
return logit, embedding_vector
sok_results = list()
def _dataset_fn(input_context):
replica_batch_size = input_context.get_per_replica_batch_size(args.global_batch_size)
dataset = utils.tf_dataset(*random_samples, batchsize=replica_batch_size, to_sparse_tensor=True, repeat=1)
# because each worker has its own data source, so that no need to shard the dataset.
return dataset
dataset = strategy.distribute_datasets_from_function(_dataset_fn)
for i, (sparse_tensors, replica_labels) in enumerate(dataset):
print("-" * 30, "step ", str(i), "-" * 30)
logit, embedding_vector = strategy.run(_train_step, args=(sparse_tensors, replica_labels))
print("[INFO]: embedding_vector\n", embedding_vector)
sok_results.append(embedding_vector)
# FIXME: when the forward computation is too fast, there
# may exist some conficts with datareader, which cause the program hang.
import time
time.sleep(0.2) # seconds
# save params to file.
if 1 == args.save_params:
filepath = r"./embedding_variables/"
utils.try_make_dirs(filepath, chief=(True if args.task_id == 0 else False))
plugin_saver.dump_to_file(plugin_demo.embedding_layer.embedding_variable, filepath)
return sok_results, plugin_demo.embedding_layer.embedding_variable.values[0].m_var_name
def compare_sok_with_tf(args):
if (args.global_batch_size % args.local_gpu_num != 0):
raise ValueError("global_batch_size: %d is not divisible by local_gpu_num: %d"
%(args.global_batch_size, args.local_gpu_num))
if (args.global_batch_size % args.worker_num != 0):
raise ValueError("global_batch_size: %d is not divisible by worker_num: %d"
%(args.global_batch_size, args.worker_num))
# each worker generate different dataset
if args.generate_new_datas:
worker_batch_size = args.global_batch_size // args.worker_num
random_samples_local = utils.generate_random_samples(num_of_samples=worker_batch_size * args.iter_num,
vocabulary_size=args.local_gpu_num * args.max_vocabulary_size_per_gpu * args.worker_num,
slot_num=args.slot_num,
max_nnz=args.max_nnz)
utils.save_to_file(r"./random_samples_" + str(args.task_id) + r".file", *random_samples_local)
else:
random_samples_local = utils.restore_from_file(r"./random_samples_" + str(args.task_id) + r".file")
if (0 == args.restore_params):
# each worker generate same init tensors, because each worker will do the filtering by itself.
init_tensors = utils.get_ones_tensor(max_vocab_size_per_gpu=args.max_vocabulary_size_per_gpu,
embedding_vec_size=args.embedding_vec_size,
num=args.local_gpu_num * args.worker_num)
else:
filepath = r"./embedding_variables"
tf_values_filename = os.path.join(filepath, r"tf_variable.file")
init_tensors = utils.restore_from_file(tf_values_filename)
sok_results_local, embedding_variable_name = test_sok_demo(args, init_tensors, *random_samples_local)
# save the forward embedding vector from different worker to file
utils.save_to_file(r"./sok_embedding_vectors_" + str(args.task_id) + r".file", *sok_results_local)
# aggregate dataset from different worker
dataset_filenames = [r"./random_samples_" + str(task_id) + r".file"
for task_id in range(args.worker_num)]
random_samples_total = [list() for _ in range(args.iter_num)]
random_labels_total = [list() for _ in range(args.iter_num)]
local_batch_size = args.global_batch_size // args.worker_num
for work_id in range(args.worker_num):
samples, labels = utils.restore_from_file(dataset_filenames[work_id])
for i in range(args.iter_num):
random_samples_total[i].extend(samples[i * local_batch_size : (i + 1) * local_batch_size])
random_labels_total[i].extend(labels[i * local_batch_size : (i + 1) * local_batch_size])
random_samples_total = np.concatenate(random_samples_total, axis=0)
random_labels_total = np.concatenate(random_labels_total, axis=0)
tf_results = test_tf_demo(args, init_tensors, random_samples_total, random_labels_total)
# aggregate forward embedding vector from different worker
sok_results_filenames = [r"./sok_embedding_vectors_" + str(task_id) + r".file"
for task_id in range(args.worker_num)]
sok_results_total = list()
for file_name in sok_results_filenames:
sok_results_local = utils.restore_from_file(file_name)
sok_results_total.append(sok_results_local)
if (len(sok_results_total[0]) != len(tf_results)):
raise ValueError("The length of results obtained from sok: %d is not equal to that of tensorflow: %d."
%(len(sok_results_total[0]), len(tf_results)))
if (len(tf_results) != args.iter_num):
raise ValueError("The length of embedding vectors: %d is not equal to iteration number: %d."
%(len(tf_results), args.iter_num))
# for i, sok_vector in enumerate(sok_results_total):
for i in range(args.iter_num):
if args.local_gpu_num != 1:
sok_vector = tf.concat([tf.concat(sok_results_total[task_id][i].values, axis=0)
for task_id in range(args.worker_num)], axis=0)
else:
sok_vector = tf.concat([sok_results_total[task_id][i]
for task_id in range(args.worker_num)], axis=0)
tf.debugging.assert_near(tf.reshape(sok_vector,
shape=[-1, tf.shape(sok_vector)[-1]]),
tf_results[i],
atol=1e-4,
rtol=1e-4)
print("\n[INFO]: With MultiWorkerMirroredStrategy, the embedding vector obtained from " +\
"sparse operation kit and tensorflow are consistent for %d iterations."
%args.iter_num)
if (1 == args.save_params):
check_saved_embedding_variables(args, embedding_variable_name)
def get_task_id(ips):
|
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='test demo model with single worker.')
parser.add_argument('--local_gpu_num', type=int,
help='the number of GPUs used to do paralell training.',
required=False, default=8)
parser.add_argument('--iter_num', type=int,
help='the number of testing iterations.',
required=False, default=100)
parser.add_argument('--max_vocabulary_size_per_gpu', type=int,
required=False, default=128)
parser.add_argument('--slot_num', type=int,
help='the number of feature fields',
required=False, default=1)
parser.add_argument('--max_nnz', type=int,
help='the maximum number of keys in one slot',
required=False, default=1)
parser.add_argument('--embedding_vec_size', type=int,
help='the dimention of embedding vector',
required=False, default=1)
parser.add_argument('--combiner', type=str,
help='the combiner used to do reduction for sparse embedding layer. ' +\
'It is only respected in sparse embedding layer.',
required=False, default='mean', choices=['mean', 'sum'])
parser.add_argument('--global_batch_size', type=int, required=False, default=16)
parser.add_argument('--optimizer', type=str,
help="use what optimizer",
required=False, default='plugin_adam',
choices=['plugin_adam', 'adam', 'sgd'])
parser.add_argument('--ips', type=str, nargs="+",
help="the ip address of each worker.",
required=False, default="0.0.0.0")
parser.add_argument('--generate_new_datas', type=int, choices=[0, 1],
help='whether to generate new random samples',
required=False, default=1)
parser.add_argument('--save_params', type=int, choices=[0, 1],
help='whether to save the trained parameters.',
required=False, default=0)
parser.add_argument('--restore_params', type=int, choices=[0, 1],
help='whether to restore from saved files. '+\
'By default, the testing program will generate random ' +\
'initial value to initialize trainable parameters '+\
'rather than restore trainable parameters from file.',
required=False, default=0)
args = parser.parse_args()
if not isinstance(args.ips, list):
args.ips = [args.ips]
args.worker_num = len(args.ips)
if utils.all_ips_in_local(args.ips):
processes = list()
for task_id in range(args.worker_num):
available_gpus = ",".join([str(args.local_gpu_num * task_id + i)
for i in range(args.local_gpu_num)])
print("[INFO]: on task: %d, its available GPUs are: %s" %(task_id, available_gpus))
os.environ["CUDA_VISIBLE_DEVICES"] = available_gpus
process = utils.TestProcess(func=compare_sok_with_tf, task_id=task_id, arguments=args)
process.start()
processes.append(process)
for process in processes:
process.join()
else:
args.task_id = get_task_id(args.ips)
os.environ['CUDA_VISIBLE_DEVICES'] = ",".join([str(i) for i in range(args.local_gpu_num)])
compare_sok_with_tf(args)
| local_ip = utils.get_local_ip()
for i in range(len(ips)):
if ips[i] == local_ip:
return i
raise ValueError("Cannot find local_ip: %s in ips list: [%s]"
%(local_ip, ", ".join(ips))) |
postgres.rs | use migration_core::migration_connector::{ConnectorError, ConnectorResult, DiffTarget};
use quaint::{prelude::*, single::Quaint};
use std::collections::HashMap;
use url::Url;
pub(crate) async fn postgres_setup(url: String, prisma_schema: &str) -> ConnectorResult<()> {
{
let mut url = Url::parse(&url).map_err(ConnectorError::url_parse_error)?;
let quaint_url = quaint::connector::PostgresUrl::new(url.clone()).unwrap();
let (db_name, schema) = (quaint_url.dbname(), quaint_url.schema());
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let query = format!("CREATE DATABASE \"{}\"", db_name);
conn.raw_cmd(&query).await.ok();
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = Quaint::new(&url.to_string()).await.unwrap();
let drop_and_recreate_schema = format!(
"DROP SCHEMA IF EXISTS \"{schema}\" CASCADE;\nCREATE SCHEMA \"{schema}\";",
schema = schema
);
conn.raw_cmd(&drop_and_recreate_schema)
.await
.map_err(|e| ConnectorError::from_source(e, ""))?;
}
{
let api = migration_core::migration_api(prisma_schema)?;
// 2. create the database schema for given Prisma schema
let (config, schema) = datamodel::parse_schema(prisma_schema).unwrap();
let migration = api
.connector()
.diff(DiffTarget::Empty, DiffTarget::Datamodel((&config, &schema)))
.await
.unwrap();
api.connector()
.database_migration_step_applier()
.apply_migration(&migration)
.await
.unwrap();
};
Ok(())
}
pub(crate) async fn postgres_teardown(url: &str) -> ConnectorResult<()> |
async fn create_postgres_admin_conn(mut url: Url) -> ConnectorResult<Quaint> {
url.set_path("/postgres");
Ok(Quaint::new(&url.to_string()).await.unwrap())
}
fn strip_schema_param_from_url(url: &mut Url) {
let mut params: HashMap<String, String> = url.query_pairs().into_owned().collect();
params.remove("schema");
let params: Vec<String> = params.into_iter().map(|(k, v)| format!("{}={}", k, v)).collect();
let params: String = params.join("&");
url.set_query(Some(¶ms));
}
| {
let mut url = Url::parse(url).map_err(ConnectorError::url_parse_error)?;
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let db_name = url.path().strip_prefix('/').unwrap();
let query = format!("DROP DATABASE \"{}\" CASCADE", db_name);
conn.raw_cmd(&query).await.ok();
Ok(())
} |
apps.py | from __future__ import unicode_literals
from django.apps import AppConfig
| name = 'account' | class AccountConfig(AppConfig): |
kubeletclient.go | package kubeletclient
import (
"os"
"path/filepath"
"time"
"golang.org/x/net/context"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/checkpoint"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/util"
)
const (
defaultKubeletSocketFile = "kubelet.sock"
defaultPodResourcesMaxSize = 1024 * 1024 * 16 // 16 Mb
)
var (
kubeletSocket string
defaultPodResourcesPath = "/var/lib/kubelet/pod-resources"
)
// GetResourceClient returns an instance of ResourceClient interface initialized with Pod resource information
func GetResourceClient() (types.ResourceClient, error) {
// If Kubelet resource API endpoint exist use that by default
// Or else fallback with checkpoint file
if hasKubeletAPIEndpoint() {
logging.Debugf("GetResourceClient: using Kubelet resource API endpoint")
return getKubeletClient()
}
logging.Debugf("GetResourceClient: using Kubelet device plugin checkpoint")
return checkpoint.GetCheckpoint()
}
func getKubeletClient() (types.ResourceClient, error) {
newClient := &kubeletClient{}
if kubeletSocket == "" {
kubeletSocket = util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
}
client, conn, err := podresources.GetClient(kubeletSocket, 10*time.Second, defaultPodResourcesMaxSize)
if err != nil {
return nil, logging.Errorf("getKubeletClient: error getting grpc client: %v\n", err)
}
defer conn.Close()
if err := newClient.getPodResources(client); err != nil {
return nil, logging.Errorf("getKubeletClient: error ge tting pod resources from client: %v\n", err)
}
return newClient, nil
}
type kubeletClient struct {
resources []*podresourcesapi.PodResources
}
func (rc *kubeletClient) getPodResources(client podresourcesapi.PodResourcesListerClient) error {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
resp, err := client.List(ctx, &podresourcesapi.ListPodResourcesRequest{})
if err != nil {
return logging.Errorf("getPodResources: failed to list pod resources, %v.Get(_) = _, %v", client, err)
}
rc.resources = resp.PodResources
return nil
}
// GetPodResourceMap returns an instance of a map of Pod ResourceInfo given a (Pod name, namespace) tuple
func (rc *kubeletClient) GetPodResourceMap(pod *v1.Pod) (map[string]*types.ResourceInfo, error) {
resourceMap := make(map[string]*types.ResourceInfo)
name := pod.Name
ns := pod.Namespace
if name == "" || ns == "" {
return nil, logging.Errorf("GetPodResourcesMap: Pod name or namespace cannot be empty")
}
for _, pr := range rc.resources {
if pr.Name == name && pr.Namespace == ns {
for _, cnt := range pr.Containers {
for _, dev := range cnt.Devices {
if rInfo, ok := resourceMap[dev.ResourceName]; ok {
rInfo.DeviceIDs = append(rInfo.DeviceIDs, dev.DeviceIds...)
} else {
resourceMap[dev.ResourceName] = &types.ResourceInfo{DeviceIDs: dev.DeviceIds}
}
}
}
}
}
return resourceMap, nil
}
func hasKubeletAPIEndpoint() bool | {
// Check for kubelet resource API socket file
kubeletAPISocket := filepath.Join(defaultPodResourcesPath, defaultKubeletSocketFile)
if _, err := os.Stat(kubeletAPISocket); err != nil {
logging.Debugf("hasKubeletAPIEndpoint: error looking up kubelet resource api socket file: %q", err)
return false
}
return true
} |
|
HTMLScriptElement.py | ########################################################################
#
# File Name: HTMLScriptElement
#
# Documentation: http://docs.4suite.com/4DOM/HTMLScriptElement.html
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: [email protected]
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLScriptElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="SCRIPT"):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_charset(self):
return self.getAttribute("CHARSET")
def _set_charset(self, value):
self.setAttribute("CHARSET", value)
def _get_defer(self):
|
def _set_defer(self, value):
if value:
self.setAttribute("DEFER", "DEFER")
else:
self.removeAttribute("DEFER")
def _get_event(self):
return self.getAttribute("EVENT")
def _set_event(self, value):
self.setAttribute("EVENT", value)
def _get_htmlFor(self):
return self.getAttribute("FOR")
def _set_htmlFor(self, value):
self.setAttribute("FOR", value)
def _get_src(self):
return self.getAttribute("SRC")
def _set_src(self, value):
self.setAttribute("SRC", value)
def _get_text(self):
if not self.firstChild:
return
if self.firstChild == self.lastChild:
return self.firstChild.data
self.normalize()
text = filter(lambda x: x.nodeType == Node.TEXT_NODE, self.childNodes)
return text[0].data
def _set_text(self, value):
text = None
for node in self.childNodes:
if not text and node.nodeType == Node.TEXT_NODE:
text = node
else:
self.removeChild(node)
if text:
text.data = value
else:
text = self.ownerDocument.createTextNode(value)
self.appendChild(text)
def _get_type(self):
return self.getAttribute("TYPE")
def _set_type(self, value):
self.setAttribute("TYPE", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"charset" : _get_charset,
"defer" : _get_defer,
"event" : _get_event,
"htmlFor" : _get_htmlFor,
"src" : _get_src,
"text" : _get_text,
"type" : _get_type
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"charset" : _set_charset,
"defer" : _set_defer,
"event" : _set_event,
"htmlFor" : _set_htmlFor,
"src" : _set_src,
"text" : _set_text,
"type" : _set_type
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
| return self.hasAttribute("DEFER") |
graph_manager_add_solution_profile.py | import cProfile
import gym
import numpy as np
from connect_four.evaluation.incremental_victor.graph.graph_manager import GraphManager
from connect_four.evaluation.incremental_victor.solution.victor_solution_manager import VictorSolutionManager
from connect_four.problem import ConnectFourGroupManager
env = gym.make('connect_four-v0')
env.state = np.array([
[
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 1, 1, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 1, 1, 0, 0, 0, ],
],
[
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 1, 1, 1, 0, 0, ],
[0, 0, 0, 0, 1, 0, 0, ],
],
])
# noinspection SpellCheckingInspection
cfgm = ConnectFourGroupManager(env_variables=env.env_variables)
vsm = VictorSolutionManager(env_variables=env.env_variables)
player, row, col = 0, 5, 0
gm = GraphManager(player=player, problem_manager=cfgm, solution_manager=vsm)
_, removed_problems = cfgm.move(player=player, row=row, col=col)
for problem in removed_problems:
gm._remove_problem(problem)
removed_solutions, added_solutions = vsm.move(player=player, row=row, col=col)
print("len(removed_solutions) = ", len(removed_solutions))
print("len(added_solutions) = ", len(added_solutions))
# print("number of useful solutions =", len(self.solution_to_solutions))
for solution in removed_solutions:
gm._remove_solution(solution)
print("number of solutions that remained =", len(gm.solution_to_solutions))
def add_solutions():
|
cProfile.run(
'add_solutions()',
sort="cumtime",
)
| for solution in added_solutions:
gm._add_solution(solution)
print("number of solutions after adding =", len(gm.solution_to_solutions)) |
generate_simulated_mantid.py | """
This script is used to generate simulated count data based on a Mantid
script.
"""
|
def VariableStatsData(N, A0, omega, phi, sigma, bg):
x = numpy.linspace(start=0.0, stop=32.0, num=2001)
y = (1+A0*numpy.cos(omega*x+phi)*numpy.exp(-(sigma*x)**2)) * \
numpy.exp(-x/2.197)+bg
NN = N/numpy.sum(y) # normalisation so whole spectrum has ~N counts
return (x, numpy.random.poisson(y*NN))
def write_data(x, y, part=0):
path = f'{os.path.dirname(__file__)}/../data_files'
part_str = part if part != 0 else ""
with open(f'{path}/simulated_mantid{part_str}.txt', 'w') as f:
f.write('# X Y\n')
lines = [[x[i], y[i]]
# if y[i] != 0 # Uncomment to replace 0s with 1s
# else [x[i], 1]
for i in range(len(x))
# if y[i] != 0 # Uncomment to ignore 0 values
]
f.writelines([f'{i} {j}\n' for i, j in lines])
def write_problem(N, part=0):
path = f'{os.path.dirname(__file__)}/..'
part_str = part if part != 0 else ""
with open(f'{path}/simulated_mantid{part_str}.txt', 'w') as f:
f.write('# FitBenchmark Problem\n')
f.write("software = 'Mantid'\n")
f.write(f"name = 'Simulated poisson (Mantid) {part_str}'\n")
f.write("description = 'A simulated dataset for testing poisson cost"
"functions, based on a simple simulation from Mantid.'\n")
f.write(f"input_file = 'simulated_mantid{part_str}.txt'\n")
f.write("function = 'name=UserFunction,"
"Formula=N*((1+A*cos(omega*x+phi)*exp(-(sigma*x)^2))*"
"exp(-x/2.197)+bg),"
f"N={0.007*N},"
"A=0.3,"
"omega=0.9,"
"phi=0.2,"
"sigma=0.12,"
"bg=0.001'\n")
if __name__ == '__main__':
chunks = [1] #,8,16,20,32,40,50,100]
num = 1000
N0 = 4e5
for i, part in enumerate(chunks):
args = {'N': 1000/part,
'A0': 0.25,
'omega': 1.0,
'phi': 0.1,
'sigma': 0.1,
'bg': 1.E-4}
x, y = VariableStatsData(**args)
write_data(x, y, part=i)
write_problem(N=args['N'], part=i) | import os
import numpy |
name.rs | // Copyright 2020 Contributors to the Parsec project.
// SPDX-License-Identifier: Apache-2.0
use crate::tss2_esys::TPM2B_NAME;
use crate::{Error, Result, WrapperErrorKind};
use log::error;
use std::convert::TryFrom;
/// Structure holding the data representing names
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Name {
value: Vec<u8>,
}
impl Name {
const MAX_SIZE: usize = 68;
pub fn value(&self) -> &[u8] {
&self.value
}
}
impl TryFrom<Vec<u8>> for Name {
type Error = Error;
fn try_from(bytes: Vec<u8>) -> Result<Self> {
if bytes.len() > Name::MAX_SIZE {
error!("Error: Invalid Vec<u8> size(> {})", Name::MAX_SIZE);
return Err(Error::local_error(WrapperErrorKind::WrongParamSize));
}
Ok(Name { value: bytes })
}
}
impl TryFrom<TPM2B_NAME> for Name {
type Error = Error;
fn try_from(tss_name: TPM2B_NAME) -> Result<Self> {
let size = tss_name.size as usize;
if size > Name::MAX_SIZE {
error!("Error: Invalid TPM2B_NAME size(> {})", Name::MAX_SIZE);
return Err(Error::local_error(WrapperErrorKind::InvalidParam));
}
Ok(Name {
value: tss_name.name[..size].to_vec(),
})
}
}
impl TryFrom<Name> for TPM2B_NAME {
type Error = Error;
fn | (name: Name) -> Result<TPM2B_NAME> {
let size = name.value.len();
if size > Name::MAX_SIZE {
error!("Error: Invalid TPM2B_NAME size(> {})", Name::MAX_SIZE);
return Err(Error::local_error(WrapperErrorKind::WrongParamSize));
}
let mut tss_name = TPM2B_NAME {
size: size as u16,
..Default::default()
};
tss_name.name[..size].copy_from_slice(name.value());
Ok(tss_name)
}
}
| try_from |
calculator.py | import math
class Calculator:
def __init__(self, width):
self.width = width
self.params = {
'angle_x': [0],
'angle_y': [0],
'angle_z': [0],
'eye_l_open': [1],
'eye_r_open': [1],
'eye_ball_x': [0],
'eye_ball_y': [0],
'mouth_open_y': [0],
'body_angle_z': [0]
}
for k in self.params:
self.params[k] *= 5
setattr(self, k, lambda l = k : sum(self.params[l]) / len(self.params[l]))
def update(self, points):
self.points = points
for k in self.params:
f = getattr(self, f'calc_{k}')
self.params[k].append(f())
self.params[k].pop(0)
def calc_angle_x(self):
t = self.distance(self.points[2], self.points[33]) - self.distance(self.points[33], self.points[14])
d = self.distance(self.points[2], self.points[14])
return (t / d) * 50
def calc_angle_y(self):
t = self.distance(self.points[30], self.points[51]) - self.distance(self.points[28], self.points[30])
d = self.distance(self.points[28], self.points[51])
return (t / d - 0.2) * 90
def calc_angle_z(self):
return math.atan2(self.points[27][0] - self.points[33][0], self.points[33][1] - self.points[27][1]) * 100
def calc_eye_l_open(self):
|
def calc_eye_r_open(self):
t = self.distance(self.points[37], self.points[41]) + self.distance(self.points[38], self.points[40])
d = self.distance(self.points[19], self.points[37]) + self.distance(self.points[20], self.points[38])
return (t / d - 0.15) * 7
def calc_eye_ball_x(self):
if not self.points[68] or not self.points[69]:
return self.params['eye_ball_x'][4]
ln = self.distance(self.points[36], self.points[68])
rn = self.distance(self.points[42], self.points[69])
return (-1 + ln / (ln + self.distance(self.points[39], self.points[68])) + rn / (rn + self.distance(self.points[45], self.points[69]))) * 3
def calc_eye_ball_y(self):
if not self.points[68] or not self.points[69]:
return self.params['eye_ball_y'][4]
lt = self.middle(self.points[37], self.points[38])
ld = self.middle(self.points[40], self.points[41])
rt = self.middle(self.points[43], self.points[44])
rd = self.middle(self.points[46], self.points[47])
ln = self.distance(ld, self.points[68])
rn = self.distance(rd, self.points[69])
return (-1.3 + ln / (ln + self.distance(lt, self.points[68])) + rn / (rn + self.distance(rt, self.points[69]))) * 3
def calc_mouth_open_y(self):
return (self.distance(self.points[62], self.points[66]) / self.distance(self.points[33], self.points[66])) * 2
def calc_body_angle_z(self):
t = self.points[2][0] + self.points[14][0] - self.points[33][0] - self.width / 2
return (t / self.width) * 100
def middle(self, p1, p2):
return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)
def distance(self, p1, p2):
return math.hypot(p1[0] - p2[0], p1[1] - p2[1])
| t = self.distance(self.points[43], self.points[47]) + self.distance(self.points[44], self.points[46])
d = self.distance(self.points[23], self.points[43]) + self.distance(self.points[24], self.points[44])
return (t / d - 0.15) * 7 |
WdatePicker.js | /*
* My97 DatePicker 4.8
* License: http://www.my97.net/license.asp
*/
var $dp,WdatePicker;(function(){var l={
$langList:[
{name:"en",charset:"UTF-8"},
{name:"zh-cn",charset:"gb2312"},
{name:"zh-tw",charset:"GBK"}
],
$skinList:[
{name:"default",charset:"gb2312"},
{name:"whyGreen",charset:"gb2312"},
{name:"blue",charset:"gb2312"},
{name:"green",charset:"gb2312"},
{name:"simple",charset:"gb2312"},
{name:"ext",charset:"gb2312"},
{name:"blueFresh",charset:"gb2312"},
{name:"twoer",charset:"gb2312"},
{name:"YcloudRed",charset:"gb2312"}],
$wdate:true,
$crossFrame:false,
$preLoad:false,
$dpPath:"",
doubleCalendar:false,
enableKeyboard:true,
enableInputMask:true,
autoUpdateOnChanged:null,
weekMethod:"MSExcel",
position:{},
| skin:"default",
dateFmt:"yyyy-MM-dd",
realDateFmt:"yyyy-MM-dd",
realTimeFmt:"HH:mm:ss",
realFullFmt:"%Date %Time",
minDate:"0001-01-01 00:00:00",
maxDate:"9999-12-31 23:59:59",
minTime:"00:00:00",
maxTime:"23:59:59",
startDate:"",
alwaysUseStartDate:false,
yearOffset:1911,
firstDayOfWeek:0,
isShowWeek:false,
highLineWeekDay:true,
isShowClear:true,
isShowToday:true,
isShowOK:true,
isShowOthers:true,
readOnly:false,
errDealMode:0,
autoPickDate:null,
qsEnabled:true,
autoShowQS:false,
hmsMenuCfg:{H:[1,6],m:[5,6],s:[15,4]},
opposite:false,specialDates:null,specialDays:null,disabledDates:null,disabledDays:null,onpicking:null,onpicked:null,onclearing:null,oncleared:null,ychanging:null,ychanged:null,Mchanging:null,Mchanged:null,dchanging:null,dchanged:null,Hchanging:null,Hchanged:null,mchanging:null,mchanged:null,schanging:null,schanged:null,eCont:null,vel:null,elProp:"",errMsg:"",quickSel:[],has:{},getRealLang:function(){var d=l.$langList;for(var e=0;e<d.length;e++){if(d[e].name==this.lang){return d[e]}}return d[0]}};WdatePicker=g;var n=window,i={innerHTML:""},z="document",B="documentElement",H="getElementsByTagName",E,u,h,f,D;var v=navigator.appName;if(v=="Microsoft Internet Explorer"){h=true}else{if(v=="Opera"){D=true}else{f=true}}u=l.$dpPath||q();if(l.$wdate){m(u+"skin/WdatePicker.css")}E=n;if(l.$crossFrame){try{while(E.parent!=E&&E.parent[z][H]("frameset").length==0){E=E.parent}}catch(y){}}if(!E.$dp){E.$dp={ff:f,ie:h,opera:D,status:0,defMinDate:l.minDate,defMaxDate:l.maxDate}}b();if(l.$preLoad&&$dp.status==0){k(n,"onload",function(){g(null,true)})}if(!n[z].docMD){k(n[z],"onmousedown",s,true);n[z].docMD=true}if(!E[z].docMD){k(E[z],"onmousedown",s,true);E[z].docMD=true}k(n,"onunload",function(){if($dp.dd){r($dp.dd,"none")}});function b(){try{E[z],E.$dp=E.$dp||{}}catch(I){E=n;$dp=$dp||{}}var w={win:n,$:function(e){return(typeof e=="string")?n[z].getElementById(e):e},$D:function(J,e){return this.$DV(this.$(J).value,e)},$DV:function(J,e){if(J!=""){this.dt=$dp.cal.splitDate(J,$dp.cal.dateFmt);if(e){for(var L in e){if(this.dt[L]===undefined){this.errMsg="invalid property:"+L}else{this.dt[L]+=e[L];if(L=="M"){var M=e.M>0?1:0;var K=new Date(this.dt.y,this.dt.M,0).getDate();this.dt.d=Math.min(K+M,this.dt.d)}}}}if(this.dt.refresh()){return this.dt}}return""},show:function(){var K=E[z].getElementsByTagName("div"),J=100000;for(var e=0;e<K.length;e++){var 
L=parseInt(K[e].style.zIndex);if(L>J){J=L}}this.dd.style.zIndex=J+2;r(this.dd,"block");r(this.dd.firstChild,"")},unbind:function(e){e=this.$(e);if(e.initcfg){t(e,"onclick",function(){g(e.initcfg)});t(e,"onfocus",function(){g(e.initcfg)})}},hide:function(){r(this.dd,"none")},attachEvent:k};for(var d in w){E.$dp[d]=w[d]}$dp=E.$dp}function k(I,J,w,d){if(I.addEventListener){var e=J.replace(/on/,"");w._ieEmuEventHandler=function(K){return w(K)};I.addEventListener(e,w._ieEmuEventHandler,d)}else{I.attachEvent(J,w)}}function t(w,I,e){if(w.removeEventListener){var d=I.replace(/on/,"");e._ieEmuEventHandler=function(J){return e(J)};w.removeEventListener(d,e._ieEmuEventHandler,false)}else{w.detachEvent(I,e)}}function C(w,e,d){if(typeof w!=typeof e){return false}if(typeof w=="object"){if(!d){for(var I in w){if(typeof e[I]=="undefined"){return false}if(!C(w[I],e[I],true)){return false}}}return true}else{if(typeof w=="function"&&typeof e=="function"){return w.toString()==e.toString()}else{return w==e}}}function q(){var I,w,d=n[z][H]("script");for(var e=0;e<d.length;e++){I=d[e].getAttribute("src")||"";I=I.substr(0,I.toLowerCase().indexOf("wdatepicker.js"));var w=I.lastIndexOf("/");if(w>0){I=I.substring(0,w+1)}if(I){break}}return I}function m(w,I,J){var d=n[z][H]("HEAD").item(0),e=n[z].createElement("link");if(d){e.href=w;e.rel="stylesheet";e.type="text/css";if(I){e.title=I}if(J){e.charset=J}d.appendChild(e)}}function p(I){I=I||E;var L=0,d=0;while(I!=E){var N=I.parent[z][H]("iframe");for(var J=0;J<N.length;J++){try{if(N[J].contentWindow==I){var K=o(N[J]);L+=K.left;d+=K.top;break}}catch(M){}}I=I.parent}return{leftM:L,topM:d}}function o(I,w){if(I.getBoundingClientRect){return I.getBoundingClientRect()}else{var J={ROOT_TAG:/^body|html$/i,OP_SCROLL:/^(?:inline|table-row)$/i};var e=false,M=null,P=I.offsetTop,K=I.offsetLeft,d=I.offsetWidth,O=I.offsetHeight;var 
L=I.offsetParent;if(L!=I){while(L){K+=L.offsetLeft;P+=L.offsetTop;if(c(L,"position").toLowerCase()=="fixed"){e=true}else{if(L.tagName.toLowerCase()=="body"){M=L.ownerDocument.defaultView}}L=L.offsetParent}}L=I.parentNode;while(L.tagName&&!J.ROOT_TAG.test(L.tagName)){if(L.scrollTop||L.scrollLeft){if(!J.OP_SCROLL.test(r(L))){if(!D||L.style.overflow!=="visible"){K-=L.scrollLeft;P-=L.scrollTop}}}L=L.parentNode}if(!e){var N=F(M);K-=N.left;P-=N.top}d+=K;O+=P;return{left:K,top:P,right:d,bottom:O}}}function x(e){e=e||E;var J=e[z],I=(e.innerWidth)?e.innerWidth:(J[B]&&J[B].clientWidth)?J[B].clientWidth:J.body.offsetWidth,d=(e.innerHeight)?e.innerHeight:(J[B]&&J[B].clientHeight)?J[B].clientHeight:J.body.offsetHeight;return{width:I,height:d}}function F(e){e=e||E;var J=e[z],d=J[B],I=J.body;J=(d&&d.scrollTop!=null&&(d.scrollTop>I.scrollTop||d.scrollLeft>I.scrollLeft))?d:I;return{top:J.scrollTop,left:J.scrollLeft}}function s(d){try{var w=d?(d.srcElement||d.target):null;if($dp.cal&&!$dp.eCont&&$dp.dd&&w!=$dp.el&&$dp.dd.style.display=="block"){$dp.cal.close()}}catch(d){}}function A(){$dp.status=2}var G,j;function g(M,d){if(!$dp){return}b();var J={};for(var L in M){J[L]=M[L]}for(var L in l){if(L.substring(0,1)!="$"&&J[L]===undefined){J[L]=l[L]}}if(d){if(!w()){j=j||setInterval(function(){if(E[z].readyState=="complete"){clearInterval(j)}g(null,true)},50);return}if($dp.status==0){$dp.status=1;J.el=i;a(J,true)}else{return}}else{if(J.eCont){J.eCont=$dp.$(J.eCont);J.el=i;J.autoPickDate=true;J.qsEnabled=false;a(J)}else{if(l.$preLoad&&$dp.status!=2){return}var I=N();if(n.event===I||I){J.srcEl=I.srcElement||I.target;I.cancelBubble=true}J.el=J.el=$dp.$(J.el||J.srcEl);if(J.el==null){alert("WdatePicker:el is 
null!\nexample:onclick=\"WdatePicker({el:this})\"");return;}try{if(!J.el||J.el.My97Mark===true||J.el.disabled||($dp.dd&&r($dp.dd)!="none"&&$dp.dd.style.left!="-970px")){if(J.el.My97Mark){J.el.My97Mark=false}return}}catch(K){}if(I&&J.el.nodeType==1&&!C(J.el.initcfg,M)){$dp.unbind(J.el);k(J.el,I.type=="focus"?"onclick":"onfocus",function(){g(M)});J.el.initcfg=M}a(J)}}function w(){if(h&&E!=n&&E[z].readyState!="complete"){return false}return true}function N(){if(f){try{func=N.caller;while(func!=null){var O=func.arguments[0];if(O&&(O+"").indexOf("Event")>=0){return O}func=func.caller}}catch(P){}return null}return event}}function c(e,d){return e.currentStyle?e.currentStyle[d]:document.defaultView.getComputedStyle(e,false)[d]}function r(e,d){if(e){if(d!=null){e.style.display=d}else{return c(e,"display")}}}function a(e,d){var K=e.el?e.el.nodeName:"INPUT";if(d||e.eCont||new RegExp(/input|textarea|div|span|p|a/ig).test(K)){e.elProp=K=="INPUT"?"value":"innerHTML"}else{return}if(e.lang=="auto"){e.lang=h?navigator.browserLanguage.toLowerCase():navigator.language.toLowerCase()}if(!e.eCont){for(var J in e){$dp[J]=e[J]}}if(!$dp.dd||e.eCont||($dp.dd&&(e.getRealLang().name!=$dp.dd.lang||e.skin!=$dp.dd.skin))){if(e.eCont){w(e.eCont,e)}else{$dp.dd=E[z].createElement("DIV");$dp.dd.style.cssText="position:absolute";E[z].body.appendChild($dp.dd);w($dp.dd,e);if(d){$dp.dd.style.left=$dp.dd.style.top="-970px"}else{$dp.show();I($dp)}}}else{if($dp.cal){$dp.show();$dp.cal.init();if(!$dp.eCont){I($dp)}}}function w(V,P){var O=E[z].domain,S=false,M='<iframe hideFocus=true width=9 height=7 frameborder=0 border=0 scrolling=no src="about:blank"></iframe>';V.innerHTML=M;var L=l.$langList,U=l.$skinList,T;try{T=V.lastChild.contentWindow[z]}catch(Q){S=true;V.removeChild(V.lastChild);var N=E[z].createElement("iframe");N.hideFocus=true;N.frameBorder=0;N.scrolling="no";N.src="javascript:(function(){var 
d=document;d.open();d.domain='"+O+"';})()";V.appendChild(N);setTimeout(function(){T=V.lastChild.contentWindow[z];R()},97);return}R();function R(){var Y=P.getRealLang();V.lang=Y.name;V.skin=P.skin;var X=["<head><script>","","var doc=document, $d, $dp, $cfg=doc.cfg, $pdp = parent.$dp, $dt, $tdt, $sdt, $lastInput, $IE=$pdp.ie, $FF = $pdp.ff,$OPERA=$pdp.opera, $ny, $cMark = false;","if($cfg.eCont){$dp = {};for(var p in $pdp)$dp[p]=$pdp[p];}else{$dp=$pdp;};for(var p in $cfg){$dp[p]=$cfg[p];}","doc.oncontextmenu=function(){try{$c._fillQS(!$dp.has.d,1);showB($d.qsDivSel);}catch(e){};return false;};","<\/script><script src=",u,"lang/",Y.name,".js charset=",Y.charset,"><\/script>"];if(S){X[1]='document.domain="'+O+'";'}for(var W=0;W<U.length;W++){if(U[W].name==P.skin){X.push('<link rel="stylesheet" type="text/css" href="'+u+"skin/"+U[W].name+'/datepicker.css" charset="'+U[W].charset+'"/>')}}X.push('<script src="'+u+'calendar.js"><\/script>');X.push('</head><body leftmargin="0" topmargin="0" tabindex=0></body></html>');X.push('<script>var t;t=t||setInterval(function(){if((typeof(doc.ready)=="boolean"&&doc.ready)||doc.readyState=="complete"){new My97DP();$cfg.onload();$c.autoSize();$cfg.setPos($dp);clearInterval(t);}},20);<\/script>');P.setPos=I;P.onload=A;T.write("<html>");T.cfg=P;T.write(X.join(""));T.close()}}function I(O){var M=O.position.left,V=O.position.top,L=O.el;if(L==i){return}if(L!=O.srcEl&&(r(L)=="none"||L.type=="hidden")){L=O.srcEl}var T=o(L),P=p(n),U=x(E),Q=F(E),N=$dp.dd.offsetHeight,S=$dp.dd.offsetWidth;if(isNaN(V)){V=0}if((P.topM+T.bottom+N>U.height)&&(P.topM+T.top-N>0)){V+=Q.top+P.topM+T.top-N-2}else{V+=Q.top+P.topM+T.bottom;var R=V-Q.top+N-U.height;if(R>0){V-=R}}if(isNaN(M)){M=0}M+=Q.left+Math.min(P.leftM+T.left,U.width-S-5)-(h?2:0);O.dd.style.top=V+"px";O.dd.style.left=M+"px"}}})(); | lang:"auto",
|
getfile.py | "Download utility"
#!/usr/local/bin/python
"""
Fetch an arbitrary file by FTP. Anonymous FTP unless you pass a
user=(name, pswd) tuple. Self-test FTPs a test file and site.
"""
from ftplib import FTP # socket-based FTP tools
from os.path import exists # file existence test
def getfile(file, site, dir, user=(), *, verbose=True, refetch=False):
    """
    Fetch *file* by FTP from *site*/*dir*, in binary mode.

    Logs in anonymously when user is an empty tuple, or with a real
    account when user is a (name, password) tuple.  Skips the transfer
    when the file already exists locally, unless refetch is true.
    Exceptions from the FTP session propagate to the caller; the local
    file and the FTP connection are always closed.
    (Note: parameters `file` and `dir` shadow builtins; kept for
    backward compatibility with existing callers.)
    """
    if exists(file) and not refetch:
        if verbose: print(file, 'already fetched')
    else:
        if verbose: print('Downloading', file)
        # Context manager closes the local file even if the transfer fails,
        # and unlike the original code it also covers a failing FTP() call.
        with open(file, 'wb') as local:
            remote = FTP(site)                       # connect to FTP site
            try:
                remote.login(*user)                  # anonymous=() or (name, pswd)
                remote.cwd(dir)
                remote.retrbinary('RETR ' + file, local.write, 1024)
            finally:
                # Bug fix: the original only called quit() on success, leaking
                # the control connection when login/cwd/retrbinary raised.
                try:
                    remote.quit()                    # polite close
                except Exception:
                    remote.close()                   # ensure the socket is released
        if verbose: print('Download done.')          # caller handles exceptions
if __name__ == '__main__':
from getpass import getpass | dir = '.'
site = 'ftp.rmi.net'
user = ('lutz', getpass('Pswd'))
getfile(file, site, dir, user)
'''
This module is mostly just a repackaging of the FTP code we used to fetch the image
file earlier, to make it simpler and reusable.
''' | file = 'monkeys.jpg' |
checkbox.ts | import {NgModule,Component,Input,Output,EventEmitter,forwardRef,ChangeDetectorRef} from '@angular/core';
import {CommonModule} from '@angular/common';
import {NG_VALUE_ACCESSOR, ControlValueAccessor} from '@angular/forms';
// ControlValueAccessor provider so p-checkbox can participate in Angular
// forms (ngModel / formControlName).  forwardRef defers resolution because
// the Checkbox class is declared further down in this file.
export const CHECKBOX_VALUE_ACCESSOR: any = {
  provide: NG_VALUE_ACCESSOR,
  useExisting: forwardRef(() => Checkbox),
  multi: true
};
@Component({
selector: 'p-checkbox',
template: `
<div class="ui-chkbox ui-widget">
<div class="ui-helper-hidden-accessible">
<input #cb type="checkbox" [attr.id]="inputId" [name]="name" [value]="value" [checked]="checked" (focus)="onFocus($event)" (blur)="onBlur($event)"
[ngClass]="{'ui-state-focus':focused}" (change)="handleChange($event)" [disabled]="disabled" [attr.tabindex]="tabindex">
</div>
<div class="ui-chkbox-box ui-widget ui-corner-all ui-state-default" (click)="onClick($event,cb,true)"
[ngClass]="{'ui-state-active':checked,'ui-state-disabled':disabled,'ui-state-focus':focused}">
<span class="ui-chkbox-icon ui-c" [ngClass]="{'fa fa-check':checked}"></span>
</div>
</div>
<label class="ui-chkbox-label" (click)="onClick($event,cb,true)" *ngIf="label">{{label}}</label>
`,
providers: [CHECKBOX_VALUE_ACCESSOR]
})
export class Checkbox implements ControlValueAccessor {
@Input() value: any;
@Input() name: string;
@Input() disabled: boolean;
@Input() binary: string;
@Input() label: string;
@Input() tabindex: number;
@Input() inputId: string;
@Output() onChange: EventEmitter<any> = new EventEmitter();
model: any;
onModelChange: Function = () => {};
onModelTouched: Function = () => {};
| constructor(private cd: ChangeDetectorRef) {}
    /**
     * Click handler shared by the checkbox box and its label: toggles the
     * checked state, syncs the bound model, and optionally refocuses the
     * hidden native input.  No-op when the checkbox is disabled.
     */
    onClick(event,checkbox,focus:boolean) {
        event.preventDefault();
        if(this.disabled) {
            return;
        }
        this.checked = !this.checked;
        this.updateModel();
        if(focus) {
            checkbox.focus();
        }
    }
updateModel() {
if(!this.binary) {
if(this.checked)
this.addValue();
else
this.removeValue();
this.onModelChange(this.model);
}
else {
this.onModelChange(this.checked);
}
this.onChange.emit(this.checked);
}
    /** Change handler for the hidden native input (e.g. keyboard toggling). */
    handleChange(event) {
        this.checked = event.target.checked;
        this.updateModel();
    }
isChecked(): boolean {
if(this.binary)
return this.model;
else
return this.model && this.model.indexOf(this.value) > -1;
}
removeValue() {
this.model = this.model.filter(val => val !== this.value);
}
addValue() {
this.model = [...this.model, this.value];
}
    /** Tracks focus so the template can apply the ui-state-focus class. */
    onFocus(event) {
        this.focused = true;
    }

    /** Clears focus styling and marks the control as touched for the forms API. */
    onBlur(event) {
        this.focused = false;
        this.onModelTouched();
    }
    /**
     * ControlValueAccessor hook: receives the model value from the forms
     * API, recomputes the checked state, and schedules change detection.
     */
    writeValue(model: any) : void {
        this.model = model;
        this.checked = this.isChecked();
        this.cd.markForCheck();
    }
    /** ControlValueAccessor hook: registers the forms-API change callback. */
    registerOnChange(fn: Function): void {
        this.onModelChange = fn;
    }

    /** ControlValueAccessor hook: registers the forms-API touched callback. */
    registerOnTouched(fn: Function): void {
        this.onModelTouched = fn;
    }

    /** ControlValueAccessor hook: lets the forms API toggle the disabled state. */
    setDisabledState(val: boolean): void {
        this.disabled = val;
    }
}
@NgModule({
imports: [CommonModule],
exports: [Checkbox],
declarations: [Checkbox]
})
export class CheckboxModule { } | focused: boolean = false;
checked: boolean = false;
|
clean_s3_bucket.py | #!/usr/bin/env python
# Sourced from https://gist.github.com/seventhskye/0cc7b2804252975d36dca047ab7729e9 with some modifications
import os
import boto3
def | ():
client = boto3.client('s3')
Bucket = os.environ.get('S3_BUCKET')
Prefix = os.environ.get('S3_PREFIX', '') # leave blank to delete the entire contents
IsTruncated = True
MaxKeys = 1000
KeyMarker = None
if Bucket is None:
print("Environment variable S3_BUCKET must be set!")
return
while IsTruncated == True:
if not KeyMarker:
version_list = client.list_object_versions(
Bucket=Bucket,
MaxKeys=MaxKeys,
Prefix=Prefix)
else:
version_list = client.list_object_versions(
Bucket=Bucket,
MaxKeys=MaxKeys,
Prefix=Prefix,
KeyMarker=KeyMarker)
try:
objects = []
versions = version_list['Versions']
for v in versions:
objects.append({'VersionId':v['VersionId'],'Key': v['Key']})
response = client.delete_objects(Bucket=Bucket,Delete={'Objects':objects})
for item in response['Deleted']:
print("Deleted %s" % item['Key'])
except:
pass
try:
objects = []
delete_markers = version_list['DeleteMarkers']
for d in delete_markers:
objects.append({'VersionId':d['VersionId'],'Key': d['Key']})
response = client.delete_objects(Bucket=Bucket,Delete={'Objects':objects})
for item in response['Deleted']:
print("Deleted %s" % item['Key'])
except:
pass
IsTruncated = version_list['IsTruncated']
if 'NextKeyMarker' in version_list:
KeyMarker = version_list['NextKeyMarker']
if __name__ == '__main__':
main()
| main |
appengine.go | // +build appengine
// Package memcache provides an implementation of httpcache.Cache that uses App
// Engine's memcache package to store cached responses.
//
// When not built for Google App Engine, this package will provide an
// implementation that connects to a specified memcached server. See the
// memcache.go file in this package for details.
package memcache
import (
"crypto/md5"
"fmt"
"appengine"
"appengine/memcache"
)
// Cache is an implementation of httpcache.Cache that caches responses in App
// Engine's memcache.
type Cache struct {
appengine.Context
}
// cacheKey modifies an httpcache key for use in memcache. Specifically, it
// prefixes keys to avoid collision with other data stored in memcache. It
// also uses the MD5 hash of the key in order to avoid exceeding the 250
// character length limit for a memcache key.
func cacheKey(key string) string |
// Get returns the cached response stored under key and reports whether it
// was present.  Cache misses are silent; any other memcache error is logged.
func (c *Cache) Get(key string) (resp []byte, ok bool) {
	item, err := memcache.Get(c.Context, cacheKey(key))
	if err == nil {
		return item.Value, true
	}
	if err != memcache.ErrCacheMiss {
		c.Context.Errorf("error getting cached response: %v", err)
	}
	return nil, false
}
// Set stores resp in memcache under key, logging (not returning) any error.
func (c *Cache) Set(key string, resp []byte) {
	err := memcache.Set(c.Context, &memcache.Item{
		Key:   cacheKey(key),
		Value: resp,
	})
	if err != nil {
		c.Context.Errorf("error caching response: %v", err)
	}
}
// Delete evicts the response stored under key, logging (not returning) any
// error.
func (c *Cache) Delete(key string) {
	err := memcache.Delete(c.Context, cacheKey(key))
	if err != nil {
		c.Context.Errorf("error deleting cached response: %v", err)
	}
}
// New returns a new Cache for the given context.  The context is retained
// and used for every subsequent memcache call and for error logging.
func New(ctx appengine.Context) *Cache {
	return &Cache{ctx}
}
| {
md5 := md5.Sum([]byte(key))
return fmt.Sprintf("httpcache:%x", md5)
} |
stack.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package stack provides the glue between networking protocols and the
// consumers of the networking stack.
//
// For consumers, the only function of interest is New(), everything else is
// provided by the tcpip/public package.
package stack
import (
"encoding/binary"
"fmt"
"io"
"math/rand"
"sync/atomic"
"time"
"golang.org/x/time/rate"
"gvisor.dev/gvisor/pkg/atomicbitops"
cryptorand "gvisor.dev/gvisor/pkg/rand"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/buffer"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/ports"
"gvisor.dev/gvisor/pkg/waiter"
)
const (
	// DefaultTOS is the default type of service value for network endpoints.
	DefaultTOS = 0
)

// transportProtocolState couples a transport protocol instance with the
// optional stack-wide default handler installed via
// SetTransportProtocolHandler.
type transportProtocolState struct {
	proto TransportProtocol
	// defaultHandler, if set, is the per-stack fallback for this protocol.
	// NOTE(review): its boolean result is interpreted at the dispatch site,
	// which is outside this view — confirm semantics there.
	defaultHandler func(id TransportEndpointID, pkt *PacketBuffer) bool
}

// ResumableEndpoint is an endpoint that needs to be resumed after restore.
type ResumableEndpoint interface {
	// Resume resumes an endpoint after restore. This can be used to restart
	// background workers such as protocol goroutines. This must be called after
	// all indirect dependencies of the endpoint has been restored, which
	// generally implies at the end of the restore process.
	Resume(*Stack)
}

// uniqueIDGenerator is a default unique ID generator.
type uniqueIDGenerator atomicbitops.AlignedAtomicUint64

// UniqueID returns the next identifier by atomically incrementing the
// underlying counter; the first value handed out is 1.
func (u *uniqueIDGenerator) UniqueID() uint64 {
	return ((*atomicbitops.AlignedAtomicUint64)(u)).Add(1)
}
// Stack is a networking stack, with all supported protocols, NICs, and route
// table.
type Stack struct {
	// transportProtocols and networkProtocols map protocol numbers to the
	// protocol instances created from the factories passed in Options.
	transportProtocols map[tcpip.TransportProtocolNumber]*transportProtocolState
	networkProtocols   map[tcpip.NetworkProtocolNumber]NetworkProtocol
	// rawFactory creates raw endpoints. If nil, raw endpoints are
	// disabled. It is set during Stack creation and is immutable.
	rawFactory RawFactory
	// packetEndpointWriteSupported mirrors Options.AllowPacketEndpointWrite.
	packetEndpointWriteSupported bool
	// demux is the global transport demuxer, created at Stack construction.
	demux *transportDemuxer
	// stats holds the stack-wide counters returned by Stats().
	stats tcpip.Stats
	// route holds the route table, guarded by its own lock.
	//
	// LOCK ORDERING: mu > route.mu.
	route struct {
		mu struct {
			sync.RWMutex
			table []tcpip.Route
		}
	}
	// mu guards nics and defaultForwardingEnabled.
	mu sync.RWMutex
	// nics maps NIC IDs to their state.
	nics map[tcpip.NICID]*nic
	// defaultForwardingEnabled records, per network protocol, whether newly
	// created NICs should have forwarding enabled.
	defaultForwardingEnabled map[tcpip.NetworkProtocolNumber]struct{}
	// cleanupEndpointsMu protects cleanupEndpoints.
	cleanupEndpointsMu sync.Mutex
	cleanupEndpoints   map[TransportEndpoint]struct{}
	*ports.PortManager
	// If not nil, then any new endpoints will have this probe function
	// invoked everytime they receive a TCP segment.
	tcpProbeFunc atomic.Value // TCPProbeFunc
	// clock is used to generate user-visible times.
	clock tcpip.Clock
	// handleLocal allows non-loopback interfaces to loop packets.
	handleLocal bool
	// tables are the iptables packet filtering and manipulation rules.
	// TODO(gvisor.dev/issue/4595): S/R this field.
	tables *IPTables
	// resumableEndpoints is a list of endpoints that need to be resumed if the
	// stack is being restored.
	resumableEndpoints []ResumableEndpoint
	// icmpRateLimiter is a global rate limiter for all ICMP messages generated
	// by the stack.
	icmpRateLimiter *ICMPRateLimiter
	// seed is a one-time random value initialized at stack startup.
	//
	// TODO(gvisor.dev/issue/940): S/R this field.
	seed uint32
	// nudConfigs is the default NUD configurations used by interfaces.
	nudConfigs NUDConfigurations
	// nudDisp is the NUD event dispatcher that is used to send the netstack
	// integrator NUD related events.
	nudDisp NUDDispatcher
	// uniqueIDGenerator is a generator of unique identifiers.
	uniqueIDGenerator UniqueID
	// randomGenerator is an injectable pseudo random generator that can be
	// used when a random number is required.
	randomGenerator *rand.Rand
	// secureRNG is a cryptographically secure random number generator.
	secureRNG io.Reader
	// sendBufferSize holds the min/default/max send buffer sizes for
	// endpoints other than TCP.
	sendBufferSize tcpip.SendBufferSizeOption
	// receiveBufferSize holds the min/default/max receive buffer sizes for
	// endpoints other than TCP.
	receiveBufferSize tcpip.ReceiveBufferSizeOption
	// tcpInvalidRateLimit is the maximal rate for sending duplicate
	// acknowledgements in response to incoming TCP packets that are for an existing
	// connection but that are invalid due to any of the following reasons:
	//
	//   a) out-of-window sequence number.
	//   b) out-of-window acknowledgement number.
	//   c) PAWS check failure (when implemented).
	//
	// This is required to prevent potential ACK loops.
	// Setting this to 0 will disable all rate limiting.
	tcpInvalidRateLimit time.Duration
	// tsOffsetSecret is the secret key for generating timestamp offsets
	// initialized at stack startup.
	tsOffsetSecret uint32
}
// UniqueID is an abstract generator of unique identifiers.
type UniqueID interface {
	UniqueID() uint64
}

// NetworkProtocolFactory instantiates a network protocol. Factories are
// supplied via Options.NetworkProtocols and invoked once per protocol at
// Stack construction.
//
// NetworkProtocolFactory must not attempt to modify the stack, it may only
// query the stack.
type NetworkProtocolFactory func(*Stack) NetworkProtocol

// TransportProtocolFactory instantiates a transport protocol. Factories are
// supplied via Options.TransportProtocols and invoked once per protocol at
// Stack construction.
//
// TransportProtocolFactory must not attempt to modify the stack, it may only
// query the stack.
type TransportProtocolFactory func(*Stack) TransportProtocol
// Options contains optional Stack configuration.
type Options struct {
	// NetworkProtocols lists the network protocols to enable.
	NetworkProtocols []NetworkProtocolFactory
	// TransportProtocols lists the transport protocols to enable.
	TransportProtocols []TransportProtocolFactory
	// Clock is an optional clock used for timekeeping.
	//
	// If Clock is nil, tcpip.NewStdClock() will be used.
	Clock tcpip.Clock
	// Stats are optional statistic counters.
	Stats tcpip.Stats
	// HandleLocal indicates whether packets destined to their source
	// should be handled by the stack internally (true) or outside the
	// stack (false).
	HandleLocal bool
	// UniqueID is an optional generator of unique identifiers.
	UniqueID UniqueID
	// NUDConfigs is the default NUD configurations used by interfaces.
	NUDConfigs NUDConfigurations
	// NUDDisp is the NUD event dispatcher that an integrator can provide to
	// receive NUD related events.
	NUDDisp NUDDispatcher
	// RawFactory produces raw endpoints. Raw endpoints are enabled only if
	// this is non-nil.
	RawFactory RawFactory
	// AllowPacketEndpointWrite determines if packet endpoints support write
	// operations.
	AllowPacketEndpointWrite bool
	// RandSource is an optional source to use to generate random
	// numbers. If omitted it defaults to a Source seeded by the data
	// returned by the stack secure RNG.
	//
	// RandSource must be thread-safe.
	RandSource rand.Source
	// IPTables are the initial iptables rules. If nil, DefaultIPTables will be
	// used to construct the initial iptables rules.
	IPTables *IPTables
	// DefaultIPTables is an optional iptables rules constructor that is called
	// if IPTables is nil. If both fields are nil, iptables will allow all
	// traffic.
	DefaultIPTables func(clock tcpip.Clock, rand *rand.Rand) *IPTables
	// SecureRNG is a cryptographically secure random number generator.
	SecureRNG io.Reader
}
// TransportEndpointInfo holds useful information about a transport endpoint
// which can be queried by monitoring tools.
//
// +stateify savable
type TransportEndpointInfo struct {
	// The following fields are initialized at creation time and are
	// immutable.
	NetProto   tcpip.NetworkProtocolNumber
	TransProto tcpip.TransportProtocolNumber
	// The following fields are protected by endpoint mu.
	ID TransportEndpointID
	// BindNICID and bindAddr are set via calls to Bind(). They are used to
	// reject attempts to send data or connect via a different NIC or
	// address
	BindNICID tcpip.NICID
	BindAddr  tcpip.Address
	// RegisterNICID is the default NICID registered as a side-effect of
	// connect or datagram write.
	RegisterNICID tcpip.NICID
}
// AddrNetProtoLocked unwraps the specified address if it is a V4-mapped V6
// address and returns the network protocol number to be used to communicate
// with the specified address. It returns an error if the passed address is
// incompatible with the receiver.
//
// Precondition: the parent endpoint mu must be held while calling this method.
func (t *TransportEndpointInfo) AddrNetProtoLocked(addr tcpip.FullAddress, v6only bool) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) {
	netProto := t.NetProto
	// Infer the protocol from the address length, unwrapping a V4-mapped V6
	// address (::ffff:a.b.c.d) into its plain IPv4 form.
	switch len(addr.Addr) {
	case header.IPv4AddressSize:
		netProto = header.IPv4ProtocolNumber
	case header.IPv6AddressSize:
		if header.IsV4MappedAddress(addr.Addr) {
			netProto = header.IPv4ProtocolNumber
			addr.Addr = addr.Addr[header.IPv6AddressSize-header.IPv4AddressSize:]
			// A V4-mapped "any" address normalizes to the empty address.
			if addr.Addr == header.IPv4Any {
				addr.Addr = ""
			}
		}
	}
	// Reject addresses whose family conflicts with the endpoint's bound
	// local address.
	switch len(t.ID.LocalAddress) {
	case header.IPv4AddressSize:
		if len(addr.Addr) == header.IPv6AddressSize {
			return tcpip.FullAddress{}, 0, &tcpip.ErrInvalidEndpointState{}
		}
	case header.IPv6AddressSize:
		if len(addr.Addr) == header.IPv4AddressSize {
			return tcpip.FullAddress{}, 0, &tcpip.ErrNetworkUnreachable{}
		}
	}
	switch {
	case netProto == t.NetProto:
	case netProto == header.IPv4ProtocolNumber && t.NetProto == header.IPv6ProtocolNumber:
		// A V4-mapped destination is allowed on a V6 endpoint unless the
		// endpoint is restricted to IPv6-only operation.
		if v6only {
			return tcpip.FullAddress{}, 0, &tcpip.ErrNoRoute{}
		}
	default:
		return tcpip.FullAddress{}, 0, &tcpip.ErrInvalidEndpointState{}
	}
	return addr, netProto, nil
}
// IsEndpointInfo is an empty method to implement the tcpip.EndpointInfo
// marker interface; it carries no behavior.
func (*TransportEndpointInfo) IsEndpointInfo() {}
// New allocates a new networking stack with only the requested networking and
// transport protocols configured with default options.
//
// Note, NDPConfigurations will be fixed before being used by the Stack. That
// is, if an invalid value was provided, it will be reset to the default value.
//
// Protocol options can be changed by calling the
// SetNetworkProtocolOption/SetTransportProtocolOption methods provided by the
// stack. Please refer to individual protocol implementations as to what options
// are supported.
func | (opts Options) *Stack {
clock := opts.Clock
if clock == nil {
clock = tcpip.NewStdClock()
}
if opts.UniqueID == nil {
opts.UniqueID = new(uniqueIDGenerator)
}
if opts.SecureRNG == nil {
opts.SecureRNG = cryptorand.Reader
}
randSrc := opts.RandSource
if randSrc == nil {
var v int64
if err := binary.Read(opts.SecureRNG, binary.LittleEndian, &v); err != nil {
panic(err)
}
// Source provided by rand.NewSource is not thread-safe so
// we wrap it in a simple thread-safe version.
randSrc = &lockedRandomSource{src: rand.NewSource(v)}
}
randomGenerator := rand.New(randSrc)
if opts.IPTables == nil {
if opts.DefaultIPTables == nil {
opts.DefaultIPTables = DefaultTables
}
opts.IPTables = opts.DefaultIPTables(clock, randomGenerator)
}
opts.NUDConfigs.resetInvalidFields()
s := &Stack{
transportProtocols: make(map[tcpip.TransportProtocolNumber]*transportProtocolState),
networkProtocols: make(map[tcpip.NetworkProtocolNumber]NetworkProtocol),
nics: make(map[tcpip.NICID]*nic),
packetEndpointWriteSupported: opts.AllowPacketEndpointWrite,
defaultForwardingEnabled: make(map[tcpip.NetworkProtocolNumber]struct{}),
cleanupEndpoints: make(map[TransportEndpoint]struct{}),
PortManager: ports.NewPortManager(),
clock: clock,
stats: opts.Stats.FillIn(),
handleLocal: opts.HandleLocal,
tables: opts.IPTables,
icmpRateLimiter: NewICMPRateLimiter(clock),
seed: randomGenerator.Uint32(),
nudConfigs: opts.NUDConfigs,
uniqueIDGenerator: opts.UniqueID,
nudDisp: opts.NUDDisp,
randomGenerator: randomGenerator,
secureRNG: opts.SecureRNG,
sendBufferSize: tcpip.SendBufferSizeOption{
Min: MinBufferSize,
Default: DefaultBufferSize,
Max: DefaultMaxBufferSize,
},
receiveBufferSize: tcpip.ReceiveBufferSizeOption{
Min: MinBufferSize,
Default: DefaultBufferSize,
Max: DefaultMaxBufferSize,
},
tcpInvalidRateLimit: defaultTCPInvalidRateLimit,
tsOffsetSecret: randomGenerator.Uint32(),
}
// Add specified network protocols.
for _, netProtoFactory := range opts.NetworkProtocols {
netProto := netProtoFactory(s)
s.networkProtocols[netProto.Number()] = netProto
}
// Add specified transport protocols.
for _, transProtoFactory := range opts.TransportProtocols {
transProto := transProtoFactory(s)
s.transportProtocols[transProto.Number()] = &transportProtocolState{
proto: transProto,
}
}
// Add the factory for raw endpoints, if present.
s.rawFactory = opts.RawFactory
// Create the global transport demuxer.
s.demux = newTransportDemuxer(s)
return s
}
// UniqueID returns a unique identifier, delegating to the generator supplied
// in Options (or the built-in default).
func (s *Stack) UniqueID() uint64 {
	return s.uniqueIDGenerator.UniqueID()
}
// SetNetworkProtocolOption allows configuring individual protocol level
// options. It returns an error if the protocol is not supported, or the
// option is not supported by the protocol implementation, or the provided
// value is incorrect.
func (s *Stack) SetNetworkProtocolOption(network tcpip.NetworkProtocolNumber, option tcpip.SettableNetworkProtocolOption) tcpip.Error {
	proto, found := s.networkProtocols[network]
	if !found {
		return &tcpip.ErrUnknownProtocol{}
	}
	return proto.SetOption(option)
}
// NetworkProtocolOption allows retrieving individual protocol level option
// values. It returns an error if the protocol is not supported or the option
// is not supported by the protocol implementation.
//
// Example:
//	var v ipv4.MyOption
//	if err := s.NetworkProtocolOption(tcpip.IPv4ProtocolNumber, &v); err != nil {
//		...
//	}
func (s *Stack) NetworkProtocolOption(network tcpip.NetworkProtocolNumber, option tcpip.GettableNetworkProtocolOption) tcpip.Error {
	proto, found := s.networkProtocols[network]
	if !found {
		return &tcpip.ErrUnknownProtocol{}
	}
	return proto.Option(option)
}
// SetTransportProtocolOption allows configuring individual protocol level
// options. It returns an error if the protocol is not supported, or the
// option is not supported by the protocol implementation, or the provided
// value is incorrect.
func (s *Stack) SetTransportProtocolOption(transport tcpip.TransportProtocolNumber, option tcpip.SettableTransportProtocolOption) tcpip.Error {
	state, found := s.transportProtocols[transport]
	if !found {
		return &tcpip.ErrUnknownProtocol{}
	}
	return state.proto.SetOption(option)
}
// TransportProtocolOption allows retrieving individual protocol level option
// values. It returns an error if the protocol is not supported or the option
// is not supported by the protocol implementation.
//
// Example:
//	var v tcp.SACKEnabled
//	if err := s.TransportProtocolOption(tcpip.TCPProtocolNumber, &v); err != nil {
//		...
//	}
func (s *Stack) TransportProtocolOption(transport tcpip.TransportProtocolNumber, option tcpip.GettableTransportProtocolOption) tcpip.Error {
	state, found := s.transportProtocols[transport]
	if !found {
		return &tcpip.ErrUnknownProtocol{}
	}
	return state.proto.Option(option)
}
// SetTransportProtocolHandler sets the per-stack default handler for the given
// protocol; unknown protocols are silently ignored.
//
// It must be called only during initialization of the stack. Changing it as
// the stack is operating is not supported.
func (s *Stack) SetTransportProtocolHandler(p tcpip.TransportProtocolNumber, h func(TransportEndpointID, *PacketBuffer) bool) {
	// A nil check (rather than comma-ok) preserves the original behavior of
	// also tolerating an explicitly-stored nil entry.
	if ts := s.transportProtocols[p]; ts != nil {
		ts.defaultHandler = h
	}
}
// Clock returns the Stack's clock for retrieving the current time and
// scheduling work.
func (s *Stack) Clock() tcpip.Clock {
	return s.clock
}

// Stats returns a mutable copy of the current stats.
//
// This is not generally exported via the public interface, but is available
// internally.
//
// NOTE(review): tcpip.Stats is returned by value; whether the counters are
// shared with the stack depends on tcpip.Stats internals — confirm there.
func (s *Stack) Stats() tcpip.Stats {
	return s.stats
}
// SetNICForwarding enables or disables packet forwarding on the specified NIC
// for the passed protocol. It returns ErrUnknownNICID if no such NIC exists.
func (s *Stack) SetNICForwarding(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber, enable bool) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if nic, ok := s.nics[id]; ok {
		return nic.setForwarding(protocol, enable)
	}
	return &tcpip.ErrUnknownNICID{}
}
// NICForwarding returns the forwarding configuration for the specified NIC,
// or ErrUnknownNICID if no such NIC exists.
func (s *Stack) NICForwarding(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber) (bool, tcpip.Error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if nic, ok := s.nics[id]; ok {
		return nic.forwarding(protocol)
	}
	return false, &tcpip.ErrUnknownNICID{}
}
// SetForwardingDefaultAndAllNICs sets packet forwarding for all NICs for the
// passed protocol and records the default setting applied to newly created
// NICs. If the very first NIC rejects the setting its error is returned; a
// failure after any NIC succeeded panics, since forwarding is expected to be
// settable on all interfaces if it was settable on one.
func (s *Stack) SetForwardingDefaultAndAllNICs(protocol tcpip.NetworkProtocolNumber, enable bool) tcpip.Error {
	s.mu.Lock()
	defer s.mu.Unlock()
	anySucceeded := false
	for id, nic := range s.nics {
		switch err := nic.setForwarding(protocol, enable); {
		case err == nil:
			anySucceeded = true
		case anySucceeded:
			panic(fmt.Sprintf("nic(id=%d).setForwarding(%d, %t): %s", id, protocol, enable, err))
		default:
			return err
		}
	}
	if !enable {
		delete(s.defaultForwardingEnabled, protocol)
	} else {
		s.defaultForwardingEnabled[protocol] = struct{}{}
	}
	return nil
}
// PortRange returns the UDP and TCP inclusive range of ephemeral ports used in
// both IPv4 and IPv6, delegating to the embedded PortManager.
func (s *Stack) PortRange() (uint16, uint16) {
	return s.PortManager.PortRange()
}

// SetPortRange sets the UDP and TCP IPv4 and IPv6 ephemeral port range
// (inclusive), delegating to the embedded PortManager.
func (s *Stack) SetPortRange(start uint16, end uint16) tcpip.Error {
	return s.PortManager.SetPortRange(start, end)
}
// SetRouteTable assigns the route table to be used by this stack. It
// specifies which NIC to use for given destination address ranges.
//
// This method takes ownership of the table.
func (s *Stack) SetRouteTable(table []tcpip.Route) {
	s.route.mu.Lock()
	defer s.route.mu.Unlock()
	s.route.mu.table = table
}

// GetRouteTable returns the route table which is currently in use.
func (s *Stack) GetRouteTable() []tcpip.Route {
	s.route.mu.RLock()
	defer s.route.mu.RUnlock()
	// Return a copy so callers cannot mutate the table without the lock.
	return append([]tcpip.Route(nil), s.route.mu.table...)
}

// AddRoute appends a route to the route table.
func (s *Stack) AddRoute(route tcpip.Route) {
	s.route.mu.Lock()
	defer s.route.mu.Unlock()
	s.route.mu.table = append(s.route.mu.table, route)
}
// RemoveRoutes removes from the route table every route for which match
// returns true.
func (s *Stack) RemoveRoutes(match func(tcpip.Route) bool) {
	s.route.mu.Lock()
	defer s.route.mu.Unlock()
	var kept []tcpip.Route
	for _, rt := range s.route.mu.table {
		if match(rt) {
			continue
		}
		kept = append(kept, rt)
	}
	s.route.mu.table = kept
}
// NewEndpoint creates a new transport layer endpoint of the given protocol,
// returning ErrUnknownProtocol when the transport protocol is not enabled.
func (s *Stack) NewEndpoint(transport tcpip.TransportProtocolNumber, network tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error) {
	state, found := s.transportProtocols[transport]
	if !found {
		return nil, &tcpip.ErrUnknownProtocol{}
	}
	return state.proto.NewEndpoint(network, waiterQueue)
}
// NewRawEndpoint creates a raw transport-layer endpoint for the given
// protocol. Raw endpoints receive all traffic for a given protocol regardless
// of address.
//
// Returns ErrNotPermitted when the stack has no raw endpoint factory, and
// ErrUnknownProtocol when the transport protocol is not registered.
func (s *Stack) NewRawEndpoint(transport tcpip.TransportProtocolNumber, network tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue, associated bool) (tcpip.Endpoint, tcpip.Error) {
    if s.rawFactory == nil {
        return nil, &tcpip.ErrNotPermitted{}
    }

    // Unassociated endpoints bypass the transport protocol entirely.
    if !associated {
        return s.rawFactory.NewUnassociatedEndpoint(s, network, transport, waiterQueue)
    }

    if t, ok := s.transportProtocols[transport]; ok {
        return t.proto.NewRawEndpoint(network, waiterQueue)
    }
    return nil, &tcpip.ErrUnknownProtocol{}
}
// NewPacketEndpoint creates a packet endpoint that listens for traffic of the
// given netProto.
//
// Returns ErrNotPermitted when the stack has no raw endpoint factory.
func (s *Stack) NewPacketEndpoint(cooked bool, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error) {
    factory := s.rawFactory
    if factory == nil {
        return nil, &tcpip.ErrNotPermitted{}
    }
    return factory.NewPacketEndpoint(s, cooked, netProto, waiterQueue)
}
// NICContext is an opaque pointer used to store client-supplied NIC metadata.
// It is passed in via NICOptions.Context and surfaced unmodified through
// NICInfo.Context; the stack never inspects it.
type NICContext interface{}
// NICOptions specifies the configuration of a NIC as it is being created.
// The zero value creates an enabled, unnamed NIC.
type NICOptions struct {
    // Name specifies the name of the NIC. Non-empty names must be unique
    // across the stack's NICs.
    Name string

    // Disabled specifies whether to avoid calling Attach on the passed
    // LinkEndpoint.
    Disabled bool

    // Context specifies user-defined data that will be returned in stack.NICInfo
    // for the NIC. Clients of this library can use it to add metadata that
    // should be tracked alongside a NIC, to avoid having to keep a
    // map[tcpip.NICID]metadata mirroring stack.Stack's nic map.
    Context NICContext

    // QDisc is the queue discipline to use for this NIC.
    QDisc QueueingDiscipline
}
// CreateNICWithOptions creates a NIC with the provided id, LinkEndpoint, and
// NICOptions. See the documentation on type NICOptions for details on how
// NICs can be configured.
//
// LinkEndpoint.Attach will be called to bind ep with a NetworkDispatcher.
//
// Returns ErrDuplicateNICID if the id, or a non-empty name, is already in use
// by an existing NIC.
func (s *Stack) CreateNICWithOptions(id tcpip.NICID, ep LinkEndpoint, opts NICOptions) tcpip.Error {
    s.mu.Lock()
    defer s.mu.Unlock()

    // Make sure id is unique.
    if _, ok := s.nics[id]; ok {
        return &tcpip.ErrDuplicateNICID{}
    }

    // Make sure name is unique, unless unnamed.
    if opts.Name != "" {
        for _, n := range s.nics {
            if n.Name() == opts.Name {
                return &tcpip.ErrDuplicateNICID{}
            }
        }
    }

    n := newNIC(s, id, ep, opts)
    // Apply the stack-wide default forwarding configuration so the new NIC
    // matches the NICs created before it.
    for proto := range s.defaultForwardingEnabled {
        if err := n.setForwarding(proto, true); err != nil {
            panic(fmt.Sprintf("newNIC(%d, ...).setForwarding(%d, true): %s", id, proto, err))
        }
    }
    s.nics[id] = n

    // NICs are enabled by default unless explicitly created disabled.
    if !opts.Disabled {
        return n.enable()
    }

    return nil
}
// CreateNIC creates an enabled, unnamed NIC with the provided id and
// LinkEndpoint; LinkEndpoint.Attach is called to bind ep with a
// NetworkDispatcher. It is shorthand for CreateNICWithOptions with
// zero-valued options.
func (s *Stack) CreateNIC(id tcpip.NICID, ep LinkEndpoint) tcpip.Error {
    var opts NICOptions
    return s.CreateNICWithOptions(id, ep, opts)
}
// GetLinkEndpointByName returns the link endpoint of the NIC whose name
// matches, or nil if no NIC has that name.
func (s *Stack) GetLinkEndpointByName(name string) LinkEndpoint {
    s.mu.RLock()
    defer s.mu.RUnlock()
    for _, nic := range s.nics {
        if nic.Name() != name {
            continue
        }
        linkEP, ok := nic.NetworkLinkEndpoint.(LinkEndpoint)
        if !ok {
            panic(fmt.Sprintf("unexpected NetworkLinkEndpoint(%#v) is not a LinkEndpoint", nic.NetworkLinkEndpoint))
        }
        return linkEP
    }
    return nil
}
// EnableNIC enables the given NIC so that the link-layer endpoint can start
// delivering packets to it.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) EnableNIC(id tcpip.NICID) tcpip.Error {
    s.mu.RLock()
    defer s.mu.RUnlock()

    if nic, ok := s.nics[id]; ok {
        return nic.enable()
    }
    return &tcpip.ErrUnknownNICID{}
}
// DisableNIC disables the given NIC.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) DisableNIC(id tcpip.NICID) tcpip.Error {
    s.mu.RLock()
    defer s.mu.RUnlock()

    if nic, ok := s.nics[id]; ok {
        nic.disable()
        return nil
    }
    return &tcpip.ErrUnknownNICID{}
}
// CheckNIC reports whether the NIC exists and is currently enabled.
func (s *Stack) CheckNIC(id tcpip.NICID) bool {
    s.mu.RLock()
    defer s.mu.RUnlock()

    // A missing NIC short-circuits before Enabled is consulted.
    nic, ok := s.nics[id]
    return ok && nic.Enabled()
}
// RemoveNIC removes the NIC and all routes that reference it from the stack.
func (s *Stack) RemoveNIC(id tcpip.NICID) tcpip.Error {
    s.mu.Lock()
    err := s.removeNICLocked(id)
    s.mu.Unlock()
    return err
}
// removeNICLocked removes NIC and all related routes from the network stack.
//
// Returns ErrUnknownNICID if the NIC does not exist and ErrNotSupported for
// loopback NICs, which cannot be removed.
//
// s.mu must be locked.
func (s *Stack) removeNICLocked(id tcpip.NICID) tcpip.Error {
    nic, ok := s.nics[id]
    if !ok {
        return &tcpip.ErrUnknownNICID{}
    }
    if nic.IsLoopback() {
        return &tcpip.ErrNotSupported{}
    }
    delete(s.nics, id)

    // Remove routes in-place. n tracks the number of routes written.
    s.route.mu.Lock()
    n := 0
    for i, r := range s.route.mu.table {
        // Zero the slot first so entries dropped below do not linger in the
        // slice's backing array after it is truncated.
        s.route.mu.table[i] = tcpip.Route{}
        if r.NIC != id {
            // Keep this route.
            s.route.mu.table[n] = r
            n++
        }
    }
    s.route.mu.table = s.route.mu.table[:n]
    s.route.mu.Unlock()

    return nic.remove()
}
// NICInfo captures the name and addresses assigned to a NIC.
type NICInfo struct {
    // Name is the NIC's name as supplied in NICOptions (may be empty).
    Name string
    // LinkAddress is the NIC's link-layer address.
    LinkAddress tcpip.LinkAddress
    // ProtocolAddresses holds the NIC's primary protocol addresses.
    ProtocolAddresses []tcpip.ProtocolAddress

    // Flags indicate the state of the NIC.
    Flags NICStateFlags

    // MTU is the maximum transmission unit.
    MTU uint32

    // Stats holds the NIC-level counters.
    Stats tcpip.NICStats

    // NetworkStats holds the stats of each NetworkEndpoint bound to the NIC.
    NetworkStats map[tcpip.NetworkProtocolNumber]NetworkEndpointStats

    // Context is user-supplied data optionally supplied in CreateNICWithOptions.
    // See type NICOptions for more details.
    Context NICContext

    // ARPHardwareType holds the ARP Hardware type of the NIC. This is the
    // value sent in haType field of an ARP Request sent by this NIC and the
    // value expected in the haType field of an ARP response.
    ARPHardwareType header.ARPHardwareType

    // Forwarding holds the forwarding status for each network endpoint that
    // supports forwarding.
    Forwarding map[tcpip.NetworkProtocolNumber]bool
}
// HasNIC reports whether a NIC with the given ID exists in the stack.
func (s *Stack) HasNIC(id tcpip.NICID) bool {
    s.mu.RLock()
    defer s.mu.RUnlock()
    _, exists := s.nics[id]
    return exists
}
// NICInfo returns a map of NICIDs to their associated information.
func (s *Stack) NICInfo() map[tcpip.NICID]NICInfo {
    s.mu.RLock()
    defer s.mu.RUnlock()

    nics := make(map[tcpip.NICID]NICInfo)
    for id, nic := range s.nics {
        flags := NICStateFlags{
            Up:          true, // Netstack interfaces are always up.
            Running:     nic.Enabled(),
            Promiscuous: nic.Promiscuous(),
            Loopback:    nic.IsLoopback(),
        }

        // Gather per-network-protocol endpoint stats for this NIC.
        netStats := make(map[tcpip.NetworkProtocolNumber]NetworkEndpointStats)
        for proto, netEP := range nic.networkEndpoints {
            netStats[proto] = netEP.Stats()
        }

        info := NICInfo{
            Name:              nic.name,
            LinkAddress:       nic.NetworkLinkEndpoint.LinkAddress(),
            ProtocolAddresses: nic.primaryAddresses(),
            Flags:             flags,
            MTU:               nic.NetworkLinkEndpoint.MTU(),
            Stats:             nic.stats.local,
            NetworkStats:      netStats,
            Context:           nic.context,
            ARPHardwareType:   nic.NetworkLinkEndpoint.ARPHardwareType(),
            Forwarding:        make(map[tcpip.NetworkProtocolNumber]bool),
        }

        // Record the forwarding state for every protocol that supports it;
        // protocols returning ErrNotSupported are simply omitted.
        for proto := range s.networkProtocols {
            switch forwarding, err := nic.forwarding(proto); err.(type) {
            case nil:
                info.Forwarding[proto] = forwarding
            case *tcpip.ErrUnknownProtocol:
                panic(fmt.Sprintf("expected network protocol %d to be available on NIC %d", proto, nic.ID()))
            case *tcpip.ErrNotSupported:
                // Not all network protocols support forwarding.
            default:
                panic(fmt.Sprintf("nic(id=%d).forwarding(%d): %s", nic.ID(), proto, err))
            }
        }

        nics[id] = info
    }
    return nics
}
// NICStateFlags holds information about the state of an NIC.
type NICStateFlags struct {
    // Up indicates whether the interface is running.
    Up bool

    // Running indicates whether resources are allocated.
    Running bool

    // Promiscuous indicates whether the interface is in promiscuous mode.
    Promiscuous bool

    // Loopback indicates whether the interface is a loopback.
    Loopback bool
}
// AddProtocolAddress adds an address to the specified NIC, possibly with extra
// properties.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) AddProtocolAddress(id tcpip.NICID, protocolAddress tcpip.ProtocolAddress, properties AddressProperties) tcpip.Error {
    s.mu.RLock()
    defer s.mu.RUnlock()

    if nic, ok := s.nics[id]; ok {
        return nic.addAddress(protocolAddress, properties)
    }
    return &tcpip.ErrUnknownNICID{}
}
// RemoveAddress removes an existing network-layer address from the specified
// NIC.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) RemoveAddress(id tcpip.NICID, addr tcpip.Address) tcpip.Error {
    s.mu.RLock()
    defer s.mu.RUnlock()

    nic, ok := s.nics[id]
    if !ok {
        return &tcpip.ErrUnknownNICID{}
    }
    return nic.removeAddress(addr)
}
// AllAddresses returns, for every NIC, its permanent protocol addresses
// (primary and non-primary).
func (s *Stack) AllAddresses() map[tcpip.NICID][]tcpip.ProtocolAddress {
    s.mu.RLock()
    defer s.mu.RUnlock()

    out := make(map[tcpip.NICID][]tcpip.ProtocolAddress, len(s.nics))
    for id, nic := range s.nics {
        out[id] = nic.allPermanentAddresses()
    }
    return out
}
// GetMainNICAddress returns the first non-deprecated primary address and
// prefix for the given NIC and protocol. If no non-deprecated primary
// addresses exist, a deprecated address will be returned. If no deprecated
// addresses exist, the zero value will be returned.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) GetMainNICAddress(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, tcpip.Error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    if nic, ok := s.nics[id]; ok {
        return nic.PrimaryAddress(protocol)
    }
    return tcpip.AddressWithPrefix{}, &tcpip.ErrUnknownNICID{}
}
// getAddressEP resolves an assignable address endpoint on nic: when a local
// address is given it is looked up directly, otherwise the NIC's primary
// endpoint for the remote address is used.
func (s *Stack) getAddressEP(nic *nic, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) AssignableAddressEndpoint {
    if len(localAddr) != 0 {
        return nic.findEndpoint(netProto, localAddr, CanBePrimaryEndpoint)
    }
    return nic.primaryEndpoint(netProto, remoteAddr)
}
// findLocalRouteFromNICRLocked is like findLocalRouteRLocked but finds a route
// from the specified NIC.
//
// Returns nil when localAddr is not assigned to localAddressNIC, when
// remoteAddr is not owned by any NIC in the stack, or when the resulting
// route would be an outbound broadcast.
//
// Precondition: s.mu must be read locked.
func (s *Stack) findLocalRouteFromNICRLocked(localAddressNIC *nic, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) *Route {
    localAddressEndpoint := localAddressNIC.getAddressOrCreateTempInner(netProto, localAddr, false /* createTemp */, NeverPrimaryEndpoint)
    if localAddressEndpoint == nil {
        return nil
    }

    var outgoingNIC *nic
    // Prefer a local route to the same interface as the local address.
    if localAddressNIC.hasAddress(netProto, remoteAddr) {
        outgoingNIC = localAddressNIC
    }

    // If the remote address isn't owned by the local address's NIC, check all
    // NICs.
    if outgoingNIC == nil {
        for _, nic := range s.nics {
            if nic.hasAddress(netProto, remoteAddr) {
                outgoingNIC = nic
                break
            }
        }
    }

    // If the remote address is not owned by the stack, we can't return a local
    // route.
    if outgoingNIC == nil {
        // Release the reference obtained on the local address endpoint above.
        localAddressEndpoint.DecRef()
        return nil
    }

    r := makeLocalRoute(
        netProto,
        localAddr,
        remoteAddr,
        outgoingNIC,
        localAddressNIC,
        localAddressEndpoint,
    )

    if r.IsOutboundBroadcast() {
        // Broadcasts cannot be delivered locally; drop the route.
        r.Release()
        return nil
    }

    return r
}
// findLocalRouteRLocked returns a local route.
//
// A local route is a route to some remote address which the stack owns. That
// is, a local route is a route where packets never have to leave the stack.
//
// With localAddressNICID == 0 every NIC is tried in turn; otherwise only the
// named NIC is considered. A zero-length localAddr defaults to remoteAddr.
//
// Precondition: s.mu must be read locked.
func (s *Stack) findLocalRouteRLocked(localAddressNICID tcpip.NICID, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) *Route {
    if len(localAddr) == 0 {
        localAddr = remoteAddr
    }

    if localAddressNICID != 0 {
        localAddressNIC, ok := s.nics[localAddressNICID]
        if !ok {
            return nil
        }
        return s.findLocalRouteFromNICRLocked(localAddressNIC, localAddr, remoteAddr, netProto)
    }

    for _, candidate := range s.nics {
        if r := s.findLocalRouteFromNICRLocked(candidate, localAddr, remoteAddr, netProto); r != nil {
            return r
        }
    }
    return nil
}
// HandleLocal returns true if non-loopback interfaces are allowed to loop
// packets back into the stack.
func (s *Stack) HandleLocal() bool {
    handleLocal := s.handleLocal
    return handleLocal
}
// isNICForwarding reports whether nic forwards packets for the given network
// protocol. Protocols that do not support forwarding report false; an
// unregistered protocol or any other error is a programming bug and panics.
func isNICForwarding(nic *nic, proto tcpip.NetworkProtocolNumber) bool {
    switch forwarding, err := nic.forwarding(proto); err.(type) {
    case nil:
        return forwarding
    case *tcpip.ErrUnknownProtocol:
        panic(fmt.Sprintf("expected network protocol %d to be available on NIC %d", proto, nic.ID()))
    case *tcpip.ErrNotSupported:
        // Not all network protocols support forwarding.
        return false
    default:
        panic(fmt.Sprintf("nic(id=%d).forwarding(%d): %s", nic.ID(), proto, err))
    }
}
// FindRoute creates a route to the given destination address, leaving through
// the given NIC and local address (if provided).
//
// If a NIC is not specified, the returned route will leave through the same
// NIC as the NIC that has the local address assigned when forwarding is
// disabled. If forwarding is enabled and the NIC is unspecified, the route may
// leave through any interface unless the route is link-local.
//
// If no local address is provided, the stack will select a local address. If
// no remote address is provided, the stack will use a remote address equal to
// the local address.
func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber, multicastLoop bool) (*Route, tcpip.Error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    // Classify the remote address; these classes decide whether the route
    // table must be consulted at all (needRoute) and which error to return
    // on failure.
    isLinkLocal := header.IsV6LinkLocalUnicastAddress(remoteAddr) || header.IsV6LinkLocalMulticastAddress(remoteAddr)
    isLocalBroadcast := remoteAddr == header.IPv4Broadcast
    isMulticast := header.IsV4MulticastAddress(remoteAddr) || header.IsV6MulticastAddress(remoteAddr)
    isLoopback := header.IsV4LoopbackAddress(remoteAddr) || header.IsV6LoopbackAddress(remoteAddr)
    needRoute := !(isLocalBroadcast || isMulticast || isLinkLocal || isLoopback)

    // Prefer a stack-internal (local) route when the stack loops back packets
    // destined to its own addresses.
    if s.handleLocal && !isMulticast && !isLocalBroadcast {
        if r := s.findLocalRouteRLocked(id, localAddr, remoteAddr, netProto); r != nil {
            return r, nil
        }
    }

    // If the interface is specified and we do not need a route, return a route
    // through the interface if the interface is valid and enabled.
    if id != 0 && !needRoute {
        if nic, ok := s.nics[id]; ok && nic.Enabled() {
            if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, netProto); addressEndpoint != nil {
                return makeRoute(
                    netProto,
                    "", /* gateway */
                    localAddr,
                    remoteAddr,
                    nic, /* outboundNIC */
                    nic, /* localAddressNIC*/
                    addressEndpoint,
                    s.handleLocal,
                    multicastLoop,
                ), nil
            }
        }

        if isLoopback {
            return nil, &tcpip.ErrBadLocalAddress{}
        }
        return nil, &tcpip.ErrNetworkUnreachable{}
    }

    onlyGlobalAddresses := !header.IsV6LinkLocalUnicastAddress(localAddr) && !isLinkLocal

    // Find a route to the remote with the route table.
    var chosenRoute tcpip.Route
    if r := func() *Route {
        s.route.mu.RLock()
        defer s.route.mu.RUnlock()

        for _, route := range s.route.mu.table {
            if len(remoteAddr) != 0 && !route.Destination.Contains(remoteAddr) {
                continue
            }

            nic, ok := s.nics[route.NIC]
            if !ok || !nic.Enabled() {
                continue
            }

            if id == 0 || id == route.NIC {
                if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, netProto); addressEndpoint != nil {
                    var gateway tcpip.Address
                    if needRoute {
                        gateway = route.Gateway
                    }
                    r := constructAndValidateRoute(netProto, addressEndpoint, nic /* outgoingNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop)
                    if r == nil {
                        panic(fmt.Sprintf("non-forwarding route validation failed with route table entry = %#v, id = %d, localAddr = %s, remoteAddr = %s", route, id, localAddr, remoteAddr))
                    }
                    return r
                }
            }

            // If the stack has forwarding enabled and we haven't found a valid route
            // to the remote address yet, keep track of the first valid route. We
            // keep iterating because we prefer routes that let us use a local
            // address that is assigned to the outgoing interface. There is no
            // requirement to do this from any RFC but simply a choice made to better
            // follow a strong host model which the netstack follows at the time of
            // writing.
            if onlyGlobalAddresses && chosenRoute == (tcpip.Route{}) && isNICForwarding(nic, netProto) {
                chosenRoute = route
            }
        }

        return nil
    }(); r != nil {
        return r, nil
    }

    if chosenRoute != (tcpip.Route{}) {
        // At this point we know the stack has forwarding enabled since chosenRoute is
        // only set when forwarding is enabled.
        nic, ok := s.nics[chosenRoute.NIC]
        if !ok {
            // If the route's NIC was invalid, we should not have chosen the route.
            panic(fmt.Sprintf("chosen route must have a valid NIC with ID = %d", chosenRoute.NIC))
        }

        var gateway tcpip.Address
        if needRoute {
            gateway = chosenRoute.Gateway
        }

        // Use the specified NIC to get the local address endpoint.
        if id != 0 {
            if aNIC, ok := s.nics[id]; ok {
                if addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, netProto); addressEndpoint != nil {
                    if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil {
                        return r, nil
                    }
                }
            }

            return nil, &tcpip.ErrNoRoute{}
        }

        if id == 0 {
            // If an interface is not specified, try to find a NIC that holds the local
            // address endpoint to construct a route.
            for _, aNIC := range s.nics {
                addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, netProto)
                if addressEndpoint == nil {
                    continue
                }

                if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil {
                    return r, nil
                }
            }
        }
    }

    if needRoute {
        return nil, &tcpip.ErrNoRoute{}
    }
    if header.IsV6LoopbackAddress(remoteAddr) {
        return nil, &tcpip.ErrBadLocalAddress{}
    }
    return nil, &tcpip.ErrNetworkUnreachable{}
}
// CheckNetworkProtocol reports whether the given network protocol is
// registered with the stack.
func (s *Stack) CheckNetworkProtocol(protocol tcpip.NetworkProtocolNumber) bool {
    if _, ok := s.networkProtocols[protocol]; ok {
        return true
    }
    return false
}
// CheckDuplicateAddress performs duplicate address detection for the address
// on the specified interface.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) CheckDuplicateAddress(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address, h DADCompletionHandler) (DADCheckAddressDisposition, tcpip.Error) {
    // s.nics is mutated under s.mu (CreateNICWithOptions/removeNICLocked);
    // take the read lock like every other accessor so a concurrent NIC
    // create/remove cannot race this map read.
    s.mu.RLock()
    nic, ok := s.nics[nicID]
    s.mu.RUnlock()
    if !ok {
        return 0, &tcpip.ErrUnknownNICID{}
    }

    return nic.checkDuplicateAddress(protocol, addr, h)
}
// CheckLocalAddress determines if the given local address exists, and if it
// does, returns the id of the NIC it's bound to. Returns 0 if the address
// does not exist.
func (s *Stack) CheckLocalAddress(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) tcpip.NICID {
    s.mu.RLock()
    defer s.mu.RUnlock()

    if nicID != 0 {
        // Restrict the search to the requested interface only.
        if nic, ok := s.nics[nicID]; ok && nic.CheckLocalAddress(protocol, addr) {
            return nic.id
        }
        return 0
    }

    // No NIC requested: scan every interface.
    for _, nic := range s.nics {
        if nic.CheckLocalAddress(protocol, addr) {
            return nic.id
        }
    }
    return 0
}
// SetPromiscuousMode enables or disables promiscuous mode in the given NIC.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) SetPromiscuousMode(nicID tcpip.NICID, enable bool) tcpip.Error {
    s.mu.RLock()
    defer s.mu.RUnlock()

    if nic, ok := s.nics[nicID]; ok {
        nic.setPromiscuousMode(enable)
        return nil
    }
    return &tcpip.ErrUnknownNICID{}
}
// SetSpoofing enables or disables address spoofing in the given NIC, allowing
// endpoints to bind to any address in the NIC.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) SetSpoofing(nicID tcpip.NICID, enable bool) tcpip.Error {
    s.mu.RLock()
    defer s.mu.RUnlock()

    if nic, ok := s.nics[nicID]; ok {
        nic.setSpoofing(enable)
        return nil
    }
    return &tcpip.ErrUnknownNICID{}
}
// LinkResolutionResult is the result of a link address resolution attempt.
type LinkResolutionResult struct {
    // LinkAddress is the resolved link-layer address (valid when Err is nil).
    LinkAddress tcpip.LinkAddress
    // Err is the terminal status of the resolution attempt.
    Err tcpip.Error
}
// GetLinkAddress finds the link address corresponding to a network address.
//
// Returns ErrNotSupported if the stack is not configured with a link address
// resolver for the specified network protocol.
//
// Returns ErrWouldBlock if the link address is not readily available, along
// with a notification channel for the caller to block on. Triggers address
// resolution asynchronously.
//
// onResolve will be called either immediately, if resolution is not required,
// or when address resolution is complete, with the resolved link address and
// whether resolution succeeded.
//
// If specified, the local address must be an address local to the interface
// the neighbor cache belongs to. The local address is the source address of
// a packet prompting NUD/link address resolution.
func (s *Stack) GetLinkAddress(nicID tcpip.NICID, addr, localAddr tcpip.Address, protocol tcpip.NetworkProtocolNumber, onResolve func(LinkResolutionResult)) tcpip.Error {
    s.mu.RLock()
    nic := s.nics[nicID]
    s.mu.RUnlock()
    if nic == nil {
        return &tcpip.ErrUnknownNICID{}
    }

    return nic.getLinkAddress(addr, localAddr, protocol, onResolve)
}
// Neighbors returns all IP to MAC address associations known on the NIC.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) Neighbors(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber) ([]NeighborEntry, tcpip.Error) {
    s.mu.RLock()
    nic := s.nics[nicID]
    s.mu.RUnlock()
    if nic == nil {
        return nil, &tcpip.ErrUnknownNICID{}
    }

    return nic.neighbors(protocol)
}
// AddStaticNeighbor statically associates an IP address with a MAC address on
// the given NIC.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) AddStaticNeighbor(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address, linkAddr tcpip.LinkAddress) tcpip.Error {
    s.mu.RLock()
    nic := s.nics[nicID]
    s.mu.RUnlock()
    if nic == nil {
        return &tcpip.ErrUnknownNICID{}
    }

    return nic.addStaticNeighbor(addr, protocol, linkAddr)
}
// RemoveNeighbor removes an IP to MAC address association previously created
// either automically or by AddStaticNeighbor. Returns ErrBadAddress if there
// is no association with the provided address.
func (s *Stack) RemoveNeighbor(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) tcpip.Error {
    s.mu.RLock()
    nic := s.nics[nicID]
    s.mu.RUnlock()
    if nic == nil {
        return &tcpip.ErrUnknownNICID{}
    }

    return nic.removeNeighbor(protocol, addr)
}
// ClearNeighbors removes all IP to MAC address associations on the NIC.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) ClearNeighbors(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber) tcpip.Error {
    s.mu.RLock()
    nic := s.nics[nicID]
    s.mu.RUnlock()
    if nic == nil {
        return &tcpip.ErrUnknownNICID{}
    }

    return nic.clearNeighbors(protocol)
}
// RegisterTransportEndpoint registers the given endpoint with the stack
// transport dispatcher. Received packets that match the provided id will be
// delivered to the given endpoint; specifying a nic is optional, but
// nic-specific IDs have precedence over global ones.
func (s *Stack) RegisterTransportEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) tcpip.Error {
    err := s.demux.registerEndpoint(netProtos, protocol, id, ep, flags, bindToDevice)
    return err
}
// CheckRegisterTransportEndpoint checks whether an endpoint with the given
// parameters could be registered with the stack transport dispatcher, without
// actually registering it.
func (s *Stack) CheckRegisterTransportEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, flags ports.Flags, bindToDevice tcpip.NICID) tcpip.Error {
    err := s.demux.checkEndpoint(netProtos, protocol, id, flags, bindToDevice)
    return err
}
// UnregisterTransportEndpoint removes the endpoint with the given id from the
// stack transport dispatcher so it no longer receives matching packets.
func (s *Stack) UnregisterTransportEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) {
    s.demux.unregisterEndpoint(netProtos, protocol, id, ep, flags, bindToDevice)
}
// StartTransportEndpointCleanup removes the endpoint with the given id from
// the stack transport dispatcher and transitions it to the cleanup stage.
func (s *Stack) StartTransportEndpointCleanup(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) {
    // Mark the endpoint as under cleanup first so it remains visible via
    // CleanupEndpoints while being unregistered.
    s.cleanupEndpointsMu.Lock()
    s.cleanupEndpoints[ep] = struct{}{}
    s.cleanupEndpointsMu.Unlock()

    s.demux.unregisterEndpoint(netProtos, protocol, id, ep, flags, bindToDevice)
}
// CompleteTransportEndpointCleanup takes the endpoint out of the cleanup
// stage.
func (s *Stack) CompleteTransportEndpointCleanup(ep TransportEndpoint) {
    s.cleanupEndpointsMu.Lock()
    defer s.cleanupEndpointsMu.Unlock()
    delete(s.cleanupEndpoints, ep)
}
// FindTransportEndpoint finds an endpoint that most closely matches the
// provided id; nil is returned when there is no match.
func (s *Stack) FindTransportEndpoint(netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, id TransportEndpointID, nicID tcpip.NICID) TransportEndpoint {
    ep := s.demux.findTransportEndpoint(netProto, transProto, id, nicID)
    return ep
}
// RegisterRawTransportEndpoint registers the given endpoint with the stack
// transport dispatcher; all received packets matching the transport protocol
// are delivered to it.
func (s *Stack) RegisterRawTransportEndpoint(netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, ep RawTransportEndpoint) tcpip.Error {
    err := s.demux.registerRawEndpoint(netProto, transProto, ep)
    return err
}
// UnregisterRawTransportEndpoint removes the raw endpoint for the transport
// protocol from the stack transport dispatcher.
func (s *Stack) UnregisterRawTransportEndpoint(netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, ep RawTransportEndpoint) {
    s.demux.unregisterRawEndpoint(netProto, transProto, ep)
}
// RegisterRestoredEndpoint records e as an endpoint restored on this stack,
// to be resumed later by Resume.
func (s *Stack) RegisterRestoredEndpoint(e ResumableEndpoint) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.resumableEndpoints = append(s.resumableEndpoints, e)
}
// RegisteredEndpoints returns every transport endpoint currently registered
// with the stack's dispatcher.
func (s *Stack) RegisteredEndpoints() []TransportEndpoint {
    s.mu.Lock()
    defer s.mu.Unlock()

    var eps []TransportEndpoint
    for _, te := range s.demux.protocol {
        eps = append(eps, te.transportEndpoints()...)
    }
    return eps
}
// CleanupEndpoints returns the endpoints currently in the cleanup state.
func (s *Stack) CleanupEndpoints() []TransportEndpoint {
    s.cleanupEndpointsMu.Lock()
    defer s.cleanupEndpointsMu.Unlock()

    eps := make([]TransportEndpoint, 0, len(s.cleanupEndpoints))
    for ep := range s.cleanupEndpoints {
        eps = append(eps, ep)
    }
    return eps
}
// RestoreCleanupEndpoints adds endpoints to cleanup tracking. This is useful
// for restoring a stack after a save.
func (s *Stack) RestoreCleanupEndpoints(es []TransportEndpoint) {
    s.cleanupEndpointsMu.Lock()
    defer s.cleanupEndpointsMu.Unlock()
    for _, ep := range es {
        s.cleanupEndpoints[ep] = struct{}{}
    }
}
// Close closes all currently registered transport endpoints.
//
// Endpoints created or modified during this call may not get closed.
func (s *Stack) Close() {
    // Abort the endpoints first, then shut down the protocols serving them.
    for _, ep := range s.RegisteredEndpoints() {
        ep.Abort()
    }
    for _, ts := range s.transportProtocols {
        ts.proto.Close()
    }
    for _, np := range s.networkProtocols {
        np.Close()
    }
}
// Wait waits for all transport and link endpoints to halt their worker
// goroutines.
//
// Endpoints created or modified during this call may not get waited on.
//
// Note that link endpoints must be stopped via an implementation specific
// mechanism.
func (s *Stack) Wait() {
    // Wait on live endpoints first, then those already in the cleanup stage.
    for _, e := range s.RegisteredEndpoints() {
        e.Wait()
    }
    for _, e := range s.CleanupEndpoints() {
        e.Wait()
    }
    // Then wait for the protocol workers that served those endpoints.
    for _, p := range s.transportProtocols {
        p.proto.Wait()
    }
    for _, p := range s.networkProtocols {
        p.Wait()
    }

    // Finally wait on each NIC's link endpoint under the read lock.
    s.mu.RLock()
    defer s.mu.RUnlock()
    for _, n := range s.nics {
        n.NetworkLinkEndpoint.Wait()
    }
}
// Resume restarts the stack after a restore. This must be called after the
// entire system has been restored.
func (s *Stack) Resume() {
    // ResumableEndpoint.Resume() may call other methods on s, so we can't hold
    // s.mu while resuming the endpoints: snapshot and clear under the lock,
    // then resume outside it.
    s.mu.Lock()
    pending := s.resumableEndpoints
    s.resumableEndpoints = nil
    s.mu.Unlock()

    for _, ep := range pending {
        ep.Resume(s)
    }
}
// RegisterPacketEndpoint registers ep with the stack, causing it to receive
// all traffic of the specified netProto on the given NIC. If nicID is 0, it
// receives traffic from every NIC.
func (s *Stack) RegisterPacketEndpoint(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) tcpip.Error {
    s.mu.Lock()
    defer s.mu.Unlock()

    if nicID != 0 {
        // Capture on the one requested device.
        nic, ok := s.nics[nicID]
        if !ok {
            return &tcpip.ErrUnknownNICID{}
        }
        return nic.registerPacketEndpoint(netProto, ep)
    }

    // No NIC specified: register with every device, rolling back all prior
    // registrations on the first failure.
    for _, nic := range s.nics {
        if err := nic.registerPacketEndpoint(netProto, ep); err != nil {
            s.unregisterPacketEndpointLocked(0, netProto, ep)
            return err
        }
    }
    return nil
}
// UnregisterPacketEndpoint unregisters ep for packets of the specified
// netProto from the specified NIC. If nicID is 0, ep is unregistered from all
// NICs.
func (s *Stack) UnregisterPacketEndpoint(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) {
    s.mu.Lock()
    s.unregisterPacketEndpointLocked(nicID, netProto, ep)
    s.mu.Unlock()
}
// unregisterPacketEndpointLocked removes ep from one NIC (nicID != 0) or from
// every NIC (nicID == 0). A missing NIC is a no-op.
//
// Precondition: s.mu must be locked.
func (s *Stack) unregisterPacketEndpointLocked(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) {
    if nicID != 0 {
        // Unregister from the single requested device, if it exists.
        if nic, ok := s.nics[nicID]; ok {
            nic.unregisterPacketEndpoint(netProto, ep)
        }
        return
    }

    // No NIC specified: unregister from every device.
    for _, nic := range s.nics {
        nic.unregisterPacketEndpoint(netProto, ep)
    }
}
// WritePacketToRemote writes a payload on the specified NIC using the provided
// network protocol and remote link address.
//
// Returns ErrUnknownDevice if the NIC does not exist.
func (s *Stack) WritePacketToRemote(nicID tcpip.NICID, remote tcpip.LinkAddress, netProto tcpip.NetworkProtocolNumber, payload buffer.VectorisedView) tcpip.Error {
    // Only a read lock is needed to look up the NIC — this matches
    // WriteRawPacket and the other read-only accessors, and avoids
    // serializing concurrent writers behind an exclusive lock.
    s.mu.RLock()
    nic, ok := s.nics[nicID]
    s.mu.RUnlock()
    if !ok {
        return &tcpip.ErrUnknownDevice{}
    }
    // Reserve room for the NIC's headers up front so lower layers don't have
    // to reallocate.
    pkt := NewPacketBuffer(PacketBufferOptions{
        ReserveHeaderBytes: int(nic.MaxHeaderLength()),
        Data:               payload,
    })
    defer pkt.DecRef()
    pkt.NetworkProtocolNumber = netProto
    return nic.WritePacketToRemote(remote, netProto, pkt)
}
// WriteRawPacket writes data directly to the specified NIC without adding any
// headers.
//
// Returns ErrUnknownNICID if the NIC does not exist.
func (s *Stack) WriteRawPacket(nicID tcpip.NICID, proto tcpip.NetworkProtocolNumber, payload buffer.VectorisedView) tcpip.Error {
    s.mu.RLock()
    nic := s.nics[nicID]
    s.mu.RUnlock()
    if nic == nil {
        return &tcpip.ErrUnknownNICID{}
    }

    pkt := NewPacketBuffer(PacketBufferOptions{
        Data: payload,
    })
    defer pkt.DecRef()
    pkt.NetworkProtocolNumber = proto
    return nic.WriteRawPacket(pkt)
}
// NetworkProtocolInstance returns the protocol instance in the stack for the
// specified network protocol, or nil if it is not registered. This method is
// public for protocol implementers and tests to use.
func (s *Stack) NetworkProtocolInstance(num tcpip.NetworkProtocolNumber) NetworkProtocol {
    p, ok := s.networkProtocols[num]
    if !ok {
        return nil
    }
    return p
}
// TransportProtocolInstance returns the protocol instance in the stack for the
// specified transport protocol, or nil if it is not registered. This method is
// public for protocol implementers and tests to use.
func (s *Stack) TransportProtocolInstance(num tcpip.TransportProtocolNumber) TransportProtocol {
    pState, ok := s.transportProtocols[num]
    if !ok {
        return nil
    }
    return pState.proto
}
// AddTCPProbe installs a probe function that will be invoked on every segment
// received by a given TCP endpoint. The probe function is passed a copy of the
// TCP endpoint state before and after processing of the segment.
//
// NOTE: TCPProbe is added only to endpoints created after this call. Endpoints
// created prior to this call will not call the probe function.
//
// Further, installing two different probes back to back can result in some
// endpoints calling the first one and some the second one. There is no
// guarantee provided on which probe will be invoked. Ideally this should only
// be called once per stack.
func (s *Stack) AddTCPProbe(probe TCPProbeFunc) {
    // Atomically publish the probe for newly-created endpoints to pick up.
    s.tcpProbeFunc.Store(probe)
}
// GetTCPProbe returns the TCPProbeFunc if installed with AddTCPProbe, nil
// otherwise.
func (s *Stack) GetTCPProbe() TCPProbeFunc {
	// A comma-ok type assertion handles the never-stored case (Load returns
	// an untyped nil) in one step.
	if probe, ok := s.tcpProbeFunc.Load().(TCPProbeFunc); ok {
		return probe
	}
	return nil
}
// RemoveTCPProbe removes an installed TCP probe.
//
// NOTE: This only ensures that endpoints created after this call do not
// have a probe attached. Endpoints already created will continue to invoke
// TCP probe.
func (s *Stack) RemoveTCPProbe() {
	// This must be TCPProbeFunc(nil) because atomic.Value.Store(nil) panics.
	// Storing a typed nil keeps the value's dynamic type consistent with
	// what AddTCPProbe stores.
	s.tcpProbeFunc.Store(TCPProbeFunc(nil))
}
// JoinGroup joins the given multicast group on the given NIC.
func (s *Stack) JoinGroup(protocol tcpip.NetworkProtocolNumber, nicID tcpip.NICID, multicastAddr tcpip.Address) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	// Guard clause: fail fast on an unknown NIC.
	nic, found := s.nics[nicID]
	if !found {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.joinGroup(protocol, multicastAddr)
}
// LeaveGroup leaves the given multicast group on the given NIC.
func (s *Stack) LeaveGroup(protocol tcpip.NetworkProtocolNumber, nicID tcpip.NICID, multicastAddr tcpip.Address) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	// Guard clause: fail fast on an unknown NIC.
	nic, found := s.nics[nicID]
	if !found {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.leaveGroup(protocol, multicastAddr)
}
// IsInGroup returns true if the NIC with ID nicID has joined the multicast
// group multicastAddr.
func (s *Stack) IsInGroup(nicID tcpip.NICID, multicastAddr tcpip.Address) (bool, tcpip.Error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	nic, found := s.nics[nicID]
	if !found {
		return false, &tcpip.ErrUnknownNICID{}
	}
	return nic.isInGroup(multicastAddr), nil
}
// IPTables returns the stack's iptables.
//
// The returned pointer refers to the stack's live table state; callers share
// it rather than receiving a copy.
func (s *Stack) IPTables() *IPTables {
	return s.tables
}
// ICMPLimit returns the maximum number of ICMP messages that can be sent
// in one second.
//
// Delegates to the stack's ICMP rate limiter.
func (s *Stack) ICMPLimit() rate.Limit {
	return s.icmpRateLimiter.Limit()
}
// SetICMPLimit sets the maximum number of ICMP messages that can be sent
// in one second.
func (s *Stack) SetICMPLimit(newLimit rate.Limit) {
	s.icmpRateLimiter.SetLimit(newLimit)
}
// ICMPBurst returns the maximum number of ICMP messages that can be sent
// in a single burst.
//
// Delegates to the stack's ICMP rate limiter.
func (s *Stack) ICMPBurst() int {
	return s.icmpRateLimiter.Burst()
}
// SetICMPBurst sets the maximum number of ICMP messages that can be sent
// in a single burst.
func (s *Stack) SetICMPBurst(burst int) {
	s.icmpRateLimiter.SetBurst(burst)
}
// AllowICMPMessage returns true if the rate limiter allows at least one
// ICMP message to be sent at this instant.
//
// Note: a true result consumes one token from the limiter.
func (s *Stack) AllowICMPMessage() bool {
	return s.icmpRateLimiter.Allow()
}
// GetNetworkEndpoint returns the NetworkEndpoint with the specified protocol
// number installed on the specified NIC.
func (s *Stack) GetNetworkEndpoint(nicID tcpip.NICID, proto tcpip.NetworkProtocolNumber) (NetworkEndpoint, tcpip.Error) {
	// Read-only lookup of the NIC table: a shared lock suffices and is
	// consistent with the other lookup methods (the original took the
	// exclusive lock for no reason).
	s.mu.RLock()
	defer s.mu.RUnlock()
	nic, ok := s.nics[nicID]
	if !ok {
		return nil, &tcpip.ErrUnknownNICID{}
	}
	return nic.getNetworkEndpoint(proto), nil
}
// NUDConfigurations gets the per-interface NUD configurations.
func (s *Stack) NUDConfigurations(id tcpip.NICID, proto tcpip.NetworkProtocolNumber) (NUDConfigurations, tcpip.Error) {
	// Drop the lock before querying the NIC; only the map lookup needs it.
	s.mu.RLock()
	n, found := s.nics[id]
	s.mu.RUnlock()
	if !found {
		return NUDConfigurations{}, &tcpip.ErrUnknownNICID{}
	}
	return n.nudConfigs(proto)
}
// SetNUDConfigurations sets the per-interface NUD configurations.
//
// Note, if c contains invalid NUD configuration values, it will be fixed to
// use default values for the erroneous values.
func (s *Stack) SetNUDConfigurations(id tcpip.NICID, proto tcpip.NetworkProtocolNumber, c NUDConfigurations) tcpip.Error {
	// Drop the lock before configuring the NIC; only the map lookup needs it.
	s.mu.RLock()
	n, found := s.nics[id]
	s.mu.RUnlock()
	if !found {
		return &tcpip.ErrUnknownNICID{}
	}
	return n.setNUDConfigs(proto, c)
}
// Seed returns a 32 bit value that can be used as a seed value.
//
// NOTE: The seed is generated once during stack initialization only.
// It is therefore stable for the lifetime of the stack.
func (s *Stack) Seed() uint32 {
	return s.seed
}
// Rand returns a reference to a pseudo random generator that can be used
// to generate random numbers as required.
//
// NOTE(review): the returned *rand.Rand is shared — confirm callers
// synchronize if they use it from multiple goroutines.
func (s *Stack) Rand() *rand.Rand {
	return s.randomGenerator
}
// SecureRNG returns the stack's cryptographically secure random number
// generator.
func (s *Stack) SecureRNG() io.Reader {
	return s.secureRNG
}
// FindNICNameFromID returns the name of the NIC for the given NICID.
//
// Returns the empty string when no NIC with that ID exists.
func (s *Stack) FindNICNameFromID(id tcpip.NICID) string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if nic, found := s.nics[id]; found {
		return nic.Name()
	}
	return ""
}
// ParseResult indicates the result of a parsing attempt.
type ParseResult int

// Values are assigned via iota; ParsedOK is the zero value.
const (
	// ParsedOK indicates that a packet was successfully parsed.
	ParsedOK ParseResult = iota

	// UnknownTransportProtocol indicates that the transport protocol is unknown.
	UnknownTransportProtocol

	// TransportLayerParseError indicates that the transport packet was not
	// successfully parsed.
	TransportLayerParseError
)
// ParsePacketBufferTransport parses the provided packet buffer's transport
// header.
func (s *Stack) ParsePacketBufferTransport(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer) ParseResult {
	// Record the protocol on the packet before attempting to parse, exactly
	// as callers of this method expect.
	pkt.TransportProtocolNumber = protocol
	state, known := s.transportProtocols[protocol]
	switch {
	case !known:
		return UnknownTransportProtocol
	case !state.proto.Parse(pkt):
		return TransportLayerParseError
	default:
		return ParsedOK
	}
}
// networkProtocolNumbers returns the network protocol numbers the stack is
// configured with. Order is unspecified (map iteration).
func (s *Stack) networkProtocolNumbers() []tcpip.NetworkProtocolNumber {
	numbers := make([]tcpip.NetworkProtocolNumber, 0, len(s.networkProtocols))
	for num := range s.networkProtocols {
		numbers = append(numbers, num)
	}
	return numbers
}
// isSubnetBroadcastOnNIC reports whether addr is the subnet broadcast address
// of an address assigned to nic for the given protocol.
func isSubnetBroadcastOnNIC(nic *nic, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) bool {
	ep := nic.getAddressOrCreateTempInner(protocol, addr, false /* createTemp */, NeverPrimaryEndpoint)
	if ep == nil {
		return false
	}
	// Release the endpoint reference once we have answered the question.
	defer ep.DecRef()
	return ep.Subnet().IsBroadcast(addr)
}
// IsSubnetBroadcast returns true if the provided address is a subnet-local
// broadcast address on the specified NIC and protocol.
//
// Returns false if the NIC is unknown or if the protocol is unknown or does
// not support addressing.
//
// If the NIC is not specified, the stack will check all NICs.
func (s *Stack) IsSubnetBroadcast(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if nicID == 0 {
		// No NIC specified; scan all of them.
		for _, n := range s.nics {
			if isSubnetBroadcastOnNIC(n, protocol, addr) {
				return true
			}
		}
		return false
	}
	n, found := s.nics[nicID]
	return found && isSubnetBroadcastOnNIC(n, protocol, addr)
}
// PacketEndpointWriteSupported returns true iff packet endpoints support write
// operations.
//
// The value is fixed at stack construction time.
func (s *Stack) PacketEndpointWriteSupported() bool {
	return s.packetEndpointWriteSupported
}
// Code generated by github.com/swiftcarrot/gqlgen, DO NOT EDIT.
package fileupload
import (
"bytes"
"context"
"errors"
"strconv"
"sync"
"sync/atomic"
"github.com/swiftcarrot/gqlgen/example/fileupload/model"
"github.com/swiftcarrot/gqlgen/graphql"
"github.com/swiftcarrot/gqlgen/graphql/introspection"
gqlparser "github.com/vektah/gqlparser/v2"
"github.com/vektah/gqlparser/v2/ast"
)
// region ************************** generated!.gotpl **************************
// NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface.
func | (cfg Config) graphql.ExecutableSchema {
return &executableSchema{
resolvers: cfg.Resolvers,
directives: cfg.Directives,
complexity: cfg.Complexity,
}
}
// Config bundles the user-supplied resolvers, directive implementations, and
// complexity functions consumed by NewExecutableSchema.
type Config struct {
	Resolvers  ResolverRoot
	Directives DirectiveRoot
	Complexity ComplexityRoot
}
// ResolverRoot is implemented by the application to provide resolvers for
// each root operation type in the schema.
type ResolverRoot interface {
	Mutation() MutationResolver
	Query() QueryResolver
}
// DirectiveRoot holds directive implementations; empty because the schema
// declares no directives.
type DirectiveRoot struct {
}
// ComplexityRoot mirrors the schema's types and fields with optional
// per-field complexity functions, consulted by executableSchema.Complexity.
type ComplexityRoot struct {
	File struct {
		Content     func(childComplexity int) int
		ContentType func(childComplexity int) int
		ID          func(childComplexity int) int
		Name        func(childComplexity int) int
	}

	Mutation struct {
		MultipleUpload            func(childComplexity int, files []*graphql.Upload) int
		MultipleUploadWithPayload func(childComplexity int, req []*model.UploadFile) int
		SingleUpload              func(childComplexity int, file graphql.Upload) int
		SingleUploadWithPayload   func(childComplexity int, req model.UploadFile) int
	}

	Query struct {
		Empty func(childComplexity int) int
	}
}
// MutationResolver is implemented by the application to resolve the four
// upload mutations declared in the schema.
type MutationResolver interface {
	SingleUpload(ctx context.Context, file graphql.Upload) (*model.File, error)
	SingleUploadWithPayload(ctx context.Context, req model.UploadFile) (*model.File, error)
	MultipleUpload(ctx context.Context, files []*graphql.Upload) ([]*model.File, error)
	MultipleUploadWithPayload(ctx context.Context, req []*model.UploadFile) ([]*model.File, error)
}
// QueryResolver is implemented by the application to resolve the schema's
// single query field.
type QueryResolver interface {
	Empty(ctx context.Context) (string, error)
}
// executableSchema wires the user Config into gqlgen's ExecutableSchema
// interface (Schema / Complexity / Exec).
type executableSchema struct {
	resolvers  ResolverRoot
	directives DirectiveRoot
	complexity ComplexityRoot
}
// Schema returns the parsed GraphQL schema shared by all executions.
func (e *executableSchema) Schema() *ast.Schema {
	return parsedSchema
}
// Complexity returns the complexity value for the field identified by
// "TypeName.field" and whether a complexity function was configured for it.
// Mutation fields first decode their raw arguments so the configured function
// receives typed values; a decode failure reports (0, false).
func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) {
	ec := executionContext{nil, e}
	_ = ec
	switch typeName + "." + field {

	case "File.content":
		if e.complexity.File.Content == nil {
			break
		}

		return e.complexity.File.Content(childComplexity), true

	case "File.contentType":
		if e.complexity.File.ContentType == nil {
			break
		}

		return e.complexity.File.ContentType(childComplexity), true

	case "File.id":
		if e.complexity.File.ID == nil {
			break
		}

		return e.complexity.File.ID(childComplexity), true

	case "File.name":
		if e.complexity.File.Name == nil {
			break
		}

		return e.complexity.File.Name(childComplexity), true

	case "Mutation.multipleUpload":
		if e.complexity.Mutation.MultipleUpload == nil {
			break
		}

		args, err := ec.field_Mutation_multipleUpload_args(context.TODO(), rawArgs)
		if err != nil {
			return 0, false
		}

		return e.complexity.Mutation.MultipleUpload(childComplexity, args["files"].([]*graphql.Upload)), true

	case "Mutation.multipleUploadWithPayload":
		if e.complexity.Mutation.MultipleUploadWithPayload == nil {
			break
		}

		args, err := ec.field_Mutation_multipleUploadWithPayload_args(context.TODO(), rawArgs)
		if err != nil {
			return 0, false
		}

		return e.complexity.Mutation.MultipleUploadWithPayload(childComplexity, args["req"].([]*model.UploadFile)), true

	case "Mutation.singleUpload":
		if e.complexity.Mutation.SingleUpload == nil {
			break
		}

		args, err := ec.field_Mutation_singleUpload_args(context.TODO(), rawArgs)
		if err != nil {
			return 0, false
		}

		return e.complexity.Mutation.SingleUpload(childComplexity, args["file"].(graphql.Upload)), true

	case "Mutation.singleUploadWithPayload":
		if e.complexity.Mutation.SingleUploadWithPayload == nil {
			break
		}

		args, err := ec.field_Mutation_singleUploadWithPayload_args(context.TODO(), rawArgs)
		if err != nil {
			return 0, false
		}

		return e.complexity.Mutation.SingleUploadWithPayload(childComplexity, args["req"].(model.UploadFile)), true

	case "Query.empty":
		if e.complexity.Query.Empty == nil {
			break
		}

		return e.complexity.Query.Empty(childComplexity), true

	}
	return 0, false
}
// Exec returns a ResponseHandler for the current operation. Queries and
// mutations produce exactly one response (the `first` flag makes subsequent
// invocations return nil); any other operation kind is rejected, since the
// schema declares no subscriptions.
func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
	rc := graphql.GetOperationContext(ctx)
	ec := executionContext{rc, e}
	first := true

	switch rc.Operation.Operation {
	case ast.Query:
		return func(ctx context.Context) *graphql.Response {
			if !first {
				return nil
			}
			first = false
			data := ec._Query(ctx, rc.Operation.SelectionSet)
			var buf bytes.Buffer
			data.MarshalGQL(&buf)

			return &graphql.Response{
				Data: buf.Bytes(),
			}
		}
	case ast.Mutation:
		return func(ctx context.Context) *graphql.Response {
			if !first {
				return nil
			}
			first = false
			data := ec._Mutation(ctx, rc.Operation.SelectionSet)
			var buf bytes.Buffer
			data.MarshalGQL(&buf)

			return &graphql.Response{
				Data: buf.Bytes(),
			}
		}

	default:
		return graphql.OneShot(graphql.ErrorResponse(ctx, "unsupported GraphQL operation"))
	}
}
// executionContext pairs the per-request operation context with the schema so
// generated resolver helpers can reach both through one receiver.
type executionContext struct {
	*graphql.OperationContext
	*executableSchema
}
// introspectSchema wraps the parsed schema for the __schema introspection
// field, honoring the DisableIntrospection switch.
func (ec *executionContext) introspectSchema() (*introspection.Schema, error) {
	if ec.DisableIntrospection {
		return nil, errors.New("introspection disabled")
	}
	return introspection.WrapSchema(parsedSchema), nil
}
// introspectType wraps the named schema type for the __type introspection
// field, honoring the DisableIntrospection switch.
func (ec *executionContext) introspectType(name string) (*introspection.Type, error) {
	if ec.DisableIntrospection {
		return nil, errors.New("introspection disabled")
	}
	return introspection.WrapTypeFromDef(parsedSchema, parsedSchema.Types[name]), nil
}
// sources holds the embedded SDL for schema.graphql; backticks inside the
// schema text are spliced in via string concatenation because Go raw string
// literals cannot contain backticks.
var sources = []*ast.Source{
	{Name: "schema.graphql", Input: `"The ` + "`" + `Upload` + "`" + ` scalar type represents a multipart file upload."
scalar Upload

"The ` + "`" + `File` + "`" + ` type, represents the response of uploading a file."
type File {
    id: Int!
    name: String!
    content: String!
    contentType: String!
}

"The ` + "`" + `UploadFile` + "`" + ` type, represents the request for uploading a file with certain payload."
input UploadFile {
    id: Int!
    file: Upload!
}

"The ` + "`" + `Query` + "`" + ` type, represents all of the entry points into our object graph."
type Query {
    empty: String!
}

"The ` + "`" + `Mutation` + "`" + ` type, represents all updates we can make to our data."
type Mutation {
    singleUpload(file: Upload!): File!
    singleUploadWithPayload(req: UploadFile!): File!
    multipleUpload(files: [Upload!]!): [File!]!
    multipleUploadWithPayload(req: [UploadFile!]!): [File!]!
}
`, BuiltIn: false},
}

// parsedSchema is built once at package init; MustLoadSchema panics on an
// invalid SDL, which is acceptable for generated, compile-time-known input.
var parsedSchema = gqlparser.MustLoadSchema(sources...)
// endregion ************************** generated!.gotpl **************************
// region ***************************** args.gotpl *****************************
// field_Mutation_multipleUploadWithPayload_args decodes the "req" argument
// ([UploadFile!]!) for Mutation.multipleUploadWithPayload.
func (ec *executionContext) field_Mutation_multipleUploadWithPayload_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
	var err error
	args := map[string]interface{}{}
	var arg0 []*model.UploadFile
	if tmp, ok := rawArgs["req"]; ok {
		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("req"))
		arg0, err = ec.unmarshalNUploadFile2ᚕᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐUploadFileᚄ(ctx, tmp)
		if err != nil {
			return nil, err
		}
	}
	args["req"] = arg0
	return args, nil
}
// field_Mutation_multipleUpload_args decodes the "files" argument
// ([Upload!]!) for Mutation.multipleUpload.
func (ec *executionContext) field_Mutation_multipleUpload_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
	var err error
	args := map[string]interface{}{}
	var arg0 []*graphql.Upload
	if tmp, ok := rawArgs["files"]; ok {
		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("files"))
		arg0, err = ec.unmarshalNUpload2ᚕᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUploadᚄ(ctx, tmp)
		if err != nil {
			return nil, err
		}
	}
	args["files"] = arg0
	return args, nil
}
// field_Mutation_singleUploadWithPayload_args decodes the "req" argument
// (UploadFile!) for Mutation.singleUploadWithPayload.
func (ec *executionContext) field_Mutation_singleUploadWithPayload_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
	var err error
	args := map[string]interface{}{}
	var arg0 model.UploadFile
	if tmp, ok := rawArgs["req"]; ok {
		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("req"))
		arg0, err = ec.unmarshalNUploadFile2githubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐUploadFile(ctx, tmp)
		if err != nil {
			return nil, err
		}
	}
	args["req"] = arg0
	return args, nil
}
// field_Mutation_singleUpload_args decodes the "file" argument (Upload!)
// for Mutation.singleUpload.
func (ec *executionContext) field_Mutation_singleUpload_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
	var err error
	args := map[string]interface{}{}
	var arg0 graphql.Upload
	if tmp, ok := rawArgs["file"]; ok {
		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("file"))
		arg0, err = ec.unmarshalNUpload2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUpload(ctx, tmp)
		if err != nil {
			return nil, err
		}
	}
	args["file"] = arg0
	return args, nil
}
// field_Query___type_args decodes the "name" argument (String!) for the
// __type introspection field.
func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
	var err error
	args := map[string]interface{}{}
	var arg0 string
	if tmp, ok := rawArgs["name"]; ok {
		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
		arg0, err = ec.unmarshalNString2string(ctx, tmp)
		if err != nil {
			return nil, err
		}
	}
	args["name"] = arg0
	return args, nil
}
// field___Type_enumValues_args decodes the optional "includeDeprecated"
// argument for the __Type.enumValues introspection field.
func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
	var err error
	args := map[string]interface{}{}
	var arg0 bool
	if tmp, ok := rawArgs["includeDeprecated"]; ok {
		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated"))
		arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp)
		if err != nil {
			return nil, err
		}
	}
	args["includeDeprecated"] = arg0
	return args, nil
}
// field___Type_fields_args decodes the optional "includeDeprecated"
// argument for the __Type.fields introspection field.
func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
	var err error
	args := map[string]interface{}{}
	var arg0 bool
	if tmp, ok := rawArgs["includeDeprecated"]; ok {
		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated"))
		arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp)
		if err != nil {
			return nil, err
		}
	}
	args["includeDeprecated"] = arg0
	return args, nil
}
// endregion ***************************** args.gotpl *****************************
// region ************************** directives.gotpl **************************
// endregion ************************** directives.gotpl **************************
// region **************************** field.gotpl *****************************
// _File_id resolves the non-null File.id field (Int!) from the model's ID
// attribute; panics in resolvers are recovered into GraphQL errors.
func (ec *executionContext) _File_id(ctx context.Context, field graphql.CollectedField, obj *model.File) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "File",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.ID, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(int)
	fc.Result = res
	return ec.marshalNInt2int(ctx, field.Selections, res)
}
// _File_name resolves the non-null File.name field (String!) from the model's
// Name attribute.
func (ec *executionContext) _File_name(ctx context.Context, field graphql.CollectedField, obj *model.File) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "File",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Name, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalNString2string(ctx, field.Selections, res)
}
// _File_content resolves the non-null File.content field (String!) from the
// model's Content attribute.
func (ec *executionContext) _File_content(ctx context.Context, field graphql.CollectedField, obj *model.File) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "File",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Content, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalNString2string(ctx, field.Selections, res)
}
// _File_contentType resolves the non-null File.contentType field (String!)
// from the model's ContentType attribute.
func (ec *executionContext) _File_contentType(ctx context.Context, field graphql.CollectedField, obj *model.File) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "File",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.ContentType, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalNString2string(ctx, field.Selections, res)
}
// _Mutation_singleUpload decodes the field arguments and delegates to the
// user-supplied MutationResolver.SingleUpload, marshaling the non-null File
// result.
func (ec *executionContext) _Mutation_singleUpload(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "Mutation",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: true,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	rawArgs := field.ArgumentMap(ec.Variables)
	args, err := ec.field_Mutation_singleUpload_args(ctx, rawArgs)
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	fc.Args = args
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return ec.resolvers.Mutation().SingleUpload(rctx, args["file"].(graphql.Upload))
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(*model.File)
	fc.Result = res
	return ec.marshalNFile2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐFile(ctx, field.Selections, res)
}
// _Mutation_singleUploadWithPayload decodes the field arguments and delegates
// to MutationResolver.SingleUploadWithPayload, marshaling the non-null File
// result.
func (ec *executionContext) _Mutation_singleUploadWithPayload(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "Mutation",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: true,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	rawArgs := field.ArgumentMap(ec.Variables)
	args, err := ec.field_Mutation_singleUploadWithPayload_args(ctx, rawArgs)
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	fc.Args = args
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return ec.resolvers.Mutation().SingleUploadWithPayload(rctx, args["req"].(model.UploadFile))
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(*model.File)
	fc.Result = res
	return ec.marshalNFile2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐFile(ctx, field.Selections, res)
}
// _Mutation_multipleUpload decodes the field arguments and delegates to
// MutationResolver.MultipleUpload, marshaling the non-null [File!]! result.
func (ec *executionContext) _Mutation_multipleUpload(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "Mutation",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: true,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	rawArgs := field.ArgumentMap(ec.Variables)
	args, err := ec.field_Mutation_multipleUpload_args(ctx, rawArgs)
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	fc.Args = args
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return ec.resolvers.Mutation().MultipleUpload(rctx, args["files"].([]*graphql.Upload))
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.([]*model.File)
	fc.Result = res
	return ec.marshalNFile2ᚕᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐFileᚄ(ctx, field.Selections, res)
}
// _Mutation_multipleUploadWithPayload decodes the field arguments and
// delegates to MutationResolver.MultipleUploadWithPayload, marshaling the
// non-null [File!]! result.
func (ec *executionContext) _Mutation_multipleUploadWithPayload(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "Mutation",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: true,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	rawArgs := field.ArgumentMap(ec.Variables)
	args, err := ec.field_Mutation_multipleUploadWithPayload_args(ctx, rawArgs)
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	fc.Args = args
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return ec.resolvers.Mutation().MultipleUploadWithPayload(rctx, args["req"].([]*model.UploadFile))
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.([]*model.File)
	fc.Result = res
	return ec.marshalNFile2ᚕᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐFileᚄ(ctx, field.Selections, res)
}
// _Query_empty delegates to QueryResolver.Empty and marshals the non-null
// String! result.
func (ec *executionContext) _Query_empty(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "Query",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: true,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return ec.resolvers.Query().Empty(rctx)
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalNString2string(ctx, field.Selections, res)
}
// _Query___type serves the __type introspection field; a nil result simply
// marshals to null (the field is nullable).
func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "Query",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	rawArgs := field.ArgumentMap(ec.Variables)
	args, err := ec.field_Query___type_args(ctx, rawArgs)
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	fc.Args = args
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return ec.introspectType(args["name"].(string))
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*introspection.Type)
	fc.Result = res
	return ec.marshalO__Type2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
// _Query___schema serves the __schema introspection field; a nil result
// simply marshals to null (the field is nullable).
func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "Query",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return ec.introspectSchema()
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*introspection.Schema)
	fc.Result = res
	return ec.marshalO__Schema2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res)
}
// ___Directive_name resolves the non-null __Directive.name introspection
// field from the wrapped directive's Name.
func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Directive",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Name, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalNString2string(ctx, field.Selections, res)
}
// ___Directive_description resolves the nullable __Directive.description
// introspection field.
func (ec *executionContext) ___Directive_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Directive",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Description, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalOString2string(ctx, field.Selections, res)
}
// ___Directive_locations resolves the non-null __Directive.locations
// introspection field ([__DirectiveLocation!]!).
func (ec *executionContext) ___Directive_locations(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Directive",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Locations, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.([]string)
	fc.Result = res
	return ec.marshalN__DirectiveLocation2ᚕstringᚄ(ctx, field.Selections, res)
}
// ___Directive_args resolves the non-null __Directive.args introspection
// field ([__InputValue!]!).
func (ec *executionContext) ___Directive_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Directive",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}

	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Args, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.([]introspection.InputValue)
	fc.Result = res
	return ec.marshalN__InputValue2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
}
// ___Directive_isRepeatable resolves the non-nullable __Directive.isRepeatable
// introspection field (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Directive_isRepeatable(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Directive",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.IsRepeatable, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(bool)
	fc.Result = res
	return ec.marshalNBoolean2bool(ctx, field.Selections, res)
}
// ___EnumValue_name resolves the non-nullable __EnumValue.name introspection
// field (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___EnumValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__EnumValue",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Name, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalNString2string(ctx, field.Selections, res)
}
// ___EnumValue_description resolves the nullable __EnumValue.description
// introspection field; a nil result simply marshals to null
// (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___EnumValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__EnumValue",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Description, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalOString2string(ctx, field.Selections, res)
}
// ___EnumValue_isDeprecated resolves the non-nullable __EnumValue.isDeprecated
// field by calling the IsDeprecated() accessor (hence IsMethod: true);
// gqlgen-generated from field.gotpl.
func (ec *executionContext) ___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__EnumValue",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.IsDeprecated(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(bool)
	fc.Result = res
	return ec.marshalNBoolean2bool(ctx, field.Selections, res)
}
// ___EnumValue_deprecationReason resolves the nullable
// __EnumValue.deprecationReason field via the DeprecationReason() accessor
// (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__EnumValue",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.DeprecationReason(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*string)
	fc.Result = res
	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
// ___Field_name resolves the non-nullable __Field.name introspection field
// (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Field_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Field",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Name, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalNString2string(ctx, field.Selections, res)
}
// ___Field_description resolves the nullable __Field.description introspection
// field (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Field_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Field",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Description, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalOString2string(ctx, field.Selections, res)
}
// ___Field_args resolves the non-nullable __Field.args introspection field
// (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Field_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Field",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Args, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.([]introspection.InputValue)
	fc.Result = res
	return ec.marshalN__InputValue2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
}
// ___Field_type resolves the non-nullable __Field.type introspection field
// (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Field_type(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Field",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Type, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(*introspection.Type)
	fc.Result = res
	return ec.marshalN__Type2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
// ___Field_isDeprecated resolves the non-nullable __Field.isDeprecated field
// via the IsDeprecated() accessor (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Field_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Field",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.IsDeprecated(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(bool)
	fc.Result = res
	return ec.marshalNBoolean2bool(ctx, field.Selections, res)
}
// ___Field_deprecationReason resolves the nullable __Field.deprecationReason
// field via the DeprecationReason() accessor (gqlgen-generated from
// field.gotpl).
func (ec *executionContext) ___Field_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Field",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.DeprecationReason(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*string)
	fc.Result = res
	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
// ___InputValue_name resolves the non-nullable __InputValue.name introspection
// field (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___InputValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__InputValue",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Name, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalNString2string(ctx, field.Selections, res)
}
// ___InputValue_description resolves the nullable __InputValue.description
// introspection field (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___InputValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__InputValue",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Description, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalOString2string(ctx, field.Selections, res)
}
// ___InputValue_type resolves the non-nullable __InputValue.type introspection
// field (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___InputValue_type(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__InputValue",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Type, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(*introspection.Type)
	fc.Result = res
	return ec.marshalN__Type2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
// ___InputValue_defaultValue resolves the nullable __InputValue.defaultValue
// introspection field (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__InputValue",
		Field:      field,
		Args:       nil,
		IsMethod:   false,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.DefaultValue, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*string)
	fc.Result = res
	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
// ___Schema_types resolves the non-nullable __Schema.types introspection field
// via the Types() accessor (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Schema_types(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Schema",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Types(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.([]introspection.Type)
	fc.Result = res
	return ec.marshalN__Type2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
}
// ___Schema_queryType resolves the non-nullable __Schema.queryType
// introspection field via the QueryType() accessor (gqlgen-generated from
// field.gotpl).
func (ec *executionContext) ___Schema_queryType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Schema",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.QueryType(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(*introspection.Type)
	fc.Result = res
	return ec.marshalN__Type2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
// ___Schema_mutationType resolves the nullable __Schema.mutationType
// introspection field — nil when the schema declares no mutation root
// (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Schema_mutationType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Schema",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.MutationType(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*introspection.Type)
	fc.Result = res
	return ec.marshalO__Type2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
// ___Schema_subscriptionType resolves the nullable __Schema.subscriptionType
// introspection field — nil when the schema declares no subscription root
// (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Schema",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.SubscriptionType(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*introspection.Type)
	fc.Result = res
	return ec.marshalO__Type2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
// ___Schema_directives resolves the non-nullable __Schema.directives
// introspection field via the Directives() accessor (gqlgen-generated from
// field.gotpl).
func (ec *executionContext) ___Schema_directives(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Schema",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Directives(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.([]introspection.Directive)
	fc.Result = res
	return ec.marshalN__Directive2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx, field.Selections, res)
}
// ___Type_kind resolves the non-nullable __Type.kind introspection field via
// the Kind() accessor (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Type_kind(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Type",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Kind(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		if !graphql.HasFieldError(ctx, fc) {
			ec.Errorf(ctx, "must not be null")
		}
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalN__TypeKind2string(ctx, field.Selections, res)
}
// ___Type_name resolves the nullable __Type.name introspection field — nil for
// wrapper types such as lists and non-nulls (gqlgen-generated from
// field.gotpl).
func (ec *executionContext) ___Type_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Type",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Name(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*string)
	fc.Result = res
	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
// ___Type_description resolves the nullable __Type.description introspection
// field via the Description() accessor (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Type_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Type",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Description(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(string)
	fc.Result = res
	return ec.marshalOString2string(ctx, field.Selections, res)
}
// ___Type_fields resolves the nullable __Type.fields(includeDeprecated:)
// introspection field. Unlike the zero-argument resolvers it first decodes the
// field arguments from ec.Variables (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Type_fields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Type",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	// Decode the includeDeprecated argument before invoking the accessor.
	rawArgs := field.ArgumentMap(ec.Variables)
	args, err := ec.field___Type_fields_args(ctx, rawArgs)
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	fc.Args = args
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Fields(args["includeDeprecated"].(bool)), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.([]introspection.Field)
	fc.Result = res
	return ec.marshalO__Field2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx, field.Selections, res)
}
// ___Type_interfaces resolves the nullable __Type.interfaces introspection
// field via the Interfaces() accessor (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Type_interfaces(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Type",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.Interfaces(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.([]introspection.Type)
	fc.Result = res
	return ec.marshalO__Type2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
}
// ___Type_possibleTypes resolves the nullable __Type.possibleTypes
// introspection field via the PossibleTypes() accessor (gqlgen-generated from
// field.gotpl).
func (ec *executionContext) ___Type_possibleTypes(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Type",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.PossibleTypes(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.([]introspection.Type)
	fc.Result = res
	return ec.marshalO__Type2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
}
// ___Type_enumValues resolves the nullable
// __Type.enumValues(includeDeprecated:) introspection field; decodes the
// field arguments from ec.Variables first (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Type_enumValues(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Type",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	// Decode the includeDeprecated argument before invoking the accessor.
	rawArgs := field.ArgumentMap(ec.Variables)
	args, err := ec.field___Type_enumValues_args(ctx, rawArgs)
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	fc.Args = args
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.EnumValues(args["includeDeprecated"].(bool)), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.([]introspection.EnumValue)
	fc.Result = res
	return ec.marshalO__EnumValue2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx, field.Selections, res)
}
// ___Type_inputFields resolves the nullable __Type.inputFields introspection
// field via the InputFields() accessor (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Type_inputFields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Type",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.InputFields(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.([]introspection.InputValue)
	fc.Result = res
	return ec.marshalO__InputValue2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
}
// ___Type_ofType resolves the nullable __Type.ofType introspection field —
// the wrapped type for LIST/NON_NULL wrappers, nil otherwise
// (gqlgen-generated from field.gotpl).
func (ec *executionContext) ___Type_ofType(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
	// Convert a resolver panic into a GraphQL error with a null result.
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	fc := &graphql.FieldContext{
		Object:     "__Type",
		Field:      field,
		Args:       nil,
		IsMethod:   true,
		IsResolver: false,
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.OfType(), nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*introspection.Type)
	fc.Result = res
	return ec.marshalO__Type2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
// endregion **************************** field.gotpl *****************************
// region **************************** input.gotpl *****************************
// unmarshalInputUploadFile decodes a GraphQL input map into a model.UploadFile.
// Keys other than "id" and "file" fall through the switch and are silently
// ignored; decoding stops at the first field error (gqlgen-generated from
// input.gotpl).
func (ec *executionContext) unmarshalInputUploadFile(ctx context.Context, obj interface{}) (model.UploadFile, error) {
	var it model.UploadFile
	// Copy into a fresh map before iterating (template-standard shape).
	asMap := map[string]interface{}{}
	for k, v := range obj.(map[string]interface{}) {
		asMap[k] = v
	}
	for k, v := range asMap {
		switch k {
		case "id":
			var err error
			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id"))
			it.ID, err = ec.unmarshalNInt2int(ctx, v)
			if err != nil {
				return it, err
			}
		case "file":
			var err error
			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("file"))
			it.File, err = ec.unmarshalNUpload2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUpload(ctx, v)
			if err != nil {
				return it, err
			}
		}
	}
	return it, nil
}
// endregion **************************** input.gotpl *****************************
// region ************************** interface.gotpl ***************************
// endregion ************************** interface.gotpl ***************************
// region **************************** object.gotpl ****************************
// fileImplementors lists the GraphQL type names this marshaler can serialize.
var fileImplementors = []string{"File"}

// _File marshals a model.File as the GraphQL "File" object. If any
// non-nullable field yields graphql.Null the whole object collapses to
// graphql.Null (gqlgen-generated from object.gotpl).
func (ec *executionContext) _File(ctx context.Context, sel ast.SelectionSet, obj *model.File) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, fileImplementors)
	out := graphql.NewFieldSet(fields)
	// Counts non-nullable fields that resolved to null.
	var invalids uint32
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("File")
		case "id":
			out.Values[i] = ec._File_id(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "name":
			out.Values[i] = ec._File_name(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "content":
			out.Values[i] = ec._File_content(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "contentType":
			out.Values[i] = ec._File_contentType(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch()
	if invalids > 0 {
		return graphql.Null
	}
	return out
}
// mutationImplementors lists the GraphQL type names this marshaler can serialize.
var mutationImplementors = []string{"Mutation"}

// _Mutation executes the selected mutation fields serially (mutations must not
// run concurrently per the GraphQL spec) and marshals the result; collapses to
// graphql.Null if any non-nullable field nulled out (gqlgen-generated from
// object.gotpl).
func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, mutationImplementors)
	ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
		Object: "Mutation",
	})
	out := graphql.NewFieldSet(fields)
	// Counts non-nullable fields that resolved to null.
	var invalids uint32
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("Mutation")
		case "singleUpload":
			out.Values[i] = ec._Mutation_singleUpload(ctx, field)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "singleUploadWithPayload":
			out.Values[i] = ec._Mutation_singleUploadWithPayload(ctx, field)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "multipleUpload":
			out.Values[i] = ec._Mutation_multipleUpload(ctx, field)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "multipleUploadWithPayload":
			out.Values[i] = ec._Mutation_multipleUploadWithPayload(ctx, field)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch()
	if invalids > 0 {
		return graphql.Null
	}
	return out
}
// queryImplementors lists the GraphQL type names this marshaler can serialize.
var queryImplementors = []string{"Query"}

// _Query executes the selected query fields and marshals the result. The
// "empty" field is resolved concurrently via out.Concurrently, so its null
// counter uses atomic.AddUint32; introspection fields (__type, __schema)
// resolve inline (gqlgen-generated from object.gotpl).
func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, queryImplementors)
	ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
		Object: "Query",
	})
	out := graphql.NewFieldSet(fields)
	// Counts non-nullable fields that resolved to null.
	var invalids uint32
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("Query")
		case "empty":
			field := field // capture per-iteration copy for the goroutine closure
			out.Concurrently(i, func() (res graphql.Marshaler) {
				defer func() {
					if r := recover(); r != nil {
						ec.Error(ctx, ec.Recover(ctx, r))
					}
				}()
				res = ec._Query_empty(ctx, field)
				if res == graphql.Null {
					atomic.AddUint32(&invalids, 1)
				}
				return res
			})
		case "__type":
			out.Values[i] = ec._Query___type(ctx, field)
		case "__schema":
			out.Values[i] = ec._Query___schema(ctx, field)
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch()
	if invalids > 0 {
		return graphql.Null
	}
	return out
}
// __DirectiveImplementors lists the GraphQL type names this marshaler can serialize.
var __DirectiveImplementors = []string{"__Directive"}

// ___Directive marshals an introspection.Directive as the __Directive
// introspection object; collapses to graphql.Null if any non-nullable field
// nulled out (gqlgen-generated from object.gotpl).
func (ec *executionContext) ___Directive(ctx context.Context, sel ast.SelectionSet, obj *introspection.Directive) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, __DirectiveImplementors)
	out := graphql.NewFieldSet(fields)
	// Counts non-nullable fields that resolved to null.
	var invalids uint32
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("__Directive")
		case "name":
			out.Values[i] = ec.___Directive_name(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "description":
			out.Values[i] = ec.___Directive_description(ctx, field, obj)
		case "locations":
			out.Values[i] = ec.___Directive_locations(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "args":
			out.Values[i] = ec.___Directive_args(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "isRepeatable":
			out.Values[i] = ec.___Directive_isRepeatable(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch()
	if invalids > 0 {
		return graphql.Null
	}
	return out
}
// __EnumValueImplementors lists the GraphQL type names this marshaler can serialize.
var __EnumValueImplementors = []string{"__EnumValue"}

// ___EnumValue marshals an introspection.EnumValue as the __EnumValue
// introspection object; collapses to graphql.Null if any non-nullable field
// nulled out (gqlgen-generated from object.gotpl).
func (ec *executionContext) ___EnumValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.EnumValue) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, __EnumValueImplementors)
	out := graphql.NewFieldSet(fields)
	// Counts non-nullable fields that resolved to null.
	var invalids uint32
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("__EnumValue")
		case "name":
			out.Values[i] = ec.___EnumValue_name(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "description":
			out.Values[i] = ec.___EnumValue_description(ctx, field, obj)
		case "isDeprecated":
			out.Values[i] = ec.___EnumValue_isDeprecated(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "deprecationReason":
			out.Values[i] = ec.___EnumValue_deprecationReason(ctx, field, obj)
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch()
	if invalids > 0 {
		return graphql.Null
	}
	return out
}
// __FieldImplementors lists the GraphQL type names this marshaler can serialize.
var __FieldImplementors = []string{"__Field"}

// ___Field marshals an introspection.Field as the __Field introspection
// object; collapses to graphql.Null if any non-nullable field nulled out
// (gqlgen-generated from object.gotpl).
func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, __FieldImplementors)
	out := graphql.NewFieldSet(fields)
	// Counts non-nullable fields that resolved to null.
	var invalids uint32
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("__Field")
		case "name":
			out.Values[i] = ec.___Field_name(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "description":
			out.Values[i] = ec.___Field_description(ctx, field, obj)
		case "args":
			out.Values[i] = ec.___Field_args(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "type":
			out.Values[i] = ec.___Field_type(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "isDeprecated":
			out.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				invalids++
			}
		case "deprecationReason":
			out.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch()
	if invalids > 0 {
		return graphql.Null
	}
	return out
}
var __InputValueImplementors = []string{"__InputValue"}
func (ec *executionContext) ___InputValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.InputValue) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __InputValueImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__InputValue")
case "name":
out.Values[i] = ec.___InputValue_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "description":
out.Values[i] = ec.___InputValue_description(ctx, field, obj)
case "type":
out.Values[i] = ec.___InputValue_type(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "defaultValue":
out.Values[i] = ec.___InputValue_defaultValue(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __SchemaImplementors = []string{"__Schema"}
func (ec *executionContext) ___Schema(ctx context.Context, sel ast.SelectionSet, obj *introspection.Schema) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __SchemaImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__Schema")
case "types":
out.Values[i] = ec.___Schema_types(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "queryType":
out.Values[i] = ec.___Schema_queryType(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "mutationType":
out.Values[i] = ec.___Schema_mutationType(ctx, field, obj)
case "subscriptionType":
out.Values[i] = ec.___Schema_subscriptionType(ctx, field, obj)
case "directives":
out.Values[i] = ec.___Schema_directives(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __TypeImplementors = []string{"__Type"}
func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, obj *introspection.Type) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __TypeImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__Type")
case "kind":
out.Values[i] = ec.___Type_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "name":
out.Values[i] = ec.___Type_name(ctx, field, obj)
case "description":
out.Values[i] = ec.___Type_description(ctx, field, obj)
case "fields":
out.Values[i] = ec.___Type_fields(ctx, field, obj)
case "interfaces":
out.Values[i] = ec.___Type_interfaces(ctx, field, obj)
case "possibleTypes":
out.Values[i] = ec.___Type_possibleTypes(ctx, field, obj)
case "enumValues":
out.Values[i] = ec.___Type_enumValues(ctx, field, obj)
case "inputFields":
out.Values[i] = ec.___Type_inputFields(ctx, field, obj)
case "ofType":
out.Values[i] = ec.___Type_ofType(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
// endregion **************************** object.gotpl ****************************
// region ***************************** type.gotpl *****************************
func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
res, err := graphql.UnmarshalBoolean(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler {
res := graphql.MarshalBoolean(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) marshalNFile2githubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐFile(ctx context.Context, sel ast.SelectionSet, v model.File) graphql.Marshaler {
return ec._File(ctx, sel, &v)
}
func (ec *executionContext) marshalNFile2ᚕᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐFileᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.File) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNFile2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐFile(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) marshalNFile2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐFile(ctx context.Context, sel ast.SelectionSet, v *model.File) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._File(ctx, sel, v)
}
func (ec *executionContext) unmarshalNInt2int(ctx context.Context, v interface{}) (int, error) {
res, err := graphql.UnmarshalInt(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler {
res := graphql.MarshalInt(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalNString2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
res := graphql.MarshalString(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalNUpload2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUpload(ctx context.Context, v interface{}) (graphql.Upload, error) {
res, err := graphql.UnmarshalUpload(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNUpload2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUpload(ctx context.Context, sel ast.SelectionSet, v graphql.Upload) graphql.Marshaler {
res := graphql.MarshalUpload(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalNUpload2ᚕᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUploadᚄ(ctx context.Context, v interface{}) ([]*graphql.Upload, error) {
var vSlice []interface{}
if v != nil {
if tmp1, ok := v.([]interface{}); ok {
vSlice = tmp1
} else {
vSlice = []interface{}{v}
}
}
var err error
res := make([]*graphql.Upload, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
res[i], err = ec.unmarshalNUpload2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUpload(ctx, vSlice[i])
if err != nil {
return nil, err
}
}
return res, nil
}
func (ec *executionContext) marshalNUpload2ᚕᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUploadᚄ(ctx context.Context, sel ast.SelectionSet, v []*graphql.Upload) graphql.Marshaler {
ret := make(graphql.Array, len(v))
for i := range v {
ret[i] = ec.marshalNUpload2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUpload(ctx, sel, v[i])
}
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) unmarshalNUpload2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUpload(ctx context.Context, v interface{}) (*graphql.Upload, error) {
res, err := graphql.UnmarshalUpload(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNUpload2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚐUpload(ctx context.Context, sel ast.SelectionSet, v *graphql.Upload) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := graphql.MarshalUpload(*v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalNUploadFile2githubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐUploadFile(ctx context.Context, v interface{}) (model.UploadFile, error) {
res, err := ec.unmarshalInputUploadFile(ctx, v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) unmarshalNUploadFile2ᚕᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐUploadFileᚄ(ctx context.Context, v interface{}) ([]*model.UploadFile, error) {
var vSlice []interface{}
if v != nil {
if tmp1, ok := v.([]interface{}); ok {
vSlice = tmp1
} else {
vSlice = []interface{}{v}
}
}
var err error
res := make([]*model.UploadFile, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
res[i], err = ec.unmarshalNUploadFile2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐUploadFile(ctx, vSlice[i])
if err != nil {
return nil, err
}
}
return res, nil
}
func (ec *executionContext) unmarshalNUploadFile2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋexampleᚋfileuploadᚋmodelᚐUploadFile(ctx context.Context, v interface{}) (*model.UploadFile, error) {
res, err := ec.unmarshalInputUploadFile(ctx, v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalN__Directive2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v introspection.Directive) graphql.Marshaler {
return ec.___Directive(ctx, sel, &v)
}
func (ec *executionContext) marshalN__Directive2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Directive) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Directive2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) unmarshalN__DirectiveLocation2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalN__DirectiveLocation2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
res := graphql.MarshalString(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) {
var vSlice []interface{}
if v != nil {
if tmp1, ok := v.([]interface{}); ok {
vSlice = tmp1
} else {
vSlice = []interface{}{v}
}
}
var err error
res := make([]string, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
res[i], err = ec.unmarshalN__DirectiveLocation2string(ctx, vSlice[i])
if err != nil {
return nil, err
}
}
return res, nil
}
func (ec *executionContext) marshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__DirectiveLocation2string(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) marshalN__EnumValue2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx context.Context, sel ast.SelectionSet, v introspection.EnumValue) graphql.Marshaler {
return ec.___EnumValue(ctx, sel, &v)
}
func (ec *executionContext) marshalN__Field2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx context.Context, sel ast.SelectionSet, v introspection.Field) graphql.Marshaler {
return ec.___Field(ctx, sel, &v)
}
func (ec *executionContext) marshalN__InputValue2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel ast.SelectionSet, v introspection.InputValue) graphql.Marshaler {
return ec.___InputValue(ctx, sel, &v)
}
func (ec *executionContext) marshalN__InputValue2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__InputValue2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) marshalN__Type2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v introspection.Type) graphql.Marshaler {
return ec.___Type(ctx, sel, &v)
}
func (ec *executionContext) marshalN__Type2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Type2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) marshalN__Type2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec.___Type(ctx, sel, v)
}
func (ec *executionContext) unmarshalN__TypeKind2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
res := graphql.MarshalString(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalOBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
res, err := graphql.UnmarshalBoolean(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler {
return graphql.MarshalBoolean(v)
}
func (ec *executionContext) unmarshalOBoolean2ᚖbool(ctx context.Context, v interface{}) (*bool, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalBoolean(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOBoolean2ᚖbool(ctx context.Context, sel ast.SelectionSet, v *bool) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalBoolean(*v)
}
func (ec *executionContext) unmarshalOString2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
return graphql.MarshalString(v)
}
func (ec *executionContext) unmarshalOString2ᚖstring(ctx context.Context, v interface{}) (*string, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalString(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOString2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalString(*v)
}
func (ec *executionContext) marshalO__EnumValue2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.EnumValue) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__EnumValue2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) marshalO__Field2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Field) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Field2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) marshalO__InputValue2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__InputValue2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) marshalO__Schema2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx context.Context, sel ast.SelectionSet, v *introspection.Schema) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec.___Schema(ctx, sel, v)
}
func (ec *executionContext) marshalO__Type2ᚕgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Type2githubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
for _, e := range ret {
if e == graphql.Null {
return graphql.Null
}
}
return ret
}
func (ec *executionContext) marshalO__Type2ᚖgithubᚗcomᚋswiftcarrotᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec.___Type(ctx, sel, v)
}
// endregion ***************************** type.gotpl *****************************
| NewExecutableSchema |
users.py | from json import loads
from tenable_io.api.base import BaseApi, BaseRequest
from tenable_io.api.models import User, UserKeys, UserList
class UsersApi(BaseApi):
def get(self, user_id):
response = self._client.get('users/%(user_id)s', {'user_id': user_id})
return User.from_json(response.text)
def list(self):
"""Return the user list.
:return: An instance of :class:`tenable_io.api.models.UserList`.
"""
response = self._client.get('users')
return UserList.from_json(response.text)
def impersonate(self, user_id):
response = self._client.post('users/%(user_id)s/impersonate', path_params={'user_id': user_id})
return loads(response.text)
def create(self, user_create):
"""Create a new user.
:param user_create: An instance of :class:`UserCreateRequest`.
:raise TenableIOApiException: When API error is encountered.
:return: The ID of the created user.
"""
response = self._client.post('users', user_create)
return loads(response.text).get('id')
def edit(self, user_id, user_edit):
"""Edit an existing user.
:param user_id: The user ID.
:param user_edit: An instance of :class:`UserEditRequest`.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.User`.
"""
response = self._client.put('users/%(user_id)s', user_edit, {'user_id': user_id})
return User.from_json(response.text)
def delete(self, user_id):
"""Delete a user.
:param user_id: The user ID.
:raise TenableIOApiException: When API error is encountered.
:return: True if successful.
"""
self._client.delete('users/%(user_id)s', {'user_id': user_id})
return True
def password(self, user_id, password):
"""Change the password for the given user.
:param user_id: The user ID.
:param password: Current password for the user.
:raise TenableIOApiException: When API error is encountered.
:return: True if successful.
"""
self._client.put('users/%(user_id)s/chpasswd', {'password': password}, {'user_id': user_id})
return True
def details(self, user_id):
"""Return details for the given user.
:param user_id: The user ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.User`
"""
response = self._client.get('users/%(user_id)s', {'user_id': user_id})
return User.from_json(response.text)
def | (self, user_id):
"""Generate the API Keys for the given user.
:param user_id: The user ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.UserKeys`
"""
response = self._client.put('users/%(user_id)s/keys', path_params={'user_id': user_id})
return UserKeys.from_json(response.text)
def enabled(self, user_id, enabled):
"""Enable or disable an user.
:param user_id: The user ID.
:param enabled: True to enable. False to Disable.
:raise TenableIOApiException: When API error is encountered.
:return: True if successful.
"""
self._client.put('users/%(user_id)s/enabled', {'enabled': enabled}, {'user_id': user_id})
return True
class UserCreateRequest(BaseRequest):
def __init__(
self,
username=None,
password=None,
permissions=None,
name=None,
email=None,
type=None
):
self.username = username
self.password = password
self.permissions = permissions
self.name = name
self.email = email
self.type = type
class UserEditRequest(BaseRequest):
def __init__(
self,
permissions=None,
name=None,
email=None
):
self.permissions = permissions
self.name = name
self.email = email
| keys |
non_string_guid.rs | use com::com_interface;
|
fn main() {} | #[com_interface(cc2d05c7-7d20-4ccb-ad75-1e7fb7c77254)]
pub trait Interface: IUnknown {}
|
classrtf_1_1_formatting.js | var classrtf_1_1_formatting =
[ | [ "operator!=", "classrtf_1_1_formatting.html#a871dd52e729506f0390d665ab3f5dd1e", null ],
[ "operator=", "classrtf_1_1_formatting.html#a41d6638afe3d7c3ae6e057f03a44ac17", null ],
[ "operator==", "classrtf_1_1_formatting.html#ab7e2d766a41c6f008d7417e74ca13068", null ],
[ "m_backgroundColor", "classrtf_1_1_formatting.html#a72fba585e402349dc22f903602932111", null ],
[ "m_font", "classrtf_1_1_formatting.html#a84ea5c5641c3b7e46cd2084fe46cd1f4", null ],
[ "m_fontColor", "classrtf_1_1_formatting.html#ad59c5977351d9240b2f52297e09fd7f8", null ],
[ "m_fontSize", "classrtf_1_1_formatting.html#a263ecd6be00cd636bbfffdc523d0bad1", null ],
[ "m_horizontalAlign", "classrtf_1_1_formatting.html#a35e2c51c16999c9a2f1d678c28baf9bf", null ],
[ "m_isBold", "classrtf_1_1_formatting.html#a7ed672340072a8e5af6073e4c8226bfc", null ],
[ "m_isItalic", "classrtf_1_1_formatting.html#a23850526d7093669cbecd9c607301339", null ],
[ "m_isOutlined", "classrtf_1_1_formatting.html#af406bbbef45d0082172968a7145c4252", null ],
[ "m_isStruckOut", "classrtf_1_1_formatting.html#aab925f1e4b6688d682fbeb5609375bd1", null ],
[ "m_isSub", "classrtf_1_1_formatting.html#ad2b5266e49fa3205ef8e4d1da64e58bd", null ],
[ "m_isSup", "classrtf_1_1_formatting.html#aa30dc62479fefcb95d9ae11add42cd00", null ],
[ "m_isUnderlined", "classrtf_1_1_formatting.html#a46f4f02c323e85f2092825c8b32aec27", null ],
[ "m_listLevel", "classrtf_1_1_formatting.html#ad08cba9978340e3de246ac6a04d071a9", null ],
[ "m_parInTable", "classrtf_1_1_formatting.html#a12dcb7ce7d64761d03782ab2e6a22017", null ],
[ "m_verticalAlign", "classrtf_1_1_formatting.html#a17a5883a2d8486353dfa7683d29c27a9", null ]
]; | [ "Formatting", "classrtf_1_1_formatting.html#a5f64baac7436e5c1e75a5525aa5c588a", null ], |
maposmatic.wsgi | # coding: utf-8
| # Copyright (C) 2009 David Mentré
# Copyright (C) 2009 Maxime Petazzoni
# Copyright (C) 2009 Thomas Petazzoni
# Copyright (C) 2009 Gaël Utard
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, sys
sys.path.append("/home/maposmatic/maposmatic")
sys.path.append("/home/maposmatic/ocitysmap")
os.environ["DJANGO_SETTINGS_MODULE"] = 'www.settings'
os.environ["MAPOSMATIC_LOG_FILE"] = "/home/maposmatic/maposmatic/logs/maposmatic-www.log"
os.environ["PGCONNECT_TIMEOUT"] = "1"
import django.core.handlers.wsgi
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application() | # maposmatic, the web front-end of the MapOSMatic city map generation system
# Copyright (C) 2009 David Decotigny
# Copyright (C) 2009 Frédéric Lehobey |
utils.rs | //! Helpers for writing generators
use clap::{App, Arg};
/// Gets all subcommands including child subcommands in the form of `("name", "bin_name")`.
///
/// Subcommand `rustup toolchain install` would be converted to
/// `("install", "rustup toolchain install")`.
pub fn all_subcommands(app: &App) -> Vec<(String, String)> {
let mut subcmds: Vec<_> = subcommands(app);
for sc_v in app.get_subcommands().map(all_subcommands) {
subcmds.extend(sc_v);
}
subcmds
}
/// Finds the subcommand [`clap::App`] from the given [`clap::App`] with the given path.
///
/// **NOTE:** `path` should not contain the root `bin_name`.
pub fn find_subcommand_with_path<'help, 'app>(
p: &'app App<'help>,
path: Vec<&str>,
) -> &'app App<'help> {
let mut app = p;
for sc in path {
app = app.find_subcommand(sc).unwrap();
}
app
}
/// Gets subcommands of [`clap::App`] in the form of `("name", "bin_name")`.
///
/// Subcommand `rustup toolchain install` would be converted to
/// `("install", "rustup toolchain install")`.
pub fn subcommands(p: &App) -> Vec<(String, String)> {
debug!("subcommands: name={}", p.get_name());
debug!("subcommands: Has subcommands...{:?}", p.has_subcommands());
let mut subcmds = vec![];
if !p.has_subcommands() {
return subcmds;
}
for sc in p.get_subcommands() {
let sc_bin_name = sc.get_bin_name().unwrap();
debug!(
"subcommands:iter: name={}, bin_name={}",
sc.get_name(),
sc_bin_name
);
subcmds.push((sc.get_name().to_string(), sc_bin_name.to_string()));
}
subcmds
}
/// Gets all the short options, their visible aliases and flags of a [`clap::App`].
/// Includes `h` and `V` depending on the [`clap::AppSettings`].
pub fn shorts_and_visible_aliases(p: &App) -> Vec<char> {
debug!("shorts: name={}", p.get_name());
p.get_arguments()
.filter_map(|a| {
if !a.is_positional() {
if a.get_visible_short_aliases().is_some() && a.get_short().is_some() {
let mut shorts_and_visible_aliases = a.get_visible_short_aliases().unwrap();
shorts_and_visible_aliases.push(a.get_short().unwrap());
Some(shorts_and_visible_aliases)
} else if a.get_visible_short_aliases().is_none() && a.get_short().is_some() {
Some(vec![a.get_short().unwrap()])
} else {
None
}
} else {
None
}
})
.flatten()
.collect()
}
/// Gets all the long options, their visible aliases and flags of a [`clap::App`].
/// Includes `help` and `version` depending on the [`clap::AppSettings`].
pub fn longs_and_visible_aliases(p: &App) -> Vec<String> {
debug!("longs: name={}", p.get_name());
p.get_arguments()
.filter_map(|a| {
if !a.is_positional() {
if a.get_visible_aliases().is_some() && a.get_long().is_some() {
let mut visible_aliases: Vec<_> = a
.get_visible_aliases()
.unwrap()
.into_iter()
.map(|s| s.to_string())
.collect();
visible_aliases.push(a.get_long().unwrap().to_string());
Some(visible_aliases)
} else if a.get_visible_aliases().is_none() && a.get_long().is_some() {
Some(vec![a.get_long().unwrap().to_string()])
} else {
None
}
} else {
None
}
})
.flatten()
.collect()
}
/// Gets all the flags of a [`clap::App`](App).
/// Includes `help` and `version` depending on the [`clap::AppSettings`].
pub fn flags<'help>(p: &App<'help>) -> Vec<Arg<'help>> {
debug!("flags: name={}", p.get_name());
p.get_arguments()
.filter(|a| !a.is_takes_value_set() && !a.is_positional())
.cloned()
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
use clap::Arg;
use pretty_assertions::assert_eq;
fn common_app() -> App<'static> {
App::new("myapp")
.subcommand(
App::new("test").subcommand(App::new("config")).arg(
Arg::new("file")
.short('f')
.short_alias('c')
.visible_short_alias('p')
.long("file")
.visible_alias("path"),
),
)
.subcommand(App::new("hello"))
.bin_name("my-app")
}
fn | () -> App<'static> {
let mut app = common_app();
app._build_all();
app
}
fn built_with_version() -> App<'static> {
let mut app = common_app().version("3.0");
app._build_all();
app
}
#[test]
fn test_subcommands() {
let app = built_with_version();
assert_eq!(
subcommands(&app),
vec![
("test".to_string(), "my-app test".to_string()),
("hello".to_string(), "my-app hello".to_string()),
("help".to_string(), "my-app help".to_string()),
]
);
}
#[test]
fn test_all_subcommands() {
let app = built_with_version();
assert_eq!(
all_subcommands(&app),
vec![
("test".to_string(), "my-app test".to_string()),
("hello".to_string(), "my-app hello".to_string()),
("help".to_string(), "my-app help".to_string()),
("config".to_string(), "my-app test config".to_string()),
("help".to_string(), "my-app test help".to_string()),
]
);
}
#[test]
fn test_find_subcommand_with_path() {
let app = built_with_version();
let sc_app = find_subcommand_with_path(&app, "test config".split(' ').collect());
assert_eq!(sc_app.get_name(), "config");
}
#[test]
fn test_flags() {
let app = built_with_version();
let actual_flags = flags(&app);
assert_eq!(actual_flags.len(), 2);
assert_eq!(actual_flags[0].get_long(), Some("help"));
assert_eq!(actual_flags[1].get_long(), Some("version"));
let sc_flags = flags(find_subcommand_with_path(&app, vec!["test"]));
assert_eq!(sc_flags.len(), 2);
assert_eq!(sc_flags[0].get_long(), Some("file"));
assert_eq!(sc_flags[1].get_long(), Some("help"));
}
#[test]
fn test_flag_subcommand() {
let app = built();
let actual_flags = flags(&app);
assert_eq!(actual_flags.len(), 1);
assert_eq!(actual_flags[0].get_long(), Some("help"));
let sc_flags = flags(find_subcommand_with_path(&app, vec!["test"]));
assert_eq!(sc_flags.len(), 2);
assert_eq!(sc_flags[0].get_long(), Some("file"));
assert_eq!(sc_flags[1].get_long(), Some("help"));
}
#[test]
fn test_shorts() {
let app = built_with_version();
let shorts = shorts_and_visible_aliases(&app);
assert_eq!(shorts.len(), 2);
assert_eq!(shorts[0], 'h');
assert_eq!(shorts[1], 'V');
let sc_shorts = shorts_and_visible_aliases(find_subcommand_with_path(&app, vec!["test"]));
assert_eq!(sc_shorts.len(), 3);
assert_eq!(sc_shorts[0], 'p');
assert_eq!(sc_shorts[1], 'f');
assert_eq!(sc_shorts[2], 'h');
}
#[test]
fn test_longs() {
let app = built_with_version();
let longs = longs_and_visible_aliases(&app);
assert_eq!(longs.len(), 2);
assert_eq!(longs[0], "help");
assert_eq!(longs[1], "version");
let sc_longs = longs_and_visible_aliases(find_subcommand_with_path(&app, vec!["test"]));
assert_eq!(sc_longs.len(), 3);
assert_eq!(sc_longs[0], "path");
assert_eq!(sc_longs[1], "file");
assert_eq!(sc_longs[2], "help");
}
}
| built |
blockchainStatus.js | "use strict"; | Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=blockchainStatus.js.map | |
casechecker.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Bot to find all pages on the wiki with mixed latin and cyrilic alphabets."""
#
# (C) Pywikibot team, 2006-2020
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import codecs
from itertools import chain, combinations
import os
import re
from string import ascii_letters
import sys
import pywikibot
from pywikibot import i18n
from pywikibot.data import api
from pywikibot.tools import first_lower, first_upper, formatter, PY2
from scripts.category import CategoryMoveRobot as CategoryMoveBot
if PY2:
from future_builtins import zip
class CaseChecker(object):
"""Case checker."""
# These words are always in one language, even though they could be typed
# in both
alwaysInLocal = ['СССР', 'Как', 'как']
alwaysInLatin = ['II', 'III']
localUpperLtr = 'ЁІЇЎАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯҐ'
localLowerLtr = 'ёіїўабвгдежзийклмнопрстуфхцчшщъыьэюяґ'
localLtr = localUpperLtr + localLowerLtr
localSuspects = 'АВЕКМНОРСТХІЁЇаеорсухіёї'
latinSuspects = 'ABEKMHOPCTXIËÏaeopcyxiëï'
# possibly try to fix one character mistypes in an alternative keyboard
# layout
localKeyboard = 'йцукенгшщзфывапролдячсмить'
latinKeyboard = 'qwertyuiopasdfghjklzxcvbnm'
romanNumChars = 'IVXLCDM'
# all letters that may be used as suffixes after roman numbers: "Iый"
romannumSuffixes = localLowerLtr
romanNumSfxPtrn = re.compile(
'[{}]+[{}]+$'.format(romanNumChars, localLowerLtr))
whitelists = {
'ru': 'ВП:КЛ/Проверенные',
}
lclClrFnt = '<font color=green>'
latClrFnt = '<font color=brown>'
suffixClr = '</font>'
colorFormatLocalColor = '{green}'
colorFormatLatinColor = '{red}'
colorFormatSuffix = '{default}'
wordBreaker = re.compile(r'[ _\-/\|#[\]():]')
stripChars = ' \t,'
titles = True
links = False
aplimit = None
apfrom = ''
title = None
replace = False
stopAfter = -1
wikilog = None
wikilogfile = 'wikilog.txt'
failedTitles = 'failedTitles.txt'
nosuggestions = 'nosuggestions.txt'
doFailed = False
titleList = None
autonomous = False
namespaces = []
filterredir = 'nonredirects'
def __init__(self):
"""Initializer with arg parsing."""
for arg in pywikibot.handle_args():
arg, sep, value = arg.partition(':')
if arg == '-from':
self.apfrom = value or pywikibot.input(
'Which page to start from: ')
elif arg == '-reqsize':
self.aplimit = int(value)
elif arg == '-links':
self.links = True
elif arg == '-linksonly':
self.links = True
self.titles = False
elif arg == '-replace':
self.replace = True
elif arg == '-redir':
self.filterredir = 'all'
elif arg == '-redironly':
self.filterredir = 'redirects'
elif arg == '-limit':
self.stopAfter = int(value)
elif arg in ('-autonomous', '-a'):
self.autonomous = True
elif arg == '-ns':
self.namespaces.append(int(value))
elif arg == '-wikilog':
self.wikilogfile = value
elif arg == '-failedlog':
self.failedTitles = value
elif arg == '-failed':
self.doFailed = True
else:
pywikibot.output('Unknown argument {}.'.format(arg))
pywikibot.showHelp()
sys.exit()
if self.namespaces == [] and not self.doFailed:
if self.apfrom == '':
# 0 should be after templates ns
self.namespaces = [14, 10, 12, 0]
else:
self.namespaces = [0]
if self.aplimit is None:
self.aplimit = 200 if self.links else 'max'
if not self.doFailed:
self.queryParams = {'action': 'query',
'generator': 'allpages',
'gaplimit': self.aplimit,
'gapfilterredir': self.filterredir}
else:
self.queryParams = {'action': 'query'}
if self.apfrom != '':
pywikibot.output('Argument "-from" is ignored with "-failed"')
propParam = 'info'
if self.links:
propParam += '|links|categories'
self.queryParams['pllimit'] = 'max'
self.queryParams['cllimit'] = 'max'
self.queryParams['prop'] = propParam
self.site = pywikibot.Site()
if len(self.localSuspects) != len(self.latinSuspects):
raise ValueError('Suspects must be the same size')
if len(self.localKeyboard) != len(self.latinKeyboard):
raise ValueError('Keyboard info must be the same size')
if not os.path.isabs(self.wikilogfile):
self.wikilogfile = pywikibot.config.datafilepath(self.wikilogfile)
self.wikilog = self.OpenLogFile(self.wikilogfile)
if not os.path.isabs(self.failedTitles):
self.failedTitles = pywikibot.config.datafilepath(
self.failedTitles)
if self.doFailed:
with codecs.open(self.failedTitles, 'r', 'utf-8') as f:
self.titleList = [self.Page(t) for t in f]
self.failedTitles += '.failed'
iterzip = zip(self.localSuspects, self.latinSuspects)
self.lclToLatDict = {
ord(local): latin for local, latin in iterzip}
self.latToLclDict = {
ord(latin): local for local, latin in iterzip}
if self.localKeyboard is not None:
iterzip = zip(self.localKeyboard, self.latinKeyboard)
self.lclToLatKeybDict = {
ord(local): latin for local, latin in iterzip}
self.latToLclKeybDict = {
ord(latin): local for local, latin in iterzip}
else:
self.lclToLatKeybDict = {}
self.latToLclKeybDict = {}
badPtrnStr = '([{ascii}][{local}]|[{local}][{ascii}])'.format(
ascii=ascii_letters, local=self.localLtr)
self.badWordPtrn = re.compile('[{ascii}{local}]*{bad}[{ascii}{local}]*'
.format(ascii=ascii_letters,
local=self.localLtr,
bad=badPtrnStr))
# Get whitelist
self.knownWords = set()
self.seenUnresolvedLinks = set()
# TODO: handle "continue"
if self.site.code in self.whitelists:
wlpage = self.whitelists[self.site.code]
pywikibot.output('Loading whitelist from {}'.format(wlpage))
wlparams = {
'action': 'query',
'prop': 'links',
'titles': wlpage,
'redirects': '',
'indexpageids': '',
'pllimit': 'max',
}
req = api.Request(site=self.site, parameters=wlparams)
data = req.submit()
if len(data['query']['pageids']) == 1:
pageid = data['query']['pageids'][0]
links = data['query']['pages'][pageid]['links']
allWords = [nn for n in links
for nn in self.FindBadWords(n['title'])]
self.knownWords = set(allWords)
else:
raise ValueError('The number of pageids is not 1')
pywikibot.output('Loaded whitelist with %i items'
% len(self.knownWords))
if len(self.knownWords) > 0:
pywikibot.log('Whitelist: '
+ ', '.join(self.MakeLink(i, False)
for i in self.knownWords))
else:
pywikibot.output('Whitelist is not known for language %s'
% self.site.code)
def RunQuery(self, params):
"""API query."""
while True:
# Get data
req = api.Request(**params)
data = req.submit()
# Process received data
yield data
# Clear any continuations first
if 'clcontinue' in params:
del params['clcontinue']
if 'plcontinue' in params:
del params['plcontinue']
if 'query-continue' not in data:
if 'gapcontinue' in params:
del params['gapcontinue']
break
qc = data['query-continue']
# First continue properties only, once done, continue with allpages
if 'categories' in qc or 'links' in qc:
if 'categories' in qc:
params.update(qc['categories'])
if 'links' in qc:
params.update(qc['links'])
elif 'allpages' in qc:
params.update(qc['allpages'])
else:
raise ValueError('Unexpected query-continue values: {}'
.format(qc))
continue
def Run(self):
"""Run the bot."""
try:
self.lastLetter = ''
if not self.doFailed:
for namespace in self.namespaces:
self.currentTitle = None
self.queryParams['gapnamespace'] = namespace
self.queryParams['gapfrom'] = self.apfrom
for data in self.RunQuery(self.queryParams):
self.ProcessDataBlock(data)
else:
self.currentTitle = None
batchSize = 10
for batchStart in range(0, len(self.titleList), batchSize):
self.queryParams['titles'] = self.titleList[
batchStart:batchStart + batchSize]
for data in self.RunQuery(self.queryParams):
self.ProcessDataBlock(data)
except Exception:
pywikibot.output('Exception at Title = %s, Next = %s'
% (self.currentTitle, self.apfrom))
try:
import traceback
pywikibot.output(traceback.format_exc())
except Exception:
pywikibot.output('Unable to print exception info')
raise
def ProcessDataBlock(self, data):
"""Process data block given by RunQuery()."""
if 'query' not in data or 'pages' not in data['query']:
| firstItem = True
for pageID, page in data['query']['pages'].items():
printed = False
title = page['title']
self.currentTitle = title
if 'missing' in page:
continue
if firstItem:
if self.lastLetter != title[0]:
pywikibot.ui.output('Processing %s\n' % title)
self.lastLetter = title[0]
firstItem = False
if self.titles:
err = self.ProcessTitle(title)
if err:
changed = False
if self.replace:
if len(err[1]) == 1:
newTitle = err[1][0]
editSummary = i18n.twtranslate(
self.site, 'casechecker-rename')
dst = self.Page(newTitle)
if 'redirect' in page:
src = self.Page(title)
redir = src.getRedirectTarget()
redirTitle = redir.title(as_link=True,
textlink=True)
if not dst.exists():
src.move(newTitle, editSummary,
movesubpages=True)
changed = True
replErrors = False
for p in src.getReferences(
follow_redirects=False):
if p.namespace() == 2:
continue
oldText = p.text
newText = self.ReplaceLink(oldText, title,
newTitle)
if not self.PutNewPage(
p, newText, [
self.MakeMoveSummary(title,
newTitle)]):
replErrors = True
if not replErrors:
editSummary = i18n.twtranslate(
self.site,
'casechecker-delete-summary')
newText = i18n.twtranslate(
self.site,
'casechecker-delete-reason',
redirTitle)
if newText:
src.text = '{{delete}}\n\n' + newText
src.save(editSummary, minor=False)
changed = True
elif not dst.exists():
src = self.Page(title)
if page['ns'] == 14:
dst = self.Page(newTitle)
bot = CategoryMoveBot(
src.title(with_ns=False),
dst.title(with_ns=False),
self.autonomous,
editSummary + ' '
+ self.MakeMoveSummary(title,
newTitle),
True)
bot.run()
else:
src.move(newTitle, editSummary,
movesubpages=True)
changed = True
if not changed:
if len(err[1]) > 0:
self.AppendLineToLog(self.failedTitles, title)
else:
self.AddNoSuggestionTitle(title)
self.WikiLog('* ' + err[0])
printed = True
if self.links:
allLinks = None
if 'links' in page:
allLinks = page['links']
if 'categories' in page:
if allLinks:
allLinks = allLinks + page['categories']
else:
allLinks = page['categories']
if allLinks:
pageObj = None
pageTxt = None
msg = []
foundSuggestions = False
for link in allLinks:
ltxt = link['title']
err = self.ProcessTitle(ltxt)
if err:
if len(err[1]) > 0:
foundSuggestions = True
elif self.AddNoSuggestionTitle(ltxt):
continue
newTitle = None
if self.replace:
newTitle = self.PickTarget(title, ltxt, err[1])
if newTitle:
if pageObj is None:
pageObj = self.Page(title)
pageTxt = pageObj.get()
msg.append(self.MakeMoveSummary(ltxt,
newTitle))
pageTxt = self.ReplaceLink(pageTxt, ltxt,
newTitle)
if not newTitle:
if not printed:
self.WikiLog('* {}: link to {}'
.format(self.MakeLink(title,
False),
err[0]))
printed = True
else:
self.WikiLog('** link to {}'
.format(err[0]))
if pageObj is not None:
if self.PutNewPage(pageObj, pageTxt, msg):
# done, no need to log anything
foundSuggestions = False
if foundSuggestions:
self.AppendLineToLog(self.failedTitles, title)
if self.stopAfter > 0:
self.stopAfter -= 1
if self.stopAfter == 0:
raise ValueError('Stopping because we are done')
def WikiLog(self, text):
"""Write log."""
pywikibot.output(text)
self.wikilog.write(text + '\n')
self.wikilog.flush()
def FindBadWords(self, title):
"""Retrieve bad words."""
for m in self.badWordPtrn.finditer(title):
yield title[m.span()[0]:m.span()[1]]
def ProcessTitle(self, title):
"""Process title."""
badWords = list(self.FindBadWords(title))
if len(badWords) > 0:
# Allow known words, allow any roman numerals with local suffixes
badWords = {i for i in badWords
if i not in self.knownWords
and self.romanNumSfxPtrn.match(i) is not None}
if len(badWords) == 0 or self.Page(title).is_filepage():
return
count = 0
ambigBadWords = set()
ambigBadWordsCount = 0
mapLcl = {}
mapLat = {}
for badWord in badWords:
# See if it would make sense to treat the whole word as either
# cyrilic or latin
mightBeLat = mightBeLcl = True
for letter in badWord:
if letter in self.localLtr:
if mightBeLat and letter not in self.localSuspects:
mightBeLat = False
else:
if mightBeLcl and letter not in self.latinSuspects:
mightBeLcl = False
if letter not in ascii_letters:
raise ValueError('Assert failed')
# Some words are well known and frequently mixed-typed
if mightBeLcl and mightBeLat:
if badWord in self.alwaysInLocal:
mightBeLat = False
elif badWord in self.alwaysInLatin:
mightBeLcl = False
if mightBeLcl:
mapLcl[badWord] = badWord.translate(self.latToLclDict)
if mightBeLat:
mapLat[badWord] = badWord.translate(self.lclToLatDict)
if mightBeLcl and mightBeLat:
ambigBadWords.add(badWord)
# Cannot do len(ambigBadWords) because they might be duplicates
ambigBadWordsCount += 1
if not mightBeLcl and not mightBeLat:
# try to match one of the knownWords
bwLen = len(badWord)
kw = [w for w in self.knownWords if len(w) == bwLen]
for p in range(bwLen):
if len(kw) == 0:
break
c = badWord[p]
co = ord(c)
if co in self.latToLclDict:
c2 = self.latToLclDict[co]
elif co in self.lclToLatDict:
c2 = self.lclToLatDict[co]
else:
c2 = None
kw = [w for w in kw if p < len(w)
and (w[p] == c or (c2 is not None and w[p] == c2))]
if len(kw) > 1:
pywikibot.output("Word '{}' could be treated as more than "
'one known words'.format(badWord))
elif len(kw) == 1:
mapLcl[badWord] = kw[0]
count += 1
infoText = self.MakeLink(title)
possibleAlternatives = []
if len(mapLcl) + len(mapLat) - ambigBadWordsCount < count:
# We cannot auto-translate - offer a list of suggested words
suggestions = list(mapLcl.values()) + list(mapLat.values())
if len(suggestions) > 0:
infoText += ', word suggestions: ' + ', '.join(
self.ColorCodeWord(t) for t in suggestions)
else:
infoText += ', no suggestions'
else:
# Replace all unambiguous bad words
for k, v in dict(chain(mapLat.items(), mapLcl.items())).items():
if k not in ambigBadWords:
title = title.replace(k, v)
if len(ambigBadWords) == 0:
# There are no ambiguity, we can safelly convert
possibleAlternatives.append(title)
infoText += ', convert to ' + self.MakeLink(title)
else:
# Try to pick 0, 1, 2, ..., len(ambiguous words) unique
# combinations from the bad words list, and convert just the
# picked words to cyrilic, whereas making all other words as
# latin character.
for itemCntToPick in range(len(ambigBadWords) + 1):
title2 = title
for uc in combinations(list(ambigBadWords), itemCntToPick):
wordsToLat = ambigBadWords.copy()
for bw in uc:
title2 = title2.replace(bw, mapLcl[bw])
wordsToLat.remove(bw)
for bw in wordsToLat:
title2 = title2.replace(bw, mapLat[bw])
possibleAlternatives.append(title2)
if len(possibleAlternatives) > 0:
infoText += ', can be converted to ' + ', '.join(
self.MakeLink(t) for t in possibleAlternatives)
else:
infoText += ', no suggestions'
return (infoText, possibleAlternatives)
def PickTarget(self, title, original, candidates):
"""Pick target from candidates."""
if len(candidates) == 0:
return
if len(candidates) == 1:
return candidates[0]
pagesDontExist = []
pagesRedir = {}
pagesExist = []
for newTitle in candidates:
dst = self.Page(newTitle)
if not dst.exists():
pagesDontExist.append(newTitle)
elif dst.isRedirectPage():
pagesRedir[newTitle] = dst.getRedirectTarget().title()
else:
pagesExist.append(newTitle)
if len(pagesExist) == 1:
return pagesExist[0]
elif len(pagesExist) == 0 and len(pagesRedir) > 0:
if len(pagesRedir) == 1:
return list(pagesRedir.keys())[0]
t = None
for v in pagesRedir.values():
if not t:
t = v # first item
elif t != v:
break
else:
# all redirects point to the same target
# pick the first one, doesn't matter what it is
return list(pagesRedir.keys())[0]
if not self.autonomous:
pywikibot.output('Could not auto-decide for page %s. Which link '
'should be chosen?' % self.MakeLink(title, False))
pywikibot.output('Original title: ', newline=False)
self.ColorCodeWord(original + '\n', True)
for count, t in enumerate(candidates, 1):
if t in pagesDontExist:
msg = 'missing'
elif t in pagesRedir:
msg = 'Redirect to ' + pagesRedir[t]
else:
msg = 'page exists'
self.ColorCodeWord(' {}: {} ({})\n'.format(count, t, msg),
True)
answers = [('skip', 's')] + [(str(i), i) for i in range(1, count)]
choice = pywikibot.input_choice('Which link to choose?', answers)
if choice != 's':
return candidates[int(choice) - 1]
def ColorCodeWord(self, word, toScreen=False):
"""Colorize code word."""
if not toScreen:
return self._ColorCodeWordHtml(word)
else:
return self._ColorCodeWordScreen(word)
def _ColorCodeWordHtml(self, word):
res = '<b>'
lastIsCyr = word[0] in self.localLtr
if lastIsCyr:
res += self.lclClrFnt
else:
res += self.latClrFnt
for letter in word:
if letter in self.localLtr:
if not lastIsCyr:
res += self.suffixClr + self.lclClrFnt
lastIsCyr = True
elif letter in ascii_letters:
if lastIsCyr:
res += self.suffixClr + self.latClrFnt
lastIsCyr = False
res += letter
return res + self.suffixClr + '</b>'
def _ColorCodeWordScreen(self, word):
res = ''
lastIsCyr = word[0] in self.localLtr
if lastIsCyr:
res += self.colorFormatLocalColor
else:
res += self.colorFormatLatinColor
for letter in word:
if letter in self.localLtr:
if not lastIsCyr:
res += self.colorFormatLocalColor
lastIsCyr = True
elif letter in self.latLtr:
if lastIsCyr:
res += self.colorFormatLatinColor
lastIsCyr = False
res += letter
return formatter.color_format(res + self.colorFormatSuffix)
def AddNoSuggestionTitle(self, title):
"""Add backlinks to log."""
if title in self.seenUnresolvedLinks:
return True
self.seenUnresolvedLinks.add(title)
params = {
'action': 'query',
'list': 'backlinks',
'bltitle': title,
'bllimit': '50',
}
req = api.Request(site=self.site, parameters=params)
data = req.submit()
cl = 0
redirs = 0
if 'backlinks' in data['query']:
bl = data['query']['backlinks']
cl = len(bl)
redirs = len([i for i in bl if 'redirect' in i])
if cl > 0 and 'query-continue' in data:
count = '50+'
else:
count = str(cl if cl > 0 else 'no backlinks')
self.AppendLineToLog(self.nosuggestions, '* {} ({}{})'
.format(self.MakeLink(title), count,
', {} redirects'
.format(redirs if redirs > 0 else '')))
return False
def PutNewPage(self, pageObj, pageTxt, msg):
"""Save new page."""
title = pageObj.title(as_link=True, textlink=True)
coloredMsg = ', '.join(self.ColorCodeWord(m) for m in msg)
if pageObj.text == pageTxt:
self.WikiLog('* Error: Text replacement failed in %s (%s)'
% (self.MakeLink(title, False), coloredMsg))
else:
pywikibot.output('Case Replacements: {}'.format(', '.join(msg)))
pageObj.text = pageTxt
try:
pageObj.save(
'{}: {}'.format(
i18n.twtranslate(
self.site, 'casechecker-replacement-summary'),
self.site.mediawiki_message(
'comma-separator').join(msg)))
return True
except KeyboardInterrupt:
raise
except (pywikibot.LockedPage, pywikibot.PageNotSaved):
self.WikiLog('* Error: Could not save updated page %s (%s)'
% (self.MakeLink(title, False), coloredMsg))
return False
def MakeMoveSummary(self, fromTitle, toTitle):
"""Move summary from i18n."""
return i18n.twtranslate(self.site, 'casechecker-replacement-linklist',
{'source': fromTitle, 'target': toTitle})
def MakeLink(self, title, colorcode=True):
"""Create a colored link string."""
prf = '' if self.Page(title).namespace() == 0 else ':'
cc = '|««« {} »»»'.format(
self.ColorCodeWord(title) if colorcode else '')
return '[[%s%s%s]]' % (prf, title, cc)
def OpenLogFile(self, filename):
"""Open logfile."""
try:
return codecs.open(filename, 'a', 'utf-8')
except IOError:
return codecs.open(filename, 'w', 'utf-8')
def AppendLineToLog(self, filename, text):
"""Write text to logfile."""
with self.OpenLogFile(filename) as f:
f.write(text + '\n')
def Page(self, title):
"""Create Page object from title."""
return pywikibot.Page(self.site, title)
def ReplaceLink(self, text, oldtxt, newtxt):
"""Replace links."""
frmParts = [s.strip(self.stripChars)
for s in self.wordBreaker.split(oldtxt)]
toParts = [s.strip(self.stripChars)
for s in self.wordBreaker.split(newtxt)]
if len(frmParts) != len(toParts):
raise ValueError('Splitting parts do not match counts')
for i, part in enumerate(frmParts):
if part != len(toParts[i]):
raise ValueError('Splitting parts do not match word length')
if part:
text = text.replace(first_lower(part), first_lower(toParts[i]))
text = text.replace(first_upper(part), first_upper(toParts[i]))
return text
if __name__ == '__main__':
bot = CaseChecker()
bot.Run()
| return
|
pointers-and-references-5.go | func | () *int {
i := 3
return &i // valid. no worry, no crash.
}
//\Pointers-and-references\pointers-and-references-5.go
| three |
bench_get_priority.rs | extern crate criterion;
use criterion::{black_box, criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
use keyed_priority_queue::{KeyedBinaryPriorityQueue, KeyedWeakPriorityQueue};
mod generators;
use crate::generators::{choose_some, gen_random_usizes, get_random_strings};
/* BECNHES */
pub fn | (c: &mut Criterion) {
let base_keys = gen_random_usizes(500_000, 0);
let base_values = gen_random_usizes(500_000, 7);
let mut group = c.benchmark_group("binary_get_priority_usize");
for &size in &[10_000, 500_000] {
assert!(base_keys.len() >= size);
let test_keys: Vec<_> = choose_some(&base_keys[..size], 500, 500);
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
let base_queue: KeyedBinaryPriorityQueue<_, _> = base_keys[..size]
.iter()
.cloned()
.zip(base_values[..size].iter().cloned())
.collect();
b.iter_batched(
|| base_queue.clone(),
|queue| {
for k in test_keys.iter() {
black_box(queue.get_priority(k));
}
queue
},
BatchSize::LargeInput,
);
});
}
group.finish();
let base_keys = gen_random_usizes(500_000, 0);
let base_values = gen_random_usizes(500_000, 7);
let mut group = c.benchmark_group("weak_get_priority_usize");
for &size in &[10_000, 500_000] {
assert!(base_keys.len() >= size);
let test_keys: Vec<_> = choose_some(&base_keys[..size], 500, 500);
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
let base_queue: KeyedWeakPriorityQueue<_, _> = base_keys[..size]
.iter()
.cloned()
.zip(base_values[..size].iter().cloned())
.collect();
b.iter_batched(
|| base_queue.clone(),
|queue| {
for k in test_keys.iter() {
black_box(queue.get_priority(k));
}
queue
},
BatchSize::LargeInput,
);
});
}
group.finish();
let mut group = c.benchmark_group("binary_get_priority_string");
let base_keys = get_random_strings(50_000, 0);
let base_values = get_random_strings(50_000, 7);
for &size in &[1_000, 50_000] {
assert!(base_keys.len() >= size);
let test_keys: Vec<_> = choose_some(&base_keys[..size], 500, 500);
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
let base_queue: KeyedBinaryPriorityQueue<_, _> = base_keys[..size]
.iter()
.cloned()
.zip(base_values[..size].iter().cloned())
.collect();
b.iter_batched(
|| base_queue.clone(),
|queue| {
for k in test_keys.iter() {
black_box(queue.get_priority(k));
}
queue
},
BatchSize::LargeInput,
);
});
}
group.finish();
let mut group = c.benchmark_group("weak_get_priority_string");
let base_keys = get_random_strings(50_000, 0);
let base_values = get_random_strings(50_000, 7);
for &size in &[1_000, 50_000] {
assert!(base_keys.len() >= size);
let test_keys: Vec<_> = choose_some(&base_keys[..size], 500, 500);
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
let base_queue: KeyedWeakPriorityQueue<_, _> = base_keys[..size]
.iter()
.cloned()
.zip(base_values[..size].iter().cloned())
.collect();
b.iter_batched(
|| base_queue.clone(),
|queue| {
for k in test_keys.iter() {
black_box(queue.get_priority(k));
}
queue
},
BatchSize::LargeInput,
);
});
}
group.finish();
}
criterion_group!(benches, bench_get_priority);
criterion_main!(benches);
| bench_get_priority |
packets_mm_tlm.py | from scapy.all import *
from ccsds_base import CCSDSPacket
class | (Packet):
"""Housekeeping Packet
app = MM
command = HK_TLM_PKT
msg_id = MM_HK_TLM_MID = 0x0887 = 0x0800 + 0x087
"""
name = "MM_HK_TLM_PKT_TlmPkt"
fields_desc = [
# APPEND_ITEM CMD_VALID_COUNT 8 UINT "Valid Command Count"
ByteField("CMD_VALID_COUNT", 0),
# APPEND_ITEM CMD_ERROR_COUNT 8 UINT "Error Command Count"
ByteField("CMD_ERROR_COUNT", 0),
# APPEND_ITEM LAST_ACTION 8 UINT "Last command action executed"
ByteField("LAST_ACTION", 0),
# STATE NONE 0
# STATE MEM_PEEK 1
# STATE MEM_POKE 2
# STATE LD_FR_FILE 3
# STATE LD_NO_INT 4
# STATE DMP_TO_FILE 5
# STATE DMP_TO_EVENT 6
# STATE MEM_FILL 7
# STATE SYM_LOOKUP 8
# STATE SYM_TO_FILE 9
# STATE EE_WR_ENA 10
# STATE EE_WR_DIS 11
# STATE NOOP 12
# STATE RESET 13
# APPEND_ITEM MEM_TYPE 8 UINT "Memory type for last command"
ByteField("MEM_TYPE", 0),
# APPEND_ITEM ADDRESS 32 UINT "Fully resolved address used for last command"
IntField("ADDRESS", 0),
# APPEND_ITEM FILL_PATTERN 32 UINT "Fill pattern used if memory fill command was issued"
IntField("FILL_PATTERN", 0),
# APPEND_ITEM BYTES_PROCESSED 32 UINT "Bytes processed for last command"
IntField("BYTES_PROCESSED", 0),
# APPEND_ITEM FILENAME 512 STRING "Name of the data file used for last command, where applicable"
StrFixedLenField("FILENAME", b"", 64),
]
bind_layers(CCSDSPacket, MM_HK_TLM_PKT_TlmPkt, pkttype=0, apid=135)
| MM_HK_TLM_PKT_TlmPkt |
redis_db.rs | use redis::{
Client,
Connection,
RedisError,
};
pub async fn db_connection() -> Result<Connection, RedisError> | {
let client = Client::open("redis://localhost:9086/").unwrap();
let conn = client.get_connection().unwrap();
Ok(conn)
} |
|
lemmatizer.py | # coding: utf8
from __future__ import unicode_literals
from collections import OrderedDict
from .symbols import NOUN, VERB, ADJ, PUNCT, PROPN
from .errors import Errors
from .lookups import Lookups
from .parts_of_speech import NAMES as UPOS_NAMES
class Lemmatizer(object):
"""
The Lemmatizer supports simple part-of-speech-sensitive suffix rules and
lookup tables.
DOCS: https://spacy.io/api/lemmatizer
"""
@classmethod
def load(cls, *args, **kwargs):
raise NotImplementedError(Errors.E172)
def __init__(self, lookups, *args, is_base_form=None, **kwargs):
"""Initialize a Lemmatizer.
lookups (Lookups): The lookups object containing the (optional) tables
"lemma_rules", "lemma_index", "lemma_exc" and "lemma_lookup".
RETURNS (Lemmatizer): The newly constructed object.
"""
if args or kwargs or not isinstance(lookups, Lookups):
raise ValueError(Errors.E173)
self.lookups = lookups
self.is_base_form = is_base_form
def __call__(self, string, univ_pos, morphology=None):
"""Lemmatize a string.
string (unicode): The string to lemmatize, e.g. the token text.
univ_pos (unicode / int): The token's universal part-of-speech tag.
morphology (dict): The token's morphological features following the
Universal Dependencies scheme.
RETURNS (list): The available lemmas for the string.
"""
lookup_table = self.lookups.get_table("lemma_lookup", {})
if "lemma_rules" not in self.lookups:
return [lookup_table.get(string, string)]
if isinstance(univ_pos, int):
univ_pos = UPOS_NAMES.get(univ_pos, "X")
univ_pos = univ_pos.lower()
if univ_pos in ("", "eol", "space"):
return [string.lower()]
# See Issue #435 for example of where this logic is requied.
if callable(self.is_base_form) and self.is_base_form(univ_pos, morphology):
return [string.lower()]
index_table = self.lookups.get_table("lemma_index", {})
exc_table = self.lookups.get_table("lemma_exc", {})
rules_table = self.lookups.get_table("lemma_rules", {})
if not any((index_table.get(univ_pos), exc_table.get(univ_pos), rules_table.get(univ_pos))):
if univ_pos == "propn":
return [string]
else:
return [string.lower()]
lemmas = self.lemmatize(
string,
index_table.get(univ_pos, {}),
exc_table.get(univ_pos, {}),
rules_table.get(univ_pos, []),
)
return lemmas
def noun(self, string, morphology=None):
return self(string, "noun", morphology)
def verb(self, string, morphology=None):
return self(string, "verb", morphology)
def adj(self, string, morphology=None):
return self(string, "adj", morphology)
def det(self, string, morphology=None):
return self(string, "det", morphology)
def pron(self, string, morphology=None):
return self(string, "pron", morphology)
def adp(self, string, morphology=None):
return self(string, "adp", morphology)
def num(self, string, morphology=None):
return self(string, "num", morphology)
def punct(self, string, morphology=None):
|
def lookup(self, string, orth=None):
"""Look up a lemma in the table, if available. If no lemma is found,
the original string is returned.
string (unicode): The original string.
orth (int): Optional hash of the string to look up. If not set, the
string will be used and hashed.
RETURNS (unicode): The lemma if the string was found, otherwise the
original string.
"""
lookup_table = self.lookups.get_table("lemma_lookup", {})
key = orth if orth is not None else string
if key in lookup_table:
return lookup_table[key]
return string
def lemmatize(self, string, index, exceptions, rules):
orig = string
string = string.lower()
forms = []
oov_forms = []
for old, new in rules:
if string.endswith(old):
form = string[: len(string) - len(old)] + new
if not form:
pass
elif form in index or not form.isalpha():
forms.append(form)
else:
oov_forms.append(form)
# Remove duplicates but preserve the ordering of applied "rules"
forms = list(OrderedDict.fromkeys(forms))
# Put exceptions at the front of the list, so they get priority.
# This is a dodgy heuristic -- but it's the best we can do until we get
# frequencies on this. We can at least prune out problematic exceptions,
# if they shadow more frequent analyses.
for form in exceptions.get(string, []):
if form not in forms:
forms.insert(0, form)
if not forms:
forms.extend(oov_forms)
if not forms:
forms.append(orig)
return forms
| return self(string, "punct", morphology) |
calc.rs | use rng::rng_maker;
use rand::Rng;
use rayon::prelude::*;
fn | (x: f64, y: f64) -> bool {
(x * x + y * y).sqrt() <= 1.0
}
pub fn calc(iterations: u64) -> f64 {
let hits = (1..iterations)
.into_par_iter()
.map_with(rng_maker(), |rng, _| {
if in_circle(rng.gen(), rng.gen()) {
1
} else {
0
}
})
.sum::<u64>();
let pi = 4.0 * (hits as f64 / iterations as f64);
pi
}
| in_circle |
default.ts | import {action, observable} from 'mobx'
import {RecordStorage} from './index'
import {Injectable} from 'type-injector'
export class RecordContainer<T> {
@observable
_: T
constructor(value?: T) {
if (value) {
this._ = value
}
}
}
export default class | <T> implements RecordStorage<T>, Injectable {
@observable
protected map: Map<keyof T, RecordContainer<any>> = new Map()
@action
get<K extends keyof T>(key: K): RecordContainer<T[K]> {
return this.getContainer(key)
}
@action
getWithDefault<K extends keyof T>(key: K, defaultValue: T[K]): RecordContainer<T[K]> {
const container = this.getContainer(key)
const maybeValue = container._
if (!maybeValue) {
container._ = defaultValue
}
return container
}
@action
set<K extends keyof T>(key: K, value: T[K]): this {
const container = this.getContainer(key)
container._ = value
return this
}
@action
clear(): void {
this.map = new Map()
}
@action
getContainer<K extends keyof T>(key: K): RecordContainer<T[K]> {
const maybeRecord = this.map.get(key)
if (!maybeRecord) {
this.map.set(key, new RecordContainer())
}
const record = this.map.get(key)
if (!record) {
throw new Error(`there is no record for ${key}`)
}
return record
}
postConstructor() {
}
awakeAfterInjection() {
}
}
| DefaultRecordStorage |
node_bench_test.go | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rafttest
import (
"context"
"testing"
"time"
"github.com/tudyzhb/etcd/raft/v3"
)
func BenchmarkProposal3Nodes(b *testing.B) {
peers := []raft.Peer{{ID: 1, Context: nil}, {ID: 2, Context: nil}, {ID: 3, Context: nil}}
nt := newRaftNetwork(1, 2, 3)
nodes := make([]*node, 0)
for i := 1; i <= 3; i++ {
n := startNode(uint64(i), peers, nt.nodeNetwork(uint64(i)))
nodes = append(nodes, n)
}
// get ready and warm up
time.Sleep(50 * time.Millisecond)
b.ResetTimer()
for i := 0; i < b.N; i++ {
nodes[0].Propose(context.TODO(), []byte("somedata"))
} |
for _, n := range nodes {
if n.state.Commit != uint64(b.N+4) {
continue
}
}
b.StopTimer()
for _, n := range nodes {
n.stop()
}
} | |
main.go | package main
import (
"fmt"
"os"
"os/user"
"github.com/tophatsteve/monkey/repl"
)
func main() {
user, err := user.Current()
if err != nil {
panic(err) | repl.Start(os.Stdin, os.Stdout)
} | }
fmt.Printf("Hello %s! This is the Monkey programming language!\n", user.Username)
fmt.Printf("Feel free to type in commands\n") |
general.go | // Copyright 2017 Factom Foundation
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package msgsupport
//https://docs.google.com/spreadsheets/d/1wy9JDEqyM2uRYhZ6Y1e9C3hIDm2prIILebztQ5BGlr8/edit#gid=1997221100
import (
"fmt"
"github.com/DCNT-developer/dcnt/common/constants"
"github.com/DCNT-developer/dcnt/common/interfaces"
"github.com/DCNT-developer/dcnt/common/messages"
"github.com/DCNT-developer/dcnt/common/messages/electionMsgs"
)
func UnmarshalMessage(data []byte) (interfaces.IMsg, error) {
_, msg, err := UnmarshalMessageData(data)
return msg, err
}
func CreateMsg(messageType byte) interfaces.IMsg {
switch messageType {
case constants.EOM_MSG:
return new(messages.EOM)
case constants.ACK_MSG:
return new(messages.Ack)
case constants.COMMIT_CHAIN_MSG:
return new(messages.CommitChainMsg)
case constants.COMMIT_ENTRY_MSG:
return new(messages.CommitEntryMsg)
case constants.DIRECTORY_BLOCK_SIGNATURE_MSG:
return new(messages.DirectoryBlockSignature)
case constants.FACTOID_TRANSACTION_MSG:
return new(messages.FactoidTransaction)
case constants.HEARTBEAT_MSG:
return new(messages.Heartbeat)
case constants.MISSING_MSG:
return new(messages.MissingMsg)
case constants.MISSING_MSG_RESPONSE:
return new(messages.MissingMsgResponse)
case constants.MISSING_DATA:
return new(messages.MissingData)
case constants.DATA_RESPONSE:
return new(messages.DataResponse)
case constants.REVEAL_ENTRY_MSG:
return new(messages.RevealEntryMsg)
case constants.REQUEST_BLOCK_MSG:
return new(messages.RequestBlock)
case constants.DBSTATE_MISSING_MSG:
return new(messages.DBStateMissing)
case constants.DBSTATE_MSG:
return new(messages.DBStateMsg)
case constants.ADDSERVER_MSG:
return new(messages.AddServerMsg)
case constants.CHANGESERVER_KEY_MSG:
return new(messages.ChangeServerKeyMsg)
case constants.REMOVESERVER_MSG:
return new(messages.RemoveServerMsg)
case constants.BOUNCE_MSG:
return new(messages.Bounce)
case constants.BOUNCEREPLY_MSG:
return new(messages.BounceReply)
case constants.SYNC_MSG:
return new(electionMsgs.SyncMsg)
case constants.VOLUNTEERAUDIT:
msg := new(electionMsgs.FedVoteVolunteerMsg)
msg.SetFullBroadcast(true)
return msg
case constants.VOLUNTEERPROPOSAL:
msg := new(electionMsgs.FedVoteProposalMsg)
msg.SetFullBroadcast(true)
return msg
case constants.VOLUNTEERLEVELVOTE:
msg := new(electionMsgs.FedVoteLevelMsg)
msg.SetFullBroadcast(true)
return msg
default:
return nil
}
}
func UnmarshalMessageData(data []byte) (newdata []byte, msg interfaces.IMsg, err error) |
// GeneralFactory is used to get around package import loops.
type GeneralFactory struct {
}
var _ interfaces.IGeneralMsg = (*GeneralFactory)(nil)
func (GeneralFactory) CreateMsg(messageType byte) interfaces.IMsg {
return CreateMsg(messageType)
}
func (GeneralFactory) MessageName(Type byte) string {
return constants.MessageName(Type)
}
func (GeneralFactory) UnmarshalMessageData(data []byte) (newdata []byte, msg interfaces.IMsg, err error) {
return UnmarshalMessageData(data)
}
func (GeneralFactory) UnmarshalMessage(data []byte) (interfaces.IMsg, error) {
return UnmarshalMessage(data)
}
| {
if data == nil {
return nil, nil, fmt.Errorf("No data provided")
}
if len(data) == 0 {
return nil, nil, fmt.Errorf("No data provided")
}
messageType := data[0]
msg = CreateMsg(messageType)
if msg == nil {
fmt.Printf("***** Marshal Failed to create message for %d %s", messageType, constants.MessageName(messageType))
return data, nil, fmt.Errorf("Unknown message type %d %x", messageType, data[0])
}
newdata, err = msg.UnmarshalBinaryData(data[:])
if err != nil {
fmt.Printf("***** Marshal Failed to unmarshal %d %s %x\n",
messageType,
constants.MessageName(messageType),
data)
return data, nil, err
}
return newdata, msg, nil
} |
Data Visualization.py | """
Author : nkalyan🤠
implementing Python Scripts on reading and returning the name no of mails that sent each day in week
and plot/display them in bar graph
I wrote code In counting to count the number of emails sent by each distinct user. That code may be helpful for this assignment.
"""
import matplotlib.pyplot as plt
from os import getcwd
def file_path():
"""Method that ask the users file name and returns it"""
file_name = input("Enter the file name:")
return file_name
def pop_values(filename):
"""Method the reads file and returning value"""
file_name = filename
try: # look for exception
fp = open(file_name, "r")
except FileNotFoundError: # if found exception display error
print("File Does not exist, please check your file name")
exit()
else: # if no exceptions thrown then performs this block
with fp:
for line in fp:
line = line.strip("\n")
offset = line.find("From")
offset1 = line.find("@")
line = line[-24:]
offset3 = line.find("@")
if offset == 0 and offset1 > 0 and offset3 == -1:
lin | def main():
"""Calls the all functions that necessary to get the output"""
name = file_path() # calls the file path method
dictionary = {'Sun': 0, 'Mon': 0, 'Tue': 0, 'Wed': 0, 'Thu': 0, 'Fri': 0, 'Sat': 0} # store the day val in dict
value = pop_values(name)
count = 0
for i in value:
if i in dictionary:
dictionary[i] += 1
count += len(i)
val = dictionary.values()
keys = dictionary.keys()
zp = zip(dictionary.keys(), dictionary.values())
for item in val:
i = val
j = keys
plt.bar(j, i, align='center', alpha=0.5)
plt.ylabel('Number of messages')
plt.title('Emails per day')
plt.show() # method that shows the bar graph of our code result
if __name__ == '__main__':
"""calls the main method"""
main()
| e = line[:-21]
yield line
|
packed_func.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::{
convert::TryFrom,
ffi::{CStr, CString},
os::raw::{c_char, c_void},
};
use crate::{errors::ValueDowncastError, ffi::*};
pub use crate::ffi::TVMValue;
pub trait PackedFunc:
Fn(&[ArgValue]) -> Result<RetValue, crate::errors::FuncCallError> + Send + Sync
{
}
impl<T> PackedFunc for T where
T: Fn(&[ArgValue]) -> Result<RetValue, crate::errors::FuncCallError> + Send + Sync
{
}
/// Calls a packed function and returns a `RetValue`.
///
/// # Example
///
/// `call_packed!(my_tvm_func, &mut arg1, &mut arg2)`
#[macro_export]
macro_rules! call_packed {
($fn:expr, $($args:expr),+) => {
$fn(&[$($args.into(),)+])
};
($fn:expr) => {
$fn(&Vec::new())
};
}
/// Constructs a derivative of a TVMPodValue.
macro_rules! TVMPODValue {
{
$(#[$m:meta])+
$name:ident $(<$a:lifetime>)? {
$($extra_variant:ident ( $variant_type:ty ) ),+ $(,)?
},
match $value:ident {
$($tvm_type:ident => { $from_tvm_type:expr })+
},
match &self {
$($self_type:ident ( $val:ident ) => { $from_self_type:expr })+
}
$(,)?
} => {
$(#[$m])+
#[derive(Clone, Debug)]
pub enum $name $(<$a>)? {
Int(i64),
UInt(i64),
Float(f64),
Null,
DataType(DLDataType),
String(*mut c_char),
Context(TVMContext),
Handle(*mut c_void),
ArrayHandle(TVMArrayHandle),
ObjectHandle(*mut c_void),
ModuleHandle(TVMModuleHandle),
FuncHandle(TVMFunctionHandle),
NDArrayHandle(*mut c_void),
$($extra_variant($variant_type)),+
}
impl $(<$a>)? $name $(<$a>)? {
pub fn from_tvm_value($value: TVMValue, type_code: u32) -> Self {
use $name::*;
#[allow(non_upper_case_globals)]
unsafe {
match type_code as _ {
DLDataTypeCode_kDLInt => Int($value.v_int64),
DLDataTypeCode_kDLUInt => UInt($value.v_int64),
DLDataTypeCode_kDLFloat => Float($value.v_float64),
TVMArgTypeCode_kTVMNullptr => Null,
TVMArgTypeCode_kTVMDataType => DataType($value.v_type),
TVMArgTypeCode_kTVMContext => Context($value.v_ctx),
TVMArgTypeCode_kTVMOpaqueHandle => Handle($value.v_handle),
TVMArgTypeCode_kTVMDLTensorHandle => ArrayHandle($value.v_handle as TVMArrayHandle),
TVMArgTypeCode_kTVMObjectHandle => ObjectHandle($value.v_handle),
TVMArgTypeCode_kTVMModuleHandle => ModuleHandle($value.v_handle),
TVMArgTypeCode_kTVMPackedFuncHandle => FuncHandle($value.v_handle),
TVMArgTypeCode_kTVMNDArrayHandle => NDArrayHandle($value.v_handle),
$( $tvm_type => { $from_tvm_type } ),+
_ => unimplemented!("{}", type_code),
}
}
}
pub fn to_tvm_value(&self) -> (TVMValue, TVMArgTypeCode) {
use $name::*;
match self {
Int(val) => (TVMValue { v_int64: *val }, DLDataTypeCode_kDLInt),
UInt(val) => (TVMValue { v_int64: *val as i64 }, DLDataTypeCode_kDLUInt),
Float(val) => (TVMValue { v_float64: *val }, DLDataTypeCode_kDLFloat),
Null => (TVMValue{ v_int64: 0 },TVMArgTypeCode_kTVMNullptr),
DataType(val) => (TVMValue { v_type: *val }, TVMArgTypeCode_kTVMDataType),
Context(val) => (TVMValue { v_ctx: val.clone() }, TVMArgTypeCode_kTVMContext),
String(val) => {
(
TVMValue { v_handle: *val as *mut c_void },
TVMArgTypeCode_kTVMStr,
)
}
Handle(val) => (TVMValue { v_handle: *val }, TVMArgTypeCode_kTVMOpaqueHandle),
ArrayHandle(val) => {
(
TVMValue { v_handle: *val as *const _ as *mut c_void },
TVMArgTypeCode_kTVMNDArrayHandle,
)
},
ObjectHandle(val) => (TVMValue { v_handle: *val }, TVMArgTypeCode_kTVMObjectHandle),
ModuleHandle(val) =>
(TVMValue { v_handle: *val }, TVMArgTypeCode_kTVMModuleHandle),
FuncHandle(val) => (
TVMValue { v_handle: *val },
TVMArgTypeCode_kTVMPackedFuncHandle
),
NDArrayHandle(val) =>
(TVMValue { v_handle: *val }, TVMArgTypeCode_kTVMNDArrayHandle),
$( $self_type($val) => { $from_self_type } ),+
}
}
}
}
}
TVMPODValue! {
/// A borrowed TVMPODValue. Can be constructed using `into()` but the preferred way
/// to obtain a `ArgValue` is automatically via `call_packed!`.
ArgValue<'a> {
Bytes(&'a TVMByteArray),
Str(&'a CStr),
},
match value {
TVMArgTypeCode_kTVMBytes => { Bytes(&*(value.v_handle as *const TVMByteArray)) }
TVMArgTypeCode_kTVMStr => { Str(CStr::from_ptr(value.v_handle as *const i8)) }
},
match &self {
Bytes(val) => {
(TVMValue { v_handle: *val as *const _ as *mut c_void }, TVMArgTypeCode_kTVMBytes)
}
Str(val) => { (TVMValue { v_handle: val.as_ptr() as *mut c_void }, TVMArgTypeCode_kTVMStr) }
}
}
TVMPODValue! {
/// An owned TVMPODValue. Can be converted from a variety of primitive and object types.
/// Can be downcasted using `try_from` if it contains the desired type.
///
/// # Example
///
/// ```
/// use std::convert::{TryFrom, TryInto};
/// use tvm_sys::RetValue;
///
/// let a = 42u32;
/// let b: u32 = tvm_sys::RetValue::from(a).try_into().unwrap();
///
/// let s = "hello, world!";
/// let t: RetValue = s.to_string().into();
/// assert_eq!(String::try_from(t).unwrap(), s);
/// ```
RetValue {
Bytes(TVMByteArray),
Str(&'static CStr),
},
match value {
TVMArgTypeCode_kTVMBytes => { Bytes(*(value.v_handle as *const TVMByteArray)) }
TVMArgTypeCode_kTVMStr => { Str(CStr::from_ptr(value.v_handle as *mut i8)) }
},
match &self {
Bytes(val) =>
{ (TVMValue { v_handle: val as *const _ as *mut c_void }, TVMArgTypeCode_kTVMBytes ) }
Str(val) =>
{ (TVMValue { v_str: val.as_ptr() }, TVMArgTypeCode_kTVMStr ) }
}
}
#[macro_export]
macro_rules! try_downcast {
($val:ident -> $into:ty, $( |$pat:pat| { $converter:expr } ),+ ) => {
match $val {
$( $pat => { Ok($converter) } )+
_ => Err($crate::errors::ValueDowncastError {
actual_type: format!("{:?}", $val),
expected_type: stringify!($into),
}),
}
};
}
/// Creates a conversion to a `ArgValue` for a primitive type and DLDataTypeCode.
macro_rules! impl_pod_value {
($variant:ident, $inner_ty:ty, [ $( $type:ty ),+ ] ) => {
$(
impl<'a> From<$type> for ArgValue<'a> {
fn from(val: $type) -> Self {
Self::$variant(val as $inner_ty)
}
}
impl<'a, 'v> From<&'a $type> for ArgValue<'v> {
fn from(val: &'a $type) -> Self {
Self::$variant(*val as $inner_ty)
}
}
impl<'a> TryFrom<ArgValue<'a>> for $type {
type Error = $crate::errors::ValueDowncastError;
fn try_from(val: ArgValue<'a>) -> Result<Self, Self::Error> {
try_downcast!(val -> $type, |ArgValue::$variant(val)| { val as $type })
}
}
impl<'a, 'v> TryFrom<&'a ArgValue<'v>> for $type {
type Error = $crate::errors::ValueDowncastError;
fn try_from(val: &'a ArgValue<'v>) -> Result<Self, Self::Error> {
try_downcast!(val -> $type, |ArgValue::$variant(val)| { *val as $type })
}
}
impl From<$type> for RetValue {
fn from(val: $type) -> Self {
Self::$variant(val as $inner_ty)
}
}
impl TryFrom<RetValue> for $type {
type Error = $crate::errors::ValueDowncastError;
fn try_from(val: RetValue) -> Result<Self, Self::Error> {
try_downcast!(val -> $type, |RetValue::$variant(val)| { val as $type })
}
}
)+
};
}
impl_pod_value!(Int, i64, [i8, i16, i32, i64, isize]);
impl_pod_value!(UInt, i64, [u8, u16, u32, u64, usize]);
impl_pod_value!(Float, f64, [f32, f64]);
impl_pod_value!(DataType, DLDataType, [DLDataType]);
impl_pod_value!(Context, TVMContext, [TVMContext]);
impl<'a> From<&'a str> for ArgValue<'a> {
fn from(s: &'a str) -> Self {
Self::String(CString::new(s).unwrap().into_raw())
}
}
impl<'a> From<String> for ArgValue<'a> {
fn from(s: String) -> Self {
Self::String(CString::new(s).unwrap().into_raw())
}
}
impl<'a> From<&'a CStr> for ArgValue<'a> {
fn from(s: &'a CStr) -> Self {
Self::Str(s)
}
}
impl<'a> From<CString> for ArgValue<'a> {
fn from(s: CString) -> Self {
Self::String(s.into_raw())
}
}
impl<'a> From<&'a TVMByteArray> for ArgValue<'a> {
fn from(s: &'a TVMByteArray) -> Self {
Self::Bytes(s)
}
}
impl<'a> TryFrom<ArgValue<'a>> for &'a str {
type Error = ValueDowncastError;
fn try_from(val: ArgValue<'a>) -> Result<Self, Self::Error> {
try_downcast!(val -> &str, |ArgValue::Str(s)| { s.to_str().unwrap() })
}
}
impl<'a, 'v> TryFrom<&'a ArgValue<'v>> for &'v str {
type Error = ValueDowncastError;
fn try_from(val: &'a ArgValue<'v>) -> Result<Self, Self::Error> {
try_downcast!(val -> &str, |ArgValue::Str(s)| { s.to_str().unwrap() })
}
}
/// Converts an unspecialized handle to a ArgValue.
impl<T> From<*const T> for ArgValue<'static> {
fn from(ptr: *const T) -> Self {
Self::Handle(ptr as *mut c_void)
}
}
/// Converts an unspecialized mutable handle to a ArgValue.
impl<T> From<*mut T> for ArgValue<'static> {
fn from(ptr: *mut T) -> Self {
Self::Handle(ptr as *mut c_void)
}
}
impl<'a> From<&'a mut DLTensor> for ArgValue<'a> {
fn from(arr: &'a mut DLTensor) -> Self {
Self::ArrayHandle(arr as *mut DLTensor)
}
}
impl<'a> From<&'a DLTensor> for ArgValue<'a> {
fn from(arr: &'a DLTensor) -> Self {
Self::ArrayHandle(arr as *const _ as *mut DLTensor)
}
}
impl TryFrom<RetValue> for String {
type Error = ValueDowncastError;
fn try_from(val: RetValue) -> Result<String, Self::Error> {
try_downcast!(
val -> String,
|RetValue::String(s)| { unsafe { CString::from_raw(s).into_string().unwrap() }},
|RetValue::Str(s)| { s.to_str().unwrap().to_string() }
)
}
}
impl From<String> for RetValue {
fn from(s: String) -> Self {
Self::String(std::ffi::CString::new(s).unwrap().into_raw()) | }
impl From<TVMByteArray> for RetValue {
fn from(arr: TVMByteArray) -> Self {
Self::Bytes(arr)
}
}
impl TryFrom<RetValue> for TVMByteArray {
type Error = ValueDowncastError;
fn try_from(val: RetValue) -> Result<Self, Self::Error> {
try_downcast!(val -> TVMByteArray, |RetValue::Bytes(val)| { val })
}
}
impl Default for RetValue {
fn default() -> Self {
Self::Int(0)
}
}
impl TryFrom<RetValue> for std::ffi::CString {
type Error = ValueDowncastError;
fn try_from(val: RetValue) -> Result<CString, Self::Error> {
try_downcast!(val -> std::ffi::CString,
|RetValue::Str(val)| { val.into() })
}
}
// Implementations for bool.
impl<'a> From<bool> for ArgValue<'a> {
fn from(s: bool) -> Self {
(s as i64).into()
}
}
impl From<bool> for RetValue {
fn from(s: bool) -> Self {
(s as i64).into()
}
}
impl TryFrom<RetValue> for bool {
type Error = ValueDowncastError;
fn try_from(val: RetValue) -> Result<bool, Self::Error> {
try_downcast!(val -> bool,
|RetValue::Int(val)| { !(val == 0) })
}
}
impl<'a> TryFrom<ArgValue<'a>> for bool {
type Error = ValueDowncastError;
fn try_from(val: ArgValue<'a>) -> Result<bool, Self::Error> {
try_downcast!(val -> bool, |ArgValue::Int(val)| { !(val == 0) })
}
}
impl From<()> for RetValue {
fn from(_: ()) -> Self {
RetValue::Null
}
}
impl TryFrom<RetValue> for () {
type Error = ValueDowncastError;
fn try_from(val: RetValue) -> Result<(), Self::Error> {
try_downcast!(val -> bool,
|RetValue::Null| { () })
}
} | } |
file_module.py | def file_func():
| return "Hi from file_func" |
|
compiletest.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_type = "bin"]
#![feature(phase, slicing_syntax, globs)]
#![deny(warnings)]
extern crate test;
extern crate getopts;
#[phase(plugin, link)] extern crate log;
extern crate regex;
use std::os;
use std::io;
use std::io::fs;
use std::str::FromStr;
use getopts::{optopt, optflag, reqopt};
use common::Config;
use common::{Pretty, DebugInfoGdb, DebugInfoLldb, Codegen};
use util::logv;
use regex::Regex;
pub mod procsrv;
pub mod util;
pub mod header;
pub mod runtest;
pub mod common;
pub mod errors;
pub fn main() {
let args = os::args();
let config = parse_config(args);
if config.valgrind_path.is_none() && config.force_valgrind {
panic!("Can't find Valgrind to run Valgrind tests");
}
log_config(&config);
run_tests(&config);
}
pub fn parse_config(args: Vec<String> ) -> Config {
let groups : Vec<getopts::OptGroup> =
vec!(reqopt("", "compile-lib-path", "path to host shared libraries", "PATH"),
reqopt("", "run-lib-path", "path to target shared libraries", "PATH"),
reqopt("", "rustc-path", "path to rustc to use for compiling", "PATH"),
optopt("", "clang-path", "path to executable for codegen tests", "PATH"),
optopt("", "valgrind-path", "path to Valgrind executable for Valgrind tests", "PROGRAM"),
optflag("", "force-valgrind", "fail if Valgrind tests cannot be run under Valgrind"),
optopt("", "llvm-bin-path", "path to directory holding llvm binaries", "DIR"),
reqopt("", "src-base", "directory to scan for test files", "PATH"),
reqopt("", "build-base", "directory to deposit test outputs", "PATH"),
reqopt("", "aux-base", "directory to find auxiliary test files", "PATH"),
reqopt("", "stage-id", "the target-stage identifier", "stageN-TARGET"),
reqopt("", "mode", "which sort of compile tests to run",
"(compile-fail|run-fail|run-pass|run-pass-valgrind|pretty|debug-info)"),
optflag("", "ignored", "run tests marked as ignored"),
optopt("", "runtool", "supervisor program to run tests under \
(eg. emulator, valgrind)", "PROGRAM"),
optopt("", "host-rustcflags", "flags to pass to rustc for host", "FLAGS"),
optopt("", "target-rustcflags", "flags to pass to rustc for target", "FLAGS"),
optflag("", "verbose", "run tests verbosely, showing all output"),
optopt("", "logfile", "file to log test execution to", "FILE"),
optopt("", "save-metrics", "file to save metrics to", "FILE"),
optopt("", "ratchet-metrics", "file to ratchet metrics against", "FILE"),
optopt("", "ratchet-noise-percent",
"percent change in metrics to consider noise", "N"),
optflag("", "jit", "run tests under the JIT"),
optopt("", "target", "the target to build for", "TARGET"),
optopt("", "host", "the host to build for", "HOST"),
optopt("", "gdb-version", "the version of GDB used", "VERSION STRING"),
optopt("", "lldb-version", "the version of LLDB used", "VERSION STRING"),
optopt("", "android-cross-path", "Android NDK standalone path", "PATH"),
optopt("", "adb-path", "path to the android debugger", "PATH"),
optopt("", "adb-test-dir", "path to tests for the android debugger", "PATH"),
optopt("", "lldb-python-dir", "directory containing LLDB's python module", "PATH"),
optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite", "A.B"),
optflag("h", "help", "show this message"));
assert!(!args.is_empty());
let argv0 = args[0].clone();
let args_ = args.tail();
if args[1].as_slice() == "-h" || args[1].as_slice() == "--help" {
let message = format!("Usage: {} [OPTIONS] [TESTNAME...]", argv0);
println!("{}", getopts::usage(message.as_slice(), groups.as_slice()));
println!("");
panic!()
}
let matches =
&match getopts::getopts(args_.as_slice(), groups.as_slice()) {
Ok(m) => m,
Err(f) => panic!("{}", f)
};
if matches.opt_present("h") || matches.opt_present("help") {
let message = format!("Usage: {} [OPTIONS] [TESTNAME...]", argv0);
println!("{}", getopts::usage(message.as_slice(), groups.as_slice()));
println!("");
panic!()
}
fn opt_path(m: &getopts::Matches, nm: &str) -> Path {
Path::new(m.opt_str(nm).unwrap())
}
let filter = if !matches.free.is_empty() {
let s = matches.free[0].as_slice();
match regex::Regex::new(s) {
Ok(re) => Some(re),
Err(e) => {
println!("failed to parse filter /{}/: {}", s, e);
panic!()
}
}
} else {
None
};
Config {
compile_lib_path: matches.opt_str("compile-lib-path").unwrap(),
run_lib_path: matches.opt_str("run-lib-path").unwrap(),
rustc_path: opt_path(matches, "rustc-path"),
clang_path: matches.opt_str("clang-path").map(|s| Path::new(s)),
valgrind_path: matches.opt_str("valgrind-path"),
force_valgrind: matches.opt_present("force-valgrind"),
llvm_bin_path: matches.opt_str("llvm-bin-path").map(|s| Path::new(s)),
src_base: opt_path(matches, "src-base"),
build_base: opt_path(matches, "build-base"),
aux_base: opt_path(matches, "aux-base"),
stage_id: matches.opt_str("stage-id").unwrap(),
mode: FromStr::from_str(matches.opt_str("mode")
.unwrap()
.as_slice()).expect("invalid mode"),
run_ignored: matches.opt_present("ignored"),
filter: filter,
cfail_regex: Regex::new(errors::EXPECTED_PATTERN).unwrap(),
logfile: matches.opt_str("logfile").map(|s| Path::new(s)),
save_metrics: matches.opt_str("save-metrics").map(|s| Path::new(s)),
ratchet_metrics:
matches.opt_str("ratchet-metrics").map(|s| Path::new(s)),
ratchet_noise_percent:
matches.opt_str("ratchet-noise-percent")
.and_then(|s| from_str::<f64>(s.as_slice())),
runtool: matches.opt_str("runtool"),
host_rustcflags: matches.opt_str("host-rustcflags"),
target_rustcflags: matches.opt_str("target-rustcflags"),
jit: matches.opt_present("jit"),
target: opt_str2(matches.opt_str("target")),
host: opt_str2(matches.opt_str("host")),
gdb_version: extract_gdb_version(matches.opt_str("gdb-version")),
lldb_version: extract_lldb_version(matches.opt_str("lldb-version")),
android_cross_path: opt_path(matches, "android-cross-path"),
adb_path: opt_str2(matches.opt_str("adb-path")),
adb_test_dir: opt_str2(matches.opt_str("adb-test-dir")),
adb_device_status:
"arm-linux-androideabi" ==
opt_str2(matches.opt_str("target")).as_slice() &&
"(none)" !=
opt_str2(matches.opt_str("adb-test-dir")).as_slice() &&
!opt_str2(matches.opt_str("adb-test-dir")).is_empty(),
lldb_python_dir: matches.opt_str("lldb-python-dir"),
test_shard: test::opt_shard(matches.opt_str("test-shard")),
verbose: matches.opt_present("verbose"),
}
}
pub fn log_config(config: &Config) {
let c = config;
logv(c, format!("configuration:"));
logv(c, format!("compile_lib_path: {}", config.compile_lib_path));
logv(c, format!("run_lib_path: {}", config.run_lib_path));
logv(c, format!("rustc_path: {}", config.rustc_path.display()));
logv(c, format!("src_base: {}", config.src_base.display()));
logv(c, format!("build_base: {}", config.build_base.display()));
logv(c, format!("stage_id: {}", config.stage_id));
logv(c, format!("mode: {}", config.mode));
logv(c, format!("run_ignored: {}", config.run_ignored));
logv(c, format!("filter: {}",
opt_str(&config.filter
.as_ref()
.map(|re| {
re.to_string().into_string()
}))));
logv(c, format!("runtool: {}", opt_str(&config.runtool)));
logv(c, format!("host-rustcflags: {}",
opt_str(&config.host_rustcflags)));
logv(c, format!("target-rustcflags: {}",
opt_str(&config.target_rustcflags)));
logv(c, format!("jit: {}", config.jit));
logv(c, format!("target: {}", config.target));
logv(c, format!("host: {}", config.host));
logv(c, format!("android-cross-path: {}",
config.android_cross_path.display()));
logv(c, format!("adb_path: {}", config.adb_path));
logv(c, format!("adb_test_dir: {}", config.adb_test_dir));
logv(c, format!("adb_device_status: {}",
config.adb_device_status));
match config.test_shard {
None => logv(c, "test_shard: (all)".to_string()),
Some((a,b)) => logv(c, format!("test_shard: {}.{}", a, b))
}
logv(c, format!("verbose: {}", config.verbose));
logv(c, format!("\n"));
}
pub fn opt_str<'a>(maybestr: &'a Option<String>) -> &'a str {
match *maybestr {
None => "(none)",
Some(ref s) => s.as_slice(),
}
}
pub fn opt_str2(maybestr: Option<String>) -> String {
match maybestr {
None => "(none)".to_string(),
Some(s) => s,
}
}
pub fn run_tests(config: &Config) {
if config.target.as_slice() == "arm-linux-androideabi" {
match config.mode {
DebugInfoGdb => {
println!("arm-linux-androideabi debug-info \
test uses tcp 5039 port. please reserve it");
}
_ =>{}
}
//arm-linux-androideabi debug-info test uses remote debugger
//so, we test 1 task at once.
// also trying to isolate problems with adb_run_wrapper.sh ilooping
os::setenv("RUST_TEST_TASKS","1");
}
match config.mode {
DebugInfoLldb => {
// Some older versions of LLDB seem to have problems with multiple
// instances running in parallel, so only run one test task at a
// time.
os::setenv("RUST_TEST_TASKS", "1");
}
_ => { /* proceed */ }
}
let opts = test_opts(config);
let tests = make_tests(config);
// sadly osx needs some file descriptor limits raised for running tests in
// parallel (especially when we have lots and lots of child processes).
// For context, see #8904
io::test::raise_fd_limit();
let res = test::run_tests_console(&opts, tests.into_iter().collect());
match res {
Ok(true) => {}
Ok(false) => panic!("Some tests failed"),
Err(e) => {
println!("I/O failure during tests: {}", e);
}
}
}
pub fn test_opts(config: &Config) -> test::TestOpts {
test::TestOpts {
filter: match config.filter {
None => None,
Some(ref filter) => Some(filter.clone()),
},
run_ignored: config.run_ignored,
logfile: config.logfile.clone(),
run_tests: true,
run_benchmarks: true,
ratchet_metrics: config.ratchet_metrics.clone(),
ratchet_noise_percent: config.ratchet_noise_percent.clone(),
save_metrics: config.save_metrics.clone(),
test_shard: config.test_shard.clone(),
nocapture: false,
color: test::AutoColor,
}
}
pub fn make_tests(config: &Config) -> Vec<test::TestDescAndFn> |
pub fn is_test(config: &Config, testfile: &Path) -> bool {
// Pretty-printer does not work with .rc files yet
let valid_extensions =
match config.mode {
Pretty => vec!(".rs".to_string()),
_ => vec!(".rc".to_string(), ".rs".to_string())
};
let invalid_prefixes = vec!(".".to_string(), "#".to_string(), "~".to_string());
let name = testfile.filename_str().unwrap();
let mut valid = false;
for ext in valid_extensions.iter() {
if name.ends_with(ext.as_slice()) {
valid = true;
}
}
for pre in invalid_prefixes.iter() {
if name.starts_with(pre.as_slice()) {
valid = false;
}
}
return valid;
}
pub fn make_test(config: &Config, testfile: &Path, f: || -> test::TestFn)
-> test::TestDescAndFn {
test::TestDescAndFn {
desc: test::TestDesc {
name: make_test_name(config, testfile),
ignore: header::is_test_ignored(config, testfile),
should_fail: false
},
testfn: f(),
}
}
/// Build the display name "[mode] dir/file" for a test, keeping only the
/// last directory component so listings stay short.
pub fn make_test_name(config: &Config, testfile: &Path) -> test::TestName {
    // Try to elide redundant long paths
    fn shorten(path: &Path) -> String {
        let filename = path.filename_str();
        let p = path.dir_path();
        let dir = p.filename_str();
        // Missing components render as "" rather than panicking.
        format!("{}/{}", dir.unwrap_or(""), filename.unwrap_or(""))
    }
    test::DynTestName(format!("[{}] {}", config.mode, shorten(testfile)))
}
/// Wrap a run of `testfile` as a dynamic test closure. The config and the
/// path are cloned/stringified so the proc owns its data.
pub fn make_test_closure(config: &Config, testfile: &Path) -> test::TestFn {
    let config = (*config).clone();
    // FIXME (#9639): This needs to handle non-utf8 paths
    let testfile = testfile.as_str().unwrap().to_string();
    test::DynTestFn(proc() {
        runtest::run(config, testfile)
    })
}
/// Like `make_test_closure`, but for metrics-collecting (codegen) tests:
/// the proc receives the shared `MetricMap` to record measurements into.
pub fn make_metrics_test_closure(config: &Config, testfile: &Path) -> test::TestFn {
    let config = (*config).clone();
    // FIXME (#9639): This needs to handle non-utf8 paths
    let testfile = testfile.as_str().unwrap().to_string();
    test::DynMetricFn(proc(mm) {
        runtest::run_metrics(config, testfile, mm)
    })
}
/// Pull a `major.minor` GDB version (e.g. "7.4") out of a captured
/// `gdb --version` line; None for blank/absent input or no match.
fn extract_gdb_version(full_version_line: Option<String>) -> Option<String> {
    match full_version_line {
        // Only inspect non-blank lines.
        Some(ref full_version_line)
          if full_version_line.as_slice().trim().len() > 0 => {
            let full_version_line = full_version_line.as_slice().trim();
            // Group 2 is the "N.N" version; groups 1/3 anchor it so digits
            // embedded in a longer number are not matched.
            let re = Regex::new(r"(^|[^0-9])([0-9]\.[0-9])([^0-9]|$)").unwrap();
            match re.captures(full_version_line) {
                Some(captures) => {
                    Some(captures.at(2).to_string())
                }
                None => {
                    println!("Could not extract GDB version from line '{}'",
                             full_version_line);
                    None
                }
            }
        },
        _ => None
    }
}
/// Extract the major LLDB version number from `lldb --version` output.
fn extract_lldb_version(full_version_line: Option<String>) -> Option<String> {
    // Extract the major LLDB version from the given version string.
    // LLDB version strings are different for Apple and non-Apple platforms.
    // At the moment, this function only supports the Apple variant, which looks
    // like this:
    //
    // LLDB-179.5 (older versions)
    // lldb-300.2.51 (new versions)
    //
    // We are only interested in the major version number, so this function
    // will return `Some("179")` and `Some("300")` respectively.
    match full_version_line {
        // Only inspect non-blank lines.
        Some(ref full_version_line)
          if full_version_line.as_slice().trim().len() > 0 => {
            let full_version_line = full_version_line.as_slice().trim();
            // Case-insensitive "lldb-<digits>"; group 1 is the major version.
            let re = Regex::new(r"[Ll][Ll][Dd][Bb]-([0-9]+)").unwrap();
            match re.captures(full_version_line) {
                Some(captures) => {
                    Some(captures.at(1).to_string())
                }
                None => {
                    println!("Could not extract LLDB version from line '{}'",
                             full_version_line);
                    None
                }
            }
        },
        _ => None
    }
}
| {
debug!("making tests from {}",
config.src_base.display());
let mut tests = Vec::new();
let dirs = fs::readdir(&config.src_base).unwrap();
for file in dirs.iter() {
let file = file.clone();
debug!("inspecting file {}", file.display());
if is_test(config, &file) {
let t = make_test(config, &file, || {
match config.mode {
Codegen => make_metrics_test_closure(config, &file),
_ => make_test_closure(config, &file)
}
});
tests.push(t)
}
}
tests
} |
redisbackend.py | import os
import sys
import time
import hashlib
import zlib
import random
import string
import subprocess as sb
import redis
import json
from collections import Counter
digestsize = 20
class RedisDataStore:
    def __init__(self, loc, db=0):
        """Connect to the redis instance backing the job/experiment store.

        loc: redis server hostname or IP.
        db: numeric redis database index.
        """
        self.conn = redis.StrictRedis(loc, db=db)
def post_experiment(self, jobhash, N, params):
"""
Sets (in order) the:
jobs:githashes
params:sources
experiments:times
then adds experiments to jobs:new
N: number of repeats requested
params: JSON param string
"""
r = self.conn
self.check_githash(jobhash)
if params.strip() == "" or params is None:
params = '{}'
# cleanedparams = yaml.dump(yaml.load(params)).strip()
print(params)
cleanedparams = json.dumps(json.loads(params)).strip()
cleanedparams = zlib.compress(cleanedparams)
paramhash = self.hash(cleanedparams)
exp = jobhash + '|' + paramhash
r.hset('params:sources', paramhash, cleanedparams)
r.hset('experiments:times', exp, r.time()[0])
r.lpush('jobs:new', *([exp]*N))
    def check_githash(self, jobhash):
        """Record the current git HEAD for *jobhash*, aborting the process
        if this job was previously run under a different commit.

        No-op when the working directory is not a git checkout.
        """
        r = self.conn
        if not os.path.exists('.git'):
            return
        githash = sb.check_output('git rev-parse HEAD'.split()).strip()
        storedgithash = r.hget('jobs:githashes', jobhash)
        if storedgithash is not None and githash != storedgithash:
            print('ERROR: This jobfile has already been run ' +
                  'under a different version of the code.')
            sys.exit(-1)
            # githash = githash + ' + ' + storedgithash
        r.hset('jobs:githashes', jobhash, githash)
    def post_jobfile(self, source, desc):
        """
        Posts job in jobs:sources

        source: path to source or [partial] existing hash
        desc: string description saved to jobs:descs

        If the file was already submitted, a random trailing comment is
        appended to it so its content (and therefore its hash) changes,
        then it is re-hashed. Returns the final job hash.
        """
        r = self.conn
        jobhash = self.get_jobhash(source)
        if r.hexists('jobs:sources', jobhash):
            print("WARNING: This jobfile has already been submitted.\n" +
                  "Modifying file and resubmitting.")
            # 12 random alphanumerics in a "#..." comment perturb the hash.
            N = 12
            rstr = "\n#" + ''.join(
                random.choice(string.ascii_uppercase +
                              string.digits) for x in range(N))
            if not os.path.exists(source):
                print("ERROR: Cannot change source {} quiting.".format(source))
                sys.exit(-1)
            # Appends to the on-disk file via the shell.
            sb.check_call('echo "{}" >> {}'.format(rstr, source), shell=True)
            jobhash = self.get_jobhash(source)
        r.hset('jobs:sources', jobhash, self.get_jobfile_disk(source))
        r.hset('jobs:descs', jobhash, desc)
        r.hset('jobs:times', jobhash, r.time()[0])
        print "Posted hash: %s" % jobhash[:8]
        #if not os.path.exists('.exps'):
            #os.makedirs('.exps')
        #newfile = os.path.join('.exps', jobhash+'.py')
        #if not os.path.exists(newfile):
            #with open(newfile,'w') as fid:
                #fid.write(zlib.decompress(self.get_jobfile(source)))
        return jobhash
def describe_jobfile(self, source, desc):
""" Describes job in jobs:descs:<hash>
Needs r: redis object
source: path to source or [partial] existing hash
desc: short textual description.
"""
r = self.conn
jobhash = self.get_jobhash(source)
if r.hexists('jobs:descs', jobhash):
old_desc = r.hget('jobs:descs', jobhash)
if desc != old_desc:
print("Warning: This job already has description:")
cont = raw_input("Would you like to override? [y/n]: ")
if cont.upper().strip()[0] == 'Y':
print("Overwriting.")
else:
print("Exiting.")
sys.exit(0)
r.hset('jobs:descs', jobhash, desc)
def get_description(self, jobhash):
""" Gets job description in jobs:descs:<hash> """
return self.conn.hget('jobs:descs', jobhash)
def get_jobfile_disk(self, val):
""" Returns compressed source from file path"""
if os.path.exists(val):
with open(val,'r') as fid:
return zlib.compress(fid.read())
sys.exit('Could not find valid source that began with hash %s' % val)
def get_jobfile_db(self, val):
""" Returns compressed source from (partial) hash"""
r = self.conn
if len(val) == digestsize:
return r.hget('jobs:sources', val)
for h in r.hkeys('jobs:sources'):
if h.startswith(val):
return r.hget('jobs:sources', h)
sys.exit('Could not find valid source that began with hash %s' % val)
def get_jobhash(self, val):
""" Returns hash from file path or (partial) hash"""
if len(val) == digestsize and val.isalnum():
return val
if os.path.exists(val):
with open(val,'r') as fid:
return self.hash(fid.read())
r = self.conn
for h in r.hkeys('jobs:sources'):
if h.startswith(val):
return h
sys.exit('Could not find valid hash that began with hash %s' % val)
def get_params(self, phash):
""" Returns value of the parameter hash from params:sources """
return zlib.decompress(self.conn.hget('params:sources', phash))
def hash(self, data):
return hashlib.sha1(data).hexdigest()
    def kill_workers(self):
        """Signal every worker to stop and block until all heartbeats
        disappear; the stop flag is always cleared on exit.
        """
        r = self.conn
        if r.zcard('workers:hb') == 0:
            print 'No living clients to kill.'
            sys.exit(0)
        # Refuse to stack a second stop request on top of a pending one.
        assert not r.exists('workers:stop')
        r.set('workers:stop','ALL')
        print('Waiting for all workers to stop...')
        try:
            num = r.zcard('workers:hb')
            while num > 0:
                print("...%d workers remaining." % num)
                time.sleep(1)
                num = r.zcard('workers:hb')
            print("All workers stopped.")
        except KeyboardInterrupt:
            print("Stopping")
        finally:
            # Always clear the flag so future workers are not told to stop.
            r.delete('workers:stop')
    def job_status(self, argv):
        """Print queue counters; an extra CLI argument enables a per-hash
        breakdown of pending and in-progress jobs.

        argv: the raw sys.argv-style list from the CLI wrapper.
        """
        r = self.conn
        if len(argv) == 3:
            verbose=True
        else:
            verbose=False
        # Counters default to the string '0' when keys do not exist yet.
        new = r.llen('jobs:new') or '0'
        working = r.llen('jobs:working') or '0'
        done = r.get('jobs:numdone') or '0'
        failed = r.get('jobs:failed') or '0'
        if not verbose:
            print("\t%s jobs pending\n\t%s running\n\t%s completed\n\t%s failed"%
                (new, working, done, failed))
        else:
            print("Pending jobs (%s):" % new)
            joblist = r.lrange('jobs:new', 0, -1)
            # Tally duplicate experiment ids so repeats show as one line.
            jobcounts = Counter(joblist)
            for h,count in jobcounts.iteritems():
                print('\t%4d: %s' % (count, h[:8]))
            print("\nIn-progress jobs (%s):"% working)
            joblist = r.lrange('jobs:working', 0, -1)
            jobcounts = Counter(joblist)
            for h,count in jobcounts.iteritems():
                print('\t%4d: %s' % (count, h[:8]))
            print("\nDone jobs (%s)" % done)
            #keys = r.keys('jobs:done:*')
            #for k in sorted(keys):
                #print('\t%4s: %s' % (r.llen(k),k.split(':')[-1][:8]))
            print("\nFailed jobs (%s)" % failed)
    def worker_status(self, argv):
        """Print how many workers are alive; an extra CLI argument lists
        each worker with the age of its last heartbeat.
        """
        r = self.conn
        # Most recent heartbeat (highest score) first.
        clients = r.zrevrange('workers:hb', 0, -1)
        num = len(clients)
        if len(argv) == 3:
            verbose=True
        else:
            verbose=False
        if num == 0:
            print('There are currently no clients alive.')
        elif not verbose:
            print("There are %d clients alive." % num)
        else:
            print("The %d clients alive are:" % num)
            # redis TIME returns (seconds, microseconds); combine both for
            # sub-second heartbeat ages.
            curr_time = r.time()
            for x in clients:
                cl = x #js.loads(zlib.decompress(x))
                print '\t{0:<15} with hb {1:3.1f} seconds ago'\
                    .format(cl, curr_time[0] + (curr_time[1]*1e-6) - int(r.zscore('workers:hb',x)))
def select_jobfile(self, sel=None, fullhashes=False):
return self.select_jobfiles(sel, fullhashes)[0]
    def select_jobfiles(self, sel=None, fullhashes=False):
        """Resolve a selection of stored job hashes, oldest first.

        sel: None to prompt interactively (supports 'N' or 'N-M' ranges);
            otherwise an integer index used as both ends of the range.
        fullhashes: print full hashes instead of 5-char prefixes.
        Returns a list of job hashes.
        """
        r = self.conn
        # Sort by submission time; jobs with no stored time sort first as 0.
        hashes = sorted(r.hkeys('jobs:sources'), key=lambda x: int(r.hget('jobs:times', x) or '0'))
        if sel is None:
            for i, d in enumerate(hashes):
                desc = r.hget('jobs:descs', d) or ''
                if fullhashes:
                    print "%4d. %s %s" % (i, d, desc)
                else:
                    print "%4d. %s %s" % (i, d[:5], desc)
            sel = raw_input("Choose a dataset or range of datasets or 'q' to exit: ")
            sel = [x.strip() for x in sel.split('-')]
            # NOTE(review): relies on the loop variable `i` leaking out of
            # the loop above (NameError if there are no stored jobs). Any
            # non-numeric answer, e.g. 'q', exits here.
            if len(sel) == 1:
                if not sel[0].isdigit() or int(sel[0]) not in range(i+1):
                    sys.exit()
                a = b = int(sel[0])
            else:
                a,b = int(sel[0]), int(sel[1])
        else:
            a,b = sel, sel
        return [hashes[i] for i in range(a,b+1)]
def clean_jobfiles(self):
for res in self.select_jobfiles():
self.conn.hdel('jobs:descs', res)
self.conn.hdel('jobs:sources', res)
self.conn.hdel('jobs:times', res)
self.conn.hdel('jobs:githashes', res)
def gc(self):
r = self.conn
r.delete('jobs:failed')
r.delete('jobs:numdone')
clients = r.zrevrange('workers:hb', 0, -1)
num = len(clients)
if num == 0:
r.delete('jobs:working')
print("Done!")
    def push_heartbeat(self, idstring):
        """Register/update this worker's heartbeat, scored by server time.

        NOTE(review): zadd(key, score, member) is the legacy redis-py
        (< 3.0) argument order; redis-py 3.x requires zadd(key,
        {member: score}). Confirm the pinned redis-py version before
        upgrading.
        """
        self.conn.zadd('workers:hb', self.conn.time()[0], idstring)
def remove_heartbeat(self, idstring):
self.conn.zrem('workers:hb', idstring)
def query_stop(self, host):
|
def remove_working_job(self, exp):
self.conn.lrem('jobs:working', 1, exp)
def reload_working_job(self, exp):
self.conn.lrem('jobs:working', 1, exp)
if exp is not None:
self.conn.lpush('jobs:new', exp)
def poll_work(self):
return self.conn.rpoplpush('jobs:new', 'jobs:working')
def job_fail(self):
self.conn.incr('jobs:failed')
def job_succeed(self):
self.conn.incr('jobs:numdone')
| cmd = self.conn.get('workers:stop')
if cmd == 'ALL' or cmd == host:
return True
else:
return False |
main.go | // ubuntu@ubuntu-VirtualBox:~$ curl -v http://localhost:3333/
// * Trying 127.0.0.1...
// * TCP_NODELAY set
// * Connected to localhost (127.0.0.1) port 3333 (#0)
// > GET / HTTP/1.1
// > Host: localhost:3333
// > User-Agent: curl/7.58.0
// > Accept: */*
// >
// < HTTP/1.1 200 OK
// < Date: Tue, 19 Jan 2021 10:19:38 GMT
// < Content-Length: 17
// < Content-Type: text/plain; charset=utf-8
// <
// * Connection #0 to host localhost left intact
// welcome anonymous
// ubuntu@ubuntu-VirtualBox:~$ curl -v http://localhost:3333/admin/adminReadOnly
// * Trying 127.0.0.1...
// * TCP_NODELAY set
// * Connected to localhost (127.0.0.1) port 3333 (#0)
// > GET /admin/adminReadOnly HTTP/1.1
// > Host: localhost:3333
// > User-Agent: curl/7.58.0
// > Accept: */*
// >
// < HTTP/1.1 401 Unauthorized
// < Content-Type: text/plain; charset=utf-8
// < X-Content-Type-Options: nosniff
// < Date: Tue, 19 Jan 2021 10:29:57 GMT
// < Content-Length: 13
// <
// Unauthorized
// * Connection #0 to host localhost left intact
// ubuntu@ubuntu-VirtualBox:~$ curl -H"Authorization: BEARER eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1aWQiOiIxMjMiLCJuYW1lIjoiTWlrZSBKU09OIiwicm9sZXMiOlsiVVNFUiIsIkFETUlOX1JFQURfT05MWSJdLCJleHAiOjE2MTEwNTI3NzQsImlhdCI6MTYxMTA1MjE3NH0.MD8npUtLNC88sEc8TobfGTKiIaovlQkmgqYheXZrhJE" -v http://localhost:3333/admin/adminReadOnly
// * Trying 127.0.0.1...
// * TCP_NODELAY set
// * Connected to localhost (127.0.0.1) port 3333 (#0)
// > GET /admin/adminReadOnly HTTP/1.1
// > Host: localhost:3333
// > User-Agent: curl/7.58.0
// > Accept: */*
// > Authorization: BEARER eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1aWQiOiIxMjMiLCJuYW1lIjoiTWlrZSBKU09OIiwicm9sZXMiOlsiVVNFUiIsIkFETUlOX1JFQURfT05MWSJdLCJleHAiOjE2MTEwNTI3NzQsImlhdCI6MTYxMTA1MjE3NH0.MD8npUtLNC88sEc8TobfGTKiIaovlQkmgqYheXZrhJE
// >
// < HTTP/1.1 200 OK
// < Date: Tue, 19 Jan 2021 10:31:36 GMT
// < Content-Length: 40
// < Content-Type: text/plain; charset=utf-8
// <
// * Connection #0 to host localhost left intact
// protected area - read only admin. hi 123
package main
import (
"fmt"
"net/http"
"log"
"os"
"crypto/x509"
"encoding/pem"
"time"
"github.com/dgrijalva/jwt-go"
jwtauth "github.com/distributed-go/go-toolkit/authentication"
"github.com/gorilla/mux"
)
var (
	// Package-level authenticator, configured once in init().
	tokenAuth jwtauth.JWTAuth
	// WARNING: demo-only 512-bit RSA keypair committed to source so this
	// example is self-contained. Never hardcode a private key in real
	// code; load signing keys from secure configuration instead.
	privateKeyRS256String = `-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBALxo3PCjFw4QjgOX06QCJIJBnXXNiEYwDLxxa5/7QyH6y77nCRQy
J3x3UwF9rUD0RCsp4sNdX5kOQ9PUyHyOtCUCAwEAAQJARjFLHtuj2zmPrwcBcjja
IS0Q3LKV8pA0LoCS+CdD+4QwCxeKFq0yEMZtMvcQOfqo9x9oAywFClMSlLRyl7ng
gQIhAOyerGbcdQxxwjwGpLS61Mprf4n2HzjwISg20cEEH1tfAiEAy9dXmgQpDPir
C6Q9QdLXpNgSB+o5CDqfor7TTyTCovsCIQDNCfpu795luDYN+dvD2JoIBfrwu9v2
ZO72f/pm/YGGlQIgUdRXyW9kH13wJFNBeBwxD27iBiVj0cbe8NFUONBUBmMCIQCN
jVK4eujt1lm/m60TlEhaWBC3p+3aPT2TqFPUigJ3RQ==
-----END RSA PRIVATE KEY-----
`
	publicKeyRS256String = `-----BEGIN PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALxo3PCjFw4QjgOX06QCJIJBnXXNiEYw
DLxxa5/7QyH6y77nCRQyJ3x3UwF9rUD0RCsp4sNdX5kOQ9PUyHyOtCUCAwEAAQ==
-----END PUBLIC KEY-----
`
)
// init parses the embedded PEM keypair, configures the package-level JWT
// authenticator (RS256; 10-minute access / 60-minute refresh expiry), and
// prints a sample token pair for manual curl testing (see the transcript
// at the top of this file).
func init() {
	privateKeyBlock, _ := pem.Decode([]byte(privateKeyRS256String))
	privateKey, err := x509.ParsePKCS1PrivateKey(privateKeyBlock.Bytes)
	if err != nil {
		log.Fatalf(err.Error())
	}
	publicKeyBlock, _ := pem.Decode([]byte(publicKeyRS256String))
	publicKey, err := x509.ParsePKIXPublicKey(publicKeyBlock.Bytes)
	if err != nil {
		log.Fatalf(err.Error())
	}
	// RS256: tokens are signed with the private key, verified with the
	// public key.
	tokenAuth = jwtauth.NewJWTAuth(jwtauth.Config{
		JwtAuthAlgo: "RS256",
		JwtParser: &jwt.Parser{},
		SignKey: privateKey,
		VerifyKey: publicKey,
		JwtExpiry: time.Minute * 10,
		JwtRefreshExpiry: time.Minute * 60,
	})
	// For debugging/example purposes, we generate and print
	// a sample jwt token with claims `user_id:123` here:
	accessToken, refreshToken, err := tokenAuth.GenTokenPair(&jwtauth.AppClaims{
		UserID: "123",
		Name: "Mike JSON",
		Roles: []jwtauth.Role{jwtauth.Role("USER"), jwtauth.Role("ADMIN_READ_ONLY")},
	}, &jwtauth.RefreshClaims{
		UserID: "123",
		Roles: []jwtauth.Role{jwtauth.Role("USER"), jwtauth.Role("ADMIN_READ_ONLY")},
	})
	if err != nil {
		fmt.Println("ERROR: ", err)
		os.Exit(1)
	}
	fmt.Printf("AccessToken: %s \nRefreshToken: %s \n", accessToken, refreshToken)
}
func main() |
// router assembles the HTTP handler: "/" is public, while everything
// under /admin passes through token extraction (Verify), validation
// (Authenticate), and role enforcement (RequiresRole) -- in that order.
func router() http.Handler {
	r := mux.NewRouter()
	// =========== Public routes ===========
	r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("welcome anonymous"))
	}).Methods("GET")
	// =========== Protected routes with USER ROLES ===========
	subrouterAdmin := r.PathPrefix("/admin").Subrouter()
	// Seek, verify and validate JWT tokens
	subrouterAdmin.Use(tokenAuth.Verify())
	// Handle valid / invalid tokens. In this example, we use
	// the provided authenticator middleware, but you can write your
	// own very easily, look at the Authenticator method in jwtauth.go
	// and tweak it, its not scary.
	subrouterAdmin.Use(tokenAuth.Authenticate)
	// This middleware checks if the token has the appropriate ROLE to access
	// the resources. It will return 403 if given role is not present in the JWT Token
	subrouterAdmin.Use(tokenAuth.RequiresRole(jwtauth.Role("ADMIN_READ_ONLY"))) // try changing role to something else
	subrouterAdmin.HandleFunc("/adminReadOnly", func(w http.ResponseWriter, r *http.Request) {
		// Claims were stashed in the request context by the middleware.
		_, claims, _ := tokenAuth.TokenFromContext(r.Context())
		w.Write([]byte(fmt.Sprintf("protected area - read only admin. hi %v", claims["uid"])))
	}).Methods("GET")
	return r
}
| {
srv := &http.Server{
Handler: router(),
Addr: "127.0.0.1:3333",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
srv.ListenAndServe()
} |
State.ts | enum State {
NOT_STARTED,
STARTED, | FAILED
}
export default State | FINISHED, |
ultrahd.py | # -*- coding: utf-8 -*-
'''
    Covenant Add-on
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser2
class source:
    def __init__(self):
        # Scraper metadata: priority/language/domains describe this source.
        self.priority = 1
        self.language = ['en']
        self.domains = ['ultrahdindir.com']
        self.base_link = 'http://ultrahdindir.com'
        # Site search endpoint, POSTed to by sources().
        self.post_link = '/index.php?do=search'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
    def sources(self, url, hostDict, hostprDict):
        """Scrape ultrahdindir.com search results for debrid-only links.

        url: urlencoded imdb/title/year payload built by movie().
        hostDict/hostprDict: resolvable hoster domains (premium list first).
        Returns a list of source dicts; empty on any failure, since all
        errors are deliberately swallowed (scraper convention).
        """
        try:
            sources = []
            if url == None: return sources
            # This indexer only yields premium links: bail without debrid.
            if debrid.status() is False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['title'].replace(':','').lower()
            year = data['year']
            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            # POST the site's search form.
            url = urlparse.urljoin(self.base_link, self.post_link)
            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(query)
            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            # Keep only result boxes mentioning the requested imdb id.
            r = [(dom_parser2.parse_dom(i, 'div', attrs={'class':'news-title'})) for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]
            hostDict = hostprDict + hostDict
            for item in r:
                try:
                    name = item[1]
                    # Require an exact "(year)" match in the result title.
                    y = re.findall('\((\d{4})\)', name)[0]
                    if not y == year: raise Exception()
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', name)
                    s = s[0] if s else '0'
                    # Fetch the detail page and pull (label, link) pairs.
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data, 'div', attrs={'id': 'r-content'})
                    data = re.findall('\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                                      data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]
                    for name, url, size in u:
                        try:
                            # Infer quality from markers in the label.
                            # NOTE(review): the `any(i in [...] for i in name)`
                            # branches iterate *characters* of name, so the
                            # multi-char markers can never match -- probably
                            # intended `marker in name`. Left as-is.
                            if '4K' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in ['dvdscr', 'r5', 'r6'] for i in name):
                                quality = 'SCR'
                            elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']
                                     for i in name):
                                quality = 'CAM'
                            else: quality = '720p'
                            info = []
                            if '3D' in name or '.3D.' in url: info.append('3D'); quality = '1080p'
                            if any(i in ['hevc', 'h265', 'x265'] for i in name): info.append('HEVC')
                            try:
                                # Normalize the size string to "N.NN GB".
                                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', size)[-1]
                                div = 1 if size.endswith(('Gb', 'GiB', 'GB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass
                            info = ' | '.join(info)
                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            # Skip archives and known-bad mirrors.
                            if any(x in url for x in ['.rar', '.zip', '.iso', 'turk']):continue
                            if 'ftp' in url: host = 'COV'; direct = True;
                            else: direct = False; host= 'turbobit.net'
                            #if not host in hostDict: continue
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'language': 'en',
                                            'url': url, 'info': info, 'direct': direct, 'debridonly': True})
                        except:
                            pass
                except:
                    pass
            return sources
        except:
            return sources
def resolve(self, url):
return url | This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
|
half_ack_bolt.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''half ack bolt'''
from heronpy.api.bolt.bolt import Bolt
class HalfAckBolt(Bolt):
"""Half of data tuples will be acked and the other half will be failed"""
# pylint: disable=unused-argument
    def initialize(self, config, context):
        # Running count of data tuples seen; its parity decides whether a
        # tuple is acked or failed (see the process method below).
        self.total = 0
def | (self, tup):
self.total += 1
if self.total % 2 == 0:
self.logger.debug("Failing a tuple: %s" % str(tup))
self.fail(tup)
else:
self.logger.debug("Acking a tuple: %s" % str(tup))
self.ack(tup)
def process_tick(self, tup):
self.log("Got tick tuple!")
self.log("Total received: %d" % self.total)
| process |
address.rs | // Rust test file autogenerated with cargo build (src/build_spectests.rs).
// Please do NOT modify it by hand, as it will be reseted on next build.
// Test based on spectests/address.wast
#![allow(
warnings,
dead_code
)]
use std::panic;
use wabt::wat2wasm;
use crate::webassembly::{instantiate, compile, ImportObject, ResultObject, Instance, Export};
use super::_common::{
spectest_importobject,
NaNCheck,
};
// Line 3
fn create_module_1() -> ResultObject {
let module_str = "(module
(type (;0;) (func (param i32) (result i32)))
(type (;1;) (func (param i32)))
(func (;0;) (type 0) (param i32) (result i32)
get_local 0
i32.load8_u)
(func (;1;) (type 0) (param i32) (result i32)
get_local 0
i32.load8_u)
(func (;2;) (type 0) (param i32) (result i32)
get_local 0
i32.load8_u offset=1)
(func (;3;) (type 0) (param i32) (result i32)
get_local 0
i32.load8_u offset=2)
(func (;4;) (type 0) (param i32) (result i32)
get_local 0
i32.load8_u offset=25)
(func (;5;) (type 0) (param i32) (result i32)
get_local 0
i32.load8_s)
(func (;6;) (type 0) (param i32) (result i32)
get_local 0
i32.load8_s)
(func (;7;) (type 0) (param i32) (result i32)
get_local 0
i32.load8_s offset=1)
(func (;8;) (type 0) (param i32) (result i32)
get_local 0
i32.load8_s offset=2)
(func (;9;) (type 0) (param i32) (result i32)
get_local 0
i32.load8_s offset=25)
(func (;10;) (type 0) (param i32) (result i32)
get_local 0
i32.load16_u)
(func (;11;) (type 0) (param i32) (result i32)
get_local 0
i32.load16_u align=1)
(func (;12;) (type 0) (param i32) (result i32)
get_local 0
i32.load16_u offset=1 align=1)
(func (;13;) (type 0) (param i32) (result i32)
get_local 0
i32.load16_u offset=2)
(func (;14;) (type 0) (param i32) (result i32)
get_local 0
i32.load16_u offset=25)
(func (;15;) (type 0) (param i32) (result i32)
get_local 0
i32.load16_s)
(func (;16;) (type 0) (param i32) (result i32)
get_local 0
i32.load16_s align=1)
(func (;17;) (type 0) (param i32) (result i32)
get_local 0
i32.load16_s offset=1 align=1)
(func (;18;) (type 0) (param i32) (result i32)
get_local 0
i32.load16_s offset=2)
(func (;19;) (type 0) (param i32) (result i32)
get_local 0
i32.load16_s offset=25)
(func (;20;) (type 0) (param i32) (result i32)
get_local 0
i32.load)
(func (;21;) (type 0) (param i32) (result i32)
get_local 0
i32.load align=1)
(func (;22;) (type 0) (param i32) (result i32)
get_local 0
i32.load offset=1 align=1)
(func (;23;) (type 0) (param i32) (result i32)
get_local 0
i32.load offset=2 align=2)
(func (;24;) (type 0) (param i32) (result i32)
get_local 0
i32.load offset=25)
(func (;25;) (type 1) (param i32)
get_local 0
i32.load8_u offset=4294967295
drop)
(func (;26;) (type 1) (param i32)
get_local 0
i32.load8_s offset=4294967295
drop)
(func (;27;) (type 1) (param i32)
get_local 0
i32.load16_u offset=4294967295
drop)
(func (;28;) (type 1) (param i32)
get_local 0
i32.load16_s offset=4294967295
drop)
(func (;29;) (type 1) (param i32)
get_local 0
i32.load offset=4294967295
drop)
(memory (;0;) 1)
(export \"8u_good1\" (func 0))
(export \"8u_good2\" (func 1))
(export \"8u_good3\" (func 2))
(export \"8u_good4\" (func 3))
(export \"8u_good5\" (func 4))
(export \"8s_good1\" (func 5))
(export \"8s_good2\" (func 6))
(export \"8s_good3\" (func 7))
(export \"8s_good4\" (func 8))
(export \"8s_good5\" (func 9))
(export \"16u_good1\" (func 10))
(export \"16u_good2\" (func 11))
(export \"16u_good3\" (func 12))
(export \"16u_good4\" (func 13))
(export \"16u_good5\" (func 14))
(export \"16s_good1\" (func 15))
(export \"16s_good2\" (func 16))
(export \"16s_good3\" (func 17))
(export \"16s_good4\" (func 18))
(export \"16s_good5\" (func 19))
(export \"32_good1\" (func 20))
(export \"32_good2\" (func 21))
(export \"32_good3\" (func 22))
(export \"32_good4\" (func 23))
(export \"32_good5\" (func 24))
(export \"8u_bad\" (func 25))
(export \"8s_bad\" (func 26))
(export \"16u_bad\" (func 27))
(export \"16s_bad\" (func 28))
(export \"32_bad\" (func 29))
(data (;0;) (i32.const 0) \"abcdefghijklmnopqrstuvwxyz\"))
";
let wasm_binary = wat2wasm(module_str.as_bytes()).expect("WAST not valid or malformed");
instantiate(wasm_binary, spectest_importobject()).expect("WASM can't be instantiated")
}
fn start_module_1(result_object: &ResultObject) {
result_object.instance.start();
}
// Line 104
fn c1_l104_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c1_l104_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 97 as i32);
}
// Line 105
fn c2_l105_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c2_l105_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 97 as i32);
}
// Line 106
fn c3_l106_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c3_l106_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 98 as i32);
}
// Line 107
fn c4_l107_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c4_l107_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 99 as i32);
}
// Line 108
fn c5_l108_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c5_l108_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i32);
}
// Line 110
fn c6_l110_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c6_l110_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 97 as i32);
}
// Line 111
fn c7_l111_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c7_l111_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 97 as i32);
}
// Line 112
fn c8_l112_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c8_l112_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 98 as i32);
}
// Line 113
fn c9_l113_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c9_l113_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 99 as i32);
}
// Line 114
fn c10_l114_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c10_l114_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i32);
}
// Line 116
fn c11_l116_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c11_l116_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25185 as i32);
}
// Line 117
fn c12_l117_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c12_l117_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25185 as i32);
}
// Line 118
fn c13_l118_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c13_l118_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25442 as i32);
}
// Line 119
fn c14_l119_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c14_l119_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25699 as i32);
}
// Line 120
fn c15_l120_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c15_l120_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i32);
}
// Line 122
fn c16_l122_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c16_l122_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25185 as i32);
}
// Line 123
fn c17_l123_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c17_l123_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25185 as i32);
}
// Line 124
fn c18_l124_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c18_l124_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25442 as i32);
}
// Line 125
fn c19_l125_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c19_l125_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25699 as i32);
}
// Line 126
fn c20_l126_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c20_l126_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i32);
}
// Line 128
fn c21_l128_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c21_l128_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1684234849 as i32);
}
// Line 129
fn c22_l129_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c22_l129_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1684234849 as i32);
}
// Line 130
fn c23_l130_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c23_l130_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1701077858 as i32);
}
// Line 131
fn c24_l131_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c24_l131_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1717920867 as i32);
}
// Line 132
fn c25_l132_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c25_l132_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i32);
}
// Line 134
fn c26_l134_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c26_l134_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 135
fn c27_l135_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c27_l135_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 136
fn c28_l136_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c28_l136_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 137
fn c29_l137_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c29_l137_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 138
fn c30_l138_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c30_l138_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 140
fn c31_l140_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c31_l140_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 141
fn c32_l141_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c32_l141_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 142
fn c33_l142_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c33_l142_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 143
fn c34_l143_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c34_l143_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 144
fn c35_l144_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c35_l144_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 146
fn c36_l146_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c36_l146_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 147
fn c37_l147_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c37_l147_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 148
fn c38_l148_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c38_l148_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 149
fn c39_l149_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c39_l149_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 150
fn c40_l150_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c40_l150_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 152
fn c41_l152_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c41_l152_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 153
fn c42_l153_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c42_l153_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 154
fn c43_l154_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c43_l154_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 155
fn c44_l155_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c44_l155_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 156
fn c45_l156_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c45_l156_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 158
fn c46_l158_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c46_l158_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 159
fn c47_l159_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c47_l159_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 160
fn c48_l160_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c48_l160_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 161
fn c49_l161_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c49_l161_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 162
fn c50_l162_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c50_l162_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65507 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 164
fn c51_l164_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c51_l164_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 165
fn c52_l165_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c52_l165_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 166
fn c53_l166_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c53_l166_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 167
fn c54_l167_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c54_l167_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 168
fn c55_l168_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c55_l168_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 170
fn c56_l170_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c56_l170_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 171
fn c57_l171_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c57_l171_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 172
fn c58_l172_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c58_l172_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 173
fn c59_l173_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c59_l173_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 174
fn c60_l174_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c60_l174_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 176
fn c61_l176_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c61_l176_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 177
fn c62_l177_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c62_l177_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 178
fn c63_l178_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c63_l178_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 179
fn c64_l179_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c64_l179_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 180
fn c65_l180_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c65_l180_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 182
fn c66_l182_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c66_l182_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 183
fn c67_l183_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c67_l183_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 184
fn c68_l184_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c68_l184_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 185
fn c69_l185_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c69_l185_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 186
fn c70_l186_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c70_l186_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 188
fn c71_l188_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c71_l188_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 189
fn c72_l189_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c72_l189_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 190
fn c73_l190_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c73_l190_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 191
fn c74_l191_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c74_l191_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65508 as i32, &result_object.instance);
assert_eq!(result, 0 as i32);
}
// Line 192
/// Invokes the exported `32_good5` load at address 65508, where the
/// effective address runs past the end of memory; the call is expected to
/// trap (panic), which the companion `c75_l192_assert_trap` test catches.
fn c75_l192_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c75_l192_action_invoke");
    let func_index = match result_object.module.info.exports.get("32_good5") {
        Some(&Export::Function(index)) => index,
        _ => panic!("Function not found"),
    };
    let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
    // The original bound the `()` return to an unused `result` local;
    // call purely for the (trapping) side effect instead.
    invoke_fn(65508 as i32, &result_object.instance);
}
#[test]
fn c75_l192_assert_trap() {
    // The out-of-bounds load must trap; the trap surfaces as a panic, so
    // the invocation is run under catch_unwind and the error is asserted.
    let module = create_module_1();
    let outcome = panic::catch_unwind(|| c75_l192_action_invoke(&module));
    assert!(outcome.is_err());
}
// Line 194
/// Invokes the exported `8u_bad` load with address 0; the export's static
/// offset is presumably out of range (hence "_bad"), so the call is
/// expected to trap — the companion `c76_l194_assert_trap` test catches it.
fn c76_l194_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c76_l194_action_invoke");
    let func_index = match result_object.module.info.exports.get("8u_bad") {
        Some(&Export::Function(index)) => index,
        _ => panic!("Function not found"),
    };
    let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
    // The original bound the `()` return to an unused `result` local;
    // call purely for the (trapping) side effect instead.
    invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c76_l194_assert_trap() {
    // The bad-offset load must trap; the trap surfaces as a panic, so the
    // invocation is run under catch_unwind and the error is asserted.
    let module = create_module_1();
    let outcome = panic::catch_unwind(|| c76_l194_action_invoke(&module));
    assert!(outcome.is_err());
}
// Line 195
/// Invokes the exported `8s_bad` load with address 0; the export's static
/// offset is presumably out of range (hence "_bad"), so the call is
/// expected to trap — the companion `c77_l195_assert_trap` test catches it.
fn c77_l195_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c77_l195_action_invoke");
    let func_index = match result_object.module.info.exports.get("8s_bad") {
        Some(&Export::Function(index)) => index,
        _ => panic!("Function not found"),
    };
    let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
    // The original bound the `()` return to an unused `result` local;
    // call purely for the (trapping) side effect instead.
    invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c77_l195_assert_trap() {
    // The bad-offset load must trap; the trap surfaces as a panic, so the
    // invocation is run under catch_unwind and the error is asserted.
    let module = create_module_1();
    let outcome = panic::catch_unwind(|| c77_l195_action_invoke(&module));
    assert!(outcome.is_err());
}
// Line 196
/// Invokes the exported `16u_bad` load with address 0; the export's static
/// offset is presumably out of range (hence "_bad"), so the call is
/// expected to trap — the companion `c78_l196_assert_trap` test catches it.
fn c78_l196_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c78_l196_action_invoke");
    let func_index = match result_object.module.info.exports.get("16u_bad") {
        Some(&Export::Function(index)) => index,
        _ => panic!("Function not found"),
    };
    let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
    // The original bound the `()` return to an unused `result` local;
    // call purely for the (trapping) side effect instead.
    invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c78_l196_assert_trap() {
let result_object = create_module_1();
let result = panic::catch_unwind(|| {
c78_l196_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 197
fn c79_l197_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c79_l197_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c79_l197_assert_trap() {
let result_object = create_module_1();
let result = panic::catch_unwind(|| {
c79_l197_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 198
fn c80_l198_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c80_l198_action_invoke");
let func_index = match result_object.module.info.exports.get("32_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c80_l198_assert_trap() {
let result_object = create_module_1();
let result = panic::catch_unwind(|| {
c80_l198_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 200
fn c81_l200_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c81_l200_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c81_l200_assert_trap() {
let result_object = create_module_1();
let result = panic::catch_unwind(|| {
c81_l200_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 201
fn c82_l201_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c82_l201_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c82_l201_assert_trap() {
let result_object = create_module_1();
let result = panic::catch_unwind(|| {
c82_l201_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 202
fn c83_l202_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c83_l202_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c83_l202_assert_trap() {
let result_object = create_module_1();
let result = panic::catch_unwind(|| {
c83_l202_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 203
fn c84_l203_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c84_l203_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c84_l203_assert_trap() {
let result_object = create_module_1();
let result = panic::catch_unwind(|| {
c84_l203_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 204
fn c85_l204_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c85_l204_action_invoke");
let func_index = match result_object.module.info.exports.get("32_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c85_l204_assert_trap() {
let result_object = create_module_1();
let result = panic::catch_unwind(|| {
c85_l204_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 207
#[test]
fn c86_l207_assert_malformed() {
    // These bytes are the ASCII *text* form
    // "(memory 1)(func (drop (i32.load offset=4294967296 (i32.const 0))))",
    // not a binary module — and the offset also exceeds u32::MAX — so the
    // compiler must reject it.
    let wasm_binary = vec![
        40, 109, 101, 109, 111, 114, 121, 32, 49, 41, 40, 102, 117, 110, 99, 32, 40, 100, 114,
        111, 112, 32, 40, 105, 51, 50, 46, 108, 111, 97, 100, 32, 111, 102, 102, 115, 101, 116,
        61, 52, 50, 57, 52, 57, 54, 55, 50, 57, 54, 32, 40, 105, 51, 50, 46, 99, 111, 110, 115,
        116, 32, 48, 41, 41, 41, 41,
    ];
    let compilation = compile(wasm_binary);
    assert!(compilation.is_err(), "WASM should not compile as is malformed");
}
// Line 216
/// Drives every non-trapping action of module 1 in order against one fresh
/// instance. The trapping cases (c75–c85) run in their own #[test] functions
/// above because each of them must panic.
#[test]
fn test_module_1() {
let result_object = create_module_1();
// We group the calls together
start_module_1(&result_object);
c1_l104_action_invoke(&result_object);
c2_l105_action_invoke(&result_object);
c3_l106_action_invoke(&result_object);
c4_l107_action_invoke(&result_object);
c5_l108_action_invoke(&result_object);
c6_l110_action_invoke(&result_object);
c7_l111_action_invoke(&result_object);
c8_l112_action_invoke(&result_object);
c9_l113_action_invoke(&result_object);
c10_l114_action_invoke(&result_object);
c11_l116_action_invoke(&result_object);
c12_l117_action_invoke(&result_object);
c13_l118_action_invoke(&result_object);
c14_l119_action_invoke(&result_object);
c15_l120_action_invoke(&result_object);
c16_l122_action_invoke(&result_object);
c17_l123_action_invoke(&result_object);
c18_l124_action_invoke(&result_object);
c19_l125_action_invoke(&result_object);
c20_l126_action_invoke(&result_object);
c21_l128_action_invoke(&result_object);
c22_l129_action_invoke(&result_object);
c23_l130_action_invoke(&result_object);
c24_l131_action_invoke(&result_object);
c25_l132_action_invoke(&result_object);
c26_l134_action_invoke(&result_object);
c27_l135_action_invoke(&result_object);
c28_l136_action_invoke(&result_object);
c29_l137_action_invoke(&result_object);
c30_l138_action_invoke(&result_object);
c31_l140_action_invoke(&result_object);
c32_l141_action_invoke(&result_object);
c33_l142_action_invoke(&result_object);
c34_l143_action_invoke(&result_object);
c35_l144_action_invoke(&result_object);
c36_l146_action_invoke(&result_object);
c37_l147_action_invoke(&result_object);
c38_l148_action_invoke(&result_object);
c39_l149_action_invoke(&result_object);
c40_l150_action_invoke(&result_object);
c41_l152_action_invoke(&result_object);
c42_l153_action_invoke(&result_object);
c43_l154_action_invoke(&result_object);
c44_l155_action_invoke(&result_object);
c45_l156_action_invoke(&result_object);
c46_l158_action_invoke(&result_object);
c47_l159_action_invoke(&result_object);
c48_l160_action_invoke(&result_object);
c49_l161_action_invoke(&result_object);
c50_l162_action_invoke(&result_object);
c51_l164_action_invoke(&result_object);
c52_l165_action_invoke(&result_object);
c53_l166_action_invoke(&result_object);
c54_l167_action_invoke(&result_object);
c55_l168_action_invoke(&result_object);
c56_l170_action_invoke(&result_object);
c57_l171_action_invoke(&result_object);
c58_l172_action_invoke(&result_object);
c59_l173_action_invoke(&result_object);
c60_l174_action_invoke(&result_object);
c61_l176_action_invoke(&result_object);
c62_l177_action_invoke(&result_object);
c63_l178_action_invoke(&result_object);
c64_l179_action_invoke(&result_object);
c65_l180_action_invoke(&result_object);
c66_l182_action_invoke(&result_object);
c67_l183_action_invoke(&result_object);
c68_l184_action_invoke(&result_object);
c69_l185_action_invoke(&result_object);
c70_l186_action_invoke(&result_object);
c71_l188_action_invoke(&result_object);
c72_l189_action_invoke(&result_object);
c73_l190_action_invoke(&result_object);
c74_l191_action_invoke(&result_object);
}
/// Builds module 2 from its WAT text and instantiates it with the spectest
/// imports, panicking if either step fails.
///
/// The module has one 1-page memory whose first 26 bytes are the data segment
/// "abcdefghijklmnopqrstuvwxyz". It exports i64 loads of every width
/// (load8/16/32 signed and unsigned, plus full i64.load): the `*_good*`
/// exports use small offsets/alignments, while the `*_bad` exports use
/// offset=4294967295 and are exercised by the trap tests that follow.
fn create_module_2() -> ResultObject {
let module_str = "(module
(type (;0;) (func (param i32) (result i64)))
(type (;1;) (func (param i32)))
(func (;0;) (type 0) (param i32) (result i64)
get_local 0
i64.load8_u)
(func (;1;) (type 0) (param i32) (result i64)
get_local 0
i64.load8_u)
(func (;2;) (type 0) (param i32) (result i64)
get_local 0
i64.load8_u offset=1)
(func (;3;) (type 0) (param i32) (result i64)
get_local 0
i64.load8_u offset=2)
(func (;4;) (type 0) (param i32) (result i64)
get_local 0
i64.load8_u offset=25)
(func (;5;) (type 0) (param i32) (result i64)
get_local 0
i64.load8_s)
(func (;6;) (type 0) (param i32) (result i64)
get_local 0
i64.load8_s)
(func (;7;) (type 0) (param i32) (result i64)
get_local 0
i64.load8_s offset=1)
(func (;8;) (type 0) (param i32) (result i64)
get_local 0
i64.load8_s offset=2)
(func (;9;) (type 0) (param i32) (result i64)
get_local 0
i64.load8_s offset=25)
(func (;10;) (type 0) (param i32) (result i64)
get_local 0
i64.load16_u)
(func (;11;) (type 0) (param i32) (result i64)
get_local 0
i64.load16_u align=1)
(func (;12;) (type 0) (param i32) (result i64)
get_local 0
i64.load16_u offset=1 align=1)
(func (;13;) (type 0) (param i32) (result i64)
get_local 0
i64.load16_u offset=2)
(func (;14;) (type 0) (param i32) (result i64)
get_local 0
i64.load16_u offset=25)
(func (;15;) (type 0) (param i32) (result i64)
get_local 0
i64.load16_s)
(func (;16;) (type 0) (param i32) (result i64)
get_local 0
i64.load16_s align=1)
(func (;17;) (type 0) (param i32) (result i64)
get_local 0
i64.load16_s offset=1 align=1)
(func (;18;) (type 0) (param i32) (result i64)
get_local 0
i64.load16_s offset=2)
(func (;19;) (type 0) (param i32) (result i64)
get_local 0
i64.load16_s offset=25)
(func (;20;) (type 0) (param i32) (result i64)
get_local 0
i64.load32_u)
(func (;21;) (type 0) (param i32) (result i64)
get_local 0
i64.load32_u align=1)
(func (;22;) (type 0) (param i32) (result i64)
get_local 0
i64.load32_u offset=1 align=1)
(func (;23;) (type 0) (param i32) (result i64)
get_local 0
i64.load32_u offset=2 align=2)
(func (;24;) (type 0) (param i32) (result i64)
get_local 0
i64.load32_u offset=25)
(func (;25;) (type 0) (param i32) (result i64)
get_local 0
i64.load32_s)
(func (;26;) (type 0) (param i32) (result i64)
get_local 0
i64.load32_s align=1)
(func (;27;) (type 0) (param i32) (result i64)
get_local 0
i64.load32_s offset=1 align=1)
(func (;28;) (type 0) (param i32) (result i64)
get_local 0
i64.load32_s offset=2 align=2)
(func (;29;) (type 0) (param i32) (result i64)
get_local 0
i64.load32_s offset=25)
(func (;30;) (type 0) (param i32) (result i64)
get_local 0
i64.load)
(func (;31;) (type 0) (param i32) (result i64)
get_local 0
i64.load align=1)
(func (;32;) (type 0) (param i32) (result i64)
get_local 0
i64.load offset=1 align=1)
(func (;33;) (type 0) (param i32) (result i64)
get_local 0
i64.load offset=2 align=2)
(func (;34;) (type 0) (param i32) (result i64)
get_local 0
i64.load offset=25)
(func (;35;) (type 1) (param i32)
get_local 0
i64.load8_u offset=4294967295
drop)
(func (;36;) (type 1) (param i32)
get_local 0
i64.load8_s offset=4294967295
drop)
(func (;37;) (type 1) (param i32)
get_local 0
i64.load16_u offset=4294967295
drop)
(func (;38;) (type 1) (param i32)
get_local 0
i64.load16_s offset=4294967295
drop)
(func (;39;) (type 1) (param i32)
get_local 0
i64.load32_u offset=4294967295
drop)
(func (;40;) (type 1) (param i32)
get_local 0
i64.load32_s offset=4294967295
drop)
(func (;41;) (type 1) (param i32)
get_local 0
i64.load offset=4294967295
drop)
(memory (;0;) 1)
(export \"8u_good1\" (func 0))
(export \"8u_good2\" (func 1))
(export \"8u_good3\" (func 2))
(export \"8u_good4\" (func 3))
(export \"8u_good5\" (func 4))
(export \"8s_good1\" (func 5))
(export \"8s_good2\" (func 6))
(export \"8s_good3\" (func 7))
(export \"8s_good4\" (func 8))
(export \"8s_good5\" (func 9))
(export \"16u_good1\" (func 10))
(export \"16u_good2\" (func 11))
(export \"16u_good3\" (func 12))
(export \"16u_good4\" (func 13))
(export \"16u_good5\" (func 14))
(export \"16s_good1\" (func 15))
(export \"16s_good2\" (func 16))
(export \"16s_good3\" (func 17))
(export \"16s_good4\" (func 18))
(export \"16s_good5\" (func 19))
(export \"32u_good1\" (func 20))
(export \"32u_good2\" (func 21))
(export \"32u_good3\" (func 22))
(export \"32u_good4\" (func 23))
(export \"32u_good5\" (func 24))
(export \"32s_good1\" (func 25))
(export \"32s_good2\" (func 26))
(export \"32s_good3\" (func 27))
(export \"32s_good4\" (func 28))
(export \"32s_good5\" (func 29))
(export \"64_good1\" (func 30))
(export \"64_good2\" (func 31))
(export \"64_good3\" (func 32))
(export \"64_good4\" (func 33))
(export \"64_good5\" (func 34))
(export \"8u_bad\" (func 35))
(export \"8s_bad\" (func 36))
(export \"16u_bad\" (func 37))
(export \"16s_bad\" (func 38))
(export \"32u_bad\" (func 39))
(export \"32s_bad\" (func 40))
(export \"64_bad\" (func 41))
(data (;0;) (i32.const 0) \"abcdefghijklmnopqrstuvwxyz\"))
";
let wasm_binary = wat2wasm(module_str.as_bytes()).expect("WAST not valid or malformed");
instantiate(wasm_binary, spectest_importobject()).expect("WASM can't be instantiated")
}
/// Runs the start hook on module 2's instance before any actions are invoked.
fn start_module_2(result_object: &ResultObject) {
result_object.instance.start();
}
// Line 355
fn c88_l355_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c88_l355_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 97 as i64);
}
// Line 356
fn c89_l356_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c89_l356_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 97 as i64);
}
// Line 357
fn c90_l357_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c90_l357_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 98 as i64);
}
// Line 358
fn c91_l358_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c91_l358_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 99 as i64);
}
// Line 359
fn c92_l359_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c92_l359_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i64);
}
// Line 361
fn c93_l361_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c93_l361_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 97 as i64);
}
// Line 362
fn c94_l362_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c94_l362_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 97 as i64);
}
// Line 363
fn c95_l363_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c95_l363_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 98 as i64);
}
// Line 364
fn c96_l364_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c96_l364_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 99 as i64);
}
// Line 365
fn c97_l365_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c97_l365_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i64);
}
// Line 367
fn c98_l367_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c98_l367_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25185 as i64);
}
// Line 368
fn c99_l368_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c99_l368_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25185 as i64);
}
// Line 369
fn c100_l369_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c100_l369_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25442 as i64);
}
// Line 370
fn c101_l370_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c101_l370_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25699 as i64);
}
// Line 371
fn c102_l371_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c102_l371_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i64);
}
// Line 373
fn c103_l373_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c103_l373_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25185 as i64);
}
// Line 374
fn c104_l374_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c104_l374_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25185 as i64);
}
// Line 375
fn c105_l375_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c105_l375_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25442 as i64);
}
// Line 376
fn c106_l376_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c106_l376_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 25699 as i64);
}
// Line 377
fn c107_l377_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c107_l377_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i64);
}
// Line 379
fn c108_l379_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c108_l379_action_invoke");
let func_index = match result_object.module.info.exports.get("32u_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1684234849 as i64);
}
// Line 380
fn c109_l380_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c109_l380_action_invoke");
let func_index = match result_object.module.info.exports.get("32u_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1684234849 as i64);
}
// Line 381
fn c110_l381_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c110_l381_action_invoke");
let func_index = match result_object.module.info.exports.get("32u_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1701077858 as i64);
}
// Line 382
fn c111_l382_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c111_l382_action_invoke");
let func_index = match result_object.module.info.exports.get("32u_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1717920867 as i64);
}
// Line 383
fn c112_l383_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c112_l383_action_invoke");
let func_index = match result_object.module.info.exports.get("32u_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i64);
}
// Line 385
fn c113_l385_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c113_l385_action_invoke");
let func_index = match result_object.module.info.exports.get("32s_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1684234849 as i64);
}
// Line 386
fn c114_l386_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c114_l386_action_invoke");
let func_index = match result_object.module.info.exports.get("32s_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1684234849 as i64);
}
// Line 387
fn c115_l387_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c115_l387_action_invoke");
let func_index = match result_object.module.info.exports.get("32s_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1701077858 as i64);
}
// Line 388
fn c116_l388_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c116_l388_action_invoke");
let func_index = match result_object.module.info.exports.get("32s_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 1717920867 as i64);
}
// Line 389
fn c117_l389_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c117_l389_action_invoke");
let func_index = match result_object.module.info.exports.get("32s_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i64);
}
// Line 391
fn c118_l391_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c118_l391_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 7523094288207667809 as i64);
}
// Line 392
fn c119_l392_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c119_l392_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 7523094288207667809 as i64);
}
// Line 393
fn c120_l393_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c120_l393_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 7595434461045744482 as i64);
}
// Line 394
fn c121_l394_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c121_l394_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 7667774633883821155 as i64);
}
// Line 395
fn c122_l395_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c122_l395_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 122 as i64);
}
// Line 397
fn c123_l397_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c123_l397_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8u_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65503 and require the expected result.
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 398
fn c124_l398_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c124_l398_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8u_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 399
fn c125_l399_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c125_l399_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8u_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 400
fn c126_l400_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c126_l400_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8u_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 401
fn c127_l401_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c127_l401_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8u_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 403
fn c128_l403_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c128_l403_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8s_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65503 and require the expected result.
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 404
fn c129_l404_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c129_l404_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8s_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 405
fn c130_l405_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c130_l405_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8s_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 406
fn c131_l406_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c131_l406_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8s_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 407
fn c132_l407_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c132_l407_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8s_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 409
fn c133_l409_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c133_l409_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16u_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65503 and require the expected result.
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 410
fn c134_l410_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c134_l410_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16u_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 411
fn c135_l411_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c135_l411_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16u_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 412
fn c136_l412_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c136_l412_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16u_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 413
fn c137_l413_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c137_l413_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16u_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 415
fn c138_l415_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c138_l415_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16s_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65503 and require the expected result.
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 416
fn c139_l416_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c139_l416_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16s_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 417
fn c140_l417_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c140_l417_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16s_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 418
fn c141_l418_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c141_l418_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16s_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 419
fn c142_l419_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c142_l419_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16s_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 421
fn c143_l421_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c143_l421_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32u_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65503 and require the expected result.
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 422
fn c144_l422_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c144_l422_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32u_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 423
fn c145_l423_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c145_l423_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32u_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 424
fn c146_l424_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c146_l424_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32u_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 425
fn c147_l425_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c147_l425_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32u_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 427
fn c148_l427_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c148_l427_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32s_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65503 and require the expected result.
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 428
fn c149_l428_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c149_l428_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32s_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 429
fn c150_l429_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c150_l429_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32s_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 430
fn c151_l430_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c151_l430_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32s_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 431
fn c152_l431_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c152_l431_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32s_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 433
fn c153_l433_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c153_l433_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("64_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65503 and require the expected result.
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 434
fn c154_l434_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c154_l434_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("64_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 435
fn c155_l435_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c155_l435_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("64_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 436
fn c156_l436_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c156_l436_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("64_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 437
fn c157_l437_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c157_l437_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("64_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65503 as i32, &result_object.instance), 0 as i64);
}
// Line 439
fn c158_l439_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c158_l439_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8u_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65504 and require the expected result.
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 440
fn c159_l440_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c159_l440_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8u_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 441
fn c160_l441_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c160_l441_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8u_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 442
fn c161_l442_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c161_l442_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8u_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 443
fn c162_l443_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c162_l443_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8u_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 445
fn c163_l445_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c163_l445_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8s_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65504 and require the expected result.
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 446
fn c164_l446_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c164_l446_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8s_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 447
fn c165_l447_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c165_l447_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8s_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 448
fn c166_l448_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c166_l448_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8s_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 449
fn c167_l449_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c167_l449_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("8s_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 451
fn c168_l451_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c168_l451_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16u_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65504 and require the expected result.
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 452
fn c169_l452_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c169_l452_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16u_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 453
fn c170_l453_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c170_l453_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16u_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 454
fn c171_l454_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c171_l454_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16u_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 455
fn c172_l455_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c172_l455_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16u_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 457
fn c173_l457_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c173_l457_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16s_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65504 and require the expected result.
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 458
fn c174_l458_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c174_l458_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16s_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 459
fn c175_l459_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c175_l459_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16s_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 460
fn c176_l460_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c176_l460_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16s_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 461
fn c177_l461_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c177_l461_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("16s_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 463
fn c178_l463_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c178_l463_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32u_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65504 and require the expected result.
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 464
fn c179_l464_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c179_l464_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32u_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 465
fn c180_l465_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c180_l465_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32u_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 466
fn c181_l466_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c181_l466_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32u_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 467
fn c182_l467_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c182_l467_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32u_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 469
fn c183_l469_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c183_l469_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32s_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65504 and require the expected result.
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 470
fn c184_l470_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c184_l470_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32s_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 471
fn c185_l471_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c185_l471_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32s_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 472
fn c186_l472_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c186_l472_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32s_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 473
fn c187_l473_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c187_l473_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("32s_good5") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 475
fn c188_l475_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c188_l475_action_invoke");
    // Look up the exported function's index; fail loudly if it is missing.
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("64_good1") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    // Invoke with argument 65504 and require the expected result.
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 476
fn c189_l476_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c189_l476_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("64_good2") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 477
fn c190_l477_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c190_l477_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("64_good3") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 478
fn c191_l478_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c191_l478_action_invoke");
    let idx = if let Some(&Export::Function(i)) = result_object.module.info.exports.get("64_good4") {
        i
    } else {
        panic!("Function not found")
    };
    let callee: fn(i32, &Instance) -> i64 = get_instance_function!(result_object.instance, idx);
    assert_eq!(callee(65504 as i32, &result_object.instance), 0 as i64);
}
// Line 479
fn c192_l479_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c192_l479_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65504 as i32, &result_object.instance);
}
#[test]
fn c192_l479_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c192_l479_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 481
fn c193_l481_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c193_l481_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c193_l481_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c193_l481_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 482
fn c194_l482_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c194_l482_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c194_l482_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c194_l482_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 483
fn c195_l483_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c195_l483_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c195_l483_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c195_l483_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 484
fn c196_l484_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c196_l484_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c196_l484_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c196_l484_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 485
fn c197_l485_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c197_l485_action_invoke");
let func_index = match result_object.module.info.exports.get("32u_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c197_l485_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c197_l485_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 486
fn c198_l486_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c198_l486_action_invoke");
let func_index = match result_object.module.info.exports.get("32s_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c198_l486_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c198_l486_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 487
fn c199_l487_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c199_l487_action_invoke");
let func_index = match result_object.module.info.exports.get("64_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c199_l487_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c199_l487_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 489
fn c200_l489_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c200_l489_action_invoke");
let func_index = match result_object.module.info.exports.get("8u_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c200_l489_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c200_l489_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 490
fn c201_l490_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c201_l490_action_invoke");
let func_index = match result_object.module.info.exports.get("8s_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c201_l490_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c201_l490_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 491
fn c202_l491_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c202_l491_action_invoke");
let func_index = match result_object.module.info.exports.get("16u_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c202_l491_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c202_l491_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 492
fn c203_l492_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c203_l492_action_invoke");
let func_index = match result_object.module.info.exports.get("16s_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c203_l492_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c203_l492_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 493
fn c204_l493_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c204_l493_action_invoke");
let func_index = match result_object.module.info.exports.get("32u_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c204_l493_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c204_l493_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 494
fn c205_l494_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c205_l494_action_invoke");
let func_index = match result_object.module.info.exports.get("32s_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c205_l494_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c205_l494_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 495
fn c206_l495_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c206_l495_action_invoke");
let func_index = match result_object.module.info.exports.get("64_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c206_l495_assert_trap() {
let result_object = create_module_2();
let result = panic::catch_unwind(|| {
c206_l495_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 499
#[test]
fn test_module_2() {
let result_object = create_module_2();
// We group the calls together
start_module_2(&result_object);
c88_l355_action_invoke(&result_object);
c89_l356_action_invoke(&result_object);
c90_l357_action_invoke(&result_object);
c91_l358_action_invoke(&result_object);
c92_l359_action_invoke(&result_object);
c93_l361_action_invoke(&result_object);
c94_l362_action_invoke(&result_object);
c95_l363_action_invoke(&result_object);
c96_l364_action_invoke(&result_object);
c97_l365_action_invoke(&result_object);
c98_l367_action_invoke(&result_object);
c99_l368_action_invoke(&result_object);
c100_l369_action_invoke(&result_object);
c101_l370_action_invoke(&result_object);
c102_l371_action_invoke(&result_object);
c103_l373_action_invoke(&result_object);
c104_l374_action_invoke(&result_object);
c105_l375_action_invoke(&result_object);
c106_l376_action_invoke(&result_object);
c107_l377_action_invoke(&result_object);
c108_l379_action_invoke(&result_object);
c109_l380_action_invoke(&result_object);
c110_l381_action_invoke(&result_object);
c111_l382_action_invoke(&result_object);
c112_l383_action_invoke(&result_object);
c113_l385_action_invoke(&result_object);
c114_l386_action_invoke(&result_object);
c115_l387_action_invoke(&result_object);
c116_l388_action_invoke(&result_object);
c117_l389_action_invoke(&result_object);
c118_l391_action_invoke(&result_object);
c119_l392_action_invoke(&result_object);
c120_l393_action_invoke(&result_object);
c121_l394_action_invoke(&result_object);
c122_l395_action_invoke(&result_object);
c123_l397_action_invoke(&result_object);
c124_l398_action_invoke(&result_object);
c125_l399_action_invoke(&result_object);
c126_l400_action_invoke(&result_object);
c127_l401_action_invoke(&result_object);
c128_l403_action_invoke(&result_object);
c129_l404_action_invoke(&result_object);
c130_l405_action_invoke(&result_object);
c131_l406_action_invoke(&result_object);
c132_l407_action_invoke(&result_object);
c133_l409_action_invoke(&result_object);
c134_l410_action_invoke(&result_object);
c135_l411_action_invoke(&result_object);
c136_l412_action_invoke(&result_object);
c137_l413_action_invoke(&result_object);
c138_l415_action_invoke(&result_object);
c139_l416_action_invoke(&result_object);
c140_l417_action_invoke(&result_object);
c141_l418_action_invoke(&result_object);
c142_l419_action_invoke(&result_object);
c143_l421_action_invoke(&result_object);
c144_l422_action_invoke(&result_object);
c145_l423_action_invoke(&result_object);
c146_l424_action_invoke(&result_object);
c147_l425_action_invoke(&result_object);
c148_l427_action_invoke(&result_object);
c149_l428_action_invoke(&result_object);
c150_l429_action_invoke(&result_object);
c151_l430_action_invoke(&result_object);
c152_l431_action_invoke(&result_object);
c153_l433_action_invoke(&result_object);
c154_l434_action_invoke(&result_object);
c155_l435_action_invoke(&result_object);
c156_l436_action_invoke(&result_object);
c157_l437_action_invoke(&result_object);
c158_l439_action_invoke(&result_object);
c159_l440_action_invoke(&result_object);
c160_l441_action_invoke(&result_object);
c161_l442_action_invoke(&result_object);
c162_l443_action_invoke(&result_object);
c163_l445_action_invoke(&result_object);
c164_l446_action_invoke(&result_object);
c165_l447_action_invoke(&result_object);
c166_l448_action_invoke(&result_object);
c167_l449_action_invoke(&result_object);
c168_l451_action_invoke(&result_object);
c169_l452_action_invoke(&result_object);
c170_l453_action_invoke(&result_object);
c171_l454_action_invoke(&result_object);
c172_l455_action_invoke(&result_object);
c173_l457_action_invoke(&result_object);
c174_l458_action_invoke(&result_object);
c175_l459_action_invoke(&result_object);
c176_l460_action_invoke(&result_object);
c177_l461_action_invoke(&result_object);
c178_l463_action_invoke(&result_object);
c179_l464_action_invoke(&result_object);
c180_l465_action_invoke(&result_object);
c181_l466_action_invoke(&result_object);
c182_l467_action_invoke(&result_object);
c183_l469_action_invoke(&result_object);
c184_l470_action_invoke(&result_object);
c185_l471_action_invoke(&result_object);
c186_l472_action_invoke(&result_object);
c187_l473_action_invoke(&result_object);
c188_l475_action_invoke(&result_object);
c189_l476_action_invoke(&result_object);
c190_l477_action_invoke(&result_object);
c191_l478_action_invoke(&result_object);
}
// Builds the third test module: six exports that perform `f32.load` with
// varying static offset/alignment over a single-page (64 KiB) memory.
// "32_good1".."32_good5" are in-bounds variants; "32_bad" uses
// offset=4294967295 and must trap on any address. The data segment seeds
// bytes 0..12: four zero bytes, then the f32 NaN pattern 0x7fa00000 at
// offset 4 and 0x7fd00001 at offset 8 (read back by the "32_good5" tests).
fn create_module_3() -> ResultObject {
let module_str = "(module
(type (;0;) (func (param i32) (result f32)))
(type (;1;) (func (param i32)))
(func (;0;) (type 0) (param i32) (result f32)
get_local 0
f32.load)
(func (;1;) (type 0) (param i32) (result f32)
get_local 0
f32.load align=1)
(func (;2;) (type 0) (param i32) (result f32)
get_local 0
f32.load offset=1 align=1)
(func (;3;) (type 0) (param i32) (result f32)
get_local 0
f32.load offset=2 align=2)
(func (;4;) (type 0) (param i32) (result f32)
get_local 0
f32.load offset=8)
(func (;5;) (type 1) (param i32)
get_local 0
f32.load offset=4294967295
drop)
(memory (;0;) 1)
(export \"32_good1\" (func 0))
(export \"32_good2\" (func 1))
(export \"32_good3\" (func 2))
(export \"32_good4\" (func 3))
(export \"32_good5\" (func 4))
(export \"32_bad\" (func 5))
(data (;0;) (i32.const 0) \"\\00\\00\\00\\00\\00\\00\\a0\\7f\\01\\00\\d0\\7f\"))
";
// Assemble the WAT text to a binary and instantiate with the spectest
// imports; this module is valid, so both steps are expected to succeed.
let wasm_binary = wat2wasm(module_str.as_bytes()).expect("WAST not valid or malformed");
instantiate(wasm_binary, spectest_importobject()).expect("WASM can't be instantiated")
}
// Runs module 3's start function (if any) before the action invokes execute.
fn start_module_3(result_object: &ResultObject) {
result_object.instance.start();
}
// Line 523
// Invoke the exported "32_good1" (plain `f32.load`) at address 0 and require
// that it reads the zero bytes seeded there, i.e. returns 0.0.
fn c208_l523_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c208_l523_action_invoke");
    let export = result_object.module.info.exports.get("32_good1");
    let func_index = if let Some(&Export::Function(index)) = export {
        index
    } else {
        panic!("Function not found")
    };
    let invoke_fn: fn(i32, &Instance) -> f32 =
        get_instance_function!(result_object.instance, func_index);
    assert_eq!(invoke_fn(0 as i32, &result_object.instance), 0.0 as f32);
}
// Line 524
fn c209_l524_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c209_l524_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 525
fn c210_l525_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c210_l525_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 526
fn c211_l526_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c211_l526_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 527
fn c212_l527_action_invoke(result_object: &ResultObject) {
// "32_good5" is `f32.load offset=8`; at address 0 it reads data-segment
// bytes 8..12 (01 00 d0 7f little-endian), i.e. the NaN bit pattern
// 0x7fd00001. Only NaN-ness and the sign bit are asserted — the exact NaN
// payload is implementation-detail and not compared.
println!("Executing function {}", "c212_l527_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert!(result.is_nan());
// 2144337921 == 0x7fd00001: a positive NaN, so the expected sign is +.
assert_eq!(result.is_sign_positive(), (f32::from_bits(2144337921)).is_sign_positive());
}
// Line 529
fn c213_l529_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c213_l529_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65524 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 530
fn c214_l530_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c214_l530_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65524 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 531
fn c215_l531_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c215_l531_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65524 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 532
fn c216_l532_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c216_l532_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65524 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 533
fn c217_l533_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c217_l533_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65524 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 535
fn c218_l535_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c218_l535_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65525 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 536
fn c219_l536_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c219_l536_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65525 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 537
fn c220_l537_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c220_l537_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65525 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 538
fn c221_l538_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c221_l538_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f32 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65525 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f32);
}
// Line 539
fn c222_l539_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c222_l539_action_invoke");
let func_index = match result_object.module.info.exports.get("32_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65525 as i32, &result_object.instance);
}
#[test]
fn c222_l539_assert_trap() {
// "32_good5" performs `f32.load offset=8`; at base 65525 the 4-byte access
// (65525 + 8 + 4 > 65536) runs past the one-page memory, so the invoke must
// trap. The trap surfaces as a panic, observed via catch_unwind.
let result_object = create_module_3();
let result = panic::catch_unwind(|| {
c222_l539_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 541
fn c223_l541_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c223_l541_action_invoke");
let func_index = match result_object.module.info.exports.get("32_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c223_l541_assert_trap() {
let result_object = create_module_3();
let result = panic::catch_unwind(|| {
c223_l541_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 542
fn c224_l542_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c224_l542_action_invoke");
let func_index = match result_object.module.info.exports.get("32_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c224_l542_assert_trap() {
let result_object = create_module_3();
let result = panic::catch_unwind(|| {
c224_l542_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 546
// Line 546
// Runs all non-trapping actions of module 3 against a single shared
// instance. The trapping cases (c222..c224) are separate #[test] functions,
// each instantiating its own module so the caught panic cannot poison this
// shared instance.
#[test]
fn test_module_3() {
let result_object = create_module_3();
// We group the calls together
start_module_3(&result_object);
c208_l523_action_invoke(&result_object);
c209_l524_action_invoke(&result_object);
c210_l525_action_invoke(&result_object);
c211_l526_action_invoke(&result_object);
c212_l527_action_invoke(&result_object);
c213_l529_action_invoke(&result_object);
c214_l530_action_invoke(&result_object);
c215_l531_action_invoke(&result_object);
c216_l532_action_invoke(&result_object);
c217_l533_action_invoke(&result_object);
c218_l535_action_invoke(&result_object);
c219_l536_action_invoke(&result_object);
c220_l537_action_invoke(&result_object);
c221_l538_action_invoke(&result_object);
}
// Builds the fourth test module: six exports performing `f64.load` with
// varying static offset/alignment over a single-page (64 KiB) memory.
// "64_good1".."64_good5" are in-bounds variants; "64_bad" uses
// offset=4294967295 and must trap on any address. The data segment seeds
// NaN bit patterns so that "64_good5" (offset=18) reads bytes 18..26,
// i.e. the f64 pattern 0x7ffc000000000001 checked by the c230 test.
fn create_module_4() -> ResultObject {
let module_str = "(module
(type (;0;) (func (param i32) (result f64)))
(type (;1;) (func (param i32)))
(func (;0;) (type 0) (param i32) (result f64)
get_local 0
f64.load)
(func (;1;) (type 0) (param i32) (result f64)
get_local 0
f64.load align=1)
(func (;2;) (type 0) (param i32) (result f64)
get_local 0
f64.load offset=1 align=1)
(func (;3;) (type 0) (param i32) (result f64)
get_local 0
f64.load offset=2 align=2)
(func (;4;) (type 0) (param i32) (result f64)
get_local 0
f64.load offset=18)
(func (;5;) (type 1) (param i32)
get_local 0
f64.load offset=4294967295
drop)
(memory (;0;) 1)
(export \"64_good1\" (func 0))
(export \"64_good2\" (func 1))
(export \"64_good3\" (func 2))
(export \"64_good4\" (func 3))
(export \"64_good5\" (func 4))
(export \"64_bad\" (func 5))
(data (;0;) (i32.const 0) \"\\00\\00\\00\\00\\00\\00\\00\\00\\00\\00\\00\\00\\00\\00\\00\\00\\f4\\7f\\01\\00\\00\\00\\00\\00\\fc\\7f\"))
";
// Assemble the WAT text to a binary and instantiate with the spectest
// imports; this module is valid, so both steps are expected to succeed.
let wasm_binary = wat2wasm(module_str.as_bytes()).expect("WAST not valid or malformed");
instantiate(wasm_binary, spectest_importobject()).expect("WASM can't be instantiated")
}
// Runs module 4's start function (if any) before the action invokes execute.
fn start_module_4(result_object: &ResultObject) {
result_object.instance.start();
}
// Line 570
fn c226_l570_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c226_l570_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 571
fn c227_l571_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c227_l571_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 572
fn c228_l572_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c228_l572_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 573
fn c229_l573_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c229_l573_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 574
fn c230_l574_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c230_l574_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
assert!(result.is_nan());
assert_eq!(result.is_sign_positive(), (f64::from_bits(9222246136947933185)).is_sign_positive());
}
// Line 576
fn c231_l576_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c231_l576_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65510 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 577
fn c232_l577_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c232_l577_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65510 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 578
fn c233_l578_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c233_l578_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65510 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 579
fn c234_l579_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c234_l579_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65510 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 580
fn c235_l580_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c235_l580_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65510 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 582
fn c236_l582_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c236_l582_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good1") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65511 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 583
fn c237_l583_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c237_l583_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good2") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65511 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 584
fn c238_l584_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c238_l584_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good3") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65511 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 585
fn c239_l585_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c239_l585_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good4") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) -> f64 = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65511 as i32, &result_object.instance);
assert_eq!(result, 0.0 as f64);
}
// Line 586
fn c240_l586_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c240_l586_action_invoke");
let func_index = match result_object.module.info.exports.get("64_good5") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(65511 as i32, &result_object.instance);
}
#[test]
fn c240_l586_assert_trap() {
let result_object = create_module_4();
let result = panic::catch_unwind(|| {
c240_l586_action_invoke(&result_object); | assert!(result.is_err());
}
// Line 588
fn c241_l588_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c241_l588_action_invoke");
let func_index = match result_object.module.info.exports.get("64_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(0 as i32, &result_object.instance);
}
#[test]
fn c241_l588_assert_trap() {
let result_object = create_module_4();
let result = panic::catch_unwind(|| {
c241_l588_action_invoke(&result_object);
});
assert!(result.is_err());
}
// Line 589
fn c242_l589_action_invoke(result_object: &ResultObject) {
println!("Executing function {}", "c242_l589_action_invoke");
let func_index = match result_object.module.info.exports.get("64_bad") {
Some(&Export::Function(index)) => index,
_ => panic!("Function not found"),
};
let invoke_fn: fn(i32, &Instance) = get_instance_function!(result_object.instance, func_index);
let result = invoke_fn(1 as i32, &result_object.instance);
}
#[test]
fn c242_l589_assert_trap() {
let result_object = create_module_4();
let result = panic::catch_unwind(|| {
c242_l589_action_invoke(&result_object);
});
assert!(result.is_err());
}
#[test]
fn test_module_4() {
let result_object = create_module_4();
// We group the calls together
start_module_4(&result_object);
c226_l570_action_invoke(&result_object);
c227_l571_action_invoke(&result_object);
c228_l572_action_invoke(&result_object);
c229_l573_action_invoke(&result_object);
c230_l574_action_invoke(&result_object);
c231_l576_action_invoke(&result_object);
c232_l577_action_invoke(&result_object);
c233_l578_action_invoke(&result_object);
c234_l579_action_invoke(&result_object);
c235_l580_action_invoke(&result_object);
c236_l582_action_invoke(&result_object);
c237_l583_action_invoke(&result_object);
c238_l584_action_invoke(&result_object);
c239_l585_action_invoke(&result_object);
} | }); |
regular.rs | // Copyright 2020-2021 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use crate::{
constants::{INPUT_OUTPUT_COUNT_RANGE, IOTA_SUPPLY},
input::Input,
output::Output,
payload::{option_payload_pack, option_payload_packed_len, option_payload_unpack, Payload},
Error,
};
use bee_common::packable::{Packable, Read, Write};
use iterator_sorted::is_sorted;
use alloc::{boxed::Box, vec::Vec};
/// A transaction regular essence consuming inputs, creating outputs and carrying an optional payload.
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct RegularEssence {
inputs: Box<[Input]>,
outputs: Box<[Output]>,
payload: Option<Payload>,
}
impl RegularEssence {
/// The essence kind of a `RegularEssence`
pub const KIND: u8 = 0;
/// Create a new `RegularEssenceBuilder` to build a `RegularEssence`.
pub fn builder() -> RegularEssenceBuilder {
RegularEssenceBuilder::new()
}
/// Return the inputs of a `RegularEssence`.
pub fn inputs(&self) -> &[Input] {
&self.inputs
}
/// Return the outputs of a `RegularEssence`.
pub fn outputs(&self) -> &[Output] {
&self.outputs
}
/// Return the optional payload of a `RegularEssence`.
pub fn payload(&self) -> &Option<Payload> {
&self.payload
}
}
impl Packable for RegularEssence {
type Error = Error;
fn packed_len(&self) -> usize {
0u16.packed_len()
+ self.inputs.iter().map(Packable::packed_len).sum::<usize>()
+ 0u16.packed_len()
+ self.outputs.iter().map(Packable::packed_len).sum::<usize>()
+ option_payload_packed_len(self.payload.as_ref())
}
fn pack<W: Write>(&self, writer: &mut W) -> Result<(), Self::Error> {
(self.inputs.len() as u16).pack(writer)?;
for input in self.inputs.iter() {
input.pack(writer)?;
}
(self.outputs.len() as u16).pack(writer)?;
for output in self.outputs.iter() {
output.pack(writer)?;
}
option_payload_pack(writer, self.payload.as_ref())?;
Ok(())
}
fn unpack_inner<R: Read + ?Sized, const CHECK: bool>(reader: &mut R) -> Result<Self, Self::Error> |
}
/// A builder to build a `RegularEssence`.
#[derive(Debug, Default)]
#[must_use]
pub struct RegularEssenceBuilder {
inputs: Vec<Input>,
outputs: Vec<Output>,
payload: Option<Payload>,
}
impl RegularEssenceBuilder {
/// Creates a new `RegularEssenceBuilder`.
pub fn new() -> Self {
Self::default()
}
/// Adds inputs to a `RegularEssenceBuilder`
pub fn with_inputs(mut self, inputs: Vec<Input>) -> Self {
self.inputs = inputs;
self
}
/// Add an input to a `RegularEssenceBuilder`.
pub fn add_input(mut self, input: Input) -> Self {
self.inputs.push(input);
self
}
/// Add outputs to a `RegularEssenceBuilder`.
pub fn with_outputs(mut self, outputs: Vec<Output>) -> Self {
self.outputs = outputs;
self
}
/// Add an output to a `RegularEssenceBuilder`.
pub fn add_output(mut self, output: Output) -> Self {
self.outputs.push(output);
self
}
/// Add a payload to a `RegularEssenceBuilder`.
pub fn with_payload(mut self, payload: Payload) -> Self {
self.payload = Some(payload);
self
}
/// Finishes a `RegularEssenceBuilder` into a `RegularEssence`.
pub fn finish(self) -> Result<RegularEssence, Error> {
if !INPUT_OUTPUT_COUNT_RANGE.contains(&self.inputs.len()) {
return Err(Error::InvalidInputOutputCount(self.inputs.len()));
}
if !INPUT_OUTPUT_COUNT_RANGE.contains(&self.outputs.len()) {
return Err(Error::InvalidInputOutputCount(self.outputs.len()));
}
if !matches!(self.payload, None | Some(Payload::Indexation(_))) {
// Unwrap is fine because we just checked that the Option is not None.
return Err(Error::InvalidPayloadKind(self.payload.unwrap().kind()));
}
for input in self.inputs.iter() {
match input {
Input::Utxo(u) => {
if self.inputs.iter().filter(|i| *i == input).count() > 1 {
return Err(Error::DuplicateUtxo(u.clone()));
}
}
_ => return Err(Error::InvalidInputKind(input.kind())),
}
}
// Inputs must be lexicographically sorted in their serialised forms.
if !is_sorted(self.inputs.iter().map(Packable::pack_new)) {
return Err(Error::TransactionInputsNotSorted);
}
let mut total: u64 = 0;
for output in self.outputs.iter() {
match output {
Output::SignatureLockedSingle(single) => {
// The addresses must be unique in the set of SignatureLockedSingleOutputs.
if self
.outputs
.iter()
.filter(|o| matches!(o, Output::SignatureLockedSingle(s) if s.address() == single.address()))
.count()
> 1
{
return Err(Error::DuplicateAddress(*single.address()));
}
total = total
.checked_add(single.amount())
.ok_or_else(|| Error::InvalidAccumulatedOutput((total + single.amount()) as u128))?;
}
Output::SignatureLockedDustAllowance(dust_allowance) => {
// The addresses must be unique in the set of SignatureLockedDustAllowanceOutputs.
if self
.outputs
.iter()
.filter(
|o| matches!(o, Output::SignatureLockedDustAllowance(s) if s.address() == dust_allowance.address()),
)
.count()
> 1
{
return Err(Error::DuplicateAddress(*dust_allowance.address()));
}
total = total.checked_add(dust_allowance.amount()).ok_or_else(|| {
Error::InvalidAccumulatedOutput(total as u128 + dust_allowance.amount() as u128)
})?;
}
_ => return Err(Error::InvalidOutputKind(output.kind())),
}
// Accumulated output balance must not exceed the total supply of tokens.
if total > IOTA_SUPPLY {
return Err(Error::InvalidAccumulatedOutput(total as u128));
}
}
// Outputs must be lexicographically sorted in their serialised forms.
if !is_sorted(self.outputs.iter().map(Packable::pack_new)) {
return Err(Error::TransactionOutputsNotSorted);
}
Ok(RegularEssence {
inputs: self.inputs.into_boxed_slice(),
outputs: self.outputs.into_boxed_slice(),
payload: self.payload,
})
}
}
| {
let inputs_len = u16::unpack_inner::<R, CHECK>(reader)? as usize;
if CHECK && !INPUT_OUTPUT_COUNT_RANGE.contains(&inputs_len) {
return Err(Error::InvalidInputOutputCount(inputs_len));
}
let mut inputs = Vec::with_capacity(inputs_len);
for _ in 0..inputs_len {
inputs.push(Input::unpack_inner::<R, CHECK>(reader)?);
}
let outputs_len = u16::unpack_inner::<R, CHECK>(reader)? as usize;
if CHECK && !INPUT_OUTPUT_COUNT_RANGE.contains(&outputs_len) {
return Err(Error::InvalidInputOutputCount(outputs_len));
}
let mut outputs = Vec::with_capacity(outputs_len);
for _ in 0..outputs_len {
outputs.push(Output::unpack_inner::<R, CHECK>(reader)?);
}
let mut builder = Self::builder().with_inputs(inputs).with_outputs(outputs);
if let (_, Some(payload)) = option_payload_unpack::<R, CHECK>(reader)? {
builder = builder.with_payload(payload);
}
builder.finish()
} |
about-routing.module.ts | /*
Authors : initappz (Rahul Jograna)
Website : https://initappz.com/ | This App Template Source code is licensed as per the
terms found in the Website https://initappz.com/license
Copyright and Good Faith Purchasers © 2020-present initappz.
*/
import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { AboutComponent } from './about.component';
const routes: Routes = [
{
path: '',
component: AboutComponent
}
];
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule]
})
export class AboutRoutingModule { } | App Name : ionic 5 groceryee app
Created : 10-Sep-2020 |
dense_features_v2.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A layer that produces a dense `Tensor` based on given `feature_columns`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras.feature_column import base_feature_layer as kfc
from keras.feature_column import dense_features
from keras.utils import tf_contextlib
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.DenseFeatures", v1=[])
class DenseFeatures(dense_features.DenseFeatures):
"""A layer that produces a dense `Tensor` based on given `feature_columns`.
Generally a single example in training data is described with
FeatureColumns. At the first layer of the model, this column oriented data
should be converted to a single `Tensor`.
This layer can be called multiple times with different features.
This is the V2 version of this layer that uses name_scopes to create
variables instead of variable_scopes. But this approach currently lacks
support for partitioned variables. In that case, use the V1 version instead.
Example:
```python
price = tf.feature_column.numeric_column('price')
keywords_embedded = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_hash_bucket("keywords",
10000),
dimensions=16)
columns = [price, keywords_embedded, ...]
feature_layer = tf.keras.layers.DenseFeatures(columns)
features = tf.io.parse_example(
..., features=tf.feature_column.make_parse_example_spec(columns))
dense_tensor = feature_layer(features)
for units in [128, 64, 32]:
dense_tensor = tf.keras.layers.Dense(units, activation='relu')(
dense_tensor)
prediction = tf.keras.layers.Dense(1)(dense_tensor)
```
"""
def __init__(self, feature_columns, trainable=True, name=None, **kwargs):
"""Creates a DenseFeatures object.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model. All items should be instances of classes
derived from `DenseColumn` such as `numeric_column`,
`embedding_column`, `bucketized_column`, `indicator_column`. If you
have categorical features, you can wrap them with an
`embedding_column` or `indicator_column`.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the DenseFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` is not a `DenseColumn`.
"""
super().__init__(
feature_columns=feature_columns,
trainable=trainable,
name=name,
**kwargs
)
self._state_manager = _StateManagerImplV2(self, self.trainable)
def build(self, _):
for column in self._feature_columns:
with tf.name_scope(column.name):
column.create_state(self._state_manager)
# We would like to call Layer.build and not _DenseFeaturesHelper.build.
super(kfc._BaseFeaturesLayer, self).build(None)
class _StateManagerImplV2(tf.__internal__.feature_column.StateManager):
"""Manages the state of DenseFeatures."""
def create_variable(
self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None,
):
|
@tf_contextlib.contextmanager
def no_manual_dependency_tracking_scope(obj):
"""A context that disables manual dependency tracking for the given `obj`.
Sometimes library methods might track objects on their own and we might want
to disable that and do the tracking on our own. One can then use this
context manager to disable the tracking the library method does and do your
own tracking.
For example:
class TestLayer(tf.keras.Layer):
def build():
with no_manual_dependency_tracking_scope(self):
var = self.add_weight("name1") # Creates a var and doesn't track it
# We track variable with name `name2`
self._track_trackable("name2", var)
Args:
obj: A trackable object.
Yields:
a scope in which the object doesn't track dependencies manually.
"""
previous_value = getattr(obj, "_manual_tracking", True)
obj._manual_tracking = False
try:
yield
finally:
obj._manual_tracking = previous_value
| if name in self._cols_to_vars_map[feature_column]:
raise ValueError("Variable already exists.")
# We explicitly track these variables since `name` is not guaranteed to
# be unique and disable manual tracking that the add_weight call does.
with no_manual_dependency_tracking_scope(self._layer):
var = self._layer.add_weight(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=self._trainable and trainable,
use_resource=use_resource,
)
if isinstance(var, tf.__internal__.tracking.Trackable):
self._layer._track_trackable(var, feature_column.name + "/" + name)
self._cols_to_vars_map[feature_column][name] = var
return var |
start_sample.py | import requests
import logging
import time
logging.basicConfig(format='%(asctime)s - %(levelname)s : %(message)s', level=logging.INFO)
def | ():
"""
Send stream instructions to streamer servers in the form of HTTP POST requests.
The streamer_0 is dedicated to the Twitter sample API, so we don't send any keywords or language to track
"""
while True:
try:
r = requests.post("http://streamer_0:5000/stream", json={})
break
except requests.exceptions.ConnectionError:
logging.error("Could not connect to server streamer_0, retrying")
time.sleep(2)
continue
logging.info("'http://streamer_0:5000/stream', response = {}".format(r.status_code))
if r.status_code != 200:
time.sleep(2)
stream()
if __name__ == "__main__":
while True:
stream()
time.sleep(3600 * 24)
| stream |
server.py | #!/usr/bin/env python3
from flask import Flask, render_template, request
from werkzeug import secure_filename
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/upload', methods=['POST'])
def upload():
if request.method == 'POST': | fname = secure_filename(f.filename)
f.save(fname)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False) | f = request.files.get('file') |
file.go | package file
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
)
// Provider implements a file-based image storage
type Provider struct {
path string
}
// New returns a new Provider instance
func | (path string) (*Provider, error) {
if _, err := os.Stat(path); err != nil {
return nil, err
}
return &Provider{
path,
}, nil
}
// Get returns the image data for an image id
func (p *Provider) Get(ctx context.Context, id string) ([]byte, error) {
imageData, err := ioutil.ReadFile(filepath.Join(p.path, fmt.Sprintf("%s.jpg", id)))
if err != nil {
return nil, err
}
return imageData, nil
}
| New |
ultimate_question.rs | extern crate question;
use question::{Answer, Question};
fn main() | {
let question = "What is the answer to the Ultimate Question of Life, \
the Universe, and Everything?";
let answer = Question::new(question).ask().unwrap();
let correct = Answer::RESPONSE(String::from("42"));
assert_eq!(answer, correct);
} |
|
quit.rs | use crate::parser::{ParserMessage, UciError};
use engine::Engine;
use std::error::Error;
use std::io::Write;
pub fn run_command(
_writer: &mut dyn Write,
args: &str,
_engine: &mut Engine,
) -> Result<Option<ParserMessage>, Box<dyn Error>> | {
// There must be no arguments after "quit"
if !args.trim().is_empty() {
return Err(Box::new(UciError::InvalidArgument(
args.trim_end().to_string(),
)));
}
Ok(Some(ParserMessage::Quit))
} |
|
test_wallet.py | import pytest
from vcx.error import VcxError, ErrorCode
from vcx.api.wallet import *
import json
TYPE = "record type"
EMPTY_TYPE = ""
ID = "123"
EMPTY_ID = ""
VALUE = "record value"
VALUE_NEW = "RecordValueNew"
EMPTY_VALUE = ""
TAGS = "{\"tagName1\":\"str1\",\"tagName2\":\"5\",\"tagName3\":\"12\"}"
OPTIONS = json.dumps({"retrieveType": True, "retrieveValue": True, "retrieveTags": True})
TAGS_EMPTY = ""
TAGS_EMPTY_JSON = "{}"
TAGS_MALFORMED_JSON = "{\"e\":}"
QUERY_JSON = {"tagName1": "str1"}
SEARCHED_RECORD = {
"id": "RecordId",
"type": None,
"value": "RecordValue",
"tags": TAGS
}
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_get_token_info():
info = await Wallet.get_token_info(0)
assert info
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_send_tokens():
receipt = await Wallet.send_tokens(0,1,"address")
assert receipt
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_create_payment_address():
address = await Wallet.create_payment_address()
assert address
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_create_payment_address_with_seed():
address = await Wallet.create_payment_address("0000000000000000000000WHATEVER00")
assert address
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_validate_payment_address():
await Wallet.validate_payment_address('sov:1:1234')
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def | ():
await Wallet.add_record(TYPE, ID, VALUE, TAGS)
await Wallet.update_record_value(TYPE, ID, VALUE_NEW)
await Wallet.update_record_tags(TYPE, ID, TAGS_EMPTY_JSON)
await Wallet.add_record_tags(TYPE, ID, TAGS)
await Wallet.delete_record_tags(TYPE, ID, ['one', 'two'])
await Wallet.delete_record(TYPE, ID)
record = {
"id": ID,
"type": TYPE,
"value": VALUE,
"tags": None,
}
assert (json.loads(await Wallet.get_record(TYPE, ID, OPTIONS)) == record)
@pytest.mark.asyncio
async def test_wallet_search():
search_handle = await Wallet.open_search(TYPE, QUERY_JSON, None)
assert (search_handle == 1)
searched_record = await Wallet.search_next_records(search_handle, 1)
assert (json.loads(searched_record) == SEARCHED_RECORD)
await Wallet.close_search(search_handle)
with pytest.raises(VcxError) as e:
await Wallet.export("/tmp/output.wallet", "backupKey")
@pytest.mark.asyncio
async def test_import_wallet_failures(vcx_init_test_mode, cleanup):
with pytest.raises(VcxError) as e:
await Wallet.import_wallet('Invalid Json')
assert ErrorCode.InvalidJson == e.value.error_code
cleanup(True)
config = {'wallet_name': '', 'wallet_key': '', 'exported_wallet_path': '', 'backup_key': ''}
with pytest.raises(VcxError) as e:
await Wallet.import_wallet(json.dumps(config))
assert ErrorCode.IOError == e.value.error_code
cleanup(True)
config = {'wallet_key': '', 'exported_wallet_path': '', 'backup_key': ''}
with pytest.raises(VcxError) as e:
await Wallet.import_wallet(json.dumps(config))
assert ErrorCode.MissingWalletName == e.value.error_code
cleanup(True)
config = {'wallet_name': '', 'exported_wallet_path': '', 'backup_key': ''}
with pytest.raises(VcxError) as e:
await Wallet.import_wallet(json.dumps(config))
assert ErrorCode.MissingWalletKey == e.value.error_code
cleanup(True)
config = {'wallet_name': '', 'wallet_key': '', 'backup_key': ''}
with pytest.raises(VcxError) as e:
await Wallet.import_wallet(json.dumps(config))
assert ErrorCode.MissingExportedWalletPath == e.value.error_code
cleanup(True)
config = {'wallet_name': '', 'wallet_key': '', 'exported_wallet_path': ''}
with pytest.raises(VcxError) as e:
await Wallet.import_wallet(json.dumps(config))
assert ErrorCode.MissingBackupKey == e.value.error_code
cleanup(True)
| test_wallet_storage |
async_global.rs | use
{
crate :: { SpawnHandle, LocalSpawnHandle, JoinHandle } ,
futures_task :: { FutureObj, LocalFutureObj, Spawn, LocalSpawn, SpawnError } ,
};
/// An executor that spawns tasks on async-global-executor. In contrast to the other executors, this one
/// is not self contained, because async-global-executor does not provide an API that allows that,
/// so the threadpool is global.
///
/// It works on Wasm.
//
#[ derive( Copy, Clone, Default ) ]
//
#[ cfg_attr( nightly, doc(cfg( feature = "async_global" )) ) ]
//
pub struct AsyncGlobal;
impl AsyncGlobal
{
/// Create a new AsyncGlobal wrapper, forwards to `Default::default`.
///
pub fn new() -> Self
{
Self::default()
}
/// Wrapper around [async_global_executor::block_on]. This is not available on Wasm
/// as Wasm does not have threads and you're not allowed to block the only thread you have.
//
// TODO: is target_arch = "wasm32" not a better way to express this?
//
#[cfg(not(target_os = "unknown"))]
#[ cfg_attr( nightly, doc(cfg(not( target_os = "unknown" ))) ) ]
//
pub fn block_on<F: std::future::Future>(future: F) -> F::Output
{
async_global_executor::block_on( future )
}
}
#[ cfg( target_arch = "wasm32" ) ]
//
impl Spawn for AsyncGlobal
{
fn spawn_obj( &self, future: FutureObj<'static, ()> ) -> Result<(), SpawnError>
{
async_global_executor::spawn_local( future ).detach();
Ok(())
}
}
#[ cfg(not( target_arch = "wasm32" )) ]
//
impl Spawn for AsyncGlobal
{
fn spawn_obj( &self, future: FutureObj<'static, ()> ) -> Result<(), SpawnError>
{
async_global_executor::spawn( future ).detach();
Ok(())
}
}
#[ cfg( not(target_arch = "wasm32") ) ]
//
impl<Out: 'static + Send> SpawnHandle<Out> for AsyncGlobal
{
fn spawn_handle_obj( &self, future: FutureObj<'static, Out> ) -> Result<JoinHandle<Out>, SpawnError>
{
Ok( JoinHandle{ inner: crate::join_handle::InnerJh::AsyncGlobal
{
task: Some( async_global_executor::spawn(future) ),
}})
}
}
#[ cfg( target_arch = "wasm32" ) ]
//
impl<Out: 'static + Send> SpawnHandle<Out> for AsyncGlobal
{
fn spawn_handle_obj( &self, future: FutureObj<'static, Out> ) -> Result<JoinHandle<Out>, SpawnError>
{
Ok( JoinHandle{ inner: crate::join_handle::InnerJh::AsyncGlobal
{
task: Some( async_global_executor::spawn_local(future) ),
}})
}
}
impl<Out: 'static> LocalSpawnHandle<Out> for AsyncGlobal
{
fn | ( &self, future: LocalFutureObj<'static, Out> ) -> Result<JoinHandle<Out>, SpawnError>
{
Ok( JoinHandle{ inner: crate::join_handle::InnerJh::AsyncGlobal
{
task: Some( async_global_executor::spawn_local(future) ),
}})
}
}
impl LocalSpawn for AsyncGlobal
{
fn spawn_local_obj( &self, future: LocalFutureObj<'static, ()> ) -> Result<(), SpawnError>
{
let _ = async_global_executor::spawn_local( future ).detach();
Ok(())
}
}
impl std::fmt::Debug for AsyncGlobal
{
fn fmt( &self, f: &mut std::fmt::Formatter<'_> ) -> std::fmt::Result
{
write!( f, "AsyncGlobal executor" )
}
}
| spawn_handle_local_obj |
application.py | # Resource object code (Python 3)
# Created by: object code
# Created by: The Resource Compiler for Qt version 6.0.4
# WARNING! All changes made in this file will be lost!
from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x08\x19\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x07\xabIDATX\xc3\xad\
W[P\x93g\x1a\xf6\xca\xce\xec\xcc\xf6b/\xbc\xd9\
\xe9\xce\xecn\xbd\xda\xd9\x9b\xb5\xce\xba;{\xb0\xad\xcc\
z\xb1\xce\xce:\xb3vTpu\xdb\xe2\x81\xd6\xb6T\
\x04\xbb\xa5 m\xc1\x82\x06\x08\x07QB\x80\x80\x80\x02\
!\x81\x10\x92@H\x10s$!gr\x80\x04B \
\x9c\x09G\xb5Tx\xf6\xfb~\x13\x160X\x8b}g\
\x9e\xf9/\x92\xfc\xcf\xfb>\xcf\xfb\xbe\xdf\x97]\x00v\
\xfd\x98 \xf1\x0b\x82\x14\x02\x03\xc1u\x82\x03\xcf\xfd\xfe\
\x8fH\xbc\x9b \xe1W\xaf\xef\xb5*\x8c\xd6e\xdb\x02\
`\x19\x1e[\x09'\xf13\xfa\x19\x81\x22\xfc\xdc>v\
H~\x8a\xa0\xb9\xb6Y\x1c2\xcf\xadB9\xfe\x1dD\
\xf6Q\xd8\xc7\xe6\xe8\x87\x86={\xf6XSR\xae,\
\xca::\x10N\xe2\xe5I\xc3\xc41\x04\xb7>I\xf9\
,`\x9b]YSM\x03M\xb6\x114\xeb\xfb 1\
y`\x19\x9d\xc5\xbb\xef\xbe?\xc5\xab\xbe\x83\xf1\x89)\
LO\xcf\xae\x92\xef\xd7\xbct\x02\x11\x9f\x0f\xbe\x1d\xe3\
\xb2\x04CO\xb43@\x8b{\x06\xcd=.4\xeb\xec\
\xa8W\xf6 \x87S\x852^5C\xbc\xb0\xf4\x90\x81\
\xc1`\x5c&\xbfK|\xe1\x04H\x1c$8A\xfd\xdd\
\xeas'\xf1\xb9'\x04H\x87\x97\xc1\xd7\xbb \x22U\
7\xdc7\xa2\xb8N\x88,V>\xccV\xdb:q\x04\
,\x16k,\xfc\xce\xe7'\x10\x916\x93\x95?F}\
\xa5\xfe\x12\xc4o\xf4Y1\xb6\x02~\xef Z{\x9c\
\xe0?0\xa1L(CF\x0e\x1b\xb2\x0e\xf9&\xd2\xf9\
\xc5e\xcc-,!4\xbf\x88\xbd{\xf7Z\xc9;~\
\xbam\x02$~C\x90F=5\x13iu\xb3\x80\xd2\
?\x0f\xcb\xc4\xe2\x9aP\xa1Z\xb4l\xf1Y\xa0\xb6\xa0\
\xa6]\x8d/\xb2sq\xb7\x9e\xff\x0c1%\x9d\x09\xcd\
cbj\x06\x83C\x81'\xe4\xdd\xbc-\xd3\xb0;\x92\
\x033&\xd4S\xb5\xd3\xfbXO\x88\xc5\x03!\x88,\
CP\xbaF\xd0\xed\x09B\xe5\x9bB\x9bs\xfc\xa9\xcf\
Z\x1b\xee*t\xc8\xbc\xc9E\x09\xa7l\x93\xcf\x9b\x88\
'\xa7\x11\x18\x1d\xc3\x80o\x08\xa2\xd6\xd6%\xc2Q\xdb\
(\x12\x87\xc6\x1f\xaf\x82/b\x94M\x89$\x90\x22\xea\
R-\x9aB\xab\xe8\x18y\x04\xa1\xc5\xcf\x10St\xf6\
\x0d\xa3\xd3\xe1\x87\xd4<\x80\x16\xbd\x03\x0d]\x06\x14\xd5\
\x0a\x90\x91\x95\x0d/y\xf1\xc6\xaa\xa9\xd4\xb3s\x0bL\
\xc5\x94\xd8\xdd\xef\x85\xc9b\x05\xb7\xbc\x12\xa5\xe5\x95K\
\x13\xf3\xcb\xab#\x0f\x017\xd9\x11\xe6\xd9\x15\x84\x97\x15\
\x13\x06\xcb<\xd0h\xf2\xa3\xdd\xee_'\x96;\x86 \
\xb3x\xd7}\xe6\x08\xa4\xf8<3\x1b*\x8d6\xaa\xdc\
S3!\x8c\x8e\x8d3\x15\xd3&\xe47\x09\xf1\xc1\xc5\
\x8fQs\xaf\x01\xbee`\xfc\x11\xa0#\x13#\xf2\xce\
\xa1\xbe]\xb9\xb8Q\x01\x83\x81ttM\xa7\x1e\x0ag\
\x80\xa9\xb8\xdd\xea\x83\xd8\xe8B\x93\xca\xcc\xf8|\xe5\xcb\
,\x88\xda$Q\x89\xa7g\xe7\x18\x1b\x86\x86G`w\
8I\x82:$|\xf8!\xae\xb3\x0b\xe1\x99\x5c\x80o\
\x09\xd0\x90\xde\xe1\x0f,\x81\xab\x1f\xc4}\xef\x04\xdd\x07\
\x1da\xeb\xff\x9f\xc0\x1d\xb9\x16\x1d\xf6!H\xcc\xfdO\
}\xee\xd4\x22\x9dU\x84\xaa\x9a\xbaM>G\xe4\x8e\xf8\
<<\x12\x84\xd3\xdd\x0f\xbd\xc1\x88\xc2\xe2b\x9c~/\
\x1e=\x03\x01\xf4/\x02\x83\x84\xbc\xc5\xff-\xee:C\
(Q\x91\xf7\xf6\x05\xf1N\xdc\xbf}\x843i\xe3 \
\x18\xf43\xab\xe0\xc9Th58\xd1\xd8\xdd\x0b\x9eX\
\x89\xac\x5c\xf63>G\xaa\x9e\x9c\x9ee\xe4\xee\xf7\x0e\
\xa2\xd7lAC\x03\x1f'b\xe3 \xe9\xd6\xc0E\xcf\
\x01R\x90$\xb8\x86\xb2\x9e\x00n\xb4\xdbP\xd1\x1bD\
\x85\xce\x8bJ~\x0bm\xbe\x9b['\xd1\xa0\x99\xf8\x16\
e\x22\x05\xee)\xf4(\x13\xc8\x90x5\x0b\x1a\xad>\
\xaa\xdcc\x13\x93\xf0\x0d\x0d\xc3f\xef\x83\xb4]\x8e\xc4\
K\x97\x90\xc3\xca\xc3\xd4c\xc0NzI1N\xfa\x89\
\x94\x7f[;\x84|\x85\x13%j\x1fJ\xd5\x03\xe8\xf2\
0\xa3(\x22\xf8\xf93\x09t\x8f.\xa1\xa8\xbe\x15\xa5\
|\x09\xb2J*\xf0\xcf\xe3qQ\xe5\xf6\x07F\xd1\xe7\
\xf2@\xab7 \xfdj\x06\x92\xbfH\x83\xcd7\x02'\
\xa9\xda@\x1aL\xe0{\x88R\x9d\x1fE\xdd\xfd\x0cq\
A\x97\x1b\xc5\xdd\x1e\x88\x9cA\xfc\xf9\xcd\xb7]\x84\xeb\
l\xb4C\xd0(\xf7N#\xa7\xfc\x1e\xb2K\xab\xf1Q\
\xeaWH\xfeo\xea\xfaXQ\xb9G\x82\xe3\xf0\x0c\xf8\
`4\x99Q\xc9\xab\xc2\xfbg\xcfA\xfe@\x03?\xe9\
n\xb2\x8d\x19\xb9oi\x06\x19\xd2\x9b*/r\xe5\x0e\
\xe4u\xf6\xa1\xf0\xbe\x1b\x1c\x95\x1b\xf9\x9c\xca)\xc2S\
\xb8\xdd)\xdc+v\x04\x90Q\xc8\xc5\x95ky8\x11\
\x9f\x80\x9b\xb7n3c\x15\x91\xdbjs@\x22m\xc7\
\x85\x84\x0fPt\xbb\x0c\xf3+\x80\x9f4X\xf7$ \
\x1c|\x84J\xd3\x188\xfaa\x86\x9cV\xfdU\xb3\x1e\
\xac\x0e;\xb8:\x1f\xd9!\x1ez/\xe0\x13\xbc\xba]\
\x02&\xbe\xc1\x83\x94o\xd88\x9f\x9c\x8a\x03\x7f=\x04\
c\xaf\x99\xe9n*\xb7F\xd7\x83\xa4\xcb\xc9H\xff:\
\x8b\x8c\xd5<S\xb5q\xf6\xa9\xdc5\xf6i\x5c\x97Y\
\x19\xd9\xbfn!\xa7\xa0\xd4\x82t\xbe\x1aW\x9b4`\
\xc9\xcc\x10\xbb\x82\xf8\xe5\xaf_\xa7g\xc0;\xe1u\x1f\
5\xcc5\xddf|\x94\x96\x85\xb8s\x17\xf1\x97C1\
L\xd5t\x99\xf0\xaa\xaaq\xfa\xf4\x19h\xcc\x0e\x8c\x92\
-6\x14\x1e\xabZ\xc7\x0cx\xe6qp\x0d#L\xa3\
e\x8a\x0c\x8c\xec\xb4\xfa\x9c\xb6^\x94t9\xd0f\xf7\
\xaf\x1e=\x11KG.o\xc3y\x135,\x5c\x99\x1a\
\xf1\x97>\xc7\xd1\xd83\xf881\x09\x86^\x13\x1a\x9b\
\x04\xf8\xdd\x1b\xfbQO\xd4\xf1\x90\x99\xee\x9a\x00\xaa\xad\
\x93`+]\x0c9\xf5\xbc\xf0\xbeg\xbd\xea\xcc\x16=\
JU\x1e\x08m\x01\x94\xd4\xf1C\xe1eS@\xf0\xca\
\xf7%`+nj\xc7\xa9\x84D\xc4\x1c9\x8a\xdc|\
6ZZ\xc58\x14\x13\x83/95\xc8\x14j\x98\xe6\
\xa2\xd5\xd2'\xf5\x9azL\x13\xa1Id\xb7\x99\x90\xdb\
nF\xb9\xda\x8d\x06\xa5v9,9=\xf9N\x13\xec\
\xd9r\xd4G\x0d;\xabF\x88c\xff9\x8f\xdf\xee\xfb\
=\x1a\xf9\x02\x9c\xbf\x90\x80\x93\xf1\x17p\xa3\xad\x07\x19\
\xc4OJ\x14\xe9n\xbaX\xa8\xef,\xfa\x94\x98P(\
\xb7@\xe9\x0e<\xf9W\xec)*w-\xc1g\x04\xfb\
\xb6\xb9\xe4D\x8d\xbe\xcc\xb2Z\xfc\xe3\xe4\x19\x1c<\xf4\
7\xb0r\xf3\xb0\xef\xc0\x1fP \xd1!\x89'e*\
\xa6K\x85>\xbf!\xd5F\xe4.\x90[!\xb0\x0c\xae\
\xe5\xdc\xe2\xd2\x11\x13\x13\xe4\x87o<\xaf<\xe7\x96\x15\
5\x9ciE\xe5\xf8\xfb\xb1X\x1c?\x19\x877\xf6\xef\
\xc7\x8d:\x11\x92\xab\xa4\x0c!\xedp\xea5U!\x8b\
4[\xc9\x037*4n\xd4I:\x17\xc3rs\x08\
\x8em\x95\xfb\x87$\xe0Jesp\xe4\xf8)\x1c>\
|\x98\x8cc.2\x05*\x5c\x22\xd5\xd3]~M\xdc\
\x0b6\xe9tv\xa7\x1dw\x8c\xe4\x88\xb6\xf9\x9e\x84\xb7\
\x1a\x95\xfb\x22\xbdI\xfd\x80\x0bm\xf4\x042JxL\
\x0f\x9cKI\xc3\xb5\xa6.|\xc2me6Y\xf1\x83\
\x01\x5c\x97\x9a\xc1Q{ \xf3\x04\xd7\xce%&\x056\
\xc8\xfd\xc7\x9d\xc8\x1d\xd5\x82\xdc\x1a\x01\xce^NE\x81\
X\x85x\xf6]\x5c\xa9U\x90\xaa\xfb\xc0\x96\xdbP\xad\
u\xe3\xaeTA/\x10\xca\x0dr\xbf\xba\xd3j\xa3\x05\
\xb7\xa2Q\xf8\x1d\xafC\x8dO\xb9-\x88\xcb\xe6\xe1\x9a\
H\x8f\xaa\x1e/\x9a5\xe6\xc7\x7fz\xf3-Wx\xac\
\xa8\xdc\xaf\xbd\xac\xdc\xd1\xe2\x08\xdd\x05\x5cu\x1f\xde\xcb\
\xafE\xb9v\x002g`\xf5\xc2\xa7\x97\xa9\xdc\xf7\x08\
\xd2\xa9\xdc;\xf8\x03\xf3\xc2\xf1\x13\x82\xca\x1c\xee\x9dP\
\x0b9\x94\xb8\x0d\xc2\xc8\x16\xa3\x17\x87\xc3/\x22\xf7\x0e\
\xff\xdam\x8a\xdda\x99\xd5\x1b\xb6\xd8k\xbb^2\xbe\
/\x89\xff\x01f\xb9_\xfc\x11\x80=\xcf\x00\x00\x00\x00\
IEND\xaeB`\x82\
\x00\x00\x03T\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x02\xe6IDATX\xc3\xd5\
\x97\xcdN\x13a\x14\x86\xeb5\x94\x95{q\xe1\xd2\xc4\
\xe0\x05\xb8\xe2\x0e\x5c\xb8\xf4\x02\x5c\xb10\xea\x05\x18\x96\
&bX\xb8\xb0\x91X \xd1\x9d\xbf\x89\xa4\x14\xb1R\
\xa4HE\x94\xfe\xd0\x02C\xff\xa6\x9d\x19\xa6e\x80\xe3\
y{\xfa\x85QJ\x82\xc9!\x86I\xde\x9c3\xa7\xf3\
\xcd\xfb\x9c\xf3M\x9bN\x84\x88\x22\xffS\x91s\x01\xc0\
\xc7\xd5\x90n\xff\xa5\xfb\xac\xc7==d\x0d\xa9\x02\xf0\
12<<\xbcj4::\xba\x19V<\x1e\xaf&\
\x93\xc9V:\x9dv\x13\x89Dk`` \xcdkn\
h\x02\xa48\xd2\xe1\xe1q\x99\xba\xef\xb7\xc9\xb2,\xda\
\xdf\xdf'\x86\xf1x\xcd\x18\xeb\x8a\x1a@?\xf3\xb0\x1c\
\xc7\xa5Lf\xb9\x0b\x14\x04\x01\xc5b\xb1:\xaf{p\
\x1a\x88S\x01\x1c\x1c\x10ww\xb2l\xdb\xa1\xf9\xf9\xcf\
d\x0e\xd7u\xe9\xf9\xc4D\x17B\x05\x00&{\xc1\xc9\
\xaa7\x1cJ\xce\xcdS\xf8p]\x0f\x8b\x17T\x00\x82\
\x10@gO\x14\xce\xed\xa6G\x1fgf\xe9\xf5\x9b\xb7\
\x14\x9f\x9c\xa4\xa9\xa9iz\xf7\xfe\x03E\xa3\xd1e^\
\x7fA\x05\xc0\xef\x10\xed\xb6%\x86\x85\x9a\xe3\x05\x94]\
\xcd\xd1\xe4\xf4+z2\xfe\x94\x9e\xc5^\xd0Lb\x0e\
\x8b\x17U\x00\xda\x81\x18\xf5\x13 <\xff\x90j\xcd6\
\x157\xab\x94/nS\x89c\x8d\xb7\x85\xd7~Q\x01\
\xf0y\xcc\xcd]\x1e\xb5\xc7{\xdb\xee\x9f;\xbe\xe4\x88\
]\xb8\xbd\xee\xe2\x94\xca3\xe0u\xe4\xc6uWb\xd8\
\x109\xea\xe63D\xd4\x01\xa7\x06\xe0\xf4:\xad9\x22\
\x98\x98hr\x80\x98kPS\x9d\x00\x00*-\xb91\
\xe2NS\x8c\x10\x0d\x04\xf2m\xfb(\xb6|E\x00\x9b\
;\xdbj\xfci\x8e<l\x88\x1a\xae9\x13\x80:\x8f\
\xb7T#*\xd7\xc5\x04\x06\x06\x005(\x9c\x17\xab\xbc\
%\xbb\xca\x13\xc0Ma\x0e\x15*rn\xcc~Z\x02\
hj\xdd\xad\xf1\x94'\x00S\xdc\x1cqm[@`\
\x9a\xab\x1cu\x9e\xeb\x81A\x15G\x11\xc0j\x891\x0c\
\xd6w\x04 \x0cd&b\xb6iu\x8b\xa8\xaa\x09P\
\xb6\xc5\xbc\xd0\x03\xf8\xbe)c\x87)`\x0c\x18\x84\x1c\
\x00[ME\x00t\x03S\x98\xad\x94\xc5\x1c\xe7F\xe6\
\x1c\x00\xc8q]\xa9\xa1\x08\x80\xfd\xfcV\x12s3\x01\
\x085\x18B\xe8\xda|\x8e)\xa8N\x00[\x00\x03\xc8\
\x98g6\x04\x002\xe6\x85\xde\xf8\x17\x0b\xfc,\xd8\x8a\
\x00\x18g:O\xb4T\x14#\x98\x02\x00\x02\x0c>\xfb\
\xc5S(\xf0C\xb8fI\xf7k\xf9R\x87\xd7\xbeT\
\x01\xc8U\x8f\xbaN\xadK\x0e\x90\xaf\x85\xde\xb7\xc2\x92\
=O\xa6\xb3\xde\xa3\xb1q\xeb\xda\xd0\xf5\x15\x98\xb3n\
\xa9\x00l4\xa4k\x18\xff\xe0\x11\x7fZ\x17S\xd4\x13\
\x0bYo\xe4\xee\xbd\xe2\xa5\xc1\xcbK|m\x8cu\x87\
5\xa8\xfa\xb7\x1c\xdde\xd9<\x8f\x1f\x19\xfe\x9e\xcf\x1e\
7\xbd\xc9\xbax&oF\x00h\xf2\xff\x81\x99\x94\x9e\
\xe9?\xbf\x19\x01B\xd3\xf4\xfc\xbd\x9c\x9e\xa5~\x03Q\
l%\xa1\x92\x95\x0aw\x00\x00\x00\x00IEND\xae\
B`\x82\
\x00\x00\x05:\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x04\xccIDATX\xc3\xb5\
\x97]L[e\x1c\xc6wo\xbc\xd9\xe5\x12I q\
\xd7&\xe3N\x13\xb8p\xd1\x85D\xbdP\xe3\x10\x18\xe5\
+.&J\x04'\x86\xaa\x8b\x99\xe0\xd0\xa2l\x19\x86\
9\x17\xdc\x1a\x16\x98\x80@l\xa6C\xca +\x83\x1e\
(\xcc\xda\xd1\x96\xd2\xd2J{\xfa\x01\xa5\xd0\xef\x16\x1e\
\xdf\xff\xdb\x1d\xc7\xcc\x04*\x87\x93<9o!\x9c\xe7\
\xf7<\xefG\x0f\x87\x00\x1c\xcaF\xcf\xbd\xfa\xe9\xbbL\
Z&a\x0fj`\xca\xd9\xe9y\xd9\x9a?]P\xf2\
\xa5\xc1\xe9\x8f\xa7W\xc3@0\x02\x84\xa2\x19\xad\xc72\
\x8a'\x81X\x22s\xbfyk\xdaK\x10r\x02\x1c{\
\xe7\xac\xda\x1c\xd8\xc8\x98\x12@\x84\x99\x85\xe3\x19\x911\
)\x1aKa%\x94D8\x9aBs\x87\xc6\xbe\x13\xc4\
\xff\x02\x90\x12\x93y$\xf1\xc8X\x92\xcf\x1f\x84]\x8c\
\xc2\xe5\x09\x22\x12K\xa3\xf4\xc3\xefM4uY\x01\xb0\
\xeb\xd86\xd5\x90\x9e:\xfc\xcc\xb9\xe7_.\x11?V\
\x9eEEU\x0d*\x99\xde\xaf\xad\xc3\x9d\xb1\x89\xc7\x00\
\xac\xb6%\xfc\xb9\xe8\x87k\x15X\xf6\x04\x10\x08\xc6\xd2\
\xaf\x9c\xbep\x9fA\x1c\xd9\x15\x80]\x87\x99\x1a\x8a\x8a\
\x8a\xcc\x92Z[[\xdd\xa4\xafU\xad\xfe\xafT\xdf\xa6\
\x06\x06\x06195\x85\xd9\xb99\xe8&&PPP\
\x80!\xcdo|\xdeI\xa6\xf9\x05\xcc\x98\x5c\x1c\xc0\xe1\
OA\xf4\x85\xf0C\xaf\xce\xcd\x00j\xf6\x02PCf\
\xd8\xe5\x8a\xc7\xe3\xf0z\xbdH\xa7\xd3\x98\x9c\x9cDe\
e5fg\x8d\xbc\x81\x07f\x1bt\xd3\x16\x0e@2\
-x\xf0\xdd\x8dQ\x8f\xac\x00\xe1p\x18F\xa3\x91\x8f\
S\xa9\x14~\xea\xedE\xe3'\x9fa\x86A8\x96\xdc\
Pwu\xe3LC#\xce5\x9d\xc7\xed\x91q\x5c\xbc\
>,/\xc0\xc6\xc6\x06\xf4z\xfdc@}}\xfdP\
2\x88\xd0F\x1cf\x9b\x0b\x82\xc1\x88\xa9\x19\x13\xac\x0e\
\x11\x97\xbadn\x80\x00\xa6\xd8:\xd8~E\x22\x11\x94\
+*0\xae\x13@\xe7\x04mW\xda\xaa4\xbe|S\
\xe65@f:\x9d\x0e\xc3\xc3\xc3\xe8e\xf5\xf7\xf7\xf7\
C\xab\xd5\xa2\xaa\xba\x06cw\xf5\x90\x0e*w\x90\xed\
\x04\xb6\x0e\xda\xbbe\x06\xa0y\xb7\xdb\xed\x18\x1a\x1aB\
gg'zzz8PIi\x19ni\xf5\x10\xd7\
\x00o\x08\xb0\xf9\x00g\x00\xb8\xd0%3\xc0\xd6\xd6\x16\
\xdf\x09\x81@\x00\xa2(\xc2\xef\xf7cmm\x0d\xa7\x14\
\x95\xd0\xfc\xae\xe7\xa9\xc9|\xc1\x0b\x98=@\x9b\xdc\x00\
\xdbA677\xf9v\xa4V\x14\x15\xd5\xe8\xfbU\xe0\
\xa9\x1d\x81G\x00\xe7;\x0f\x00\x80\xcc%\x80$3O\
$\x12(+\xaf\xe2\x00\x7f\xb8\x00\x8b\x98\x01\xa06Z\
\xd5\x070\x05\xff\x98'\x93<=MI\xc9\xa9J\x0e\
\xa0\xb7\xb3\x03\x89=\xc5\xf8\x170\xb1\x00|q\xf5\x00\
\x00\xa4\xea\xc9\x98\x14\x8b\xc5P\xa6\xa8\x82zH\xc0\x98\
\x19\xb8k\x05\xe6\x9c\x99\xfb\xe7Wd\x04\x90\xd2Sj\
\x02\x88F\xa3\xdc<\x14\x0a\xa1\xb8\xb4\x02\xd7\x06\x05\xdc\
f\x87\xe4\xa0\x01\x1cd\xc4\x04(;d\x06H=\x9c\
s\x12\x99\xd3\xb9@ \xc5eU\xb8\xd8-\xa0\x7f:\
c\xae}\x90i\xe0\xa3v\x99\x00\xfe]=\xa5&\xad\
\xae\xaer\x88\xb7J*p\xb9W\xc0=\x1b\xb8~\x9e\
\x01\xee\xcc\x03g.\xed\x13@\xaa\x9dD\x8b\x8e\x92\xd3\
qL\xdf\x01+++X__\xe7\x10'Y\x03\xdf\
t\x09PO\x00\xbf\xcce\x1a\xb82\x064\xec\xa7\x01\
\xc9X\xda\xebdNi)9\x1dD\x04@\xf5\xd3\xcf\
\xde|[\x81\x96\xeb\x02O~u\x1c\xb8q\x0f\xf8q\
,\x9e~\xbdNm\xa67\xaa\xac\x00\x9ed,m7\
2%\x00\xd1#\xf2\xe4\x12\xcc\x1b'\x15h\xef\x11\xa0\
\xbcf[\x7fO5\xe2<q\x9a\xbf\x8ei\xf7\xfcJ\
&\x01\x90\xa9$i\xb5SB2\x0f\x06\x83p\xb9\x5c\
\xdc\x90^J\xe8\xb3\xc7\xe3\x81\xdb\xed\xc6\xf1\x13\xaf%\
\x9f}\xa1\x9cL;\x98\x8a\x99\x8e>\xc9xG\x00\x95\
J\xc5\x01\xa4\x15.\xcd7\x19RR:\xf7)\xb5\xc3\
\xe1\xe0\x22\xe3\xc5\xc5E\x0e\xf5\xe2\xf1\x97\x5c\xf4\x1e\xb9\
\x93\xe9\xae\x00---n\xe9`\xa1\xd4\xd2\x97\x0d\x8d\
\x97\x97\x97\xe1\xf3\xf9`\xb3\xd9\xf8}ii\x89C\x10\
\x00\x8d\x0b\x0b\x0b\xcd\xb2\x00\xd0\xa2\x92R\x93\x11\x8d\xe9\
N\xdfxT;5`\xb5Zy\xf5\xd4\x0a\xfd\xce`\
0$\xf2\xf2\xf2\xee\xb3g\x1c\xd9\x17@SS\x93[\
\x9agJO\x22\x13\xaa\x9a\xc6\x16\x8b\x997@\x9fG\
GG#mmm\xde\xfc\xfc|\x13\xfb\xdbA\xa6\xb2\
\xbd\x9a\xff'@ss3\x9f\x02JG\x10T?U\
???\xcf\xeb\xd6h4\x91\xba\xba:\xe7\xc3\xb4]\
L\x1f0\x1d\xcd\xc6xG\x00\xa5R\xe9v:\x9d\xbc\
bJJo>\x94\xb4\xbe\xbe\xde\x99\x93\x93#\x99\x16\
gSuV\x00\x8d\x8d\x8dn\x8b\xc5\x82\x81\x81\x81H\
mm\xad377WV\xd3\xdd\x00\xf8\x7fFL\xc2\
A\x99n\xd7\xdfC9V\x18\x85p\xc8\x04\x00\x00\x00\
\x00IEND\xaeB`\x82\
\x00\x00\x05+\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x04\xbdIDATX\xc3\xed\
WkL\x93W\x18>#q\xc92\xe9\x16\x97\xa8T\
e8\x9d\x02\x15\xf6\x03\x872\x93\x01f,[p\xc4\
0\xff`\xa2.\x1a:\x1dN\x03\xba1\x89[\xb3\x80\
\xd9\x0c\x84\x02\x19X\x1c\x14\x8b\x85\xb2\x82\x95^\xe4f\
\x0b\x8e1\xf8\xc3F\xcb-\x81\x15\xdc\xa8\xc2\x1c\x1b\xb7\
ji\x91\xf2\xee\xbc\x87\xaf\x0c\xdc\xb8\x0da\xd9\xb2\x93\
<\xed\x97\xf3}\xfd\xde\xe7\xbc\xef\xf3^J\x00\x80\xfc\
\x93 \xff\x0a\x02t\x09(D\x14\xd9\x14q\x14\x01+\
F\x80\xae\xddd\xdd\xc6f\x22L\xf8\x95\xc4\x8bG\xc8\
\xa1\xd3\xf7\xc8\x8e\x97;82a+A \x85\x9c\xbe\
0H.\xdd\x80\x19@2\xabyM\xf4\xbe\xfbr\x13\
hd\x06\x91\x04^\xa3Q\xf4\x06\xee\x85G\xf5\xd0\xbd\
\x83\xcbM \x9b\x9d\xf6@t/\xbd\x162= \x89\
?H\xa5,\x1b\x01\x8c1y\xc1\xbb\x9d\x88K\xc6\xd7\
\xc6&\x0e\xa0\x10\xb9\xfdB\xfe\xc5+6F\x8c\x12\x5c\
N\x02\x93\xa7\xa7\xa7\x0d\xcc\xd39\xb9\x98c6\x14\x0a\
\xd2\xe4\xa3+A \x8c)\x9e*\xdf7G\xeb\xdc{\
\xb5\xcc\x89\x9e@D\x96T\x83+,\x0b6FH\x08\
\x13\xf5d*{.T\x03\x01\xf8\x037\xbf\xc0\x0e4\
*T\xdfb\x88R\xd5,X\x03t\x1d\x16\x08\x04z\
EU\xf5\xc8\xa0mt\xc2\xd4s\xf7!\xbesQ\x95\
\x90\xae\x8f\xd0\x13\xcf\xe5\x94\x83\x87\xb4\x02\x9e\xcc.\x03\
\xd4\x06\xdd\xaf\x99\xcb\xb0\xaf\xaf\xaf>\xbf\xd2`\xb5\xdb\
\xed\x80\xf8y\xe4>\xc4^\xab\xb4\xb9\x88/\x86\x80'\
\xd3\xc0g\xf9\x8e\x19\xf5`\xd7^3\xbav\xdas\xee\
h\xd8\xc7\xc7G\x9f\xab\xab\xb0\x0e\x0f\x0d\xc1\x10\x87\xb2\
\xf6.\xe7\x967\xf7wsa\xd8\xbd\xe8^\x80/f\
\x9a\xa0\x86\xdf\xa96B\xf7\xf0\x03\xd8\x19\x9f\xd4\xcf\xa5\
\xe7\x1a\x8a\x98-~\xfem\x97T\x1ak__\x1f\xb8\
\xd0\xd1s\x07br\x15VN\xc4\x87\x97\xd4\x8c0\x14\
\xe9\x15\xb7\x1e8\x1c\x0e@\xa4\xd6\x191\x9e\x85\x9b\x05\
~m\xa9%\x1a[\x97\xd9\x0c\xe6.\x0a\xf3$\x14\xdf\
6\x8e{\xbd\x1e\xd1\xcdB\xc8\x09o\xa9\x04<\xd1\xbd\
V\xab\x15\x10w\x7f\x1b\x84\xf3\x92\x5c\xbbR\xa9\x84\xfa\
\xfaz0\x99L\x0cu\xdf5\xc1Q\xb1d\x18\xc9Q\
D>\xb6v\xcc\xb4@O\x93_~\xd3\xd6\xdf\xdf\x0f\
2\x99\x0cD\x22\x11\xa8T*\x90J\xa5\xa0\xd1h \
K[9\xbe\xe9\x95\xe0\x1f\xb8S\xafy,\xf3\x00\x97\
\x8e\x22\x9e\xc7\x86\xe6S)\x19\xf6\x82\x82\x02\xe6\xe2\xa0\
\xa0 \xe0\xf1x`\xb1X@[^\x01\xfb\xcf&\x0c\
-\xa6S\xceg\x94\xcf\x09L\x83\xe2[{\xe6\xc2`\
\x9a\xb2\x14\x14\x0a\x05\x88\xc5b\xc8\xcc\xcc\x84\xa2\xa2\x22\
P\xab\xd5\xd0\xd9\xd9\xc9`\xec\xfe\xc9\xb9\xc9\xdb\xa7u\
.\xb7\xcfK\x80\xae\xb7\xd8)p\x0e\xc0j\x97\xacx\
\x88\xca\x7f\x82\xe2)\x89\x0e>\x97+![\x96\x0f\x07\
c\xe3G\x84\x1f&\xd8\x92rd\x8eo\x1a\xbf\x07\xa3\
\xd1\x08-\xad-\xf0\xcb\xc0 \x1c8\xf1\xbe\x05\xb3b\
\xc1\x04\x5ci\x84\x85\x85\x84F\xdc&\xe72\xac,\xcf\
3\xb5\x13\xec;\xe3\xba\xd33\xaf\x82\xe5\xfez\x89\x06\
\x9e\xde\xfcb\x1b\xf7<\x92\x8d{f\xabO[\xca5\
\xedXCC=444\x80\xa5\xb7\x172\x14\xc5\xc3\
\xf3\xe9\xc0e<\x92\xe5(\x9e6]\xe5\x9c*2x\
}\xf4\x83.Zl\x121\x0c\x1b%\xeaq\xf7/\xcb\
'\xef\x05\x87_\xfe\xd3\xe4D\x0bLh\xf4\xc9>u\
\x95\x1e\x0c\x06\x03\xb4\xb7\xb7\xc3\xd7\xc6\x961\xae\x81\x09\
f\xf16m8h<I::e\xf8b\x81\x83D\
\xbdWC\xb6\x0a^\x9b*\xc3\x94\x5c\xb0B\x0f\xab$\
\xb4\x04\x9fJ\xaa\x9bC71(\xd4O\xf2\x0a\xc7t\
:\x1d\xd4\xd6\xd6\x82\xc9|\xdb\xb9a\x9b\xf7_\xeab\
\xb2\xe5~\x9cu\x1f\x0d\xf3\xb2\xd4N\xf2\xf6\xb1\xeb.\
\xb6\xae\x94\xc3\x90l\x97U\xc1KW\xab\x80\x9cMn\
Z\xd0\x1cI\xbd\xb1\xe7\x88\xb0\xef\xcaW\xc5PZZ\
\x0a\x1d?\xf6L\x04\x06\x87t<\xaa\x0b\xc2\x84F\x8d\
\x07\xc8o\x02\xd9\xf9\xaa~\x9a\xf10F\x8e6 \xaf\
\xbcJxCi\x00\x92(\x1d\x98\xcd\x95\xb3y\xc3}\
=\xbf\xf9Dj\xa6].\x97CSK+D\x1c{\
\xf7\xce\xf4\x14%\xae\xf1\x8a\xf5w\x9c\xf5p\x02\xc2\xd9\
\x0f\x89\xd1\x81\x03O\x8e\xf7\xdc\xd2i\xe7\xf3\xdfu\xfc\
o\x14.6\xd2\xef\xd8\x17iI\xbe,\x9d\xc8\xd3\x96\
;\xa7\x0f1\x8c%\xc6\xdf\x9f\xbaw_q5\xa0A\
l\xb5\x08\x8c\xf9\x94\xf1\xe0\xf03K\x9a|h\x13Z\
\xbd\xce\xa3\xd9kOH\xf7\x0c\x0f\xb0\x0f\xfe\xf3\x87\xc8\
\xf9/\xee\xb9In\x00\xf6{>\xed\xf7\x08\x1e*>\
]\xe5X\xaa\xf1GZ\xf5\xb6Y\x0b\x11\x1d\xb3C\xc9\
\x918\x099\xf9\xa9\x96!\xfa\x5c\x1a\x0d\xcf\xb3\xff\xff\
7\xfcO\x13\xf8\x1d\xe7\x87\x19\xb9D\xc3\x01\xcf\x00\x00\
\x00\x00IEND\xaeB`\x82\
\x00\x00\x06m\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x064IDATx^\xad\x97[lT\xc7\
\x1d\xc6\x7fs\xce\xd9\x8b\xbd\xf6\xfa\x16\xa0\xbe\x00\x0e\xb2\
ic$BJ!\x22\xa1-\x95b\xa5/\xeeKh\
+\x95\xa6U\xa5\xc6`U\xaa\xda\xb4\xaa\xfaV\x09U\
\xca\x03\x94'\xda\x07\x84\x14)\xad\xc4\x8b\xa5R\x83y\
\x08\xc5\x189\x0ei\xd3\x84\x9a\x9bcj\xec\xb2\x04\x1b\
;\xbb\xf6z\x8f\xbd\xbb\xde\xb3g\xa6\xc3h\x85\xe5r\
l\x88\xc9'}\xfa\x9f\x9d\x87\xfd~\xf3\x9f\x99s\x11\
J)\x82$\x84x\x05x\x9e\xc7kH)\xf5w\xd6\
(' \xb8C\xbb\x01h\x97R\xbe\xc6cdY\xd6\
\x07\x1a\xf6\xbb@\xb7\x069\xff\x14\x00&\xfc\xb7\xed\xf5\
\xe2`]DDn\xce\x89\x8a+W\xaeP]S\x8d\
@\x00\xa0P\x08e(A)f\xd3i^\xa9\x17/\
\xbc\xb4Nl;\xf1\x1f\xb9G\x83|[CL<M\
\x07\xf6\xff`\x8b\xdd,%\xf8J2<<Lee\
%+\xc9u]\x1e\xc0n\xa9\xb0\x22\x1b\xa2*r?\
\xa7\xea\x81\xb5\x03\x08-\x05H\xa1\x0d\xf4]\xbcH.\
\x97\xc3/\x16QJ\x91\xcf\xe7Y\x5c\x5c\xa4P(P\
\xd4c\xb5\xb5\xb5\x94\x01X\x80\xf8\x82\xf6\x80\x01\x006\
D\x05\x1f\x0f\xbcK>;\x8f\x85D\x952\xe2\xb6\xc4\
\xb6\x04!!p>Sl\x8c;\x80D*\x04\xf0\x9c\
\x10\x02\xe0\xcb@\x05P\x0f4`\xc4Hi\x9f$\x02\
\x01N\x9c8!\x00\x81\x05\xd2\x87\x96\x96g\x09em\
\x14\xe5(\xa5\xb4A\x08XW\x19%\xe2\xd8DB\x16\
\xc3\x13s\x5c\xbc=A\xf7X\x8e\x5c$\xbe\xa9\xbd}\
\xf7\xef-\xcbZ\xdc\xb1cGYUU\x95\xd3\xd8\xd8\
\x18~\xe0\x86\x86\x86\xd0\xa5K\x97\xdc\xae\xae\xae\x08\xf0\
\xd6\xaa\x1d\x00\x13DU,\xc2s\xd51\xf2\x9eO\xa1\
(\x91Ja\x09A\xd8\xb1\x88\x86l\xe6r\x05\x12\xa2\
\x8e?\x9f\xff+\x0dM\x1b\x01\x22\xc0f\x96\x84\xef\xfb\
x\x9eGuu\xb5\x9ePK\xf4\xea\xd5\xab\x87\x84\x10\
(\xa5\xdeZ\x11\xc0\xb2A\x00\xb6-\x90\xda\xb6\x148\
\x08\xa4\x12X\xc2\x8c\x1b\x8fL\xb9\xec{\xf5;\xd47\
6\x11|/\xc1\x84g2\x19\xca\xcb\xcb\xcdf>v\
\xec\xd8&\xbd\x7f\x0e.A,\x01\xd0\xd9\xd9\xa9\x0e\x1d\
:\xa4l!\x08Y\x10\xb6-\x1c\xc7\xc6BP\xb4\xcd\
\x1a\x1b\x00\xc7\xb2\x888\x96\xae\x02`Yx\x10\xc0\xdc\
\xdc\x1c555\x06 \x1a\x8dr\xe4\xc8\x91\xcd\xc0\x03\
\x88\x1b\x1a\xa2\xc7b\xb9\xb0mt0f\x8d\xcb#6\
\xb1\xa8\xa3\xc7,2\x8b\x1e\x93\x99\x1cc\xa9y\xee\xcc\
.\xe8\xdfEr\xf9<\xab\xc8,A6\x9b5\xa7f\
\xe9\xffm\x0e\x1c8\xb0\x1e\xe8\x00X\x06\xa0\xb4t\x16\
\x8e\x0d\xe1\x90\xc0S\x8a\xb1\xa4\xcb\x8d\x8c\x83\xd3\xb2\x97\
\xa6}\xaf\xb3\xb5\xe3\x17\xac\xdb\xfb:\x0d/\xb4s\xfb\
\xce$\xfd\xfd\xfd$\x93I\x94R\xe6\xfa\xf8\xf1\xe3\xe8\
\xba\xac3\xe7\xce\x9d\xe3\xe8\xd1\xa3\x1c>|\x98\xde\xde\
^\x12\x89\x84\x04,\xa1\x15\xdc\x01\xed\xff\xce\xe6\xf8\xe7\
\x94Ok\xc7\xcf\xf8\xe6/\xdf&\xf6\xf57\x99|\xa6\
\x83k\xfe.\xae\xf1-dk\x17\xad{\x7fN^V\
s\xfaog\xd1wM\xee\xdc\x9d\xe2\x1b\xafvr\xfd\
\xfau\x03\xa0gk\xd6?\x16\x8b\x99\xebx<\x8e\xe3\
8%8\x04\xc0#\x00\x96%\x98\xcaA:\xde\xca\xfe\
\xdf\xbdM\xd5\xae\xd7(\x84b\x08\xdbBY\x82lA\
r\x7ff\x91O\xeef\x18\xb8\xear\xfa\x1fad\xd5\
^\xae\x8f\xdcg2\xd7\xc6\x85\x0f\xee\x9b\x00\xed\x87\xa1\
\xcd\xcd\xcd\xb4\xb5\xb5\x19755\xa1\xa1\x14 \x83\x1f\
F\x16\xdcq\x15\xdf\xff\xe9o\xa8l\xd8H\xe2\xec;\
L\x8f^\xc3\x89\x94\xb1\xb5y\x07\x9b[\xb6\xf3Iy\
%c\x09\x97\xcff\xf2\xdc\x9d\xce2\xa1\xed\x88\x0dL\
'\xe7\xd8\xb7+\xca\xfa%\x003{=k\xea\xea\xea\
\x00\xccu*\x952\x00J+\x10\xa0\xb9Zp\xe1\x9d\
c(,\xca\xe6\xc6\xd9\x10\x8fR\x94\x92{\xc3}$\
e\x05\xdb\xda\x7fLM\xdb\xcb|<\x9cf\xd2_\xc0\
\xcdx,\xcck/x \x00\xb5t:B\xa1\x90\x09\
-\xdd\xea\x1f\x8e\x01*\xf8>`\xc1\xc6\xb8\xa0P\x1c\
#\x1c\x8bS\xb7\xa5\x96\x92xv}\x05\xe9\xac\xc7h\
\xff\x9f\x98\xae\xbcL\xcb\xf6\x83\xb8\x0ba\xbc\x82\xa4X\
\x94x\xda!\xc7B-\xaa\x80\xe3i\xa0\x96\xd5\x15\x01\
\x00\xd6\xc7C\x84\xca#\xfc\xbfjc!\x9e\xa9\x0cs\
\xe1\xdf\x83\xec\xd9\xf9\x13\xca\xa3\x0e\xb92G\x03(\x03\
ak\x00\x16K!\xa5\x1c%0*\x15\xa4\x5c\x05@\
X\xa5*\xcc\xf5#\xfapl\x86\xf1Y\x8f\xef\xfd\xfa\
\x8f\xdc\xca\xd4\xe0D\x5c\xa2\x11\x1b\xcf\x93\x14=\x07\xd3\
\x01\xa5\x90R\xf2PjY\x01V\x05\x10\x08L\x0d\x04\
\x18\x9dv\xf9\xd5_\x86\x18\xbd\xb7\x80=\x93g\xd3\xba\
2\xf2y_\xbbh\xea\xce\xaf\xd4p\xf9\xdd\xe0%\x00\
\x9ex\x09L\xb8\x10<\xa2\xd6/U\xf2\x87\x1f>\xcf\
\xf5O3D\x1b\xb7\xb1\xf3\xc5\x97Y\x12\x5cN`\x8e\
\xdbS\x01(\xc0\x12%\x00m\xd4R}\xb1\xb5\x96\xdd\
[\xe2t\xbf\x97\xa5j\xf7W\xf9\xd1\x1bo\x10\xa0\xb5\
\x03\x98\xb57\xd5\xd8\x08\x01\xd2\xcbSpSx\xf33\
\x14\xb3i\x0a\x19\x1f%\xfd\xd5\x82\xd6\x08\xf0\xf0)\xe7\
\xe3\xe73\x14\xe6u\xa8\x0e\xd6\x00\xcb\xf7\x89\x10\xc13\
}\xfa\xd7r\x8c\xb2\x137\x03\xc7\x01\xb2\x1e\xfe\xad\x94\
\xcco\xf7DT\x03\xd8_p\x07\x08\x92\x09\xfd\xd7=\
?\xfd~B\xa6\xcf\xdf\xf6\xef\x02\xeev;\xfc\x92\x06\
\xa8\xe3s\xcau]\x1fpW\xed\x00@2\xab\x0a\x1f\
~*\xd3\xbd\xb7\xfc\xd4\xcdi9\x05\xf4\x03\x97th\
\xbf\x10\xa2\xd3\xb6\xed\xaf}\x9e%XXX\xf0\x07\x06\
\x06\xd2'O\x9e\x9c\x06\xba\x83\x00>\x1aI\xca\xad\xe3\
\xb3*\xd7;\xe2\xa7nL\xcb\xd1R\xe8Y\x1dt\x8b\
\x00=\x09\xc0\xd0\xd0\x90\xdb\xd3\xd3\x93\xd2N\xcf\xce\xce\
\x9e.\xbd\x1d\xdf\x08\x02\xe8\xee\xea)\x00\x8c\x04\x84\x06\
\x85\xaf\x08055U\xd0/\x22\xa9S\xa7N%\xc7\
\xc7\xc7/\x03g\x81~\x1d\xec\xae\xb8\x09K\xdfv\xda\
O&\x85\x01@\x08@aZ\xfc\xde\xe0`\xba\xbb\xbb\
;\xa5\xdf\x8a\xcc$\xd0^\xeds\xcda\xed\x9aw3\
n\x11`p\xf0\xfdt___\xfa\xcc\x993\xa6\xc5\
\xa5\xd0\x8fx\x02\x89\xb5\x9ec!D\x18x\x13\xd8O\
is\x06\xb4\xf8\xb1\xfa\x1f\xbd\xfa*_\xf2\xd8\x15\x9d\
\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x04\xa3\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x045IDATX\xc3\xe5\
\x97\xcd\x8fTE\x14\xc5\x7f\xb7\xea\xd6{\xaf\xdbn\xc7\
\xf9@\x9d\x89FM4\x99D\x8d\x1aH\x98\xc4\x8c\x1f\
\x1b\xfe\x02L\x5c\xf1\x07\x18\x16.M\x5ckX\xc3\x8e\
\xc4\x8d\x1b\x17\xce\x82htA\x5c\x18\x0d\xe2\xc4\xc6\x00\
=`PQ\x19`\x02\xa2\x0e\x0c\x83\xd3\xfd^\xf7\x94\
\x8b\xaa\xee\xf9`\xe6\x0d\x84Q\x16VR\xa9\xce{\xb7\
\xeb\x9e:\xf7\xd4\xa9z\xea\xbd\xe7~6\xe5>\xb7>\
\x80]\xbbv\xbd\x03\xec\xfd\x8f\xf2N5\x1a\x8d\x03\xeb\
\x19\xd8\xbb\xef\xbd\xa3;\x1f\x1fv\x00\x9c<:\xcf\xcc\
\x977X\x9c\xef\xdcS\xa6\xda\xa0\xf2\xdck\x03\xbc\xb8\
g\x10\x80\x8b\x7f\x16|\xf8\xee\x1e\x80\xdb\x00p\xfc\xec\
\x1c\xdf?0\x04x.\xfd\xb8\xc0\xfe\xb7\xceo\xcbr\
\x0f\x1dy\x9a\x0b#\x96\xd3\x9f\x1fd\xfc\xd5}\x9bk\
@E\xb0\x16@xp,#\xcb\xb2m\x0100\x96\
a\x8dP\x1b|\x14#%\x22\x14+\xd8\x18\x91\xd5\x95\
s\xe7\xce\x83*\xb8\x04\xd2\x14\xb2\x0c\xd2,\x8cI\x0a\
I\x12\xdew:\x90\xe7\x90\xb7\xa1\xd5\x82v+\x8em\
(r\xb2\xfa8\xd6\x0a\xe3\xaf\xbcIk\xf1\xfa\xe6\x00\
\xac\x15\xac\x15\x04\xb0F\xd8\xbd{\xe7\x16k\xeb\x86\xae\
\x80Z\xa8V\x81\xeamQ\x8d\xaf\x04\xb5\x82\xf7\xa0\xa6\
\x84\x01g\x055\x82\x08\xa8\x0a\x95,\xc3# \x1e\x08\
\xc0\xf0\x1e/\x02\xde#\x12&\x15|\x88#\xc4!\x1e\
<!^@MX\x18@\xd7J\x89\x06\xac\xa0\xdac\
\x00\x9a3\xbf\x05\x8aS\x07i\x02\x95\x04\xb24\xf6\x04\
\x12\x07N\xa1\xe8@^@+\x8f\xbd\x05K9\xb4s\
\xc8\x0bT\x87q=\x00*\xe5%p1@\xd509\
\xf9\xd2\xd6\x0a\xf3>\xd0\xaf\x16\xaa\x1b\x8b\xf6\xd8'a\
a\xbd\x1c%% \x00\xf0\x81\x8d4M\xa3:\xc3\xb3\
\x98\x11\x89l\x07\xdac\x09V\x98_)F\xfca\xcd\
r\x7fa\x1d-\xd1\x80:\x09TI\x18O4/\xe0\
\x9d\x85\xc4!\x89\xc3g\x09\x92i\xd8\x11\x89\xe2\x13\x87\
X\x8b\xefv\x91\xbc\x80\xbc\x03\xed\x02\xdfj#\xed\x02\
\xf2\x02\x9fwP\x1dE\xd5 x:\xebTx\x9b\x06\
\x9c3x\x0f\x03\x8f$\xbc\xfe\xf2\xf3wh\xe86h\
\xa4\xbe\xf1\xeb\xc6\xfc\xdf\xb1\x04R^\x82DM_\x84\
\x8f\x0d\xa58\xe7\xb6\xc5\x88\x9e\x18K\xb9v\xb3\x03\x08\
\x9dR\x11\xaa\x90\xb8P\xefZ\xc50}\xb1\xcb@\xc5\
\xb0\x0e\xf4&\xadW\xf9U.\xe1\xe1\xc6\xd22\xf5\xcc\
p}\xc9\x84-\xe9J\x19\x10\x9c\x1a\xc0s\xe5f\x97\
+7\xbb\xacQW?\xd7\xaad~\xc5'\xa2)\xac\
\x05\x15\xc3\x9c\x0b\xb5w\xa6l\x17\xa8\xc1\xa9 \xc8\x1a\
5\xaf\x9b5\x1a\x8fY1\x9e\xfe{\xe9\xef\x14\x00\xf1\
\x82\xef\x9bX0+WV\x02U!\xd1\x90\xfc\xe7S\
\xdf\xf2\xeb\x99\x13,-\xde\xb8\xa7\xfaWj\x03<\xf5\
\xecN\x9eya\x02\x0f\xa83[1\x10\x03|\x87\xf7\
\xf7\xbf\xc1\xc2\xc2\x02\xb7n\xdd\xa2(\x0aD\x04k-\
\xd6ZT\x15U\xc59\x87\xaab\xad\xc5\x98\xf0\xdf\xe5\
\xe5e\xf2<\xef\xf7#\xcd\xf9\xb8\xf2-\x18pVP\
\x17\x18\xdc1:\xb6rO8~\x9c\xe9\xe9i\x8c1\
x\xef\x99\x98\x98`rr\xf2\x8eY\xd81:\xd6\xdf\
\x86\xae\xd4\x09Up6\xac\xa2V\xaf\xf7k933\
\xc3\xd0\xd0\x10\xd6Z\xbc\xf74\x9b\xcd\xbb\x02P\xab\xd7\
p\xd1\x88\xb4\xd4\x88\x14\x9c\x0b'\x5c\xa0*\x00\xa8V\
\xabdY\xd6\xa7\xb87\xdeis\x1a\xa9\x17AK\xad\
8\x1e\xc7\xbd#\xb4\xd7\x8c1\x88D\xdf\x8f:\xb8\xab\
\x9b\xaf5\xa8\x0d\xf3\xf6\x18.=\x8e\x83)m\xe3\xd5\
\xdb\x12\xa9\xf7\xe5Vl\xad\xf4\x91\x0e\x8e\x0c\xc3\xf2\xef\
\xdb\x02\xe0\xa1\x91a\xd4\xc2\xb5+\x97Y\x9c\xbf\xbe\x05\
\x036\xf8\xc0`\xad\x02\x0b\xdb\xc3\xc0P\xad\xc2\xec\xc5\
K\x9c\xfd\xee\x1b\xce\x9f\x9c\x9e\x03\xa66\x04`$^\
J\x05\x12\x0b\xed\x91'\xa9=\x0co\x1f8\xc8f\xc7\
\x81':\xf1*\xe75\x1e2\x81\x14(\xbap\xf9\xea\
U\xce4\x8e\xd1\xfc\xfa\x8b\xb9\xd9\x1fN\x1d\x02\x0eo\
\x08\xe0\xb3\x8f>\xe0\xa7\xd3'W\x99\xe9\xda\xa3\x86U\
\xe6\xbb\x1e\x04\x1b<_\x1do|w\xee\x8f\xd9_\x0e\
\x01\x87\x1b\x8d\xc6_\x1b\x01\x98\x9a\xfe\xf4\xe3\x7f\xf5s\
l}\xf25\x00\xe2\xb7\xda\x81\xff\xdd\xd7\xf1?M\xf0\
K\xb9\xe8F\x89\xaf\x00\x00\x00\x00IEND\xaeB\
`\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03}\xc3\
\x00i\
\x00m\x00a\x00g\x00e\x00s\
\x00\x08\
\x06\xc1Y\x87\
\x00o\
\x00p\x00e\x00n\x00.\x00p\x00n\x00g\
\x00\x07\
\x04\xcaW\xa7\
\x00n\
\x00e\x00w\x00.\x00p\x00n\x00g\
\x00\x08\
\x06|Z\x07\
\x00c\
\x00o\x00p\x00y\x00.\x00p\x00n\x00g\
\x00\x07\
\x0a\xc7W\x87\
\x00c\
\x00u\x00t\x00.\x00p\x00n\x00g\
\x00\x09\
\x0a\xa8\xbaG\
\x00p\
\x00a\x00s\x00t\x00e\x00.\x00p\x00n\x00g\
\x00\x08\
\x08\xc8Xg\
\x00s\
\x00a\x00v\x00e\x00.\x00p\x00n\x00g\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00(\x00\x00\x00\x00\x00\x01\x00\x00\x08\x1d\
\x00\x00\x01yCj\xf6\xed\
\x00\x00\x00<\x00\x00\x00\x00\x00\x01\x00\x00\x0bu\
\x00\x00\x01yCj\xf6\xec\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01yCj\xf6\xee\
\x00\x00\x00~\x00\x00\x00\x00\x00\x01\x00\x00\x1cS\
\x00\x00\x01yCj\xf6\xf0\
\x00\x00\x00f\x00\x00\x00\x00\x00\x01\x00\x00\x15\xe2\
\x00\x00\x01yCj\xf6\xee\
\x00\x00\x00R\x00\x00\x00\x00\x00\x01\x00\x00\x10\xb3\
\x00\x00\x01yCj\xf6\xed\
"
def | ():
QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| qInitResources |
resource_pool_test.go | /*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"errors"
"testing"
"time"
"github.com/XiaoMi/Gaea/util/sync2"
)
var lastID, count sync2.AtomicInt64
type TestResource struct {
num int64
closed bool
}
func (tr *TestResource) Close() {
if !tr.closed {
count.Add(-1)
tr.closed = true
}
}
func | () (Resource, error) {
count.Add(1)
return &TestResource{lastID.Add(1), false}, nil
}
func FailFactory() (Resource, error) {
return nil, errors.New("Failed")
}
func SlowFailFactory() (Resource, error) {
time.Sleep(10 * time.Millisecond)
return nil, errors.New("Failed")
}
func TestOpen(t *testing.T) {
ctx := context.Background()
lastID.Set(0)
count.Set(0)
p := NewResourcePool(PoolFactory, 6, 6, time.Second)
p.SetCapacity(5)
var resources [10]Resource
// Test Get
for i := 0; i < 5; i++ {
r, err := p.Get(ctx)
resources[i] = r
if err != nil {
t.Errorf("Unexpected error %v", err)
}
if p.Available() != int64(5-i-1) {
t.Errorf("expecting %d, received %d", 5-i-1, p.Available())
}
if p.WaitCount() != 0 {
t.Errorf("expecting 0, received %d", p.WaitCount())
}
if p.WaitTime() != 0 {
t.Errorf("expecting 0, received %d", p.WaitTime())
}
if lastID.Get() != int64(i+1) {
t.Errorf("Expecting %d, received %d", i+1, lastID.Get())
}
if count.Get() != int64(i+1) {
t.Errorf("Expecting %d, received %d", i+1, count.Get())
}
}
// Test that Get waits
ch := make(chan bool)
go func() {
for i := 0; i < 5; i++ {
r, err := p.Get(ctx)
if err != nil {
t.Errorf("Get failed: %v", err)
}
resources[i] = r
}
for i := 0; i < 5; i++ {
p.Put(resources[i])
}
ch <- true
}()
for i := 0; i < 5; i++ {
// Sleep to ensure the goroutine waits
time.Sleep(10 * time.Millisecond)
p.Put(resources[i])
}
<-ch
if p.WaitCount() != 5 {
t.Errorf("Expecting 5, received %d", p.WaitCount())
}
if p.WaitTime() == 0 {
t.Errorf("Expecting non-zero")
}
if lastID.Get() != 5 {
t.Errorf("Expecting 5, received %d", lastID.Get())
}
// Test Close resource
r, err := p.Get(ctx)
if err != nil {
t.Errorf("Unexpected error %v", err)
}
r.Close()
p.Put(nil)
if count.Get() != 4 {
t.Errorf("Expecting 4, received %d", count.Get())
}
for i := 0; i < 5; i++ {
r, err := p.Get(ctx)
if err != nil {
t.Errorf("Get failed: %v", err)
}
resources[i] = r
}
for i := 0; i < 5; i++ {
p.Put(resources[i])
}
if count.Get() != 5 {
t.Errorf("Expecting 5, received %d", count.Get())
}
if lastID.Get() != 6 {
t.Errorf("Expecting 6, received %d", lastID.Get())
}
// SetCapacity
p.SetCapacity(3)
if count.Get() != 3 {
t.Errorf("Expecting 3, received %d", count.Get())
}
if lastID.Get() != 6 {
t.Errorf("Expecting 6, received %d", lastID.Get())
}
if p.Capacity() != 3 {
t.Errorf("Expecting 3, received %d", p.Capacity())
}
if p.Available() != 3 {
t.Errorf("Expecting 3, received %d", p.Available())
}
p.SetCapacity(6)
if p.Capacity() != 6 {
t.Errorf("Expecting 6, received %d", p.Capacity())
}
if p.Available() != 6 {
t.Errorf("Expecting 6, received %d", p.Available())
}
for i := 0; i < 6; i++ {
r, err := p.Get(ctx)
if err != nil {
t.Errorf("Get failed: %v", err)
}
resources[i] = r
}
for i := 0; i < 6; i++ {
p.Put(resources[i])
}
if count.Get() != 6 {
t.Errorf("Expecting 5, received %d", count.Get())
}
if lastID.Get() != 9 {
t.Errorf("Expecting 9, received %d", lastID.Get())
}
// Close
p.Close()
if p.Capacity() != 0 {
t.Errorf("Expecting 0, received %d", p.Capacity())
}
if p.Available() != 0 {
t.Errorf("Expecting 0, received %d", p.Available())
}
if count.Get() != 0 {
t.Errorf("Expecting 0, received %d", count.Get())
}
}
// TestShrinking exercises SetCapacity when the pool must shrink while
// resources are checked out, including concurrent shrink/grow requests and
// waiters blocked in Get, and finally validates capacity bounds checking.
func TestShrinking(t *testing.T) {
	ctx := context.Background()
	lastID.Set(0)
	count.Set(0)
	p := NewResourcePool(PoolFactory, 5, 5, time.Second)
	var resources [10]Resource
	// Leave one empty slot in the pool
	for i := 0; i < 4; i++ {
		r, err := p.Get(ctx)
		if err != nil {
			t.Errorf("Get failed: %v", err)
		}
		resources[i] = r
	}
	done := make(chan bool)
	go func() {
		// Must block until enough resources are returned for capacity 3.
		p.SetCapacity(3)
		done <- true
	}()
	expected := `{"Capacity": 3, "Available": 0, "Active": 4, "InUse": 4, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0}`
	// Poll until the in-progress SetCapacity is reflected in the stats.
	for i := 0; i < 10; i++ {
		time.Sleep(10 * time.Millisecond)
		stats := p.StatsJSON()
		if stats != expected {
			if i == 9 {
				t.Errorf(`expecting '%s', received '%s'`, expected, stats)
			}
		}
	}
	// There are already 2 resources available in the pool.
	// So, returning one should be enough for SetCapacity to complete.
	p.Put(resources[3])
	<-done
	// Return the rest of the resources
	for i := 0; i < 3; i++ {
		p.Put(resources[i])
	}
	stats := p.StatsJSON()
	expected = `{"Capacity": 3, "Available": 3, "Active": 3, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0}`
	if stats != expected {
		t.Errorf(`expecting '%s', received '%s'`, expected, stats)
	}
	if count.Get() != 3 {
		t.Errorf("Expecting 3, received %d", count.Get())
	}
	// Ensure no deadlock if SetCapacity is called after we start
	// waiting for a resource
	var err error
	for i := 0; i < 3; i++ {
		resources[i], err = p.Get(ctx)
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
	}
	// This will wait because pool is empty
	go func() {
		r, err := p.Get(ctx)
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
		p.Put(r)
		done <- true
	}()
	// This will also wait
	go func() {
		p.SetCapacity(2)
		done <- true
	}()
	time.Sleep(10 * time.Millisecond)
	// This should not hang
	for i := 0; i < 3; i++ {
		p.Put(resources[i])
	}
	<-done
	<-done
	if p.Capacity() != 2 {
		t.Errorf("Expecting 2, received %d", p.Capacity())
	}
	if p.Available() != 2 {
		t.Errorf("Expecting 2, received %d", p.Available())
	}
	if p.WaitCount() != 1 {
		t.Errorf("Expecting 1, received %d", p.WaitCount())
	}
	if count.Get() != 2 {
		t.Errorf("Expecting 2, received %d", count.Get())
	}
	// Test race condition of SetCapacity with itself
	p.SetCapacity(3)
	for i := 0; i < 3; i++ {
		resources[i], err = p.Get(ctx)
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
	}
	// This will wait because pool is empty
	go func() {
		r, err := p.Get(ctx)
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
		p.Put(r)
		done <- true
	}()
	time.Sleep(10 * time.Millisecond)
	// This will wait till we Put
	go p.SetCapacity(2)
	time.Sleep(10 * time.Millisecond)
	go p.SetCapacity(4)
	time.Sleep(10 * time.Millisecond)
	// This should not hang
	for i := 0; i < 3; i++ {
		p.Put(resources[i])
	}
	<-done
	// Out-of-range capacities must be rejected.
	err = p.SetCapacity(-1)
	if err == nil {
		t.Errorf("Expecting error")
	}
	err = p.SetCapacity(255555)
	if err == nil {
		t.Errorf("Expecting error")
	}
	if p.Capacity() != 4 {
		t.Errorf("Expecting 4, received %d", p.Capacity())
	}
	if p.Available() != 4 {
		t.Errorf("Expecting 4, received %d", p.Available())
	}
}
// TestClosing verifies Close() semantics: Close blocks until every borrowed
// resource is returned, Put remains legal while closing, SetCapacity fails
// after Close, and all counters drop to zero afterwards.
func TestClosing(t *testing.T) {
	ctx := context.Background()
	lastID.Set(0)
	count.Set(0)
	p := NewResourcePool(PoolFactory, 5, 5, time.Second)
	var resources [10]Resource
	// Borrow every resource so Close() must wait for them.
	for i := 0; i < 5; i++ {
		r, err := p.Get(ctx)
		if err != nil {
			t.Errorf("Get failed: %v", err)
		}
		resources[i] = r
	}
	ch := make(chan bool)
	go func() {
		p.Close()
		ch <- true
	}()
	// Wait for goroutine to call Close
	time.Sleep(10 * time.Millisecond)
	stats := p.StatsJSON()
	expected := `{"Capacity": 0, "Available": 0, "Active": 5, "InUse": 5, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0}`
	if stats != expected {
		t.Errorf(`expecting '%s', received '%s'`, expected, stats)
	}
	// Put is allowed when closing
	for i := 0; i < 5; i++ {
		p.Put(resources[i])
	}
	// Wait for Close to return
	<-ch
	// SetCapacity must be ignored after Close
	err := p.SetCapacity(1)
	if err == nil {
		t.Errorf("expecting error")
	}
	stats = p.StatsJSON()
	expected = `{"Capacity": 0, "Available": 0, "Active": 0, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0}`
	if stats != expected {
		t.Errorf(`expecting '%s', received '%s'`, expected, stats)
	}
	if lastID.Get() != 5 {
		// Bug fix: report lastID.Get() — the value actually being checked —
		// instead of count.Get().
		t.Errorf("Expecting 5, received %d", lastID.Get())
	}
	if count.Get() != 0 {
		t.Errorf("Expecting 0, received %d", count.Get())
	}
}
// TestIdleTimeout verifies that idle resources are closed and replaced after
// the idle timeout, that in-use resources are never idle-closed, and that
// SetIdleTimeout takes effect on already-pooled resources.
//
// Bug fix applied throughout: the lastID checks previously printed
// count.Get() in their error messages; they now print lastID.Get(), and the
// `count.Get() != 0` check near the end now says "Expecting 0".
func TestIdleTimeout(t *testing.T) {
	ctx := context.Background()
	lastID.Set(0)
	count.Set(0)
	p := NewResourcePool(PoolFactory, 1, 1, 10*time.Millisecond)
	defer p.Close()
	r, err := p.Get(ctx)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	if count.Get() != 1 {
		t.Errorf("Expecting 1, received %d", count.Get())
	}
	if p.IdleClosed() != 0 {
		t.Errorf("Expecting 0, received %d", p.IdleClosed())
	}
	p.Put(r)
	if lastID.Get() != 1 {
		t.Errorf("Expecting 1, received %d", lastID.Get())
	}
	if count.Get() != 1 {
		t.Errorf("Expecting 1, received %d", count.Get())
	}
	if p.IdleClosed() != 0 {
		t.Errorf("Expecting 0, received %d", p.IdleClosed())
	}
	// Let the idle resource expire and get closed.
	time.Sleep(20 * time.Millisecond)
	if count.Get() != 0 {
		t.Errorf("Expecting 0, received %d", count.Get())
	}
	if p.IdleClosed() != 1 {
		t.Errorf("Expecting 1, received %d", p.IdleClosed())
	}
	r, err = p.Get(ctx)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	if lastID.Get() != 2 {
		t.Errorf("Expecting 2, received %d", lastID.Get())
	}
	if count.Get() != 1 {
		t.Errorf("Expecting 1, received %d", count.Get())
	}
	if p.IdleClosed() != 1 {
		t.Errorf("Expecting 1, received %d", p.IdleClosed())
	}
	// sleep to let the idle closer run while all resources are in use
	// then make sure things are still as we expect
	time.Sleep(20 * time.Millisecond)
	if lastID.Get() != 2 {
		t.Errorf("Expecting 2, received %d", lastID.Get())
	}
	if count.Get() != 1 {
		t.Errorf("Expecting 1, received %d", count.Get())
	}
	if p.IdleClosed() != 1 {
		t.Errorf("Expecting 1, received %d", p.IdleClosed())
	}
	p.Put(r)
	r, err = p.Get(ctx)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	if lastID.Get() != 2 {
		t.Errorf("Expecting 2, received %d", lastID.Get())
	}
	if count.Get() != 1 {
		t.Errorf("Expecting 1, received %d", count.Get())
	}
	if p.IdleClosed() != 1 {
		t.Errorf("Expecting 1, received %d", p.IdleClosed())
	}
	// the idle close thread wakes up every 1/100 of the idle time, so ensure
	// the timeout change applies to newly added resources
	p.SetIdleTimeout(1000 * time.Millisecond)
	p.Put(r)
	time.Sleep(20 * time.Millisecond)
	if lastID.Get() != 2 {
		t.Errorf("Expecting 2, received %d", lastID.Get())
	}
	if count.Get() != 1 {
		t.Errorf("Expecting 1, received %d", count.Get())
	}
	if p.IdleClosed() != 1 {
		t.Errorf("Expecting 1, received %d", p.IdleClosed())
	}
	p.SetIdleTimeout(10 * time.Millisecond)
	time.Sleep(20 * time.Millisecond)
	if lastID.Get() != 2 {
		t.Errorf("Expecting 2, received %d", lastID.Get())
	}
	if count.Get() != 0 {
		t.Errorf("Expecting 0, received %d", count.Get())
	}
	if p.IdleClosed() != 2 {
		t.Errorf("Expecting 2, received %d", p.IdleClosed())
	}
}
// TestCreateFail uses a factory that always errors: Get must surface the
// factory error and the pool statistics must remain untouched.
func TestCreateFail(t *testing.T) {
	ctx := context.Background()
	lastID.Set(0)
	count.Set(0)
	p := NewResourcePool(FailFactory, 5, 5, time.Second)
	defer p.Close()
	_, err := p.Get(ctx)
	if err.Error() != "Failed" {
		t.Errorf("Expecting Failed, received %v", err)
	}
	want := `{"Capacity": 5, "Available": 5, "Active": 0, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0}`
	if got := p.StatsJSON(); got != want {
		t.Errorf(`expecting '%s', received '%s'`, want, got)
	}
}
// TestSlowCreateFail checks that a slow, always-failing factory does not leak
// capacity: all concurrent Gets return and Available() recovers to full.
func TestSlowCreateFail(t *testing.T) {
	ctx := context.Background()
	lastID.Set(0)
	count.Set(0)
	p := NewResourcePool(SlowFailFactory, 2, 2, time.Second)
	defer p.Close()
	finished := make(chan bool)
	// The third Get should not wait indefinitely
	for i := 0; i < 3; i++ {
		go func() {
			p.Get(ctx)
			finished <- true
		}()
	}
	for i := 0; i < 3; i++ {
		<-finished
	}
	if avail := p.Available(); avail != 2 {
		t.Errorf("Expecting 2, received %d", avail)
	}
}
// TestTimeout drains the single-slot pool, then verifies that a Get with a
// nearly-expired context fails with the pool's timeout error.
func TestTimeout(t *testing.T) {
	ctx := context.Background()
	lastID.Set(0)
	count.Set(0)
	p := NewResourcePool(PoolFactory, 1, 1, time.Second)
	defer p.Close()
	r, err := p.Get(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// The pool is now empty, so this Get can only time out.
	shortCtx, cancel := context.WithTimeout(ctx, 1*time.Millisecond)
	_, err = p.Get(shortCtx)
	cancel()
	const want = "resource pool timed out"
	if err == nil || err.Error() != want {
		t.Errorf("got %v, want %s", err, want)
	}
	p.Put(r)
}
// TestExpired verifies that Get fails immediately with the pool timeout error
// when the supplied context's deadline already lies in the past.
func TestExpired(t *testing.T) {
	lastID.Set(0)
	count.Set(0)
	p := NewResourcePool(PoolFactory, 1, 1, time.Second)
	defer p.Close()
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-1*time.Second))
	defer cancel()
	r, err := p.Get(ctx)
	if err == nil {
		// Should not happen; return the resource so the pool stays sane.
		p.Put(r)
	}
	const want = "resource pool timed out"
	if err == nil || err.Error() != want {
		t.Errorf("got %v, want %s", err, want)
	}
}
| PoolFactory |
exclusivity_correlation.py | """
===========================
Computes the correlation between pairwise distances and mean exclusivity ratings for randomly drawn pairs of norms.
===========================
Dr. Cai Wingfield
---------------------------
Embodied Cognition Lab
Department of Psychology
University of Lancaster
[email protected]
caiwingfield.net
---------------------------
2022
---------------------------
"""
from numpy import corrcoef, zeros
from numpy.random import default_rng, seed
from linguistic_distributional_models.utils.logging import print_progress
from linguistic_distributional_models.utils.maths import DistanceType, distance
from sensorimotor_norms.sensorimotor_norms import SensorimotorNorms, DataColNames
sn = SensorimotorNorms(use_breng_translation=False, verbose=True)
def exclusivity_correlation(n_draws: int):
    """
    Estimate the correlation between pairwise sensorimotor distance and mean
    modality exclusivity over randomly drawn pairs of norm words.

    (Restores the function name that was masked by a `|` placeholder; the
    name is fixed by the call site in the __main__ block.)

    :param n_draws: Number of random word pairs to draw (with replacement).
    :return: 2x2 correlation matrix from numpy.corrcoef.
    """
    rng = default_rng()
    all_words = list(sn.iter_words())
    # Draw both members of every pair in one call, then split the sample.
    random_words = rng.choice(all_words, 2 * n_draws, replace=True)
    first_words = random_words[:n_draws]
    second_words = random_words[n_draws:]
    distances = zeros((n_draws,))  # Preallocate vectors to be correlated
    mean_exclusivities = zeros((n_draws,))
    for i in range(n_draws):
        w1, w2 = first_words[i], second_words[i]
        v1, v2 = sn.sensorimotor_vector_for_word(w1), sn.sensorimotor_vector_for_word(w2)
        e1, e2 = sn.stat_for_word(w1, DataColNames.exclusivity_sensorimotor), sn.stat_for_word(w2, DataColNames.exclusivity_sensorimotor)
        # For the pair
        distances[i] = distance(v1, v2, DistanceType.cosine)  # vector distance
        mean_exclusivities[i] = (e1 + e2) / 2  # mean exclusivity
        print_progress(i + 1, n_draws)
    return corrcoef(distances, mean_exclusivities)
if __name__ == "__main__":
    # Fixed seed so the random pair sample (and hence the reported
    # correlation) is reproducible across runs.
    seed(451)
    correlation = exclusivity_correlation(n_draws=10_000)
    print(correlation)
| exclusivity_correlation |
forms.py | from django import forms
| password = forms.CharField(max_length=20, required=True) | class RegisterForm(forms.Form):
username = forms.CharField(max_length=20, required=True) |
base_env.py | import pybullet_data
import glob
import pybullet
import pybullet_utils.bullet_client as bc
import time
import numpy as np
from gym.utils import seeding
import gym
import os
import inspect
from myGym.envs.camera import Camera
import pkg_resources
currentdir = pkg_resources.resource_filename("myGym", "envs")
repodir = pkg_resources.resource_filename("myGym", "./")
class BaseEnv(gym.Env):
"""
The base class for environments without rendering
Parameters:
:param gui_on: (bool) Whether or not to use PyBullet built-in GUI
:param objects_dir_path: (str) Path to directory with URDF files for objects
:param max_steps: (int) The maximum number of actions per episode
:param show_bounding_boxes_gui: (bool) Whether or not to show bounding boxes in GUI
:param changing_light_gui: (bool) Whether or not to change light in GUI
:param shadows_on_gui: (bool) Whether or not to show shadows in GUI
"""
metadata = {'render.modes': [
'human', 'rgb_array'], 'video.frames_per_second': 50}
    def __init__(self,
                 gui_on=True,
                 objects_dir_path=pkg_resources.resource_filename("myGym", "envs/"),
                 max_steps=1024,
                 show_bounding_boxes_gui=False,
                 changing_light_gui=False,
                 shadows_on_gui=True,
                 timestep=1./240.
                 ):
        self.gui_on = gui_on
        self.max_steps = max_steps
        self.show_bounding_boxes_gui = show_bounding_boxes_gui
        self.changing_light_gui = changing_light_gui
        self.shadows_on_gui = shadows_on_gui
        # Set episode information
        self.episode_start_time = None
        self.episode_over = False
        self.episode_failed = False
        self.episode_reward = 0.0
        self.episode_final_reward = []
        self.episode_final_distance = []
        self.episode_number = 0
        self.episode_steps = 0
        self.episode_max_time = 300
        self.episode_info = ""
        # Set general params
        # NOTE(review): the `timestep` argument is currently ignored — the
        # simulation always steps at the hard-coded 1/240 s (see the
        # commented-out assignment below). Confirm whether it should apply.
        self.time_step = 1. / 240.
        #self.time_step = timestep
        self.urdf_root = pybullet_data.getDataPath()
        self.observation = {}
        # Set objects information
        self.objects_dir_path = objects_dir_path
        self.env_objects = []
        self.scene_objects_uids = {}
        self.all_objects_filenames = self._get_all_urdf_filenames(self.objects_dir_path)
        # Set GUI: connect to the physics server first, then configure it.
        self._connect_to_physics_server()
        # Set env params and load models
        self._set_physics()
        self._setup_scene()
        self._set_observation_space()
        self._set_action_space()
def _connect_to_physics_server(self):
"""
Connect to the PyBullet physics server in SHARED_MEMORY, GUI or DIRECT mode
"""
if self.gui_on:
self.p = bc.BulletClient(connection_mode=pybullet.GUI)
# if (self.p < 0):
# self.p = bc.BulletClient(connection_mode=p.GUI)
self._set_gui_mode()
else:
self.p = bc.BulletClient(connection_mode=pybullet.DIRECT)
self.p.setPhysicsEngineParameter(enableFileCaching=0)
    def _set_gui_mode(self):
        """
        Set GUI parameters: camera, shadows, extra elements
        """
        # Default debug-camera pose: distance 3.3, yaw -40, pitch -41.
        self.p.resetDebugVisualizerCamera(3.3, -40, -41, [0.0, 0.0, 0.33])
        self.p.configureDebugVisualizer(self.p.COV_ENABLE_SHADOWS, self.shadows_on_gui)
        # Hide PyBullet's built-in side panels.
        self.p.configureDebugVisualizer(self.p.COV_ENABLE_GUI, 0)
    def _set_physics(self):
        """
        Set physics engine parameters
        """
        self.p.setGravity(0, 0, -9.81)
        # High-accuracy solver configuration: Dantzig LCP solver with extra
        # iterations and sub-steps for stable contact/constraint resolution.
        self.p.setPhysicsEngineParameter(solverResidualThreshold=0.001, numSolverIterations=150, numSubSteps=10, useSplitImpulse=1, collisionFilterMode=1, constraintSolverType=self.p.CONSTRAINT_SOLVER_LCP_DANTZIG, globalCFM=0.000001)
        self.p.setTimeStep(self.time_step)
        self.p.setRealTimeSimulation(0)  # stepping is driven explicitly by the env
        self.p.setPhysicsEngineParameter(enableConeFriction=1)
        print(self.p.getPhysicsEngineParameters())
def _setup_scene(self):
"""
Set up scene elements (furniture, objects, robots)
"""
raise NotImplementedError
def _set_observation_space(self):
"""
Set limits of observations
"""
raise NotImplementedError
def _set_action_space(self):
"""
Set limits of actions
"""
raise NotImplementedError
def _get_observation(self):
"""
Get info about the state of the environment
Returns:
:return observation: (object) Observation of the environment
"""
raise NotImplementedError
def step(self, action):
"""
Apply action on the environment
Parameters:
:param action: (object) An action provided by the agent
Returns:
:return observation: (object)
:return reward: (float)
:return done: (bool):
:return info: (dict):
"""
raise NotImplementedError
    def _add_scene_object_uid(self, scene_object_uid, name):
        """
        Register a scene object's uid under a name; registered objects become
        eligible for texturization/colorization lookups.

        Parameters:
            :param scene_object_uid: (int) PyBullet body uid of the scene object
            :param name: (str) Name to register the uid under
        """
        self.scene_objects_uids[scene_object_uid] = name
def get_scene_object_uid_by_name(self, name):
for uid, object_name in self.scene_objects_uids.items():
if name == object_name:
return uid
return None
    def seed(self, seed=None):
        """
        Set the seed for this env's random number generator(s)

        Returns:
            :return: (list) The single seed actually used, per the gym convention
        """
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def hard_reset(self):
        """
        Full reset of the simulation. Delete and load again all objects and reset physics.
        """
        self.p.resetSimulation()
        self.p.disconnect()
        self._connect_to_physics_server()
        self.scene_objects_uids = {}  # old uids are invalid on a fresh connection
        #self.episode_number = 0
        self._set_physics()
        self._setup_scene()
def _restart_episode(self):
"""
Reset episode information and delete all objects
""" | self.episode_start_time = time.time()
self.episode_over = False
self.episode_failed = False
self.episode_reward = 0.0
self.episode_steps = 0
    def reset(self, hard=False):
        """
        Reset the state of the environment

        Parameters:
            :param hard: (bool) If True, tear down and rebuild the whole
                simulation; otherwise only delete spawned objects.
        """
        if hard:
            self.hard_reset()
        else:
            self._remove_all_objects()
        self._restart_episode()
def _draw_bounding_boxes(self):
"""
Show bounding boxes in tne PyBullet GUI
"""
for object in self.env_objects:
object.draw_bounding_box()
def _compute_reward(self):
"""
Compute reward for the agent
"""
return NotImplementedError
    def _print_episode_summary(self, info_dict={}):
        """
        Show an extra information about the episode

        Parameters:
            :param info_dict: (dict) Extra info printed as key: value lines
        """
        # NOTE(review): `episode_status` is computed but its print statement is
        # commented out below, so it is currently unused.
        if self.episode_failed:
            episode_status = "FAILURE"
        else:
            episode_status = "SUCCESS"
        print("#---------Episode-Summary---------#")
        print("Episode number: " + str(self.episode_number))
        print("Episode's number of steps: " + str(self.episode_steps))
        #print("Episode status: " + episode_status)
        print("Episode info: " + self.episode_info)
        print("Episode reward: " + str(self.episode_reward))
        #print("Last step reward: " + str(self.reward.rewards_history[-1]))
        print("#---------------------------------#")
        for key, value in info_dict.items():
            print(key + ": " + str(value))
def _get_random_urdf_filenames(self, n, used_objects=None):
"""
Sample random URDF files from directory with objects URDFs
Parameters:
:param n: (int) Number of URDF's
:param used_objects: (list) Specified subset of objects
Returns:
:return selected_objects_filenames: (list)
"""
if used_objects or (self.all_objects_filenames is None):
all_objects_filenames = []
for object_name in used_objects:
if "virtual" in object_name:
all_objects_filenames.append(object_name)
for file in self.all_objects_filenames:
if '/'+object_name+'.' in file:
all_objects_filenames.append(file)
else:
# uses self.all_objects_filenames
pass
assert all_objects_filenames is not None
selected_objects_filenames = []
total_num_objects = len(all_objects_filenames)
if (n <= total_num_objects):
selected_objects = np.random.choice(
np.arange(total_num_objects), n, replace=True)
else:
selected_objects = list(np.arange(total_num_objects))
remain = n - total_num_objects
selected_objects += list(np.random.choice(
np.arange(total_num_objects), remain))
for object_id in selected_objects:
selected_objects_filenames.append(all_objects_filenames[object_id])
return selected_objects_filenames
    def _get_all_urdf_filenames(self, dir):
        """
        Get all URDF filenames from directory

        Parameters:
            :param dir: (str) Directory to search — NOTE(review): currently
                ignored; the walk always uses self.objects_dir_path (the only
                visible caller passes that same path). Confirm before relying
                on this parameter.
        Returns:
            :return filenames: (list) Paths of all files under 'urdf' directories
        """
        list_all = []
        for (dirpath, dirnames, filenames) in os.walk(self.objects_dir_path):
            # Skip legacy '_old' folders; only collect from 'urdf' directories.
            if '_old' not in dirpath and 'urdf' in dirpath:
                list_all += [os.path.join(dirpath, file) for file in filenames]
        return list_all
    def _remove_object(self, object):
        """
        Totally remove object from the simulation

        Parameters:
            :param object: (EnvObject) Object to remove
        """
        # Drop the bookkeeping entry first, then delete the body from PyBullet.
        self.env_objects.remove(object)
        self.p.removeBody(object.uid)
def _remove_all_objects(self):
"""
Remove all objects from simulation (not scene objects or robots)
"""
env_objects_copy = self.env_objects[:]
for env_object in env_objects_copy:
self._remove_object(env_object)
def get_texturizable_objects_uids(self):
"""
Get all objects in the environment, on which textures can be applied
Returns:
:return texturizable_objects_uids: (list)
"""
return [object.get_uid() for object in self.env_objects] + list(self.scene_objects_uids.keys())
def get_colorizable_objects_uids(self):
"""
Get all objects in the environment, which color can be changed
Returns:
:return colorizable_objects_uids: (list)
"""
return [object.get_uid() for object in self.env_objects] + list(self.scene_objects_uids.keys())
def __del__(self):
"""
Disconnect from the physics server
"""
self.p.disconnect()
class CameraEnv(BaseEnv):
"""
The class for environments with rendering
Parameters:
:param camera_resolution: (list) The number of pixels in image (WxH)
:param shadows_on: (bool) Whether or not to use shadows while rendering, only applies to ER_TINY_RENDERER
:param render_on: (bool) Turn on rendering
:param renderer: (int) self.p.ER_TINY_RENDERER (CPU) or self.p.ER_BULLET_HARDWARE_OPENGL (GPU)
:param active_cameras: (list) Set 1 at a position(=camera number) to save images from this camera
"""
    def __init__(self, camera_resolution=[640, 480], shadows_on=True,
                 render_on=True, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,
                 active_cameras=None, **kwargs):
        # NOTE(review): camera_resolution uses a mutable default list; it is
        # only ever read here, so cross-instance sharing looks harmless —
        # confirm no subclass mutates it.
        super(CameraEnv, self).__init__(**kwargs)
        self.camera_resolution = camera_resolution
        self.shadows_on = shadows_on
        self.render_on = render_on
        self.renderer = renderer
        self.active_cameras = active_cameras
        self.cameras = []
        self.set_light()      # apply default lighting parameters
        self._set_cameras()   # subclass-provided camera placement
def set_light(self, light_direction=[1, 1, 1], light_color=[0.1, 0.1, 0.1],
light_distance=1., light_ambient=1., light_diffuse=1.,
light_specular=1.):
"""
Set light parameters for rendering, doesn't affect PyBullet GUI. Appart from light_direction, all parameters only apply to ER_TINY_RENDERER.
Parameters:
:param light_direction: (list) Specifies the world position of the light source
:param light_color: (list) Directional light color in RGB in range 0..1
:param light_distance: (float) Distance of the light along the normalized light_direction
:param light_ambient: (float) Light ambient coefficient in range 0..1
:param light_diffuse: (float) Light diffuse coefficient in range 0..1
:param light_specular: (float) Light specular coefficient in range 0..1
"""
self.light_direction = light_direction
self.light_color = light_color
self.light_distance = light_distance
self.light_ambient = light_ambient
self.light_diffuse = light_diffuse
self.light_specular = light_specular
def get_render_parameters(self):
"""
Return environment parameters for rendering, initially is intended to
use by cameras
Returns:
:return render_parameters: (dict) Render parameters
"""
return {
"width": self.camera_resolution[0],
"height": self.camera_resolution[1],
"lightDirection": self.light_direction,
"lightColor": self.light_color,
"lightDistance": self.light_distance,
"shadow": 1 if self.shadows_on else 0,
"lightAmbientCoeff": self.light_ambient,
"lightDiffuseCoeff": self.light_diffuse,
"lightSpecularCoeff": self.light_specular,
"renderer": self.renderer
}
def _set_cameras(self):
"""
Set cameras available to use for rendering
"""
raise NotImplementedError
def get_cameras(self):
return self.cameras
def add_camera(self, **kwargs):
"""
Add new camera to the environment
Parameters:
:param position: (list) Eye position in Cartesian world coordinates
:prarm target_position: (list) Position of the target point
:param up_vector: (list) Up vector of the camera
:param up_axis_index: (int) Either 1 for Y or 2 for Z axis up
:param yaw: (float) Yaw angle in degrees left/right around up-axis
:param pitch: (float) Pitch in degrees up/down
:param roll: (float) Roll in degrees around forward vector
:param distance: (float) Distance from eye to focus point
:param field_of_view: (float) Field of view
:param near_plane_distance: (float) Near plane distance
:param far_plane_distance: (float) Far plane distance
"""
self.cameras.append(Camera(env=self, **kwargs))
    def set_active_cameras(self, active_cameras):
        """
        Replace the active-camera mask. Silently ignored when the mask length
        does not match the number of cameras.

        Parameters:
            :param active_cameras: (list) One truthy/falsy flag per camera
        """
        if (len(active_cameras) == len(self.cameras)):
            self.active_cameras = active_cameras
def change_current_camera(self, camera_num):
print("Change camera to " + str(self.current_camera))
self.current_camera = camera_num
    def render(self, mode="rgb_array", camera_id=None):
        """
        Get image (image, depth, segmentation_mask) from camera or active cameras

        Parameters:
            :param mode: (str) rgb_array to return RGB image
            :param camera_id: (int) Get image from specified camera
        Returns:
            :return camera_data: (dict) Key: camera_id, Value: info from camera
        """
        # Only "rgb_array" is supported; any other mode yields an empty array.
        if mode != "rgb_array":
            return np.array([])
        camera_data = {}
        if self.render_on:
            if camera_id is not None:
                camera_data[camera_id] = self.cameras[camera_id].render()
            else:
                # Render every camera flagged active in the mask.
                for camera_num in range(len(self.active_cameras)):
                    if self.active_cameras[camera_num]:
                        camera_data[camera_num] = self.cameras[camera_num].render()
        return camera_data
def project_point_to_camera_image(self, point, camera_id):
"""
Project 3D point in Cartesian world coordinates to 2D point in pixel space
Parameters:
:param point: (list) 3D point in Cartesian world coordinates
:param camera_id: (int) Index of camera to project on
Returns:
:return 2d_point: (list) 2D coordinates of point on imageg
"""
return self.cameras[camera_id].project_point_to_image(point)
def get_camera_opencv_matrix_values(self, camera_id):
"""
Compute values of OpenCV matrix
Parameters:
:param camera_id: (int) Index of camera to get matrix from
Returns:
:return values: (dict) fx, fy, cx, cy values
"""
return self.cameras[camera_id].get_opencv_camera_matrix_values() | self.p.removeAllUserDebugItems() |
solution.py | class | :
def maxDiv(self, a: int, b: int) -> int:
while a % b == 0:
a = a / b
return a
def isUgly2(self, n: int) -> bool:
n = self.maxDiv(n, 2)
n = self.maxDiv(n, 3)
n = self.maxDiv(n, 5)
return n == 1
def isUgly(self, n: int) -> bool:
if n == 0:
return False
while n % 2 == 0:
n /= 2
while n % 3 == 0:
n /= 3
while n % 5 == 0:
n /= 5
return n == 1
# Smoke-test the checker on a few hand-picked values.
s = Solution()
for value in (6, 7, 8, 1, 14, 21, 0, 1):
    print(s.isUgly(value))
| Solution |
jquery.ui.datepicker-cy-GB.min.js | /*! jQuery UI - v1.9.2 - 2017-10-08
* http://jqueryui.com
* Copyright jQuery Foundation and other contributors; Licensed MIT */
| jQuery(function(t){t.datepicker.regional["cy-GB"]={closeText:"Done",prevText:"Prev",nextText:"Next",currentText:"Today",monthNames:["Ionawr","Chwefror","Mawrth","Ebrill","Mai","Mehefin","Gorffennaf","Awst","Medi","Hydref","Tachwedd","Rhagfyr"],monthNamesShort:["Ion","Chw","Maw","Ebr","Mai","Meh","Gor","Aws","Med","Hyd","Tac","Rha"],dayNames:["Dydd Sul","Dydd Llun","Dydd Mawrth","Dydd Mercher","Dydd Iau","Dydd Gwener","Dydd Sadwrn"],dayNamesShort:["Sul","Llu","Maw","Mer","Iau","Gwe","Sad"],dayNamesMin:["Su","Ll","Ma","Me","Ia","Gw","Sa"],weekHeader:"Wy",dateFormat:"dd/mm/yy",firstDay:1,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""},t.datepicker.setDefaults(t.datepicker.regional["cy-GB"])}); |
|
entities.rs | use alloc::vec::Vec;
use core::cmp;
use core::convert::TryFrom;
use core::iter::ExactSizeIterator;
use core::num::{NonZeroU32, NonZeroU64};
use core::ops::Range;
use core::sync::atomic::{AtomicIsize, Ordering};
use core::{fmt, mem};
#[cfg(feature = "std")]
use std::error::Error;
/// Lightweight unique ID, or handle, of an entity
///
/// Obtained from `World::spawn`. Can be stored to refer to an entity in the future.
///
/// Enable the `serde` feature on the crate to make this `Serialize`able. Some applications may be
/// able to save space by only serializing the output of `Entity::id`.
#[derive(Clone, Copy, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Entity {
    /// Generation counter; allows detecting stale handles when an `id` slot is reused.
    pub(crate) generation: NonZeroU32,
    /// Slot index into the entity metadata array.
    pub(crate) id: u32,
}
impl Entity {
    /// Convert to a form convenient for passing outside of rust
    ///
    /// No particular structure is guaranteed for the returned bits.
    ///
    /// Useful for storing entity IDs externally, or in conjunction with `Entity::from_bits` and
    /// `World::spawn_at` for easy serialization. Alternatively, consider `id` for more compact
    /// representation.
    pub fn to_bits(self) -> NonZeroU64 {
        // SAFETY: `generation` is a NonZeroU32 occupying the high 32 bits of
        // the packed value, so the resulting u64 is always nonzero.
        unsafe {
            NonZeroU64::new_unchecked(u64::from(self.generation.get()) << 32 | u64::from(self.id))
        }
    }
    /// Reconstruct an `Entity` previously destructured with `to_bits` if the bitpattern is valid,
    /// else `None`
    ///
    /// Useful for storing entity IDs externally, or in conjunction with `Entity::to_bits` and
    /// `World::spawn_at` for easy serialization.
    pub fn from_bits(bits: u64) -> Option<Self> {
        // The high 32 bits must be a nonzero generation; `?` rejects zero.
        Some(Self {
            generation: NonZeroU32::new((bits >> 32) as u32)?,
            id: bits as u32,
        })
    }
    /// Extract a transiently unique identifier
    ///
    /// No two simultaneously-live entities share the same ID, but dead entities' IDs may collide
    /// with both live and dead entities. Useful for compactly representing entities within a
    /// specific snapshot of the world, such as when serializing.
    ///
    /// See also `World::find_entity_from_id`.
    pub fn id(self) -> u32 {
        self.id
    }
}
impl fmt::Debug for Entity {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render as "<id>v<generation>", e.g. "42v1".
        write!(f, "{}v{}", self.id, self.generation)
    }
}
#[cfg(feature = "serde")]
impl serde::Serialize for Entity {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.to_bits().serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Entity {
    fn deserialize<D>(deserializer: D) -> Result<Entity, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Entities serialize as the packed u64 produced by `to_bits`; reject
        // any bitpattern with a zero generation.
        let bits = u64::deserialize(deserializer)?;
        match Entity::from_bits(bits) {
            Some(ent) => Ok(ent),
            // Fix: removed the stray leading backtick from the expected-value
            // message ("`a valid ..." -> "a valid ...").
            None => Err(serde::de::Error::invalid_value(
                serde::de::Unexpected::Unsigned(bits),
                &"a valid `Entity` bitpattern",
            )),
        }
    }
}
/// An iterator returning a sequence of Entity values from `Entities::reserve_entities`.
pub struct ReserveEntitiesIterator<'a> {
// Metas, so we can recover the current generation for anything in the freelist.
meta: &'a [EntityMeta],
// Reserved IDs formerly in the freelist to hand out.
id_iter: core::slice::Iter<'a, u32>,
// New Entity IDs to hand out, outside the range of meta.len().
id_range: core::ops::Range<u32>,
}
impl<'a> Iterator for ReserveEntitiesIterator<'a> {
    type Item = Entity;
    fn next(&mut self) -> Option<Self::Item> {
        // First hand out recycled freelist IDs, which keep the generation
        // recorded in their metadata; then brand-new IDs at generation 1.
        self.id_iter
            .next()
            .map(|&id| Entity {
                generation: self.meta[id as usize].generation,
                id,
            })
            .or_else(|| {
                self.id_range.next().map(|id| Entity {
                    generation: NonZeroU32::new(1).unwrap(),
                    id,
                })
            })
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact bound: remaining freelist IDs plus remaining fresh IDs.
        let len = self.id_iter.len() + self.id_range.len();
        (len, Some(len))
    }
}
impl<'a> ExactSizeIterator for ReserveEntitiesIterator<'a> {}
#[derive(Default)]
#[doc(hidden)]
pub struct Entities {
pub meta: Vec<EntityMeta>,
// The `pending` and `free_cursor` fields describe three sets of Entity IDs
// that have been freed or are in the process of being allocated:
//
// - The `freelist` IDs, previously freed by `free()`. These IDs are available to any
// of `alloc()`, `reserve_entity()` or `reserve_entities()`. Allocation will
// always prefer these over brand new IDs.
//
// - The `reserved` list of IDs that were once in the freelist, but got
// reserved by `reserve_entities` or `reserve_entity()`. They are now waiting
// for `flush()` to make them fully allocated.
//
// - The count of new IDs that do not yet exist in `self.meta()`, but which
// we have handed out and reserved. `flush()` will allocate room for them in `self.meta()`.
//
// The contents of `pending` look like this:
//
// ```
// ----------------------------
// | freelist | reserved |
// ----------------------------
// ^ ^
// free_cursor pending.len()
// ```
//
// As IDs are allocated, `free_cursor` is atomically decremented, moving
// items from the freelist into the reserved list by sliding over the boundary.
//
// Once the freelist runs out, `free_cursor` starts going negative.
// The more negative it is, the more IDs have been reserved starting exactly at
// the end of `meta.len()`.
//
// This formulation allows us to reserve any number of IDs first from the freelist
// and then from the new IDs, using only a single atomic subtract.
//
// Once `flush()` is done, `free_cursor` will equal `pending.len()`.
pending: Vec<u32>,
free_cursor: AtomicIsize,
len: u32,
}
impl Entities {
    /// Reserve entity IDs concurrently
    ///
    /// Storage for entity generation and location is lazily allocated by calling `flush`.
    pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator {
        // Use one atomic subtract to grab a range of new IDs. The range might be
        // entirely nonnegative, meaning all IDs come from the freelist, or entirely
        // negative, meaning they are all new IDs to allocate, or a mix of both.
        let range_end = self
            .free_cursor
            .fetch_sub(count as isize, Ordering::Relaxed);
        let range_start = range_end - count as isize;
        // Clamp to the nonnegative portion: that part indexes into `pending`.
        let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize;
        let (new_id_start, new_id_end) = if range_start >= 0 {
            // We satisfied all requests from the freelist.
            (0, 0)
        } else {
            // We need to allocate some new Entity IDs outside of the range of self.meta.
            //
            // `range_start` covers some negative territory, e.g. `-3..6`.
            // Since the nonnegative values `0..6` are handled by the freelist, that
            // means we need to handle the negative range here.
            //
            // In this example, we truncate the end to 0, leaving us with `-3..0`.
            // Then we negate these values to indicate how far beyond the end of `meta.end()`
            // to go, yielding `meta.len()+0 .. meta.len()+3`.
            let base = self.meta.len() as isize;
            let new_id_end = u32::try_from(base - range_start).expect("too many entities");
            // `new_id_end` is in range, so no need to check `start`.
            let new_id_start = (base - range_end.min(0)) as u32;
            (new_id_start, new_id_end)
        };
        ReserveEntitiesIterator {
            meta: &self.meta[..],
            id_iter: self.pending[freelist_range].iter(),
            id_range: new_id_start..new_id_end,
        }
    }
    /// Reserve one entity ID concurrently
    ///
    /// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient.
    pub fn reserve_entity(&self) -> Entity {
        // Atomically claim one slot; a positive pre-decrement value means the
        // freelist still had an entry for us.
        let n = self.free_cursor.fetch_sub(1, Ordering::Relaxed);
        if n > 0 {
            // Allocate from the freelist.
            let id = self.pending[(n - 1) as usize];
            Entity {
                generation: self.meta[id as usize].generation,
                id,
            }
        } else {
            // Grab a new ID, outside the range of `meta.len()`. `flush()` must
            // eventually be called to make it valid.
            //
            // As `self.free_cursor` goes more and more negative, we return IDs farther
            // and farther beyond `meta.len()`.
            Entity {
                generation: NonZeroU32::new(1).unwrap(),
                id: u32::try_from(self.meta.len() as isize - n).expect("too many entities"),
            }
        }
    }
/// Check that we do not have pending work requiring `flush()` to be called.
fn verify_flushed(&mut self) {
debug_assert!(
!self.needs_flush(),
"flush() needs to be called before this operation is legal"
);
}
/// Allocate an entity ID directly
///
/// Location should be written immediately.
pub fn alloc(&mut self) -> Entity {
self.verify_flushed();
self.len += 1;
if let Some(id) = self.pending.pop() {
let new_free_cursor = self.pending.len() as isize;
self.free_cursor.store(new_free_cursor, Ordering::Relaxed); // Not racey due to &mut self
Entity {
generation: self.meta[id as usize].generation,
id,
}
} else {
let id = u32::try_from(self.meta.len()).expect("too many entities");
self.meta.push(EntityMeta::EMPTY);
Entity {
generation: NonZeroU32::new(1).unwrap(),
id,
}
}
}
/// Allocate and set locations for many entity IDs laid out contiguously in an archetype
///
/// `self.finish_alloc_many()` must be called after!
pub fn alloc_many(&mut self, n: u32, archetype: u32, mut first_index: u32) -> AllocManyState {
self.verify_flushed();
let fresh = (n as usize).saturating_sub(self.pending.len()) as u32;
assert!(
(self.meta.len() + fresh as usize) < u32::MAX as usize,
"too many entities"
);
let pending_end = self.pending.len().saturating_sub(n as usize);
for &id in &self.pending[pending_end..] {
self.meta[id as usize].location = Location {
archetype,
index: first_index,
};
first_index += 1;
}
let fresh_start = self.meta.len() as u32;
self.meta.extend(
(first_index..(first_index + fresh)).map(|index| EntityMeta {
generation: NonZeroU32::new(1).unwrap(),
location: Location { archetype, index },
}),
);
self.len += n;
AllocManyState {
fresh: fresh_start..(fresh_start + fresh),
pending_end,
}
}
/// Remove entities used by `alloc_many` from the freelist
///
/// This is an awkward separate function to avoid borrowck issues in `SpawnColumnBatchIter`.
pub fn finish_alloc_many(&mut self, pending_end: usize) {
self.pending.truncate(pending_end);
}
/// Allocate a specific entity ID, overwriting its generation
///
/// Returns the location of the entity currently using the given ID, if any. Location should be written immediately.
pub fn alloc_at(&mut self, entity: Entity) -> Option<Location> {
self.verify_flushed();
let loc = if entity.id as usize >= self.meta.len() {
self.pending.extend((self.meta.len() as u32)..entity.id);
let new_free_cursor = self.pending.len() as isize;
self.free_cursor.store(new_free_cursor, Ordering::Relaxed); // Not racey due to &mut self
self.meta.resize(entity.id as usize + 1, EntityMeta::EMPTY);
self.len += 1;
None
} else if let Some(index) = self.pending.iter().position(|item| *item == entity.id) {
self.pending.swap_remove(index);
let new_free_cursor = self.pending.len() as isize;
self.free_cursor.store(new_free_cursor, Ordering::Relaxed); // Not racey due to &mut self
self.len += 1;
None
} else {
Some(mem::replace(
&mut self.meta[entity.id as usize].location,
EntityMeta::EMPTY.location,
))
};
self.meta[entity.id as usize].generation = entity.generation;
loc
}
/// Destroy an entity, allowing it to be reused
///
/// Must not be called while reserved entities are awaiting `flush()`.
pub fn free(&mut self, entity: Entity) -> Result<Location, NoSuchEntity> {
self.verify_flushed();
let meta = self.meta.get_mut(entity.id as usize).ok_or(NoSuchEntity)?;
if meta.generation != entity.generation {
return Err(NoSuchEntity);
}
meta.generation = NonZeroU32::new(u32::from(meta.generation).wrapping_add(1))
.unwrap_or_else(|| NonZeroU32::new(1).unwrap());
let loc = mem::replace(&mut meta.location, EntityMeta::EMPTY.location);
self.pending.push(entity.id);
let new_free_cursor = self.pending.len() as isize;
self.free_cursor.store(new_free_cursor, Ordering::Relaxed); // Not racey due to &mut self
self.len -= 1;
Ok(loc)
}
/// Ensure at least `n` allocations can succeed without reallocating
pub fn reserve(&mut self, additional: u32) {
self.verify_flushed();
let freelist_size = self.free_cursor.load(Ordering::Relaxed);
let shortfall = additional as isize - freelist_size;
if shortfall > 0 {
self.meta.reserve(shortfall as usize);
}
}
pub fn contains(&self, entity: Entity) -> bool {
// Note that out-of-range IDs are considered to be "contained" because
// they must be reserved IDs that we haven't flushed yet.
match self.meta.get(entity.id as usize) {
Some(meta) => meta.generation == entity.generation,
None => {
// Check if this could have been obtained from `reserve_entity`
let free = self.free_cursor.load(Ordering::Relaxed);
entity.generation.get() == 1
&& free < 0
&& (entity.id as isize) < (free.abs() + self.meta.len() as isize)
}
}
}
pub fn clear(&mut self) {
self.meta.clear();
self.pending.clear();
self.free_cursor.store(0, Ordering::Relaxed); // Not racey due to &mut self
self.len = 0;
}
/// Access the location storage of an entity
///
/// Must not be called on pending entities.
pub fn get_mut(&mut self, entity: Entity) -> Result<&mut Location, NoSuchEntity> {
let meta = self.meta.get_mut(entity.id as usize).ok_or(NoSuchEntity)?;
if meta.generation == entity.generation {
Ok(&mut meta.location)
} else {
Err(NoSuchEntity)
}
}
/// Returns `Ok(Location { archetype: 0, index: undefined })` for pending entities
pub fn get(&self, entity: Entity) -> Result<Location, NoSuchEntity> {
if self.meta.len() <= entity.id as usize {
return Ok(Location {
archetype: 0,
index: u32::max_value(),
});
}
let meta = &self.meta[entity.id as usize];
if meta.generation != entity.generation {
return Err(NoSuchEntity);
}
Ok(meta.location)
}
/// Panics if the given id would represent an index outside of `meta`.
///
/// # Safety
/// Must only be called for currently allocated `id`s.
pub unsafe fn resolve_unknown_gen(&self, id: u32) -> Entity {
let meta_len = self.meta.len();
if meta_len > id as usize {
let meta = &self.meta[id as usize];
Entity {
generation: meta.generation,
id,
}
} else {
// See if it's pending, but not yet flushed.
let free_cursor = self.free_cursor.load(Ordering::Relaxed);
let num_pending = cmp::max(-free_cursor, 0) as usize;
if meta_len + num_pending > id as usize {
// Pending entities will have generation 0.
Entity {
generation: NonZeroU32::new(1).unwrap(),
id,
}
} else {
panic!("entity id is out of range");
}
}
}
fn needs_flush(&mut self) -> bool {
// Not racey due to &mut self
self.free_cursor.load(Ordering::Relaxed) != self.pending.len() as isize
}
/// Allocates space for entities previously reserved with `reserve_entity` or
/// `reserve_entities`, then initializes each one using the supplied function.
pub fn flush(&mut self, mut init: impl FnMut(u32, &mut Location)) {
// Not racey due because of self is &mut.
let free_cursor = self.free_cursor.load(Ordering::Relaxed);
let new_free_cursor = if free_cursor >= 0 {
free_cursor as usize
} else {
let old_meta_len = self.meta.len();
let new_meta_len = old_meta_len + -free_cursor as usize;
self.meta.resize(new_meta_len, EntityMeta::EMPTY); | self.len += -free_cursor as u32;
for (id, meta) in self.meta.iter_mut().enumerate().skip(old_meta_len) {
init(id as u32, &mut meta.location);
}
self.free_cursor.store(0, Ordering::Relaxed);
0
};
self.len += (self.pending.len() - new_free_cursor) as u32;
for id in self.pending.drain(new_free_cursor..) {
init(id, &mut self.meta[id as usize].location);
}
}
#[inline]
pub fn len(&self) -> u32 {
self.len
}
}
#[derive(Copy, Clone)]
#[doc(hidden)]
pub struct EntityMeta {
pub generation: NonZeroU32,
pub location: Location,
}
impl EntityMeta {
const EMPTY: EntityMeta = EntityMeta {
generation: match NonZeroU32::new(1) {
Some(x) => x,
None => unreachable!(),
},
location: Location {
archetype: 0,
index: u32::max_value(), // dummy value, to be filled in
},
};
}
#[derive(Copy, Clone)]
#[doc(hidden)]
pub struct Location {
pub archetype: u32,
pub index: u32,
}
/// Error indicating that no entity with a particular ID exists
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct NoSuchEntity;
impl fmt::Display for NoSuchEntity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("no such entity")
}
}
#[cfg(feature = "std")]
impl Error for NoSuchEntity {}
#[derive(Clone)]
#[doc(hidden)]
pub struct AllocManyState {
pub pending_end: usize,
fresh: Range<u32>,
}
impl AllocManyState {
pub fn next(&mut self, entities: &Entities) -> Option<u32> {
if self.pending_end < entities.pending.len() {
let id = entities.pending[self.pending_end];
self.pending_end += 1;
Some(id)
} else {
self.fresh.next()
}
}
pub fn len(&self, entities: &Entities) -> usize {
self.fresh.len() + (entities.pending.len() - self.pending_end)
}
}
#[cfg(test)]
mod tests {
use super::*;
use hashbrown::{HashMap, HashSet};
use rand::{rngs::StdRng, Rng, SeedableRng};
#[test]
fn entity_bits_roundtrip() {
let e = Entity {
generation: NonZeroU32::new(0xDEADBEEF).unwrap(),
id: 0xBAADF00D,
};
assert_eq!(Entity::from_bits(e.to_bits().into()).unwrap(), e);
}
#[test]
fn alloc_and_free() {
let mut rng = StdRng::seed_from_u64(0xFEEDFACEDEADF00D);
let mut e = Entities::default();
let mut first_unused = 0u32;
let mut id_to_gen: HashMap<u32, u32> = Default::default();
let mut free_set: HashSet<u32> = Default::default();
let mut len = 0;
for _ in 0..100 {
let alloc = rng.gen_bool(0.7);
if alloc || first_unused == 0 {
let entity = e.alloc();
len += 1;
let id = entity.id;
if !free_set.is_empty() {
// This should have come from the freelist.
assert!(free_set.remove(&id));
} else if id >= first_unused {
first_unused = id + 1;
}
e.get_mut(entity).unwrap().index = 37;
assert!(id_to_gen.insert(id, entity.generation.get()).is_none());
} else {
// Free a random ID, whether or not it's in use, and check for errors.
let id = rng.gen_range(0..first_unused);
let generation = id_to_gen.remove(&id);
let entity = Entity {
id,
generation: NonZeroU32::new(
generation.unwrap_or_else(|| NonZeroU32::new(1).unwrap().get()),
)
.unwrap(),
};
assert_eq!(e.free(entity).is_ok(), generation.is_some());
if generation.is_some() {
len -= 1;
}
free_set.insert(id);
}
assert_eq!(e.len(), len);
}
}
#[test]
fn alloc_at() {
let mut e = Entities::default();
let mut old = Vec::new();
for _ in 0..2 {
let entity = e.alloc();
old.push(entity);
e.free(entity).unwrap();
}
assert_eq!(e.len(), 0);
let id = old.first().unwrap().id();
assert!(old.iter().all(|entity| entity.id() == id));
let entity = *old.last().unwrap();
// The old ID shouldn't exist at this point, and should exist
// in the pending list.
assert!(!e.contains(entity));
assert!(e.pending.contains(&entity.id()));
// Allocating an entity at an unused location should not cause a location to be returned.
assert!(e.alloc_at(entity).is_none());
assert!(e.contains(entity));
// The entity in question should not exist in the free-list once allocated.
assert!(!e.pending.contains(&entity.id()));
assert_eq!(e.len(), 1);
// Allocating at the same id again should cause a location to be returned
// this time around.
assert!(e.alloc_at(entity).is_some());
assert!(e.contains(entity));
assert_eq!(e.len(), 1);
// Allocating an Entity should cause the new empty locations
// to be located in the free list.
assert_eq!(e.meta.len(), 1);
assert!(e
.alloc_at(Entity {
id: 3,
generation: NonZeroU32::new(2).unwrap(),
})
.is_none());
assert_eq!(e.pending.len(), 2);
assert_eq!(&e.pending, &[1, 2]);
assert_eq!(e.meta.len(), 4);
}
#[test]
fn contains() {
let mut e = Entities::default();
for _ in 0..2 {
let entity = e.alloc();
assert!(e.contains(entity));
e.free(entity).unwrap();
assert!(!e.contains(entity));
}
// Reserved but not flushed are still "contained".
for _ in 0..3 {
let entity = e.reserve_entity();
assert!(e.contains(entity));
assert!(!e.contains(Entity {
id: entity.id,
generation: NonZeroU32::new(2).unwrap(),
}));
assert!(!e.contains(Entity {
id: entity.id + 1,
generation: NonZeroU32::new(1).unwrap(),
}));
}
}
// Shared test code parameterized by how we want to allocate an Entity block.
fn reserve_test_helper(reserve_n: impl FnOnce(&mut Entities, u32) -> Vec<Entity>) {
let mut e = Entities::default();
// Allocate 10 items.
let mut v1: Vec<Entity> = (0..10).map(|_| e.alloc()).collect();
assert_eq!(v1.iter().map(|e| e.id).max(), Some(9));
for &entity in v1.iter() {
assert!(e.contains(entity));
e.get_mut(entity).unwrap().index = 37;
}
// Put the last 4 on the freelist.
for entity in v1.drain(6..) {
e.free(entity).unwrap();
}
assert_eq!(e.free_cursor.load(Ordering::Relaxed), 4);
// Reserve 10 entities, so 4 will come from the freelist.
// This means we will have allocated 10 + 10 - 4 total items, so max id is 15.
let v2 = reserve_n(&mut e, 10);
assert_eq!(v2.iter().map(|e| e.id).max(), Some(15));
// Reserved IDs still count as "contained".
assert!(v2.iter().all(|&entity| e.contains(entity)));
// We should have exactly IDs 0..16
let mut v3: Vec<Entity> = v1.iter().chain(v2.iter()).copied().collect();
assert_eq!(v3.len(), 16);
v3.sort_by_key(|entity| entity.id);
for (i, entity) in v3.into_iter().enumerate() {
assert_eq!(entity.id, i as u32);
}
// 6 will come from pending.
assert_eq!(e.free_cursor.load(Ordering::Relaxed), -6);
let mut flushed = Vec::new();
e.flush(|id, _| flushed.push(id));
flushed.sort_unstable();
assert_eq!(flushed, (6..16).collect::<Vec<_>>());
}
#[test]
fn reserve_entity() {
reserve_test_helper(|e, n| (0..n).map(|_| e.reserve_entity()).collect())
}
#[test]
fn reserve_entities() {
reserve_test_helper(|e, n| e.reserve_entities(n).collect())
}
#[test]
fn reserve_grows() {
let mut e = Entities::default();
let _ = e.reserve_entity();
e.flush(|_, _| {});
assert_eq!(e.len(), 1);
}
#[test]
fn reserve_grows_mixed() {
let mut e = Entities::default();
let a = e.alloc();
e.alloc();
e.free(a).unwrap();
let _ = e.reserve_entities(3);
e.flush(|_, _| {});
assert_eq!(e.len(), 4);
}
} | |
update_package.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {serde_json::json, update_package::UpdatePackage};
/// A mocked update package for testing.
pub struct TestUpdatePackage {
update_pkg: UpdatePackage,
temp_dir: tempfile::TempDir,
packages: Vec<String>,
}
impl TestUpdatePackage {
/// Creates a new TestUpdatePackage with nothing in it.
pub fn new() -> Self {
let temp_dir = tempfile::tempdir().expect("/tmp to exist");
let update_pkg_proxy = io_util::directory::open_in_namespace(
temp_dir.path().to_str().unwrap(),
io_util::OPEN_RIGHT_READABLE,
)
.expect("temp dir to open");
Self { temp_dir, update_pkg: UpdatePackage::new(update_pkg_proxy), packages: vec![] }
}
/// Adds a file to the update package, panics on error.
pub async fn add_file(
self,
path: impl AsRef<std::path::Path>,
contents: impl AsRef<[u8]>,
) -> Self {
let path = path.as_ref();
match path.parent() {
Some(empty) if empty == std::path::Path::new("") => {}
None => |
Some(parent) => std::fs::create_dir_all(self.temp_dir.path().join(parent)).unwrap(),
}
io_util::file::write_in_namespace(
self.temp_dir.path().join(path).to_str().unwrap(),
contents,
)
.await
.expect("create test update package file");
self
}
/// Adds a package to the update package, panics on error.
pub async fn add_package(mut self, package_url: impl Into<String>) -> Self {
self.packages.push(package_url.into());
let packages_json = json!({
"version": "1",
"content": self.packages,
})
.to_string();
self.add_file("packages.json", packages_json).await
}
/// Set the hash of the update package, panics on error.
pub async fn hash(self, hash: impl AsRef<[u8]>) -> Self {
self.add_file("meta", hash).await
}
}
impl std::ops::Deref for TestUpdatePackage {
type Target = UpdatePackage;
fn deref(&self) -> &Self::Target {
&self.update_pkg
}
}
| {} |
pos_util.py | import collections
from src.importer.known_jobs import KnownJobs
from src.preprocessing import preproc
from src.util import loe_util, jobtitle_util
mw_tokens = ['m/w', 'w/m', 'm/f', 'f/m',
'M/W', 'W/M', 'M/F', 'F/M']
def find_jobs(sentence):
jobs = []
# find known jobs
for hit in find_job_by_keyword(sentence, KnownJobs()):
jobs.append((hit, 'known-job'))
# find by m/w patterns
sentence_without_percentage = loe_util.remove_percentage(sentence)
for hit in find_job_by_keyword(sentence_without_percentage, mw_tokens):
jobs.append((hit, 'mw'))
# find by percentages
sentence_without_mw = jobtitle_util.remove_mw(sentence)
for hit in find_job_by_keyword(sentence_without_mw, loe_util.find_all_loe(sentence_without_mw)):
jobs.append((hit, 'loe'))
# find by gender forms
# sentence_without_mw_and_percentage = loe_util.remove_percentage(sentence_without_mw)
# jobs += find_job_by_keyword(sentence_without_mw_and_percentage, ['/in', '/-in'])
# search by keyword: gender
# for match in jobtitle_util.find_all_genderized(sentence):
# gender_job = expand_left_right(sentence.split(match[0])[0], sentence)
# if gender_job:
# yield gender_job
return jobs
def find_job_by_keyword(sentence, keywords):
# job_names = []
for keyword in keywords:
if keyword in sentence:
job_name = expand_left_right(keyword, sentence)
if job_name:
yield job_name
# job_names.append(job_name)
# return job_names
def expand_left_right(token, sentence):
if token not in sentence:
return None
job_name_tokens = preproc.to_words(token)
sentence_tokens = [word for word in preproc.to_words(sentence) if word not in ['(', ')']]
ix_from, ix_to = calculate_positions(job_name_tokens, sentence_tokens)
sentence_pos = preproc.pos_tag(sentence_tokens)
left = sentence_pos[:ix_from]
right = sentence_pos[ix_to:]
initial_content = [token] if token not in mw_tokens and not loe_util.is_percentate(token) else []
tokens = collections.deque(initial_content)
search_left(left, tokens)
search_right(right, tokens)
return ' '.join(tokens)
def search_left(pos_tagged_words, tokens=collections.deque()):
i = len(pos_tagged_words) - 1
while 0 <= i:
word, pos_tag = pos_tagged_words[i]
if is_part_of_name(word, pos_tag):
tokens.appendleft(word)
else:
break
i -= 1
return tokens
def search_right(pos_tagged_words, tokens=collections.deque()):
i = 0
while 0 <= i < len(pos_tagged_words):
word, pos_tag = pos_tagged_words[i]
if is_part_of_name(word, pos_tag):
tokens.append(word)
else:
break
i += 1
return tokens
def is_part_of_name(word, pos_tag):
return is_noun(pos_tag) or word in ['/']
def is_noun(pos_tag):
return pos_tag[0] in ['N', 'F']
def | (pos_tag):
return pos_tag.startswith('$')
def calculate_positions(job_name_tokens, sentence_tokens):
ix_from = [i for i, word in enumerate(sentence_tokens) if job_name_tokens[0] in word][0]
ix_to = ix_from + len(job_name_tokens)
return ix_from, ix_to
| is_punctuation |
errors.go | package main
import (
"fmt"
"net/url"
)
| } | func main() {
u := url.Parse("http://foo.org/index%html")
fmt.Println(u.Host) |
config.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Contains infrastructure for configuring the compiler, including parsing
//! command line options.
use driver::{early_error, early_warn};
use driver::driver;
use driver::session::Session;
use back;
use back::write;
use back::target_strs;
use back::{arm, x86, x86_64, mips, mipsel};
use lint;
use syntax::abi;
use syntax::ast;
use syntax::ast::{IntTy, UintTy};
use syntax::attr;
use syntax::attr::AttrMetaMethods;
use syntax::diagnostic::{ColorConfig, Auto, Always, Never};
use syntax::parse;
use syntax::parse::token::InternedString;
use std::collections::HashMap;
use getopts::{optopt, optmulti, optflag, optflagopt};
use getopts;
use std::cell::{RefCell};
use std::fmt;
use llvm;
pub struct Config {
pub os: abi::Os,
pub arch: abi::Architecture,
pub target_strs: target_strs::t,
pub int_type: IntTy,
pub uint_type: UintTy,
}
#[deriving(Clone, PartialEq)]
pub enum OptLevel {
No, // -O0
Less, // -O1
Default, // -O2
Aggressive // -O3
}
#[deriving(Clone, PartialEq)]
pub enum DebugInfoLevel {
NoDebugInfo,
LimitedDebugInfo,
FullDebugInfo,
}
#[deriving(Clone)]
pub struct Options {
// The crate config requested for the session, which may be combined
// with additional crate configurations during the compile process
pub crate_types: Vec<CrateType>,
pub gc: bool,
pub optimize: OptLevel,
pub debuginfo: DebugInfoLevel,
pub lint_opts: Vec<(String, lint::Level)>,
pub describe_lints: bool,
pub output_types: Vec<back::write::OutputType> ,
// This was mutable for rustpkg, which updates search paths based on the
// parsed code. It remains mutable in case its replacements wants to use
// this.
pub addl_lib_search_paths: RefCell<Vec<Path>>,
pub maybe_sysroot: Option<Path>,
pub target_triple: String,
// User-specified cfg meta items. The compiler itself will add additional
// items to the crate config, and during parsing the entire crate config
// will be added to the crate AST node. This should not be used for
// anything except building the full crate config prior to parsing.
pub cfg: ast::CrateConfig,
pub test: bool,
pub parse_only: bool,
pub no_trans: bool,
pub no_analysis: bool,
pub debugging_opts: u64,
/// Whether to write dependency files. It's (enabled, optional filename).
pub write_dependency_info: (bool, Option<Path>),
/// Crate id-related things to maybe print. It's (crate_name, crate_file_name).
pub print_metas: (bool, bool),
pub cg: CodegenOptions,
pub color: ColorConfig,
pub externs: HashMap<String, Vec<String>>,
pub crate_name: Option<String>,
/// An optional name to use as the crate for std during std injection,
/// written `extern crate std = "name"`. Default to "std". Used by
/// out-of-tree drivers.
pub alt_std_name: Option<String>
}
/// Some reasonable defaults
pub fn | () -> Options {
Options {
crate_types: Vec::new(),
gc: false,
optimize: No,
debuginfo: NoDebugInfo,
lint_opts: Vec::new(),
describe_lints: false,
output_types: Vec::new(),
addl_lib_search_paths: RefCell::new(Vec::new()),
maybe_sysroot: None,
target_triple: driver::host_triple().to_string(),
cfg: Vec::new(),
test: false,
parse_only: false,
no_trans: false,
no_analysis: false,
debugging_opts: 0,
write_dependency_info: (false, None),
print_metas: (false, false),
cg: basic_codegen_options(),
color: Auto,
externs: HashMap::new(),
crate_name: None,
alt_std_name: None,
}
}
// The type of entry function, so
// users can have their own entry
// functions that don't start a
// scheduler
#[deriving(PartialEq)]
pub enum EntryFnType {
EntryMain,
EntryStart,
EntryNone,
}
#[deriving(PartialEq, PartialOrd, Clone, Ord, Eq, Hash)]
pub enum CrateType {
CrateTypeExecutable,
CrateTypeDylib,
CrateTypeRlib,
CrateTypeStaticlib,
}
macro_rules! debugging_opts(
([ $opt:ident ] $cnt:expr ) => (
pub static $opt: u64 = 1 << $cnt;
);
([ $opt:ident, $($rest:ident),* ] $cnt:expr ) => (
pub static $opt: u64 = 1 << $cnt;
debugging_opts!([ $($rest),* ] $cnt + 1)
)
)
debugging_opts!(
[
VERBOSE,
TIME_PASSES,
COUNT_LLVM_INSNS,
TIME_LLVM_PASSES,
TRANS_STATS,
ASM_COMMENTS,
NO_VERIFY,
BORROWCK_STATS,
NO_LANDING_PADS,
DEBUG_LLVM,
SHOW_SPAN,
COUNT_TYPE_SIZES,
META_STATS,
NO_OPT,
GC,
PRINT_LINK_ARGS,
PRINT_LLVM_PASSES,
LTO,
AST_JSON,
AST_JSON_NOEXPAND,
LS,
SAVE_ANALYSIS,
FLOWGRAPH_PRINT_LOANS,
FLOWGRAPH_PRINT_MOVES,
FLOWGRAPH_PRINT_ASSIGNS,
FLOWGRAPH_PRINT_ALL
]
0
)
pub fn debugging_opts_map() -> Vec<(&'static str, &'static str, u64)> {
vec!(("verbose", "in general, enable more debug printouts", VERBOSE),
("time-passes", "measure time of each rustc pass", TIME_PASSES),
("count-llvm-insns", "count where LLVM \
instrs originate", COUNT_LLVM_INSNS),
("time-llvm-passes", "measure time of each LLVM pass",
TIME_LLVM_PASSES),
("trans-stats", "gather trans statistics", TRANS_STATS),
("asm-comments", "generate comments into the assembly (may change behavior)",
ASM_COMMENTS),
("no-verify", "skip LLVM verification", NO_VERIFY),
("borrowck-stats", "gather borrowck statistics", BORROWCK_STATS),
("no-landing-pads", "omit landing pads for unwinding",
NO_LANDING_PADS),
("debug-llvm", "enable debug output from LLVM", DEBUG_LLVM),
("show-span", "show spans for compiler debugging", SHOW_SPAN),
("count-type-sizes", "count the sizes of aggregate types",
COUNT_TYPE_SIZES),
("meta-stats", "gather metadata statistics", META_STATS),
("no-opt", "do not optimize, even if -O is passed", NO_OPT),
("print-link-args", "Print the arguments passed to the linker",
PRINT_LINK_ARGS),
("gc", "Garbage collect shared data (experimental)", GC),
("print-llvm-passes",
"Prints the llvm optimization passes being run",
PRINT_LLVM_PASSES),
("lto", "Perform LLVM link-time optimizations", LTO),
("ast-json", "Print the AST as JSON and halt", AST_JSON),
("ast-json-noexpand", "Print the pre-expansion AST as JSON and halt", AST_JSON_NOEXPAND),
("ls", "List the symbols defined by a library crate", LS),
("save-analysis", "Write syntax and type analysis information \
in addition to normal output", SAVE_ANALYSIS),
("flowgraph-print-loans", "Include loan analysis data in \
--pretty flowgraph output", FLOWGRAPH_PRINT_LOANS),
("flowgraph-print-moves", "Include move analysis data in \
--pretty flowgraph output", FLOWGRAPH_PRINT_MOVES),
("flowgraph-print-assigns", "Include assignment analysis data in \
--pretty flowgraph output", FLOWGRAPH_PRINT_ASSIGNS),
("flowgraph-print-all", "Include all dataflow analysis data in \
--pretty flowgraph output", FLOWGRAPH_PRINT_ALL))
}
#[deriving(Clone)]
pub enum Passes {
Passes(Vec<String>),
AllPasses,
}
impl Passes {
pub fn is_empty(&self) -> bool {
match *self {
Passes(ref v) => v.is_empty(),
AllPasses => false,
}
}
}
/// Declare a macro that will define all CodegenOptions fields and parsers all
/// at once. The goal of this macro is to define an interface that can be
/// programmatically used by the option parser in order to initialize the struct
/// without hardcoding field names all over the place.
///
/// The goal is to invoke this macro once with the correct fields, and then this
/// macro generates all necessary code. The main gotcha of this macro is the
/// cgsetters module which is a bunch of generated code to parse an option into
/// its respective field in the struct. There are a few hand-written parsers for
/// parsing specific types of values in this module.
macro_rules! cgoptions(
($($opt:ident : $t:ty = ($init:expr, $parse:ident, $desc:expr)),* ,) =>
(
#[deriving(Clone)]
pub struct CodegenOptions { $(pub $opt: $t),* }
pub fn basic_codegen_options() -> CodegenOptions {
CodegenOptions { $($opt: $init),* }
}
pub type CodegenSetter = fn(&mut CodegenOptions, v: Option<&str>) -> bool;
pub static CG_OPTIONS: &'static [(&'static str, CodegenSetter,
&'static str)] =
&[ $( (stringify!($opt), cgsetters::$opt, $desc) ),* ];
mod cgsetters {
use super::{CodegenOptions, Passes, AllPasses};
$(
pub fn $opt(cg: &mut CodegenOptions, v: Option<&str>) -> bool {
$parse(&mut cg.$opt, v)
}
)*
fn parse_bool(slot: &mut bool, v: Option<&str>) -> bool {
match v {
Some(..) => false,
None => { *slot = true; true }
}
}
fn parse_opt_string(slot: &mut Option<String>, v: Option<&str>) -> bool {
match v {
Some(s) => { *slot = Some(s.to_string()); true },
None => false,
}
}
fn parse_string(slot: &mut String, v: Option<&str>) -> bool {
match v {
Some(s) => { *slot = s.to_string(); true },
None => false,
}
}
fn parse_list(slot: &mut Vec<String>, v: Option<&str>)
-> bool {
match v {
Some(s) => {
for s in s.words() {
slot.push(s.to_string());
}
true
},
None => false,
}
}
fn parse_uint(slot: &mut uint, v: Option<&str>) -> bool {
use std::from_str::FromStr;
match v.and_then(FromStr::from_str) {
Some(i) => { *slot = i; true },
None => false
}
}
fn parse_passes(slot: &mut Passes, v: Option<&str>) -> bool {
match v {
Some("all") => {
*slot = AllPasses;
true
}
v => {
let mut passes = vec!();
if parse_list(&mut passes, v) {
*slot = Passes(passes);
true
} else {
false
}
}
}
}
}
) )
cgoptions!(
ar: Option<String> = (None, parse_opt_string,
"tool to assemble archives with"),
linker: Option<String> = (None, parse_opt_string,
"system linker to link outputs with"),
link_args: Vec<String> = (Vec::new(), parse_list,
"extra arguments to pass to the linker (space separated)"),
target_cpu: String = ("generic".to_string(), parse_string,
"select target processor (llc -mcpu=help for details)"),
target_feature: String = ("".to_string(), parse_string,
"target specific attributes (llc -mattr=help for details)"),
passes: Vec<String> = (Vec::new(), parse_list,
"a list of extra LLVM passes to run (space separated)"),
llvm_args: Vec<String> = (Vec::new(), parse_list,
"a list of arguments to pass to llvm (space separated)"),
save_temps: bool = (false, parse_bool,
"save all temporary output files during compilation"),
rpath: bool = (false, parse_bool,
"set rpath values in libs/exes"),
no_prepopulate_passes: bool = (false, parse_bool,
"don't pre-populate the pass manager with a list of passes"),
no_vectorize_loops: bool = (false, parse_bool,
"don't run the loop vectorization optimization passes"),
no_vectorize_slp: bool = (false, parse_bool,
"don't run LLVM's SLP vectorization pass"),
soft_float: bool = (false, parse_bool,
"generate software floating point library calls"),
prefer_dynamic: bool = (false, parse_bool,
"prefer dynamic linking to static linking"),
no_integrated_as: bool = (false, parse_bool,
"use an external assembler rather than LLVM's integrated one"),
no_redzone: bool = (false, parse_bool,
"disable the use of the redzone"),
relocation_model: String = ("pic".to_string(), parse_string,
"choose the relocation model to use (llc -relocation-model for details)"),
code_model: String = ("default".to_string(), parse_string,
"choose the code model to use (llc -code-model for details)"),
metadata: Vec<String> = (Vec::new(), parse_list,
"metadata to mangle symbol names with"),
extra_filename: String = ("".to_string(), parse_string,
"extra data to put in each output filename"),
codegen_units: uint = (1, parse_uint,
"divide crate into N units to optimize in parallel"),
remark: Passes = (Passes(Vec::new()), parse_passes,
"print remarks for these optimization passes (space separated, or \"all\")"),
)
pub fn build_codegen_options(matches: &getopts::Matches) -> CodegenOptions
{
let mut cg = basic_codegen_options();
for option in matches.opt_strs("C").move_iter() {
let mut iter = option.as_slice().splitn(1, '=');
let key = iter.next().unwrap();
let value = iter.next();
let option_to_lookup = key.replace("-", "_");
let mut found = false;
for &(candidate, setter, _) in CG_OPTIONS.iter() {
if option_to_lookup.as_slice() != candidate { continue }
if !setter(&mut cg, value) {
match value {
Some(..) => {
early_error(format!("codegen option `{}` takes no \
value", key).as_slice())
}
None => {
early_error(format!("codegen option `{0}` requires \
a value (-C {0}=<value>)",
key).as_slice())
}
}
}
found = true;
break;
}
if !found {
early_error(format!("unknown codegen option: `{}`",
key).as_slice());
}
}
return cg;
}
pub fn default_lib_output() -> CrateType {
CrateTypeRlib
}
pub fn default_configuration(sess: &Session) -> ast::CrateConfig {
let tos = match sess.targ_cfg.os {
abi::OsWindows => InternedString::new("windows"),
abi::OsMacos => InternedString::new("macos"),
abi::OsLinux => InternedString::new("linux"),
abi::OsAndroid => InternedString::new("android"),
abi::OsFreebsd => InternedString::new("freebsd"),
abi::OsDragonfly => InternedString::new("dragonfly"),
abi::OsiOS => InternedString::new("ios"),
};
// ARM is bi-endian, however using NDK seems to default
// to little-endian unless a flag is provided.
let (end,arch,wordsz) = match sess.targ_cfg.arch {
abi::X86 => ("little", "x86", "32"),
abi::X86_64 => ("little", "x86_64", "64"),
abi::Arm => ("little", "arm", "32"),
abi::Mips => ("big", "mips", "32"),
abi::Mipsel => ("little", "mipsel", "32")
};
let fam = match sess.targ_cfg.os {
abi::OsWindows => InternedString::new("windows"),
_ => InternedString::new("unix")
};
let mk = attr::mk_name_value_item_str;
return vec!(// Target bindings.
attr::mk_word_item(fam.clone()),
mk(InternedString::new("target_os"), tos),
mk(InternedString::new("target_family"), fam),
mk(InternedString::new("target_arch"), InternedString::new(arch)),
mk(InternedString::new("target_endian"), InternedString::new(end)),
mk(InternedString::new("target_word_size"),
InternedString::new(wordsz))
);
}
pub fn append_configuration(cfg: &mut ast::CrateConfig,
name: InternedString) {
if !cfg.iter().any(|mi| mi.name() == name) {
cfg.push(attr::mk_word_item(name))
}
}
pub fn build_configuration(sess: &Session) -> ast::CrateConfig {
// Combine the configuration requested by the session (command line) with
// some default and generated configuration items
let default_cfg = default_configuration(sess);
let mut user_cfg = sess.opts.cfg.clone();
// If the user wants a test runner, then add the test cfg
if sess.opts.test {
append_configuration(&mut user_cfg, InternedString::new("test"))
}
user_cfg.move_iter().collect::<Vec<_>>().append(default_cfg.as_slice())
}
pub fn get_os(triple: &str) -> Option<abi::Os> {
for &(name, os) in os_names.iter() {
if triple.contains(name) { return Some(os) }
}
None
}
static os_names : &'static [(&'static str, abi::Os)] = &[
("mingw32", abi::OsWindows),
("win32", abi::OsWindows),
("windows", abi::OsWindows),
("darwin", abi::OsMacos),
("android", abi::OsAndroid),
("linux", abi::OsLinux),
("freebsd", abi::OsFreebsd),
("dragonfly", abi::OsDragonfly),
("ios", abi::OsiOS)];
pub fn get_arch(triple: &str) -> Option<abi::Architecture> {
for &(arch, abi) in architecture_abis.iter() {
if triple.contains(arch) { return Some(abi) }
}
None
}
static architecture_abis : &'static [(&'static str, abi::Architecture)] = &[
("i386", abi::X86),
("i486", abi::X86),
("i586", abi::X86),
("i686", abi::X86),
("i786", abi::X86),
("x86_64", abi::X86_64),
("arm", abi::Arm),
("xscale", abi::Arm),
("thumb", abi::Arm),
("mipsel", abi::Mipsel),
("mips", abi::Mips)];
pub fn build_target_config(sopts: &Options) -> Config {
let os = match get_os(sopts.target_triple.as_slice()) {
Some(os) => os,
None => early_error("unknown operating system")
};
let arch = match get_arch(sopts.target_triple.as_slice()) {
Some(arch) => arch,
None => {
early_error(format!("unknown architecture: {}",
sopts.target_triple.as_slice()).as_slice())
}
};
let (int_type, uint_type) = match arch {
abi::X86 => (ast::TyI32, ast::TyU32),
abi::X86_64 => (ast::TyI64, ast::TyU64),
abi::Arm => (ast::TyI32, ast::TyU32),
abi::Mips => (ast::TyI32, ast::TyU32),
abi::Mipsel => (ast::TyI32, ast::TyU32)
};
let target_triple = sopts.target_triple.clone();
let target_strs = match arch {
abi::X86 => x86::get_target_strs(target_triple, os),
abi::X86_64 => x86_64::get_target_strs(target_triple, os),
abi::Arm => arm::get_target_strs(target_triple, os),
abi::Mips => mips::get_target_strs(target_triple, os),
abi::Mipsel => mipsel::get_target_strs(target_triple, os)
};
Config {
os: os,
arch: arch,
target_strs: target_strs,
int_type: int_type,
uint_type: uint_type,
}
}
// rustc command line options
pub fn optgroups() -> Vec<getopts::OptGroup> {
vec!(
optflag("h", "help", "Display this message"),
optmulti("", "cfg", "Configure the compilation environment", "SPEC"),
optmulti("L", "", "Add a directory to the library search path", "PATH"),
optmulti("", "crate-type", "Comma separated list of types of crates
for the compiler to emit",
"[bin|lib|rlib|dylib|staticlib]"),
optmulti("", "emit", "Comma separated list of types of output for the compiler to emit",
"[asm|bc|ir|obj|link]"),
optopt("", "crate-name", "Specify the name of the crate being built",
"NAME"),
optflag("", "print-crate-name", "Output the crate name and exit"),
optflag("", "print-file-name", "Output the file(s) that would be written if compilation \
continued and exit"),
optflag("", "crate-file-name", "deprecated in favor of --print-file-name"),
optflag("g", "", "Equivalent to --debuginfo=2"),
optopt("", "debuginfo", "Emit DWARF debug info to the objects created:
0 = no debug info,
1 = line-tables only (for stacktraces and breakpoints),
2 = full debug info with variable and type information (same as -g)", "LEVEL"),
optflag("", "no-trans", "Run all passes except translation; no output"),
optflag("", "no-analysis",
"Parse and expand the source, but run no analysis and produce no output"),
optflag("O", "", "Equivalent to --opt-level=2"),
optopt("o", "", "Write output to <filename>", "FILENAME"),
optopt("", "opt-level", "Optimize with possible levels 0-3", "LEVEL"),
optopt( "", "out-dir", "Write output to compiler-chosen filename in <dir>", "DIR"),
optflag("", "parse-only", "Parse only; do not compile, assemble, or link"),
optopt("", "explain", "Provide a detailed explanation of an error message", "OPT"),
optflagopt("", "pretty",
"Pretty-print the input instead of compiling;
valid types are: `normal` (un-annotated source),
`expanded` (crates expanded),
`typed` (crates expanded, with type annotations),
`expanded,identified` (fully parenthesized, AST nodes with IDs), or
`flowgraph=<nodeid>` (graphviz formatted flowgraph for node)",
"TYPE"),
optflagopt("", "dep-info",
"Output dependency info to <filename> after compiling, \
in a format suitable for use by Makefiles", "FILENAME"),
optopt("", "sysroot", "Override the system root", "PATH"),
optflag("", "test", "Build a test harness"),
optopt("", "target", "Target triple cpu-manufacturer-kernel[-os]
to compile for (see chapter 3.4 of http://www.sourceware.org/autobook/
for details)", "TRIPLE"),
optmulti("W", "warn", "Set lint warnings", "OPT"),
optmulti("A", "allow", "Set lint allowed", "OPT"),
optmulti("D", "deny", "Set lint denied", "OPT"),
optmulti("F", "forbid", "Set lint forbidden", "OPT"),
optmulti("C", "codegen", "Set a codegen option", "OPT[=VALUE]"),
optmulti("Z", "", "Set internal debugging options", "FLAG"),
optflagopt("v", "version", "Print version info and exit", "verbose"),
optopt("", "color", "Configure coloring of output:
auto = colorize, if output goes to a tty (default);
always = always colorize output;
never = never colorize output", "auto|always|never"),
optmulti("", "extern", "Specify where an external rust library is located",
"NAME=PATH"),
)
}
// Convert strings provided as --cfg [cfgspec] into a crate_cfg
fn parse_cfgspecs(cfgspecs: Vec<String> ) -> ast::CrateConfig {
cfgspecs.move_iter().map(|s| {
parse::parse_meta_from_source_str("cfgspec".to_string(),
s.to_string(),
Vec::new(),
&parse::new_parse_sess())
}).collect::<ast::CrateConfig>()
}
pub fn build_session_options(matches: &getopts::Matches) -> Options {
let unparsed_crate_types = matches.opt_strs("crate-type");
let crate_types = parse_crate_types_from_list(unparsed_crate_types)
.unwrap_or_else(|e| early_error(e.as_slice()));
let parse_only = matches.opt_present("parse-only");
let no_trans = matches.opt_present("no-trans");
let no_analysis = matches.opt_present("no-analysis");
let mut lint_opts = vec!();
let mut describe_lints = false;
for &level in [lint::Allow, lint::Warn, lint::Deny, lint::Forbid].iter() {
for lint_name in matches.opt_strs(level.as_str()).move_iter() {
if lint_name.as_slice() == "help" {
describe_lints = true;
} else {
lint_opts.push((lint_name.replace("-", "_").into_string(), level));
}
}
}
let mut debugging_opts = 0;
let debug_flags = matches.opt_strs("Z");
let debug_map = debugging_opts_map();
for debug_flag in debug_flags.iter() {
let mut this_bit = 0;
for tuple in debug_map.iter() {
let (name, bit) = match *tuple { (ref a, _, b) => (a, b) };
if *name == debug_flag.as_slice() {
this_bit = bit;
break;
}
}
if this_bit == 0 {
early_error(format!("unknown debug flag: {}",
*debug_flag).as_slice())
}
debugging_opts |= this_bit;
}
if debugging_opts & DEBUG_LLVM != 0 {
unsafe { llvm::LLVMSetDebug(1); }
}
let mut output_types = Vec::new();
if !parse_only && !no_trans {
let unparsed_output_types = matches.opt_strs("emit");
for unparsed_output_type in unparsed_output_types.iter() {
for part in unparsed_output_type.as_slice().split(',') {
let output_type = match part.as_slice() {
"asm" => write::OutputTypeAssembly,
"ir" => write::OutputTypeLlvmAssembly,
"bc" => write::OutputTypeBitcode,
"obj" => write::OutputTypeObject,
"link" => write::OutputTypeExe,
_ => {
early_error(format!("unknown emission type: `{}`",
part).as_slice())
}
};
output_types.push(output_type)
}
}
};
output_types.as_mut_slice().sort();
output_types.dedup();
if output_types.len() == 0 {
output_types.push(write::OutputTypeExe);
}
let sysroot_opt = matches.opt_str("sysroot").map(|m| Path::new(m));
let target = matches.opt_str("target").unwrap_or(
driver::host_triple().to_string());
let opt_level = {
if (debugging_opts & NO_OPT) != 0 {
No
} else if matches.opt_present("O") {
if matches.opt_present("opt-level") {
early_error("-O and --opt-level both provided");
}
Default
} else if matches.opt_present("opt-level") {
match matches.opt_str("opt-level").as_ref().map(|s| s.as_slice()) {
None |
Some("0") => No,
Some("1") => Less,
Some("2") => Default,
Some("3") => Aggressive,
Some(arg) => {
early_error(format!("optimization level needs to be \
between 0-3 (instead was `{}`)",
arg).as_slice());
}
}
} else {
No
}
};
let gc = debugging_opts & GC != 0;
let debuginfo = if matches.opt_present("g") {
if matches.opt_present("debuginfo") {
early_error("-g and --debuginfo both provided");
}
FullDebugInfo
} else if matches.opt_present("debuginfo") {
match matches.opt_str("debuginfo").as_ref().map(|s| s.as_slice()) {
Some("0") => NoDebugInfo,
Some("1") => LimitedDebugInfo,
None |
Some("2") => FullDebugInfo,
Some(arg) => {
early_error(format!("debug info level needs to be between \
0-2 (instead was `{}`)",
arg).as_slice());
}
}
} else {
NoDebugInfo
};
let addl_lib_search_paths = matches.opt_strs("L").iter().map(|s| {
Path::new(s.as_slice())
}).collect();
let cfg = parse_cfgspecs(matches.opt_strs("cfg"));
let test = matches.opt_present("test");
let write_dependency_info = (matches.opt_present("dep-info"),
matches.opt_str("dep-info")
.map(|p| Path::new(p)));
let print_metas = (matches.opt_present("print-crate-name"),
matches.opt_present("print-file-name") ||
matches.opt_present("crate-file-name"));
if matches.opt_present("crate-file-name") {
early_warn("the --crate-file-name argument has been renamed to \
--print-file-name");
}
let cg = build_codegen_options(matches);
if !cg.remark.is_empty() && debuginfo == NoDebugInfo {
early_warn("-C remark will not show source locations without --debuginfo");
}
let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
Some("auto") => Auto,
Some("always") => Always,
Some("never") => Never,
None => Auto,
Some(arg) => {
early_error(format!("argument for --color must be auto, always \
or never (instead was `{}`)",
arg).as_slice())
}
};
let mut externs = HashMap::new();
for arg in matches.opt_strs("extern").iter() {
let mut parts = arg.as_slice().splitn(1, '=');
let name = match parts.next() {
Some(s) => s,
None => early_error("--extern value must not be empty"),
};
let location = match parts.next() {
Some(s) => s,
None => early_error("--extern value must be of the format `foo=bar`"),
};
let locs = externs.find_or_insert(name.to_string(), Vec::new());
locs.push(location.to_string());
}
let crate_name = matches.opt_str("crate-name");
Options {
crate_types: crate_types,
gc: gc,
optimize: opt_level,
debuginfo: debuginfo,
lint_opts: lint_opts,
describe_lints: describe_lints,
output_types: output_types,
addl_lib_search_paths: RefCell::new(addl_lib_search_paths),
maybe_sysroot: sysroot_opt,
target_triple: target,
cfg: cfg,
test: test,
parse_only: parse_only,
no_trans: no_trans,
no_analysis: no_analysis,
debugging_opts: debugging_opts,
write_dependency_info: write_dependency_info,
print_metas: print_metas,
cg: cg,
color: color,
externs: externs,
crate_name: crate_name,
alt_std_name: None
}
}
pub fn parse_crate_types_from_list(list_list: Vec<String>) -> Result<Vec<CrateType>, String> {
let mut crate_types: Vec<CrateType> = Vec::new();
for unparsed_crate_type in list_list.iter() {
for part in unparsed_crate_type.as_slice().split(',') {
let new_part = match part {
"lib" => default_lib_output(),
"rlib" => CrateTypeRlib,
"staticlib" => CrateTypeStaticlib,
"dylib" => CrateTypeDylib,
"bin" => CrateTypeExecutable,
_ => {
return Err(format!("unknown crate type: `{}`",
part));
}
};
crate_types.push(new_part)
}
}
return Ok(crate_types);
}
impl fmt::Show for CrateType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
CrateTypeExecutable => "bin".fmt(f),
CrateTypeDylib => "dylib".fmt(f),
CrateTypeRlib => "rlib".fmt(f),
CrateTypeStaticlib => "staticlib".fmt(f)
}
}
}
#[cfg(test)]
mod test {
use driver::config::{build_configuration, optgroups, build_session_options};
use driver::session::build_session;
use getopts::getopts;
use syntax::attr;
use syntax::attr::AttrMetaMethods;
use syntax::diagnostics;
// When the user supplies --test we should implicitly supply --cfg test
#[test]
fn test_switch_implies_cfg_test() {
let matches =
&match getopts(["--test".to_string()], optgroups().as_slice()) {
Ok(m) => m,
Err(f) => fail!("test_switch_implies_cfg_test: {}", f)
};
let registry = diagnostics::registry::Registry::new([]);
let sessopts = build_session_options(matches);
let sess = build_session(sessopts, None, registry);
let cfg = build_configuration(&sess);
assert!((attr::contains_name(cfg.as_slice(), "test")));
}
// When the user supplies --test and --cfg test, don't implicitly add
// another --cfg test
#[test]
fn test_switch_implies_cfg_test_unless_cfg_test() {
let matches =
&match getopts(["--test".to_string(), "--cfg=test".to_string()],
optgroups().as_slice()) {
Ok(m) => m,
Err(f) => {
fail!("test_switch_implies_cfg_test_unless_cfg_test: {}", f)
}
};
let registry = diagnostics::registry::Registry::new([]);
let sessopts = build_session_options(matches);
let sess = build_session(sessopts, None, registry);
let cfg = build_configuration(&sess);
let mut test_items = cfg.iter().filter(|m| m.name().equiv(&("test")));
assert!(test_items.next().is_some());
assert!(test_items.next().is_none());
}
}
| basic_options |
normalization_test.py | # coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for normalization layers."""
from absl.testing import absltest
import numpy as onp
from trax.layers import base
from trax.layers import normalization
from trax.math import numpy as np
from trax.shapes import ShapeDtype
class NormalizationLayerTest(absltest.TestCase):
def test_batch_norm_shape(self):
input_signature = ShapeDtype((29, 5, 7, 20))
result_shape = base.check_shape_agreement(normalization.BatchNorm(),
input_signature)
self.assertEqual(result_shape, input_signature.shape)
def | (self):
input_shape = (2, 3, 4)
input_dtype = np.float32
input_signature = ShapeDtype(input_shape, input_dtype)
eps = 1e-5
inp1 = np.reshape(np.arange(np.prod(input_shape), dtype=input_dtype),
input_shape)
m1 = 11.5 # Mean of this random input.
v1 = 47.9167 # Variance of this random input.
layer = normalization.BatchNorm(axis=(0, 1, 2))
_, _ = layer.init(input_signature)
state = layer.state
onp.testing.assert_allclose(state[0], 0)
onp.testing.assert_allclose(state[1], 1)
self.assertEqual(state[2], 0)
out = layer(inp1)
state = layer.state
onp.testing.assert_allclose(state[0], m1 * 0.001)
onp.testing.assert_allclose(state[1], 0.999 + v1 * 0.001, rtol=1e-6)
self.assertEqual(state[2], 1)
onp.testing.assert_allclose(out, (inp1 - m1) / np.sqrt(v1 + eps),
rtol=1e-6)
def test_layer_norm_shape(self):
input_signature = ShapeDtype((29, 5, 7, 20))
result_shape = base.check_shape_agreement(
normalization.LayerNorm(), input_signature)
self.assertEqual(result_shape, input_signature.shape)
def test_frn_shape(self):
B, H, W, C = 64, 5, 7, 3 # pylint: disable=invalid-name
input_signature = ShapeDtype((B, H, W, C))
result_shape = base.check_shape_agreement(
normalization.FilterResponseNorm(), input_signature)
self.assertEqual(result_shape, input_signature.shape)
result_shape = base.check_shape_agreement(
normalization.FilterResponseNorm(learn_epsilon=False),
input_signature)
self.assertEqual(result_shape, input_signature.shape)
if __name__ == '__main__':
absltest.main()
| test_batch_norm |
dummy.rs | //! Dummy implementations of things that a Wasm module can import.
use std::rc::Rc;
use wasmtime::{
Callable, Extern, ExternType, Func, FuncType, Global, GlobalType, HostRef, ImportType, Memory,
MemoryType, Store, Table, TableType, Trap, Val, ValType,
};
/// Create a set of dummy functions/globals/etc for the given imports.
pub fn dummy_imports(store: &Store, import_tys: &[ImportType]) -> Result<Vec<Extern>, Trap> {
let mut imports = Vec::with_capacity(import_tys.len());
for imp in import_tys {
imports.push(match imp.ty() {
ExternType::Func(func_ty) => {
Extern::Func(HostRef::new(DummyFunc::new(&store, func_ty.clone())))
}
ExternType::Global(global_ty) => {
Extern::Global(HostRef::new(dummy_global(&store, global_ty.clone())?))
}
ExternType::Table(table_ty) => {
Extern::Table(HostRef::new(dummy_table(&store, table_ty.clone())?))
}
ExternType::Memory(mem_ty) => {
Extern::Memory(HostRef::new(dummy_memory(&store, mem_ty.clone())))
}
});
}
Ok(imports)
}
/// A function that doesn't do anything but return the default (zero) value for
/// the function's type.
#[derive(Debug)]
pub struct DummyFunc(FuncType);
impl DummyFunc {
/// Construct a new dummy `Func`.
pub fn new(store: &Store, ty: FuncType) -> Func {
let callable = DummyFunc(ty.clone());
Func::new(store, ty, Rc::new(callable) as _)
}
}
impl Callable for DummyFunc {
fn call(&self, _params: &[Val], results: &mut [Val]) -> Result<(), Trap> {
for (ret_ty, result) in self.0.results().iter().zip(results) {
*result = dummy_value(ret_ty)?;
}
Ok(())
}
}
/// Construct a dummy value for the given value type.
pub fn dummy_value(val_ty: &ValType) -> Result<Val, Trap> {
Ok(match val_ty {
ValType::I32 => Val::I32(0),
ValType::I64 => Val::I64(0),
ValType::F32 => Val::F32(0),
ValType::F64 => Val::F64(0),
ValType::V128 => {
return Err(Trap::new(
"dummy_value: unsupported function return type: v128".to_string(),
)) | ValType::AnyRef => {
return Err(Trap::new(
"dummy_value: unsupported function return type: anyref".to_string(),
))
}
ValType::FuncRef => {
return Err(Trap::new(
"dummy_value: unsupported function return type: funcref".to_string(),
))
}
})
}
/// Construct a dummy global for the given global type.
pub fn dummy_global(store: &Store, ty: GlobalType) -> Result<Global, Trap> {
let val = dummy_value(ty.content())?;
Ok(Global::new(store, ty, val))
}
/// Construct a dummy table for the given table type.
pub fn dummy_table(store: &Store, ty: TableType) -> Result<Table, Trap> {
let init_val = dummy_value(&ty.element())?;
Ok(Table::new(store, ty, init_val))
}
/// Construct a dummy memory for the given memory type.
pub fn dummy_memory(store: &Store, ty: MemoryType) -> Memory {
Memory::new(store, ty)
} | } |
2016111514_add_primary_key_to_worker__54725ffc62f3.py | """Add primary key to worker_dependency
Revision ID: 54725ffc62f3
Revises: 730e212b938
Create Date: 2016-11-15 14:02:41.621934
"""
# revision identifiers, used by Alembic.
revision = '54725ffc62f3'
down_revision = '730e212b938'
from alembic import op
def upgrade():
# Cannot add primary key with auto-increment natively in alembic
# Note that this is MySQL-specific
op.execute("ALTER TABLE `worker_dependency` ADD `id` INT PRIMARY KEY AUTO_INCREMENT FIRST;")
def | ():
op.drop_column('worker_dependency', 'id')
| downgrade |
hack_callback_handler.rs | use super::*;
use super::dialogue_helpers::{launch_dialogue, DialogueBuilder};
use components::*;
use resources::*;
pub struct HackCallbackHandlerSystem;
#[derive(SystemData)]
pub struct HackCallbackHandlerSystemData<'a> {
queued_actions: Write<'a, QueuedPlayerActions>,
hackable: WriteStorage<'a, Hackable>,
callbacks: Write<'a, Callbacks>,
}
impl<'a> System<'a> for HackCallbackHandlerSystem {
type SystemData = HackCallbackHandlerSystemData<'a>;
fn run(&mut self, mut data: Self::SystemData) {
let hack_callbacks = data.callbacks.take_some(|cb| match cb {
Callback::Hack(hdc) => TakeDecision::Take(hdc),
x => TakeDecision::Leave(x),
});
for hcb in hack_callbacks {
handle_hack_callback(hcb, &mut data.queued_actions, &mut data.hackable, &mut data.callbacks);
}
}
}
fn handle_hack_callback(
hack_callback: HackCallback,
queued_actions: &mut QueuedPlayerActions,
hackable: &mut WriteStorage<'_, Hackable>,
callbacks: &mut Callbacks,
) {
match hack_callback {
HackCallback::InitiateHack { target, turn_duration } => {
for _ in 0..turn_duration {
queued_actions.action_queue.push_back(QueuedPlayerAction::Wait);
}
queued_actions.action_queue.push_back(QueuedPlayerAction::Hack { target });
}
HackCallback::ChooseHackTarget { entity } => |
}
}
| {
let hackable = hackable
.get_mut(entity)
.expect("If we initiated hack on an entity, it better be hackable");
let mut builder = DialogueBuilder::new(&format!("Hacking {}...", hackable.name));
match &hackable.hack_state {
HackState::Uncompromised => {
builder = builder.with_option(
"[Compromise]",
vec![
Callback::Hack(HackCallback::InitiateHack {
target: HackTarget {
entity,
hack_type: HackType::Compromise,
},
turn_duration: 60,
}),
Callback::EndDialogue,
],
);
}
HackState::Compromised => {
builder = builder
.with_option(
"[Lock Shut]",
vec![
Callback::Hack(HackCallback::InitiateHack {
target: HackTarget {
entity,
hack_type: HackType::Door {
new_door_behavior: DoorBehavior::StayClosed,
},
},
turn_duration: 5,
}),
Callback::EndDialogue,
],
)
.with_option(
"[Lock Open]",
vec![
Callback::Hack(HackCallback::InitiateHack {
target: HackTarget {
entity,
hack_type: HackType::Door {
new_door_behavior: DoorBehavior::StayOpen,
},
},
turn_duration: 5,
}),
Callback::EndDialogue,
],
)
.with_option(
"[Set to Automatic]",
vec![
Callback::Hack(HackCallback::InitiateHack {
target: HackTarget {
entity,
hack_type: HackType::Door {
new_door_behavior: DoorBehavior::FullAuto,
},
},
turn_duration: 5,
}),
Callback::EndDialogue,
],
);
}
};
builder = builder.with_option("[Cancel]", vec![Callback::EndDialogue]);
launch_dialogue(builder, callbacks);
} |
_constants.py | from enum import Enum
import sys
class Mode(Enum):
|
BOX_WITH_HANDLE = [0, 1, 2, 3, 4, 5, 6, 7, 9]
BOX_LINE_HANDLE = [1, 2, 4, 6, 0, 1, 8]
BOX_LINE = [0, 2, 4, 6, 0]
BOX_TOP_LEFT = 0
BOX_TOP_CENTER = 1
BOX_BOTTOM_RIGHT = 4
BOX_BOTTOM_LEFT = 6
BOX_CENTER = 8
BOX_HANDLE = 9
BOX_LEN = 8
BACKSPACE = 'delete' if sys.platform == 'darwin' else 'backspace'
| """MODE: Interactive mode. The normal, default mode is PAN_ZOOM, which
allows for normal interactivity with the canvas.
The SELECT mode allows for entire shapes to be selected, moved and
resized.
The DIRECT mode allows for shapes to be selected and their individual
vertices to be moved.
The VERTEX_INSERT and VERTEX_REMOVE modes allow for individual
vertices either to be added to or removed from shapes that are already
selected. Note that shapes cannot be selected in this mode.
The ADD_RECTANGLE, ADD_ELLIPSE, ADD_LINE, ADD_PATH, and ADD_POLYGON
modes all allow for their corresponding shape type to be added.
"""
PAN_ZOOM = 0
SELECT = 1
DIRECT = 2
ADD_RECTANGLE = 3
ADD_ELLIPSE = 4
ADD_LINE = 5
ADD_PATH = 6
ADD_POLYGON = 7
VERTEX_INSERT = 8
VERTEX_REMOVE = 9 |
oci_object_storage_replication_policy_facts.py | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_object_storage_replication_policy_facts
short_description: Fetches details about one or multiple ReplicationPolicy resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple ReplicationPolicy resources in Oracle Cloud Infrastructure
- List the replication policies associated with a bucket.
- If I(replication_id) is specified, the details of a single ReplicationPolicy will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
namespace_name:
description:
- The Object Storage namespace used for the request.
type: str
required: true
bucket_name:
description:
- "The name of the bucket. Avoid entering confidential information.
Example: `my-new-bucket1`"
type: str
required: true
replication_id:
description:
- The ID of the replication policy.
- Required to get a specific replication_policy.
type: str
aliases: ["id"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_name_option ]
"""
EXAMPLES = """
- name: Get a specific replication_policy
oci_object_storage_replication_policy_facts:
# required
namespace_name: namespace_name_example
bucket_name: my-new-bucket1
replication_id: "ocid1.replication.oc1..xxxxxxEXAMPLExxxxxx"
- name: List replication_policies
oci_object_storage_replication_policy_facts:
# required
namespace_name: namespace_name_example
bucket_name: my-new-bucket1
"""
RETURN = """
replication_policies:
description:
- List of ReplicationPolicy resources
returned: on success
type: complex
contains:
id:
description:
- The id of the replication policy.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
name:
description:
- The name of the policy.
returned: on success
type: str
sample: name_example
destination_region_name:
description:
- "The destination region to replicate to, for example \\"us-ashburn-1\\"."
returned: on success
type: str
sample: destination_region_name_example
destination_bucket_name:
description:
- The bucket to replicate to in the destination region. Replication policy creation does not automatically
create a destination bucket. Create the destination bucket before creating the policy.
returned: on success
type: str
sample: destination_bucket_name_example
time_created:
description:
- The date when the replication policy was created as per L(RFC 3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_last_sync:
description:
- Changes made to the source bucket before this time has been replicated.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
status:
description:
- The replication status of the policy. If the status is CLIENT_ERROR, once the user fixes the issue
described in the status message, the status will become ACTIVE.
returned: on success
type: str
sample: ACTIVE
status_message:
description:
- A human-readable description of the status.
returned: on success
type: str
sample: status_message_example
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"name": "name_example",
"destination_region_name": "destination_region_name_example",
"destination_bucket_name": "destination_bucket_name_example",
"time_created": "2013-10-20T19:20:30+01:00",
"time_last_sync": "2013-10-20T19:20:30+01:00",
"status": "ACTIVE",
"status_message": "status_message_example"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.object_storage import ObjectStorageClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ReplicationPolicyFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"namespace_name",
"bucket_name",
"replication_id",
]
def get_required_params_for_list(self):
return [
"namespace_name",
"bucket_name",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_replication_policy,
namespace_name=self.module.params.get("namespace_name"),
bucket_name=self.module.params.get("bucket_name"),
replication_id=self.module.params.get("replication_id"),
)
def | (self):
optional_list_method_params = [
"name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_replication_policies,
namespace_name=self.module.params.get("namespace_name"),
bucket_name=self.module.params.get("bucket_name"),
**optional_kwargs
)
ReplicationPolicyFactsHelperCustom = get_custom_class(
"ReplicationPolicyFactsHelperCustom"
)
class ResourceFactsHelper(
ReplicationPolicyFactsHelperCustom, ReplicationPolicyFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
namespace_name=dict(type="str", required=True),
bucket_name=dict(type="str", required=True),
replication_id=dict(aliases=["id"], type="str"),
name=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="replication_policy",
service_client_class=ObjectStorageClient,
namespace="object_storage",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(replication_policies=result)
if __name__ == "__main__":
main()
| list_resources |
main.ts | import {platformBrowserDynamic} from '@angular/platform-browser-dynamic';
/*
* Bootstrap our Angular app with a top level NgModule
*/
platformBrowserDynamic()
.bootstrapModule(AppModule); | import './polifills';
import {AppModule} from './app.module';
|
|
config.go | // this code is from https://github.com/pzhzqt/goostub
// there is license and copyright notice in licenses/goostub dir
package common
import (
"time"
)
var CycleDetectionInterval time.Duration
var EnableLogging bool = false
var LogTimeout time.Duration
var EnableDebug bool = false
const (
// invalid page id | InvalidPageID = -1
// invalid transaction id
InvalidTxnID = -1
// invalid log sequence number
InvalidLSN = -1
// the header page id
HeaderPageID = 0
// size of a data page in byte
PageSize = 4096
// size of buffer pool
BufferPoolSize = 10
// size of a log buffer in byte
LogBufferSize = ((BufferPoolSize + 1) * PageSize)
// size of extendible hash bucket
BucketSize = 50
)
//type FrameID int32 // frame id type
//type PageID int32 // page id type
type TxnID int32 // transaction id type
//type LSN int32 // log sequence number
type SlotOffset uintptr // slot offset type
//type OID uint16 | |
app.Period.Service.ts | import { Injectable } from '@angular/core';
import { Observable, throwError } from 'rxjs'
import { catchError, tap } from 'rxjs/operators'
import { PeriodModel } from '../Models/app.PeriodModel';
import { HttpClient, HttpErrorResponse, HttpHeaders, HttpResponse } from '@angular/common/http';
@Injectable({
providedIn: 'root'
})
export class |
{
private apiUrl = " http://localhost:49749/api/Period/";
private data: any;
token: any;
username: any;
constructor(private http: HttpClient) {
}
public GetAllPeriod()
{
let headers = new HttpHeaders({ 'Content-Type': 'application/json' });
return this.http.get<PeriodModel[]>(this.apiUrl, { headers: headers })
.pipe(tap(data => data),
catchError(this.handleError)
);
}
private handleError(error: HttpErrorResponse) {
if (error.error instanceof ErrorEvent) {
// A client-side or network error occurred. Handle it accordingly.
console.error('An error occurred:', error.error.message);
} else {
// The backend returned an unsuccessful response code.
// The response body may contain clues as to what went wrong,
console.error(`Backend returned code ${error.status}, ` + `body was: ${error.error}`);
}
// return an observable with a user-facing error message
return throwError('Something bad happened; please try again later.');
};
} | PeriodService |
test_datetime_parse.py | """
Stolen from https://github.com/django/django/blob/master/tests/utils_tests/test_dateparse.py at
9718fa2e8abe430c3526a9278dd976443d4ae3c6
Changed to:
* use standard pytest layout
* parametrize tests
"""
from datetime import date, datetime, time, timedelta, timezone
import pytest
from pydantic import BaseModel, ValidationError, errors
from pydantic.datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
def create_tz(minutes):
return timezone(timedelta(minutes=minutes))
@pytest.mark.parametrize(
'value,result',
[
# Valid inputs
('1494012444.883309', date(2017, 5, 5)),
(b'1494012444.883309', date(2017, 5, 5)),
(1_494_012_444.883_309, date(2017, 5, 5)),
('1494012444', date(2017, 5, 5)),
(1_494_012_444, date(2017, 5, 5)),
(0, date(1970, 1, 1)), | (date(2012, 4, 9), date(2012, 4, 9)),
(datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)),
# Invalid inputs
('x20120423', errors.DateError),
('2012-04-56', errors.DateError),
(19_999_999_999, date(2603, 10, 11)), # just before watershed
(20_000_000_001, date(1970, 8, 20)), # just after watershed
(1_549_316_052, date(2019, 2, 4)), # nowish in s
(1_549_316_052_104, date(2019, 2, 4)), # nowish in ms
(1_549_316_052_104_324, date(2019, 2, 4)), # nowish in μs
(1_549_316_052_104_324_096, date(2019, 2, 4)), # nowish in ns
('infinity', date(9999, 12, 31)),
('inf', date(9999, 12, 31)),
(float('inf'), date(9999, 12, 31)),
('infinity ', date(9999, 12, 31)),
(int('1' + '0' * 100), date(9999, 12, 31)),
(1e1000, date(9999, 12, 31)),
('-infinity', date(1, 1, 1)),
('-inf', date(1, 1, 1)),
('nan', ValueError),
],
)
def test_date_parsing(value, result):
if type(result) == type and issubclass(result, Exception):
with pytest.raises(result):
parse_date(value)
else:
assert parse_date(value) == result
@pytest.mark.parametrize(
'value,result',
[
# Valid inputs
('09:15:00', time(9, 15)),
('10:10', time(10, 10)),
('10:20:30.400', time(10, 20, 30, 400_000)),
(b'10:20:30.400', time(10, 20, 30, 400_000)),
('4:8:16', time(4, 8, 16)),
(time(4, 8, 16), time(4, 8, 16)),
(3610, time(1, 0, 10)),
(3600.5, time(1, 0, 0, 500000)),
(86400 - 1, time(23, 59, 59)),
('11:05:00-05:30', time(11, 5, 0, tzinfo=create_tz(-330))),
('11:05:00-0530', time(11, 5, 0, tzinfo=create_tz(-330))),
('11:05:00Z', time(11, 5, 0, tzinfo=timezone.utc)),
('11:05:00+00', time(11, 5, 0, tzinfo=timezone.utc)),
('11:05-06', time(11, 5, 0, tzinfo=create_tz(-360))),
('11:05+06', time(11, 5, 0, tzinfo=create_tz(360))),
# Invalid inputs
(86400, errors.TimeError),
('xxx', errors.TimeError),
('091500', errors.TimeError),
(b'091500', errors.TimeError),
('09:15:90', errors.TimeError),
('11:05:00Y', errors.TimeError),
('11:05:00-25:00', errors.TimeError),
],
)
def test_time_parsing(value, result):
if result == errors.TimeError:
with pytest.raises(errors.TimeError):
parse_time(value)
else:
assert parse_time(value) == result
@pytest.mark.parametrize(
'value,result',
[
# Valid inputs
# values in seconds
('1494012444.883309', datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
(1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
('1494012444', datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
(b'1494012444', datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
(1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
# values in ms
('1494012444000.883309', datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)),
('-1494012444000.883309', datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)),
(1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
('2012-04-23T09:15:00', datetime(2012, 4, 23, 9, 15)),
('2012-4-9 4:8:16', datetime(2012, 4, 9, 4, 8, 16)),
('2012-04-23T09:15:00Z', datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)),
('2012-4-9 4:8:16-0320', datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))),
('2012-04-23T10:20:30.400+02:30', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))),
('2012-04-23T10:20:30.400+02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))),
('2012-04-23T10:20:30.400-02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
(b'2012-04-23T10:20:30.400-02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
(datetime(2017, 5, 5), datetime(2017, 5, 5)),
(0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
# Invalid inputs
('x20120423091500', errors.DateTimeError),
('2012-04-56T09:15:90', errors.DateTimeError),
('2012-04-23T11:05:00-25:00', errors.DateTimeError),
(19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, tzinfo=timezone.utc)), # just before watershed
(20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)), # just after watershed
(1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)), # nowish in s
(1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)), # nowish in ms
(1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in μs
(1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in ns
('infinity', datetime(9999, 12, 31, 23, 59, 59, 999999)),
('inf', datetime(9999, 12, 31, 23, 59, 59, 999999)),
('inf ', datetime(9999, 12, 31, 23, 59, 59, 999999)),
(1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)),
(float('inf'), datetime(9999, 12, 31, 23, 59, 59, 999999)),
('-infinity', datetime(1, 1, 1, 0, 0)),
('-inf', datetime(1, 1, 1, 0, 0)),
('nan', ValueError),
],
)
def test_datetime_parsing(value, result):
if type(result) == type and issubclass(result, Exception):
with pytest.raises(result):
parse_datetime(value)
else:
assert parse_datetime(value) == result
@pytest.mark.parametrize(
'delta',
[
timedelta(days=4, minutes=15, seconds=30, milliseconds=100), # fractions of seconds
timedelta(hours=10, minutes=15, seconds=30), # hours, minutes, seconds
timedelta(days=4, minutes=15, seconds=30), # multiple days
timedelta(days=1, minutes=00, seconds=00), # single day
timedelta(days=-4, minutes=15, seconds=30), # negative durations
timedelta(minutes=15, seconds=30), # minute & seconds
timedelta(seconds=30), # seconds
],
)
def test_parse_python_format(delta):
assert parse_duration(delta) == delta
assert parse_duration(str(delta)) == delta
@pytest.mark.parametrize(
'value,result',
[
# seconds
(timedelta(seconds=30), timedelta(seconds=30)),
('30', timedelta(seconds=30)),
(30, timedelta(seconds=30)),
(30.1, timedelta(seconds=30, milliseconds=100)),
# minutes seconds
('15:30', timedelta(minutes=15, seconds=30)),
('5:30', timedelta(minutes=5, seconds=30)),
# hours minutes seconds
('10:15:30', timedelta(hours=10, minutes=15, seconds=30)),
('1:15:30', timedelta(hours=1, minutes=15, seconds=30)),
('100:200:300', timedelta(hours=100, minutes=200, seconds=300)),
# days
('4 15:30', timedelta(days=4, minutes=15, seconds=30)),
('4 10:15:30', timedelta(days=4, hours=10, minutes=15, seconds=30)),
# fractions of seconds
('15:30.1', timedelta(minutes=15, seconds=30, milliseconds=100)),
('15:30.01', timedelta(minutes=15, seconds=30, milliseconds=10)),
('15:30.001', timedelta(minutes=15, seconds=30, milliseconds=1)),
('15:30.0001', timedelta(minutes=15, seconds=30, microseconds=100)),
('15:30.00001', timedelta(minutes=15, seconds=30, microseconds=10)),
('15:30.000001', timedelta(minutes=15, seconds=30, microseconds=1)),
(b'15:30.000001', timedelta(minutes=15, seconds=30, microseconds=1)),
# negative
('-4 15:30', timedelta(days=-4, minutes=15, seconds=30)),
('-172800', timedelta(days=-2)),
('-15:30', timedelta(minutes=-15, seconds=30)),
('-1:15:30', timedelta(hours=-1, minutes=15, seconds=30)),
('-30.1', timedelta(seconds=-30, milliseconds=-100)),
# iso_8601
('P4Y', errors.DurationError),
('P4M', errors.DurationError),
('P4W', errors.DurationError),
('P4D', timedelta(days=4)),
('P0.5D', timedelta(hours=12)),
('PT5H', timedelta(hours=5)),
('PT5M', timedelta(minutes=5)),
('PT5S', timedelta(seconds=5)),
('PT0.000005S', timedelta(microseconds=5)),
(b'PT0.000005S', timedelta(microseconds=5)),
],
)
def test_parse_durations(value, result):
if result == errors.DurationError:
with pytest.raises(errors.DurationError):
parse_duration(value)
else:
assert parse_duration(value) == result
@pytest.mark.parametrize(
'field, value, error_message',
[
('dt', [], 'invalid type; expected datetime, string, bytes, int or float'),
('dt', {}, 'invalid type; expected datetime, string, bytes, int or float'),
('dt', object, 'invalid type; expected datetime, string, bytes, int or float'),
('d', [], 'invalid type; expected date, string, bytes, int or float'),
('d', {}, 'invalid type; expected date, string, bytes, int or float'),
('d', object, 'invalid type; expected date, string, bytes, int or float'),
('t', [], 'invalid type; expected time, string, bytes, int or float'),
('t', {}, 'invalid type; expected time, string, bytes, int or float'),
('t', object, 'invalid type; expected time, string, bytes, int or float'),
('td', [], 'invalid type; expected timedelta, string, bytes, int or float'),
('td', {}, 'invalid type; expected timedelta, string, bytes, int or float'),
('td', object, 'invalid type; expected timedelta, string, bytes, int or float'),
],
)
def test_model_type_errors(field, value, error_message):
class Model(BaseModel):
dt: datetime = None
d: date = None
t: time = None
td: timedelta = None
with pytest.raises(ValidationError) as exc_info:
Model(**{field: value})
assert len(exc_info.value.errors()) == 1
error = exc_info.value.errors()[0]
assert error == {'loc': (field,), 'type': 'type_error', 'msg': error_message}
@pytest.mark.parametrize('field', ['dt', 'd', 't', 'dt'])
def test_unicode_decode_error(field):
class Model(BaseModel):
dt: datetime = None
d: date = None
t: time = None
td: timedelta = None
with pytest.raises(ValidationError) as exc_info:
Model(**{field: b'\x81'})
assert len(exc_info.value.errors()) == 1
error = exc_info.value.errors()[0]
assert error == {
'loc': (field,),
'type': 'value_error.unicodedecode',
'msg': "'utf-8' codec can't decode byte 0x81 in position 0: invalid start byte",
}
def test_nan():
class Model(BaseModel):
dt: datetime
d: date
with pytest.raises(ValidationError) as exc_info:
Model(dt='nan', d='nan')
assert exc_info.value.errors() == [
{
'loc': ('dt',),
'msg': 'cannot convert float NaN to integer',
'type': 'value_error',
},
{
'loc': ('d',),
'msg': 'cannot convert float NaN to integer',
'type': 'value_error',
},
] | ('2012-04-23', date(2012, 4, 23)),
(b'2012-04-23', date(2012, 4, 23)),
('2012-4-9', date(2012, 4, 9)), |
xuint16_slice_new.go | package xuint16Slice
type TUint16Slice []uint16
func New(src ...[]uint16) TUint16Slice {
var inst []uint16
if len(src) > 0 && src[0] != nil | else {
inst = make([]uint16, 0)
}
return TUint16Slice(inst)
}
| {
inst = src[0]
} |
status.go | package k8shandler
import (
"context"
"fmt"
"reflect"
"strings"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
api "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const healthUnknown = "cluster health unknown"
const NOT_FOUND_INDEX = -1
var DISK_WATERMARK_LOW_PCT *float64
var DISK_WATERMARK_HIGH_PCT *float64
var DISK_WATERMARK_LOW_ABS *resource.Quantity
var DISK_WATERMARK_HIGH_ABS *resource.Quantity
func (elasticsearchRequest *ElasticsearchRequest) UpdateClusterStatus() error {
cluster := elasticsearchRequest.cluster
esClient := elasticsearchRequest.esClient
clusterStatus := cluster.Status.DeepCopy()
health, err := esClient.GetClusterHealth()
if err != nil {
health.Status = healthUnknown
}
clusterStatus.Cluster = health
allocation, err := esClient.GetShardAllocation()
switch {
case allocation == "none":
clusterStatus.ShardAllocationEnabled = api.ShardAllocationNone
case allocation == "primaries":
clusterStatus.ShardAllocationEnabled = api.ShardAllocationPrimaries
case allocation == "all":
clusterStatus.ShardAllocationEnabled = api.ShardAllocationAll
default:
clusterStatus.ShardAllocationEnabled = api.ShardAllocationUnknown
}
clusterStatus.Pods = rolePodStateMap(cluster.Namespace, cluster.Name, elasticsearchRequest.client)
updateStatusConditions(clusterStatus)
elasticsearchRequest.updateNodeConditions(clusterStatus)
if !reflect.DeepEqual(clusterStatus, cluster.Status) {
nretries := -1
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
nretries++
if getErr := elasticsearchRequest.client.Get(context.TODO(), types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, cluster); getErr != nil {
logrus.Debugf("Could not get Elasticsearch %v: %v", cluster.Name, getErr)
return getErr
}
cluster.Status.Cluster = clusterStatus.Cluster
cluster.Status.Conditions = clusterStatus.Conditions
cluster.Status.Pods = clusterStatus.Pods
cluster.Status.ShardAllocationEnabled = clusterStatus.ShardAllocationEnabled
cluster.Status.Nodes = clusterStatus.Nodes
if updateErr := elasticsearchRequest.client.Update(context.TODO(), cluster); updateErr != nil {
logrus.Debugf("Failed to update Elasticsearch %s status. Reason: %v. Trying again...", cluster.Name, updateErr)
return updateErr
}
return nil
})
if retryErr != nil {
return fmt.Errorf("Error: could not update status for Elasticsearch %v after %v retries: %v", cluster.Name, nretries, retryErr)
}
logrus.Debugf("Updated Elasticsearch %v after %v retries", cluster.Name, nretries)
}
return nil
}
func (elasticsearchRequest *ElasticsearchRequest) GetCurrentPodStateMap() map[api.ElasticsearchNodeRole]api.PodStateMap {
return rolePodStateMap(elasticsearchRequest.cluster.Namespace, elasticsearchRequest.cluster.Name, elasticsearchRequest.client)
}
func containsClusterCondition(condition api.ClusterConditionType, status v1.ConditionStatus, elasticsearchStatus *api.ElasticsearchStatus) bool {
// if we're looking for a status of v1.ConditionTrue then we want to see if the
// condition is present and the status is the same
//
// if we're looking for a status of v1.ConditionFalse then we want the condition
// to either be present with status of false or to not find the condition
defaultValue := (status != v1.ConditionTrue)
for _, clusterCondition := range elasticsearchStatus.Conditions {
if clusterCondition.Type == condition {
return clusterCondition.Status == status
}
}
return defaultValue
}
// if a status doesn't exist, provide a new one
func getNodeStatus(name string, status *api.ElasticsearchStatus) (int, *api.ElasticsearchNodeStatus) {
for index, status := range status.Nodes {
if status.DeploymentName == name || status.StatefulSetName == name {
return index, &status
}
}
return NOT_FOUND_INDEX, &api.ElasticsearchNodeStatus{}
}
func rolePodStateMap(namespace, clusterName string, client client.Client) map[api.ElasticsearchNodeRole]api.PodStateMap {
clientList, _ := GetPodList(
namespace,
map[string]string{
"component": "elasticsearch",
"cluster-name": clusterName,
"es-node-client": "true",
},
client,
)
dataList, _ := GetPodList(
namespace,
map[string]string{
"component": "elasticsearch",
"cluster-name": clusterName,
"es-node-data": "true",
},
client,
)
masterList, _ := GetPodList(
namespace,
map[string]string{
"component": "elasticsearch",
"cluster-name": clusterName,
"es-node-master": "true",
},
client,
)
return map[api.ElasticsearchNodeRole]api.PodStateMap{
api.ElasticsearchRoleClient: podStateMap(clientList.Items),
api.ElasticsearchRoleData: podStateMap(dataList.Items),
api.ElasticsearchRoleMaster: podStateMap(masterList.Items),
}
}
func podStateMap(podList []v1.Pod) api.PodStateMap {
stateMap := map[api.PodStateType][]string{
api.PodStateTypeReady: {},
api.PodStateTypeNotReady: {},
api.PodStateTypeFailed: {},
}
for _, pod := range podList {
switch pod.Status.Phase {
case v1.PodPending:
stateMap[api.PodStateTypeNotReady] = append(stateMap[api.PodStateTypeNotReady], pod.Name)
case v1.PodRunning:
if isPodReady(pod) {
stateMap[api.PodStateTypeReady] = append(stateMap[api.PodStateTypeReady], pod.Name)
} else {
stateMap[api.PodStateTypeNotReady] = append(stateMap[api.PodStateTypeNotReady], pod.Name)
}
case v1.PodFailed:
stateMap[api.PodStateTypeFailed] = append(stateMap[api.PodStateTypeFailed], pod.Name)
}
}
return stateMap
}
func isPodReady(pod v1.Pod) bool {
for _, container := range pod.Status.ContainerStatuses {
if !container.Ready {
return false
}
}
return true
}
func (er *ElasticsearchRequest) updateNodeConditions(status *api.ElasticsearchStatus) {
esClient := er.esClient
cluster := er.cluster
// Get all pods based on status.Nodes[] and check their conditions
// get pod with label 'node-name=node.getName()'
thresholdEnabled, err := esClient.GetThresholdEnabled()
if err != nil {
logrus.Debugf("Unable to check if threshold is enabled for %v", cluster.Name)
}
if thresholdEnabled {
// refresh value of thresholds in case they changed...
er.refreshDiskWatermarkThresholds()
}
for nodeIndex := range status.Nodes {
node := &status.Nodes[nodeIndex]
nodeName := "unknown name"
if node.DeploymentName != "" {
nodeName = node.DeploymentName
} else {
if node.StatefulSetName != "" {
nodeName = node.StatefulSetName
}
}
nodePodList, _ := GetPodList(
cluster.Namespace,
map[string]string{
"component": "elasticsearch",
"cluster-name": cluster.Name,
"node-name": nodeName,
},
er.client,
)
for _, nodePod := range nodePodList.Items {
isUnschedulable := false
for _, podCondition := range nodePod.Status.Conditions {
if podCondition.Type == v1.PodScheduled && podCondition.Status == v1.ConditionFalse {
podCondition.Type = v1.PodReasonUnschedulable
podCondition.Status = v1.ConditionTrue
updatePodUnschedulableCondition(node, podCondition)
isUnschedulable = true
}
}
if isUnschedulable {
continue
}
updatePodUnschedulableCondition(node, v1.PodCondition{
Status: v1.ConditionFalse,
})
// if the pod can't be scheduled we shouldn't enter here
for _, containerStatus := range nodePod.Status.ContainerStatuses {
if containerStatus.Name == "elasticsearch" {
if containerStatus.State.Waiting != nil {
updatePodNotReadyCondition(
node,
api.ESContainerWaiting,
containerStatus.State.Waiting.Reason,
containerStatus.State.Waiting.Message,
)
} else {
updatePodNotReadyCondition(
node,
api.ESContainerWaiting,
"",
"",
)
}
if containerStatus.State.Terminated != nil {
updatePodNotReadyCondition(
node,
api.ESContainerTerminated,
containerStatus.State.Terminated.Reason,
containerStatus.State.Terminated.Message,
)
} else {
updatePodNotReadyCondition(
node,
api.ESContainerTerminated,
"",
"",
)
}
}
if containerStatus.Name == "proxy" {
if containerStatus.State.Waiting != nil {
updatePodNotReadyCondition(
node,
api.ProxyContainerWaiting,
containerStatus.State.Waiting.Reason,
containerStatus.State.Waiting.Message,
)
} else {
updatePodNotReadyCondition(
node,
api.ProxyContainerWaiting,
"",
"",
)
}
if containerStatus.State.Terminated != nil {
updatePodNotReadyCondition(
node,
api.ProxyContainerTerminated,
containerStatus.State.Terminated.Reason,
containerStatus.State.Terminated.Message,
)
} else {
updatePodNotReadyCondition(
node,
api.ProxyContainerTerminated,
"",
"",
)
}
}
}
if !thresholdEnabled {
// disk threshold is not enabled, continue to next node
continue
}
usage, percent, err := esClient.GetNodeDiskUsage(nodeName)
if err != nil {
logrus.Debugf("Unable to get disk usage for %v", nodeName)
continue
}
if exceedsLowWatermark(usage, percent) {
if exceedsHighWatermark(usage, percent) {
updatePodNodeStorageCondition(
node,
"Disk Watermark High",
fmt.Sprintf("Disk storage usage for node is %vb (%v%%). Shards will be relocated from this node.", usage, percent),
)
} else {
updatePodNodeStorageCondition(
node,
"Disk Watermark Low",
fmt.Sprintf("Disk storage usage for node is %vb (%v%%). Shards will be not be allocated on this node.", usage, percent),
)
}
} else {
if percent > float64(0.0) {
// if we were able to pull the usage but it isn't above the thresholds -- clear the status message
updatePodNodeStorageCondition(node, "", "")
}
}
}
}
}
func (er *ElasticsearchRequest) refreshDiskWatermarkThresholds() {
//quantity, err := resource.ParseQuantity(string)
low, high, err := er.esClient.GetDiskWatermarks()
if err != nil {
logrus.Debugf("Unable to refresh disk watermarks from cluster, using defaults")
}
switch low.(type) {
case float64:
value := low.(float64)
DISK_WATERMARK_LOW_PCT = &value
DISK_WATERMARK_LOW_ABS = nil
case string:
value, err := resource.ParseQuantity(strings.ToUpper(low.(string)))
if err != nil {
logrus.Warnf("Unable to parse %v: %v", low.(string), err)
}
DISK_WATERMARK_LOW_ABS = &value
DISK_WATERMARK_LOW_PCT = nil
default:
// error
logrus.Warnf("Unknown type for low: %T", low)
}
switch high.(type) {
case float64:
value := high.(float64)
DISK_WATERMARK_HIGH_PCT = &value
DISK_WATERMARK_HIGH_ABS = nil
case string:
value, err := resource.ParseQuantity(strings.ToUpper(high.(string)))
if err != nil {
logrus.Warnf("Unable to parse %v: %v", high.(string), err)
}
DISK_WATERMARK_HIGH_ABS = &value
DISK_WATERMARK_HIGH_PCT = nil
default:
// error
logrus.Warnf("Unknown type for high: %T", high)
}
}
func exceedsLowWatermark(usage string, percent float64) bool {
return exceedsWatermarks(usage, percent, DISK_WATERMARK_LOW_ABS, DISK_WATERMARK_LOW_PCT)
}
func exceedsHighWatermark(usage string, percent float64) bool {
return exceedsWatermarks(usage, percent, DISK_WATERMARK_HIGH_ABS, DISK_WATERMARK_HIGH_PCT)
}
func exceedsWatermarks(usage string, percent float64, watermarkUsage *resource.Quantity, watermarkPercent *float64) bool {
if usage == "" || percent < float64(0) {
return false
}
quantity, err := resource.ParseQuantity(usage)
if err != nil {
logrus.Warnf("Unable to parse usage quantity %v: %v", usage, err)
return false
}
// if quantity is > watermarkUsage and is used
if watermarkUsage != nil && quantity.Cmp(*watermarkUsage) == 1 {
return true
}
if watermarkPercent != nil && percent > *watermarkPercent {
return true
}
return false
}
func updatePodCondition(node *api.ElasticsearchNodeStatus, condition *api.ClusterCondition) bool {
if node.Conditions == nil {
node.Conditions = make([]api.ClusterCondition, 0, 4)
}
// Try to find this node condition.
conditionIndex, oldCondition := getPodCondition(node, condition.Type)
if condition.Status == v1.ConditionFalse { | if oldCondition != nil {
node.Conditions = append(node.Conditions[:conditionIndex], node.Conditions[conditionIndex+1:]...)
return true
}
return false
}
if oldCondition == nil {
// We are adding new node condition.
node.Conditions = append(node.Conditions, *condition)
return true
}
isEqual := condition.Status == oldCondition.Status &&
condition.Reason == oldCondition.Reason &&
condition.Message == oldCondition.Message
node.Conditions[conditionIndex] = *condition
return !isEqual
}
func getPodCondition(node *api.ElasticsearchNodeStatus, conditionType api.ClusterConditionType) (int, *api.ClusterCondition) {
if node == nil {
return -1, nil
}
for i := range node.Conditions {
if node.Conditions[i].Type == conditionType {
return i, &node.Conditions[i]
}
}
return -1, nil
}
func updatePodUnschedulableCondition(node *api.ElasticsearchNodeStatus, podCondition v1.PodCondition) bool {
return updatePodCondition(node, &api.ClusterCondition{
Type: api.Unschedulable,
Status: podCondition.Status,
Reason: podCondition.Reason,
Message: podCondition.Message,
LastTransitionTime: podCondition.LastTransitionTime,
})
}
func updatePodNotReadyCondition(node *api.ElasticsearchNodeStatus, conditionType api.ClusterConditionType, reason, message string) bool {
var status v1.ConditionStatus
if message == "" && reason == "" {
status = v1.ConditionFalse
} else {
status = v1.ConditionTrue
}
return updatePodCondition(node, &api.ClusterCondition{
Type: conditionType,
Status: status,
Reason: reason,
Message: message,
LastTransitionTime: metav1.Now(),
})
}
func updatePodNodeStorageCondition(node *api.ElasticsearchNodeStatus, reason, message string) bool {
var status v1.ConditionStatus
if message == "" && reason == "" {
status = v1.ConditionFalse
} else {
status = v1.ConditionTrue
}
return updatePodCondition(node, &api.ClusterCondition{
Type: api.NodeStorage,
Status: status,
Reason: reason,
Message: message,
LastTransitionTime: metav1.Now(),
})
}
func updateStatusConditions(status *api.ElasticsearchStatus) {
if status.Conditions == nil {
status.Conditions = make([]api.ClusterCondition, 0, 4)
}
if _, condition := getESNodeCondition(status.Conditions, api.UpdatingSettings); condition == nil {
updateUpdatingSettingsCondition(status, v1.ConditionFalse)
}
if _, condition := getESNodeCondition(status.Conditions, api.ScalingUp); condition == nil {
updateScalingUpCondition(status, v1.ConditionFalse)
}
if _, condition := getESNodeCondition(status.Conditions, api.ScalingDown); condition == nil {
updateScalingDownCondition(status, v1.ConditionFalse)
}
if _, condition := getESNodeCondition(status.Conditions, api.Restarting); condition == nil {
updateRestartingCondition(status, v1.ConditionFalse)
}
}
func isPodUnschedulableConditionTrue(conditions []api.ClusterCondition) bool {
_, condition := getESNodeCondition(conditions, api.Unschedulable)
return condition != nil && condition.Status == v1.ConditionTrue
}
func isPodImagePullBackOff(conditions []api.ClusterCondition) bool {
condition := getESNodeConditionWithReason(conditions, api.ESContainerWaiting, "ImagePullBackOff")
return condition != nil && condition.Status == v1.ConditionTrue
}
func getESNodeCondition(conditions []api.ClusterCondition, conditionType api.ClusterConditionType) (int, *api.ClusterCondition) {
if conditions == nil {
return -1, nil
}
for i := range conditions {
if conditions[i].Type == conditionType {
return i, &conditions[i]
}
}
return -1, nil
}
func getESNodeConditionWithReason(conditions []api.ClusterCondition, conditionType api.ClusterConditionType, conditionReason string) *api.ClusterCondition {
if conditions == nil {
return nil
}
for i := range conditions {
if conditions[i].Type == conditionType {
if conditions[i].Reason == conditionReason {
return &conditions[i]
}
}
}
return nil
}
func updateESNodeCondition(status *api.ElasticsearchStatus, condition *api.ClusterCondition) bool {
condition.LastTransitionTime = metav1.Now()
// Try to find this node condition.
conditionIndex, oldCondition := getESNodeCondition(status.Conditions, condition.Type)
if condition.Status == v1.ConditionFalse {
if oldCondition != nil {
status.Conditions = append(status.Conditions[:conditionIndex], status.Conditions[conditionIndex+1:]...)
return true
}
return false
}
if oldCondition == nil {
// We are adding new node condition.
status.Conditions = append(status.Conditions, *condition)
return true
}
// We are updating an existing condition, so we need to check if it has changed.
if condition.Status == oldCondition.Status {
condition.LastTransitionTime = oldCondition.LastTransitionTime
}
isEqual := condition.Status == oldCondition.Status &&
condition.Reason == oldCondition.Reason &&
condition.Message == oldCondition.Message &&
condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime)
status.Conditions[conditionIndex] = *condition
// Return true if one of the fields have changed.
return !isEqual
}
func updateConditionWithRetry(dpl *api.Elasticsearch, value v1.ConditionStatus,
executeUpdateCondition func(*api.ElasticsearchStatus, v1.ConditionStatus) bool, client client.Client) error {
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
if getErr := client.Get(context.TODO(), types.NamespacedName{Name: dpl.Name, Namespace: dpl.Namespace}, dpl); getErr != nil {
logrus.Debugf("Could not get Elasticsearch %v: %v", dpl.Name, getErr)
return getErr
}
executeUpdateCondition(&dpl.Status, value)
if updateErr := client.Update(context.TODO(), dpl); updateErr != nil {
logrus.Debugf("Failed to update Elasticsearch %v status: %v", dpl.Name, updateErr)
return updateErr
}
return nil
})
return retryErr
}
func updateInvalidMasterCountCondition(status *api.ElasticsearchStatus, value v1.ConditionStatus) bool {
var message string
var reason string
if value == v1.ConditionTrue {
message = fmt.Sprintf("Invalid master nodes count. Please ensure there are no more than %v total nodes with master roles", maxMasterCount)
reason = "Invalid Settings"
} else {
message = ""
reason = ""
}
return updateESNodeCondition(status, &api.ClusterCondition{
Type: api.InvalidMasters,
Status: value,
Reason: reason,
Message: message,
})
}
func updateInvalidDataCountCondition(status *api.ElasticsearchStatus, value v1.ConditionStatus) bool {
var message string
var reason string
if value == v1.ConditionTrue {
message = "No data nodes requested. Please ensure there is at least 1 node with data roles"
reason = "Invalid Settings"
} else {
message = ""
reason = ""
}
return updateESNodeCondition(status, &api.ClusterCondition{
Type: api.InvalidData,
Status: value,
Reason: reason,
Message: message,
})
}
func updateInvalidUUIDChangeCondition(cluster *api.Elasticsearch, value v1.ConditionStatus, message string, client client.Client) error {
var reason string
if value == v1.ConditionTrue {
reason = "Invalid Spec"
} else {
reason = ""
}
return updateConditionWithRetry(
cluster,
value,
func(status *api.ElasticsearchStatus, value v1.ConditionStatus) bool {
return updateESNodeCondition(&cluster.Status, &api.ClusterCondition{
Type: api.InvalidUUID,
Status: value,
Reason: reason,
Message: message,
})
},
client,
)
}
func updateInvalidReplicationCondition(status *api.ElasticsearchStatus, value v1.ConditionStatus) bool {
var message string
var reason string
if value == v1.ConditionTrue {
message = "Wrong RedundancyPolicy selected. Choose different RedundancyPolicy or add more nodes with data roles"
reason = "Invalid Settings"
} else {
message = ""
reason = ""
}
return updateESNodeCondition(status, &api.ClusterCondition{
Type: api.InvalidRedundancy,
Status: value,
Reason: reason,
Message: message,
})
}
func updateUpdatingSettingsCondition(status *api.ElasticsearchStatus, value v1.ConditionStatus) bool {
return updateESNodeCondition(status, &api.ClusterCondition{
Type: api.UpdatingSettings,
Status: value,
})
}
func updateScalingUpCondition(status *api.ElasticsearchStatus, value v1.ConditionStatus) bool {
return updateESNodeCondition(status, &api.ClusterCondition{
Type: api.ScalingUp,
Status: value,
})
}
func updateScalingDownCondition(status *api.ElasticsearchStatus, value v1.ConditionStatus) bool {
return updateESNodeCondition(status, &api.ClusterCondition{
Type: api.ScalingDown,
Status: value,
})
}
func updateRestartingCondition(status *api.ElasticsearchStatus, value v1.ConditionStatus) bool {
return updateESNodeCondition(status, &api.ClusterCondition{
Type: api.Restarting,
Status: value,
})
} | |
user_builder.rs | use diesel::prelude::*;
use models::User;
use uuid::Uuid;
pub struct UserBuilder<'a> {
first_name: String,
last_name: String,
email: Option<String>,
phone: Option<String>,
password: String,
connection: &'a PgConnection,
}
impl<'a> UserBuilder<'a> {
pub fn new(connection: &'a PgConnection) -> Self {
let x = Uuid::new_v4();
UserBuilder {
first_name: "Jeff".into(),
last_name: "Wilco".into(),
email: Some(format!("jeff{}@tari.com", x).into()),
phone: Some("555-555-5555".into()),
password: "examplePassword".into(),
connection,
}
}
pub fn with_first_name(mut self, first_name: &str) -> Self {
self.first_name = first_name.to_string();
self
}
pub fn with_last_name(mut self, last_name: &str) -> Self {
self.last_name = last_name.to_string();
self
}
pub fn with_password(mut self, password: String) -> Self {
self.password = password;
self
}
pub fn with_email(mut self, email: String) -> Self {
self.email = Some(email);
self
}
pub fn with_phone(mut self, phone: String) -> Self {
self.phone = Some(phone);
self
}
pub fn with_no_email(mut self) -> Self {
self.email = None;
self | }
pub fn with_no_phone(mut self) -> Self {
self.phone = None;
self
}
pub fn finish(&self) -> User {
User::create(
Some(self.first_name.to_string()),
Some(self.last_name.to_string()),
self.email.clone(),
self.phone.clone(),
&self.password,
)
.commit(None, self.connection)
.unwrap()
}
} | |
home.js | import React, { Component } from 'react';
import Header from './header/header';
import Banner from './banner/banner';
import Navigation from './navigation';
import FooterTop from './footer/footer_top';
import FooterBottom from './footer/footer_bottom';
class Home extends Component {
render() {
return (
<div>
<Header />
<Banner />
<Navigation />
| </div>
<div className="footer">
<FooterTop />
<FooterBottom />
</div>
</div>
);
}
}
export default Home; | <div className="homeWrapper">
<div className="container">
Home
</div> |
gflags.py | #!/usr/bin/env python
#
# Copyright (c) 2002, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---
# Author: Chad Lester
# Design and style contributions by:
# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
# Eric Veach, Laurence Gonsalves, Matthew Springer
# Code reorganized a bit by Craig Silverstein
"""This module is used to define and parse command line flags.
This module defines a *distributed* flag-definition policy: rather than
an application having to define all flags in or near main(), each python
module defines flags that are useful to it. When one python module
imports another, it gains access to the other's flags. (This is
implemented by having all modules share a common, global registry object
containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
'FlagValues' object (typically the global FlagValues FLAGS, defined
here). The 'FlagValues' object can scan the command line arguments and
pass flag arguments to the corresponding 'Flag' objects for
value-checking and type conversion. The converted flag values are
available as attributes of the 'FlagValues' object.
Code can access the flag through a FlagValues object, for instance
gflags.FLAGS.myflag. Typically, the __main__ module passes the command
line arguments to gflags.FLAGS for parsing.
At bottom, this module calls getopt(), so getopt functionality is
supported, including short- and long-style flags, and the use of -- to
terminate flags.
Methods defined by the flag module will throw 'FlagsError' exceptions.
The exception argument will be a human-readable string.
FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
take a name, default value, help-string, and optional 'short' name
(one-letter name). Some flags have other arguments, which are described
with the flag.
DEFINE_string: takes any input, and interprets it as a string.
DEFINE_bool or
DEFINE_boolean: typically does not take an argument: say --myflag to
set FLAGS.myflag to true, or --nomyflag to set
FLAGS.myflag to false. Alternately, you can say
--myflag=true or --myflag=t or --myflag=1 or
--myflag=false or --myflag=f or --myflag=0
DEFINE_float: takes an input and interprets it as a floating point
number. Takes optional args lower_bound and upper_bound;
if the number specified on the command line is out of
range, it will raise a FlagError.
DEFINE_integer: takes an input and interprets it as an integer. Takes
optional args lower_bound and upper_bound as for floats.
DEFINE_enum: takes a list of strings which represents legal values. If
the command-line value is not in this list, raise a flag
error. Otherwise, assign to FLAGS.flag as a string.
DEFINE_list: Takes a comma-separated list of strings on the commandline.
Stores them in a python list object.
DEFINE_spaceseplist: Takes a space-separated list of strings on the
commandline. Stores them in a python list object.
Example: --myspacesepflag "foo bar baz"
DEFINE_multistring: The same as DEFINE_string, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of strings),
even if the flag is only on the command line once.
DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of ints), even if
the flag is only on the command line once.
SPECIAL FLAGS: There are a few flags that have special meaning:
--help prints a list of all the flags in a human-readable fashion
--helpshort prints a list of all key flags (see below).
--helpxml prints a list of all flags, in XML format. DO NOT parse
the output of --help and --helpshort. Instead, parse
the output of --helpxml. For more info, see
"OUTPUT FOR --helpxml" below.
--flagfile=foo read flags from file foo.
--undefok=f1,f2 ignore unrecognized option errors for f1,f2.
For boolean flags, you should use --undefok=boolflag, and
--boolflag and --noboolflag will be accepted. Do not use
--undefok=noboolflag.
-- as in getopt(), terminates flag-processing
FLAGS VALIDATORS: If your program:
- requires flag X to be specified
- needs flag Y to match a regular expression
- or requires any more general constraint to be satisfied
then validators are for you!
Each validator represents a constraint over one flag, which is enforced
starting from the initial parsing of the flags and until the program
terminates.
Also, lower_bound and upper_bound for numerical flags are enforced using flag
validators.
Howto:
If you want to enforce a constraint over one flag, use
gflags.RegisterValidator(flag_name,
checker,
message='Flag validation failed',
flag_values=FLAGS)
After flag values are initially parsed, and after any change to the specified
flag, method checker(flag_value) will be executed. If constraint is not
satisfied, an IllegalFlagValue exception will be raised. See
RegisterValidator's docstring for a detailed explanation on how to construct
your own checker.
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
gflags.DEFINE_integer('my_version', 0, 'Version number.')
gflags.DEFINE_string('filename', None, 'Input file name', short_name='f')
gflags.RegisterValidator('my_version',
lambda value: value % 2 == 0,
message='--my_version must be divisible by 2')
gflags.MarkFlagAsRequired('filename')
NOTE ON --flagfile:
Flags may be loaded from text files in addition to being specified on
the commandline.
Any flags you don't feel like typing, throw them in a file, one flag per
line, for instance:
--myflag=myvalue
--nomyboolean_flag
You then specify your file with the special flag '--flagfile=somefile'.
You CAN recursively nest flagfile= tokens OR use multiple files on the
command line. Lines beginning with a single hash '#' or a double slash
'//' are comments in your flagfile.
Any flagfile=<file> will be interpreted as having a relative path from
the current working directory rather than from the place the file was
included from:
myPythonScript.py --flagfile=config/somefile.cfg
If somefile.cfg includes further --flagfile= directives, these will be
referenced relative to the original CWD, not from the directory the
including flagfile was found in!
The caveat applies to people who are including a series of nested files
in a different dir than they are executing out of. Relative path names
are always from CWD, not from the directory of the parent include
flagfile. We do now support '~' expanded directory names.
Absolute path names ALWAYS work!
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
gflags.DEFINE_string('name', 'Mr. President', 'your name')
gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
if FLAGS.debug: print 'non-flag arguments:', argv
print 'Happy Birthday', FLAGS.name
if FLAGS.age is not None:
print 'You are a %d year old %s' % (FLAGS.age, FLAGS.gender)
if __name__ == '__main__':
main(sys.argv)
KEY FLAGS:
As we already explained, each module gains access to all flags defined
by all the other modules it transitively imports. In the case of
non-trivial scripts, this means a lot of flags ... For documentation
purposes, it is good to identify the flags that are key (i.e., really
important) to a module. Clearly, the concept of "key flag" is a
subjective one. When trying to determine whether a flag is key to a
module or not, assume that you are trying to explain your module to a
potential user: which flags would you really like to mention first?
We'll describe shortly how to declare which flags are key to a module.
For the moment, assume we know the set of key flags for each module.
Then, if you use the app.py module, you can use the --helpshort flag to
print only the help for the flags that are key to the main module, in a
human-readable format.
NOTE: If you need to parse the flag help, do NOT use the output of
--help / --helpshort. That output is meant for human consumption, and
may be changed in the future. Instead, use --helpxml; flags that are
key for the main module are marked there with a <key>yes</key> element.
The set of key flags for a module M is composed of:
1. Flags defined by module M by calling a DEFINE_* function.
2. Flags that module M explictly declares as key by using the function
DECLARE_key_flag(<flag_name>)
3. Key flags of other modules that M specifies by using the function
ADOPT_module_key_flags(<other_module>)
This is a "bulk" declaration of key flags: each flag that is key for
<other_module> becomes key for the current module too.
Notice that if you do not use the functions described at points 2 and 3
above, then --helpshort prints information only about the flags defined
by the main module of our script. In many cases, this behavior is good
enough. But if you move part of the main module code (together with the
related flags) into a different module, then it is nice to use
DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
lists all relevant flags (otherwise, your code refactoring may confuse
your users).
Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
pluses and minuses: DECLARE_key_flag is more targeted and may lead a
more focused --helpshort documentation. ADOPT_module_key_flags is good
for cases when an entire module is considered key to the current script.
Also, it does not require updates to client scripts when a new flag is
added to the module.
EXAMPLE USAGE 2 (WITH KEY FLAGS):
Consider an application that contains the following three files (two
auxiliary modules and a main module)
File libfoo.py:
import gflags
gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
... some code ...
File libbar.py:
import gflags
gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
'Path to the GFS files for libbar.')
gflags.DEFINE_string('email_for_bar_errors', '[email protected]',
'Email address for bug reports about module libbar.')
gflags.DEFINE_boolean('bar_risky_hack', False,
'Turn on an experimental and buggy optimization.')
... some code ...
File myscript.py:
import gflags
import libfoo
import libbar
gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
# Declare that all flags that are key for libfoo are
# key for this module too.
gflags.ADOPT_module_key_flags(libfoo)
# Declare that the flag --bar_gfs_path (defined in libbar) is key
# for this module.
gflags.DECLARE_key_flag('bar_gfs_path')
... some code ...
When myscript is invoked with the flag --helpshort, the resulted help
message lists information about all the key flags for myscript:
--num_iterations, --num_replicas, --rpc2, and --bar_gfs_path.
Of course, myscript uses all the flags declared by it (in this case,
just --num_replicas) or by any of the modules it transitively imports
(e.g., the modules libfoo, libbar). E.g., it can access the value of
FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
flag for myscript.
OUTPUT FOR --helpxml:
The --helpxml flag generates output with the following structure:
<?xml version="1.0"?>
<AllFlags>
<program>PROGRAM_BASENAME</program>
<usage>MAIN_MODULE_DOCSTRING</usage>
(<flag>
[<key>yes</key>]
<file>DECLARING_MODULE</file>
<name>FLAG_NAME</name>
<meaning>FLAG_HELP_MESSAGE</meaning>
<default>DEFAULT_FLAG_VALUE</default>
<current>CURRENT_FLAG_VALUE</current>
<type>FLAG_TYPE</type>
[OPTIONAL_ELEMENTS]
</flag>)*
</AllFlags>
Notes:
1. The output is intentionally similar to the output generated by the
C++ command-line flag library. The few differences are due to the
Python flags that do not have a C++ equivalent (at least not yet),
e.g., DEFINE_list.
2. New XML elements may be added in the future.
3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can
pass for this flag on the command-line. E.g., for a flag defined
using DEFINE_list, this field may be foo,bar, not ['foo', 'bar'].
4. CURRENT_FLAG_VALUE is produced using str(). This means that the
string 'false' will be represented in the same way as the boolean
False. Using repr() would have removed this ambiguity and simplified
parsing, but would have broken the compatibility with the C++
command-line flags.
5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of
flags: lower_bound, upper_bound (for flags that specify bounds),
enum_value (for enum flags), list_separator (for flags that consist of
a list of values, separated by a special token).
6. We do not provide any example here: please use --helpxml instead.
This module requires at least python 2.2.1 to run.
"""
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
import cgi
import getopt
import os
import re
import string
import struct
import sys
from future.utils import with_metaclass
# pylint: disable-msg=C6204
try:
import fcntl
except ImportError:
fcntl = None
try:
# Importing termios will fail on non-unix platforms.
import termios
except ImportError:
termios = None
import gflags_validators
# pylint: enable-msg=C6204
# Are we running under pychecker?
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
def _GetCallingModuleObjectAndName():
"""Returns the module that's calling into this module.
We generally use this function to get the name of the module calling a
DEFINE_foo... function.
"""
# Walk down the stack to find the first globals dict that's not ours.
for depth in range(1, sys.getrecursionlimit()):
if not sys._getframe(depth).f_globals is globals():
globals_for_frame = sys._getframe(depth).f_globals
module, module_name = _GetModuleObjectAndName(globals_for_frame)
if module_name is not None:
return module, module_name
raise AssertionError("No module was found")
def _GetCallingModule():
"""Returns the name of the module that's calling into this module."""
return _GetCallingModuleObjectAndName()[1]
def _GetThisModuleObjectAndName():
"""Returns: (module object, module name) for this module."""
return _GetModuleObjectAndName(globals())
# module exceptions:
class FlagsError(Exception):
"""The base class for all flags errors."""
pass
class DuplicateFlag(FlagsError):
"""Raised if there is a flag naming conflict."""
pass
class CantOpenFlagFileError(FlagsError):
"""Raised if flagfile fails to open: doesn't exist, wrong permissions, etc."""
pass
class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag):
"""Special case of DuplicateFlag -- SWIG flag value can't be set to None.
This can be raised when a duplicate flag is created. Even if allow_override is
True, we still abort if the new value is None, because it's currently
impossible to pass None default value back to SWIG. See FlagValues.SetDefault
for details.
"""
pass
class DuplicateFlagError(DuplicateFlag):
"""A DuplicateFlag whose message cites the conflicting definitions.
A DuplicateFlagError conveys more information than a DuplicateFlag,
namely the modules where the conflicting definitions occur. This
class was created to avoid breaking external modules which depend on
the existing DuplicateFlags interface.
"""
def __init__(self, flagname, flag_values, other_flag_values=None):
"""Create a DuplicateFlagError.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
"""
self.flagname = flagname
first_module = flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
if other_flag_values is None:
second_module = _GetCallingModule()
else:
second_module = other_flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
msg = "The flag '%s' is defined twice. First from %s, Second from %s" % (
self.flagname, first_module, second_module)
DuplicateFlag.__init__(self, msg)
class IllegalFlagValue(FlagsError):
"""The flag command line argument is illegal."""
pass
class UnrecognizedFlag(FlagsError):
"""Raised if a flag is unrecognized."""
pass
# An UnrecognizedFlagError conveys more information than an UnrecognizedFlag.
# Since there are external modules that create DuplicateFlags, the interface to
# DuplicateFlag shouldn't change. The flagvalue will be assigned the full value
# of the flag and its argument, if any, allowing handling of unrecognized flags
# in an exception handler.
# If flagvalue is the empty string, then this exception is an due to a
# reference to a flag that was not already defined.
class UnrecognizedFlagError(UnrecognizedFlag):
def __init__(self, flagname, flagvalue=''):
self.flagname = flagname
self.flagvalue = flagvalue
UnrecognizedFlag.__init__(
self, "Unknown command line flag '%s'" % flagname)
# Global variable used by expvar
_exported_flags = {}
_help_width = 80 # width of help output
def GetHelpWidth():
"""Returns: an integer, the width of help lines that is used in TextWrap."""
if (not sys.stdout.isatty()) or (termios is None) or (fcntl is None):
return _help_width
try:
data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
columns = struct.unpack('hh', data)[1]
# Emacs mode returns 0.
# Here we assume that any value below 40 is unreasonable
if columns >= 40:
return columns
# Returning an int as default is fine, int(int) just return the int.
return int(os.getenv('COLUMNS', _help_width))
except (TypeError, IOError, struct.error):
return _help_width
def CutCommonSpacePrefix(text):
"""Removes a common space prefix from the lines of a multiline text.
If the first line does not start with a space, it is left as it is and
only in the remaining lines a common space prefix is being searched
for. That means the first line will stay untouched. This is especially
useful to turn doc strings into help texts. This is because some
people prefer to have the doc comment start already after the
apostrophe and then align the following lines while others have the
apostrophes on a separate line.
The function also drops trailing empty lines and ignores empty lines
following the initial content line while calculating the initial
common whitespace.
Args:
text: text to work on
Returns:
the resulting text
"""
text_lines = text.splitlines()
# Drop trailing empty lines
while text_lines and not text_lines[-1]:
text_lines = text_lines[:-1]
if text_lines:
# We got some content, is the first line starting with a space?
if text_lines[0] and text_lines[0][0].isspace():
text_first_line = []
else:
text_first_line = [text_lines.pop(0)]
# Calculate length of common leading whitespace (only over content lines)
common_prefix = os.path.commonprefix([line for line in text_lines if line])
space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
# If we have a common space prefix, drop it from all lines
if space_prefix_len:
for index in range(len(text_lines)):
if text_lines[index]:
text_lines[index] = text_lines[index][space_prefix_len:]
return '\n'.join(text_first_line + text_lines)
return ''
def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '):
"""Wraps a given text to a maximum line length and returns it.
We turn lines that only contain whitespace into empty lines. We keep
new lines and tabs (e.g., we do not treat tabs as spaces).
Args:
text: text to wrap
length: maximum length of a line, includes indentation
if this is None then use GetHelpWidth()
indent: indent for all but first line
firstline_indent: indent for first line; if None, fall back to indent
tabs: replacement for tabs
Returns:
wrapped text
Raises:
FlagsError: if indent not shorter than length
FlagsError: if firstline_indent not shorter than length
"""
# Get defaults where callee used None
if length is None:
length = GetHelpWidth()
if indent is None:
indent = ''
if len(indent) >= length:
raise FlagsError('Indent must be shorter than length')
# In line we will be holding the current line which is to be started
# with indent (or firstline_indent if available) and then appended
# with words.
if firstline_indent is None:
firstline_indent = ''
line = indent
else:
line = firstline_indent
if len(firstline_indent) >= length:
raise FlagsError('First line indent must be shorter than length')
# If the callee does not care about tabs we simply convert them to
# spaces If callee wanted tabs to be single space then we do that
# already here.
if not tabs or tabs == ' ':
text = text.replace('\t', ' ')
else:
tabs_are_whitespace = not tabs.strip()
line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)
# Split the text into lines and the lines with the regex above. The
# resulting lines are collected in result[]. For each split we get the
# spaces, the tabs and the next non white space (e.g. next word).
result = []
for text_line in text.splitlines():
# Store result length so we can find out whether processing the next
# line gave any new content
old_result_len = len(result)
# Process next line with line_regex. For optimization we do an rstrip().
# - process tabs (changes either line or word, see below)
# - process word (first try to squeeze on line, then wrap or force wrap)
# Spaces found on the line are ignored, they get added while wrapping as
# needed.
for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
# If tabs weren't converted to spaces, handle them now
if current_tabs:
# If the last thing we added was a space anyway then drop
# it. But let's not get rid of the indentation.
if (((result and line != indent) or
(not result and line != firstline_indent)) and line[-1] == ' '):
line = line[:-1]
# Add the tabs, if that means adding whitespace, just add it at
# the line, the rstrip() code while shorten the line down if
# necessary
if tabs_are_whitespace:
line += tabs * len(current_tabs)
else:
# if not all tab replacement is whitespace we prepend it to the word
word = tabs * len(current_tabs) + word
# Handle the case where word cannot be squeezed onto current last line
if len(line) + len(word) > length and len(indent) + len(word) <= length:
result.append(line.rstrip())
line = indent + word
word = ''
# No space left on line or can we append a space?
if len(line) + 1 >= length:
result.append(line.rstrip())
line = indent
else:
line += ' '
# Add word and shorten it up to allowed line length. Restart next
# line with indent and repeat, or add a space if we're done (word
# finished) This deals with words that cannot fit on one line
# (e.g. indent + word longer than allowed line length).
while len(line) + len(word) >= length:
line += word
result.append(line[:length])
word = line[length:]
line = indent
# Default case, simply append the word and a space
if word:
line += word + ' '
# End of input line. If we have content we finish the line. If the
# current line is just the indent but we had content in during this
# original line then we need to add an empty line.
if (result and line != indent) or (not result and line != firstline_indent):
result.append(line.rstrip())
elif len(result) == old_result_len:
result.append('')
line = indent
return '\n'.join(result)
def DocToHelp(doc):
"""Takes a __doc__ string and reformats it as help."""
# Get rid of starting and ending white space. Using lstrip() or even
# strip() could drop more than maximum of first line and right space
# of last line.
doc = doc.strip()
# Get rid of all empty lines
whitespace_only_line = re.compile('^[ \t]+$', re.M)
doc = whitespace_only_line.sub('', doc)
# Cut out common space at line beginnings
doc = CutCommonSpacePrefix(doc)
# Just like this module's comment, comments tend to be aligned somehow.
# In other words they all start with the same amount of white space
# 1) keep double new lines
# 2) keep ws after new lines if not empty line
# 3) all other new lines shall be changed to a space
# Solution: Match new lines between non white space and replace with space.
doc = re.sub('(?<=\S)\n(?=\S)', ' ', doc, re.M)
return doc
def _GetModuleObjectAndName(globals_dict):
"""Returns the module that defines a global environment, and its name.
Args:
globals_dict: A dictionary that should correspond to an environment
providing the values of the globals.
Returns:
A pair consisting of (1) module object and (2) module name (a
string). Returns (None, None) if the module could not be
identified.
"""
# The use of .items() (instead of .iteritems()) is NOT a mistake: if
# a parallel thread imports a module while we iterate over
# .iteritems() (not nice, but possible), we get a RuntimeError ...
# Hence, we use the slightly slower but safer .items().
for name, module in list(sys.modules.items()):
if getattr(module, '__dict__', None) is globals_dict:
if name == '__main__':
# Pick a more informative name for the main module.
name = sys.argv[0]
return (module, name)
return (None, None)
def _GetMainModule():
"""Returns: string, name of the module from which execution started."""
# First, try to use the same logic used by _GetCallingModuleObjectAndName(),
# i.e., call _GetModuleObjectAndName(). For that we first need to
# find the dictionary that the main module uses to store the
# globals.
#
# That's (normally) the same dictionary object that the deepest
# (oldest) stack frame is using for globals.
deepest_frame = sys._getframe(0)
while deepest_frame.f_back is not None:
deepest_frame = deepest_frame.f_back
globals_for_main_module = deepest_frame.f_globals
main_module_name = _GetModuleObjectAndName(globals_for_main_module)[1]
# The above strategy fails in some cases (e.g., tools that compute
# code coverage by redefining, among other things, the main module).
# If so, just use sys.argv[0]. We can probably always do this, but
# it's safest to try to use the same logic as _GetCallingModuleObjectAndName()
if main_module_name is None:
main_module_name = sys.argv[0]
return main_module_name
class FlagValues(object):
"""Registry of 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: gflags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value attribute of the registered 'Flag' objects can be accessed
as attributes of this 'FlagValues' object, through __getattr__. Both
the long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
def __init__(self):
# Since everything in this class is so heavily overloaded, the only
# way of defining and using fields is to access __dict__ directly.
# Dictionary: flag name (string) -> Flag object.
self.__dict__['__flags'] = {}
# Dictionary: module name (string) -> list of Flag objects that are defined
# by that module.
self.__dict__['__flags_by_module'] = {}
# Dictionary: module id (int) -> list of Flag objects that are defined by
# that module.
self.__dict__['__flags_by_module_id'] = {}
# Dictionary: module name (string) -> list of Flag objects that are
# key for that module.
self.__dict__['__key_flags_by_module'] = {}
# Set if we should use new style gnu_getopt rather than getopt when parsing
# the args. Only possible with Python 2.3+
self.UseGnuGetOpt(False)
def UseGnuGetOpt(self, use_gnu_getopt=True):
"""Use GNU-style scanning. Allows mixing of flag and non-flag arguments.
See http://docs.python.org/library/getopt.html#getopt.gnu_getopt
Args:
use_gnu_getopt: wether or not to use GNU style scanning.
"""
self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
def IsGnuGetOpt(self):
return self.__dict__['__use_gnu_getopt']
def FlagDict(self):
return self.__dict__['__flags']
def FlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module']
def FlagsByModuleIdDict(self):
"""Returns the dictionary of module_id -> list of defined flags.
Returns:
A dictionary. Its keys are module IDs (ints). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module_id']
def KeyFlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of key flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__key_flags_by_module']
def _RegisterFlagByModule(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module = self.FlagsByModuleDict()
flags_by_module.setdefault(module_name, []).append(flag)
def _RegisterFlagByModuleId(self, module_id, flag):
"""Records the module that defines a specific flag.
Args:
module_id: An int, the ID of the Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module_id = self.FlagsByModuleIdDict()
flags_by_module_id.setdefault(module_id, []).append(flag)
def _RegisterKeyFlagForModule(self, module_name, flag):
"""Specifies that a flag is a key flag for a module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
key_flags_by_module = self.KeyFlagsByModuleDict()
# The list of key flags for the module named module_name.
key_flags = key_flags_by_module.setdefault(module_name, [])
# Add flag, but avoid duplicates.
if flag not in key_flags:
key_flags.append(flag)
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, []))
def _GetKeyFlagsForModule(self, module):
"""Returns the list of key flags for a module.
Args:
module: A module object or a module name (a string)
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
# Any flag is a key flag for the module that defined it. NOTE:
# key_flags is a fresh list: we can update it without affecting the
# internals of this FlagValues object.
key_flags = self._GetFlagsDefinedByModule(module)
# Take into account flags explicitly declared as key for a module.
for flag in self.KeyFlagsByModuleDict().get(module, []):
if flag not in key_flags:
key_flags.append(flag)
return key_flags
def FindModuleDefiningFlag(self, flagname, default=None):
"""Return the name of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module, flags in self.FlagsByModuleDict().items():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module
return default
def FindModuleIdDefiningFlag(self, flagname, default=None):
"""Return the ID of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The ID of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module_id, flags in self.FlagsByModuleIdDict().items():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module_id
return default
def AppendFlagValues(self, flag_values):
"""Appends flags registered in another FlagValues instance.
Args:
flag_values: registry to copy from
"""
for flag_name, flag in flag_values.FlagDict().items():
# Each flags with shortname appears here twice (once under its
# normal name, and again with its short name). To prevent
# problems (DuplicateFlagError) with double flag registration, we
# perform a check to make sure that the entry we're looking at is
# for its normal name.
if flag_name == flag.name:
try:
self[flag_name] = flag
except DuplicateFlagError:
raise DuplicateFlagError(flag_name, self,
other_flag_values=flag_values)
def RemoveFlagValues(self, flag_values):
"""Remove flags that were previously appended from another FlagValues.
Args:
flag_values: registry containing flags to remove.
"""
for flag_name in flag_values.FlagDict():
self.__delattr__(flag_name)
  def __setitem__(self, name, flag):
    """Registers a new flag variable.

    Registers flag under name and, if it has one, under its short name.
    Args:
      name: A string, the long name of the flag.
      flag: A Flag object.
    Raises:
      IllegalFlagValue: if flag is not a Flag instance.
      FlagsError: if name is not a non-empty string.
      DuplicateFlagError: if the name (or short name) is already taken
        and neither registration allows overriding.
    """
    fl = self.FlagDict()
    if not isinstance(flag, Flag):
      raise IllegalFlagValue(flag)
    if not isinstance(name, type("")):
      raise FlagsError("Flag name must be a string")
    if len(name) == 0:
      raise FlagsError("Flag name cannot be empty")
    # If running under pychecker, duplicate keys are likely to be
    # defined. Disable check for duplicate keys when pycheck'ing.
    if (name in fl and not flag.allow_override and
        not fl[name].allow_override and not _RUNNING_PYCHECKER):
      module, module_name = _GetCallingModuleObjectAndName()
      if (self.FindModuleDefiningFlag(name) == module_name and
          id(module) != self.FindModuleIdDefiningFlag(name)):
        # If the flag has already been defined by a module with the same name,
        # but a different ID, we can stop here because it indicates that the
        # module is simply being imported a subsequent time.
        return
      raise DuplicateFlagError(name, self)
    short_name = flag.short_name
    if short_name is not None:
      # The short name is an alias entry pointing at the same Flag object.
      if (short_name in fl and not flag.allow_override and
          not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
        raise DuplicateFlagError(short_name, self)
      fl[short_name] = flag
    fl[name] = flag
    global _exported_flags
    _exported_flags[name] = flag
def __getitem__(self, name):
"""Retrieves the Flag object for the flag --name."""
return self.FlagDict()[name]
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
fl = self.FlagDict()
if name not in fl:
raise AttributeError(name)
return fl[name].value
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
fl = self.FlagDict()
fl[name].value = value
self._AssertValidators(fl[name].validators)
return value
def _AssertAllValidators(self):
all_validators = set()
for flag in self.FlagDict().values():
for validator in flag.validators:
all_validators.add(validator)
self._AssertValidators(all_validators)
def _AssertValidators(self, validators):
"""Assert if all validators in the list are satisfied.
Asserts validators in the order they were created.
Args:
validators: Iterable(gflags_validators.Validator), validators to be
verified
Raises:
AttributeError: if validators work with a non-existing flag.
IllegalFlagValue: if validation fails for at least one validator
"""
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.Verify(self)
except gflags_validators.Error as e:
message = validator.PrintFlagsWithValues(self)
raise IllegalFlagValue('%s: %s' % (message, str(e)))
def _FlagIsRegistered(self, flag_obj):
"""Checks whether a Flag object is registered under some name.
Note: this is non trivial: in addition to its normal name, a flag
may have a short name too. In self.FlagDict(), both the normal and
the short name are mapped to the same flag object. E.g., calling
only "del FLAGS.short_name" is not unregistering the corresponding
Flag object (it is still registered under the longer name).
Args:
flag_obj: A Flag object.
Returns:
A boolean: True iff flag_obj is registered under some name.
"""
flag_dict = self.FlagDict()
# Check whether flag_obj is registered under its long name.
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
# Check whether flag_obj is registered under its short name.
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
# The flag cannot be registered under any other name, so we do not
# need to do a full search through the values of self.FlagDict().
return False
  def __delattr__(self, flag_name):
    """Deletes a previously-defined flag from a flag object.
    This method makes sure we can delete a flag by using
    del flag_values_object.<flag_name>
    E.g.,
    gflags.DEFINE_integer('foo', 1, 'Integer flag.')
    del gflags.FLAGS.foo
    Args:
      flag_name: A string, the name of the flag to be deleted.
    Raises:
      AttributeError: When there is no registered flag named flag_name.
    """
    fl = self.FlagDict()
    if flag_name not in fl:
      raise AttributeError(flag_name)
    flag_obj = fl[flag_name]
    del fl[flag_name]
    # The same Flag may still be reachable under its other (short/long)
    # alias; only scrub module bookkeeping once no alias remains.
    if not self._FlagIsRegistered(flag_obj):
      # If the Flag object indicated by flag_name is no longer
      # registered (please see the docstring of _FlagIsRegistered), then
      # we delete the occurrences of the flag object in all our internal
      # dictionaries.
      self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj)
      self.__RemoveFlagFromDictByModule(self.FlagsByModuleIdDict(), flag_obj)
      self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj)
def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
"""Removes a flag object from a module -> list of flags dictionary.
Args:
flags_by_module_dict: A dictionary that maps module names to lists of
flags.
flag_obj: A flag object.
"""
for unused_module, flags_in_module in flags_by_module_dict.items():
# while (as opposed to if) takes care of multiple occurrences of a
# flag in the list for the same module.
while flag_obj in flags_in_module:
flags_in_module.remove(flag_obj)
def SetDefault(self, name, value):
"""Changes the default value of the named flag object."""
fl = self.FlagDict()
if name not in fl:
raise AttributeError(name)
fl[name].SetDefault(value)
self._AssertValidators(fl[name].validators)
def __contains__(self, name):
"""Returns True if name is a value (flag) in the dict."""
return name in self.FlagDict()
has_key = __contains__ # a synonym for __contains__()
def __iter__(self):
return iter(self.FlagDict())
  def __call__(self, argv):
    """Parses flags from argv; stores parsed flags into this FlagValues object.
    All unparsed arguments are returned. Flags are parsed using the GNU
    Program Argument Syntax Conventions, using getopt:
    http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt
    Args:
      argv: argument list. Can be of any type that may be converted to a list.
    Returns:
      The list of arguments not parsed as options, including argv[0]
    Raises:
      FlagsError: on any parsing error
    """
    # Support any sequence type that can be converted to a list
    argv = list(argv)
    shortopts = ""
    longopts = []
    fl = self.FlagDict()
    # This pre parses the argv list for --flagfile=<> options.
    argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False)
    # Correct the argv to support the google style of passing boolean
    # parameters. Boolean parameters may be passed by using --mybool,
    # --nomybool, --mybool=(true|false|1|0). getopt does not support
    # having options that may or may not have a parameter. We replace
    # instances of the short form --mybool and --nomybool with their
    # full forms: --mybool=(true|false).
    original_argv = list(argv)  # list() makes a copy
    shortest_matches = None
    for name, flag in list(fl.items()):
      if not flag.boolean:
        continue
      if shortest_matches is None:
        # Determine the smallest allowable prefix for all flag names
        # (computed lazily: only needed once a boolean flag exists).
        shortest_matches = self.ShortestUniquePrefixes(fl)
      no_name = 'no' + name
      prefix = shortest_matches[name]
      no_prefix = shortest_matches[no_name]
      # Replace all occurrences of this boolean with extended forms.
      # Arguments already containing '=' are left untouched.
      for arg_idx in range(1, len(argv)):
        arg = argv[arg_idx]
        if arg.find('=') >= 0: continue
        if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
          argv[arg_idx] = ('--%s=true' % name)
        elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
          argv[arg_idx] = ('--%s=false' % name)
    # Loop over all of the flags, building up the lists of short options
    # and long options that will be passed to getopt. Short options are
    # specified as a string of letters, each letter followed by a colon
    # if it takes an argument. Long options are stored in an array of
    # strings. Each string ends with an '=' if it takes an argument.
    for name, flag in list(fl.items()):
      longopts.append(name + "=")
      if len(name) == 1:  # one-letter option: allow short flag type also
        shortopts += name
        if not flag.boolean:
          shortopts += ":"
    longopts.append('undefok=')
    undefok_flags = []
    # In case --undefok is specified, loop to pick up unrecognized
    # options one by one: each GetoptError names one offender, which is
    # removed from args before retrying.
    unrecognized_opts = []
    args = argv[1:]
    while True:
      try:
        if self.__dict__['__use_gnu_getopt']:
          optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
        else:
          optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
        break
      except getopt.GetoptError as e:
        if not e.opt or e.opt in fl:
          # Not an unrecognized option, re-raise the exception as a FlagsError
          raise FlagsError(e)
        # Remove offender from args and try again
        for arg_index in range(len(args)):
          if ((args[arg_index] == '--' + e.opt) or
              (args[arg_index] == '-' + e.opt) or
              (args[arg_index].startswith('--' + e.opt + '='))):
            unrecognized_opts.append((e.opt, args[arg_index]))
            args = args[0:arg_index] + args[arg_index+1:]
            break
        else:
          # We should have found the option, so we don't expect to get
          # here. We could assert, but raising the original exception
          # might work better.
          raise FlagsError(e)
    for name, arg in optlist:
      if name == '--undefok':
        flag_names = arg.split(',')
        undefok_flags.extend(flag_names)
        # For boolean flags, if --undefok=boolflag is specified, then we should
        # also accept --noboolflag, in addition to --boolflag.
        # Since we don't know the type of the undefok'd flag, this will affect
        # non-boolean flags as well.
        # NOTE: You shouldn't use --undefok=noboolflag, because then we will
        # accept --nonoboolflag here. We are choosing not to do the conversion
        # from noboolflag -> boolflag because of the ambiguity that flag names
        # can start with 'no'.
        undefok_flags.extend('no' + name for name in flag_names)
        continue
      if name.startswith('--'):
        # long option
        name = name[2:]
        short_option = 0
      else:
        # short option
        name = name[1:]
        short_option = 1
      if name in fl:
        flag = fl[name]
        # A short boolean option has no argument; force a truthy value.
        if flag.boolean and short_option: arg = 1
        flag.Parse(arg)
    # If there were unrecognized options, raise an exception unless
    # the options were named via --undefok.
    for opt, value in unrecognized_opts:
      if opt not in undefok_flags:
        raise UnrecognizedFlagError(opt, value)
    if unparsed_args:
      if self.__dict__['__use_gnu_getopt']:
        # if using gnu_getopt just return the program name + remainder of argv.
        ret_val = argv[:1] + unparsed_args
      else:
        # unparsed_args becomes the first non-flag detected by getopt to
        # the end of argv. Because argv may have been modified above,
        # return original_argv for this region.
        ret_val = argv[:1] + original_argv[-len(unparsed_args):]
    else:
      ret_val = argv[:1]
    self._AssertAllValidators()
    return ret_val
def Reset(self):
"""Resets the values to the point before FLAGS(argv) was called."""
for f in list(self.FlagDict().values()):
f.Unparse()
def RegisteredFlags(self):
"""Returns: a list of the names and short names of all registered flags."""
return list(self.FlagDict())
def FlagValuesDict(self):
"""Returns: a dictionary that maps flag names to flag values."""
flag_values = {}
for flag_name in self.RegisteredFlags():
flag = self.FlagDict()[flag_name]
flag_values[flag_name] = flag.value
return flag_values
def __str__(self):
"""Generates a help string for all known flags."""
return self.GetHelp()
  def GetHelp(self, prefix=''):
    """Generates a help string for all known flags.
    Args:
      prefix: A string prepended to each generated help line.
    Returns:
      The multi-line help text, grouped by defining module when module
      information is available.
    """
    helplist = []
    flags_by_module = self.FlagsByModuleDict()
    if flags_by_module:
      modules = sorted(flags_by_module)
      # Print the help for the main module first, if possible.
      main_module = _GetMainModule()
      if main_module in modules:
        modules.remove(main_module)
        modules = [main_module] + modules
      for module in modules:
        self.__RenderOurModuleFlags(module, helplist)
      # The special flags (e.g. --help variants) belong to gflags itself.
      self.__RenderModuleFlags('gflags',
                               list(_SPECIAL_FLAGS.FlagDict().values()),
                               helplist)
    else:
      # Just print one long list of flags.
      self.__RenderFlagList(
          list(self.FlagDict().values()) + list(_SPECIAL_FLAGS.FlagDict().values()),
          helplist, prefix)
    return '\n'.join(helplist)
def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
"""Generates a help string for a given module."""
if not isinstance(module, str):
module = module.__name__
output_lines.append('\n%s%s:' % (prefix, module))
self.__RenderFlagList(flags, output_lines, prefix + " ")
def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
"""Generates a help string for a given module."""
flags = self._GetFlagsDefinedByModule(module)
if flags:
self.__RenderModuleFlags(module, flags, output_lines, prefix)
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
"""Generates a help string for the key flags of a given module.
Args:
module: A module object or a module name (a string).
output_lines: A list of strings. The generated help message
lines will be appended to this list.
prefix: A string that is prepended to each generated help line.
"""
key_flags = self._GetKeyFlagsForModule(module)
if key_flags:
self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
def ModuleHelp(self, module):
"""Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
"""
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist)
def MainModuleHelp(self):
"""Describe the key flags of the main module.
Returns:
string describing the key flags of a module.
"""
return self.ModuleHelp(_GetMainModule())
  def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
    """Appends formatted help text for every flag in flaglist.
    Args:
      flaglist: An iterable of Flag objects.
      output_lines: A list of strings; generated help lines are appended.
      prefix: A string prepended to each generated help line.
    """
    fl = self.FlagDict()
    special_fl = _SPECIAL_FLAGS.FlagDict()
    flaglist = [(flag.name, flag) for flag in flaglist]
    flaglist.sort()
    flagset = {}
    for (name, flag) in flaglist:
      # It's possible this flag got deleted or overridden since being
      # registered in the per-module flaglist. Check now against the
      # canonical source of current flag information, the FlagDict.
      if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
        # a different flag is using this name now
        continue
      # only print help once (a flag may appear under long and short name)
      if flag in flagset: continue
      flagset[flag] = 1
      flaghelp = ""
      if flag.short_name: flaghelp += "-%s," % flag.short_name
      if flag.boolean:
        # Booleans advertise both the --name and --noname spellings.
        flaghelp += "--[no]%s" % flag.name + ":"
      else:
        flaghelp += "--%s" % flag.name + ":"
      flaghelp += " "
      if flag.help:
        flaghelp += flag.help
      flaghelp = TextWrap(flaghelp, indent=prefix+" ",
                          firstline_indent=prefix)
      if flag.default_as_str:
        flaghelp += "\n"
        flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
                             indent=prefix+" ")
      if flag.parser.syntactic_help:
        flaghelp += "\n"
        flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
                             indent=prefix+" ")
      output_lines.append(flaghelp)
def get(self, name, default):
"""Returns the value of a flag (if not None) or a default value.
Args:
name: A string, the name of a flag.
default: Default value to use if the flag value is None.
"""
value = self.__getattr__(name)
if value is not None: # Can't do if not value, b/c value might be '0' or ""
return value
else:
return default
def ShortestUniquePrefixes(self, fl):
"""Returns: dictionary; maps flag names to their shortest unique prefix."""
# Sort the list of flag names
sorted_flags = []
for name, flag in list(fl.items()):
sorted_flags.append(name)
if flag.boolean:
sorted_flags.append('no%s' % name)
sorted_flags.sort()
# For each name in the sorted list, determine the shortest unique
# prefix by comparing itself to the next name and to the previous
# name (the latter check uses cached info from the previous loop).
shortest_matches = {}
prev_idx = 0
for flag_idx in range(len(sorted_flags)):
curr = sorted_flags[flag_idx]
if flag_idx == (len(sorted_flags) - 1):
next = None
else:
next = sorted_flags[flag_idx+1]
next_len = len(next)
for curr_idx in range(len(curr)):
if (next is None
or curr_idx >= next_len
or curr[curr_idx] != next[curr_idx]):
# curr longer than next or no more chars in common
shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
prev_idx = curr_idx
break
else:
# curr shorter than (or equal to) next
shortest_matches[curr] = curr
prev_idx = curr_idx + 1 # next will need at least one more char
return shortest_matches
def __IsFlagFileDirective(self, flag_string):
"""Checks whether flag_string contain a --flagfile=<foo> directive."""
if isinstance(flag_string, type("")):
if flag_string.startswith('--flagfile='):
return 1
elif flag_string == '--flagfile':
return 1
elif flag_string.startswith('-flagfile='):
return 1
elif flag_string == '-flagfile':
return 1
else:
return 0
return 0
def ExtractFilename(self, flagfile_str):
"""Returns filename from a flagfile_str of form -[-]flagfile=filename.
The cases of --flagfile foo and -flagfile foo shouldn't be hitting
this function, as they are dealt with in the level above this
function.
"""
if flagfile_str.startswith('--flagfile='):
return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
elif flagfile_str.startswith('-flagfile='):
return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
else:
raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
def __GetFlagFileLines(self, filename, parsed_file_list):
"""Returns the useful (!=comments, etc) lines from a file with flags.
Args:
filename: A string, the name of the flag file.
parsed_file_list: A list of the names of the files we have
already read. MUTATED BY THIS FUNCTION.
Returns:
List of strings. See the note below.
NOTE(springer): This function checks for a nested --flagfile=<foo>
tag and handles the lower file recursively. It returns a list of
all the lines that _could_ contain command flags. This is
EVERYTHING except whitespace lines and comments (lines starting
with '#' or '//').
"""
line_list = [] # All line from flagfile.
flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags.
try:
file_obj = open(filename, 'r')
except IOError as e_msg:
raise CantOpenFlagFileError('ERROR:: Unable to open flagfile: %s' % e_msg)
line_list = file_obj.readlines()
file_obj.close()
parsed_file_list.append(filename)
# This is where we check each line in the file we just read.
for line in line_list:
if line.isspace():
pass
# Checks for comment (a line that starts with '#').
elif line.startswith('#') or line.startswith('//'):
pass
# Checks for a nested "--flagfile=<bar>" flag in the current file.
# If we find one, recursively parse down into that file.
elif self.__IsFlagFileDirective(line):
sub_filename = self.ExtractFilename(line)
# We do a little safety check for reparsing a file we've already done.
if not sub_filename in parsed_file_list:
included_flags = self.__GetFlagFileLines(sub_filename,
parsed_file_list)
flag_line_list.extend(included_flags)
else: # Case of hitting a circularly included file.
sys.stderr.write('Warning: Hit circular flagfile dependency: %s\n' %
(sub_filename,))
else:
# Any line that's not a comment or a nested flagfile should get
# copied into 2nd position. This leaves earlier arguments
# further back in the list, thus giving them higher priority.
flag_line_list.append(line.strip())
return flag_line_list
  def ReadFlagsFromFiles(self, argv, force_gnu=True):
    """Processes command line args, but also allow args to be read from file.
    Args:
      argv: A list of strings, usually sys.argv[1:], which may contain one or
        more flagfile directives of the form --flagfile="./filename".
        Note that the name of the program (sys.argv[0]) should be omitted.
      force_gnu: If False, --flagfile parsing obeys normal flag semantics.
        If True, --flagfile parsing instead follows gnu_getopt semantics.
        *** WARNING *** force_gnu=False may become the future default!
    Returns:
      A new list which has the original list combined with what we read
      from any flagfile(s).
    Raises:
      IllegalFlagValue: if a bare --flagfile/-flagfile has no following
        filename argument.
    References: Global gflags.FLAG class instance.
    This function should be called before the normal FLAGS(argv) call.
    This function scans the input list for a flag that looks like:
    --flagfile=<somefile>. Then it opens <somefile>, reads all valid key
    and value pairs and inserts them into the input list between the
    first item of the list and any subsequent items in the list.
    Note that your application's flags are still defined the usual way
    using gflags DEFINE_flag() type functions.
    Notes (assuming we're getting a commandline of some sort as our input):
    --> Flags from the command line argv _should_ always take precedence!
    --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
        It will be processed after the parent flag file is done.
    --> For duplicate flags, first one we hit should "win".
    --> In a flagfile, a line beginning with # or // is a comment.
    --> Entirely blank lines _should_ be ignored.
    """
    parsed_file_list = []
    rest_of_args = argv
    new_argv = []
    while rest_of_args:
      current_arg = rest_of_args[0]
      rest_of_args = rest_of_args[1:]
      if self.__IsFlagFileDirective(current_arg):
        # This handles the case of -(-)flagfile foo. In this case the
        # next arg really is part of this one.
        if current_arg == '--flagfile' or current_arg == '-flagfile':
          if not rest_of_args:
            raise IllegalFlagValue('--flagfile with no argument')
          flag_filename = os.path.expanduser(rest_of_args[0])
          rest_of_args = rest_of_args[1:]
        else:
          # This handles the case of (-)-flagfile=foo.
          flag_filename = self.ExtractFilename(current_arg)
        new_argv.extend(
            self.__GetFlagFileLines(flag_filename, parsed_file_list))
      else:
        new_argv.append(current_arg)
        # Stop parsing after '--', like getopt and gnu_getopt.
        if current_arg == '--':
          break
        # Stop parsing after a non-flag, like getopt.
        if not current_arg.startswith('-'):
          if not force_gnu and not self.__dict__['__use_gnu_getopt']:
            break
    # Everything after the stop point passes through untouched.
    if rest_of_args:
      new_argv.extend(rest_of_args)
    return new_argv
def FlagsIntoString(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
"""
s = ''
for flag in list(self.FlagDict().values()):
if flag.value is not None:
s += flag.Serialize() + '\n'
return s
def AppendFlagsIntoFile(self, filename):
"""Appends all flags assignments from this FlagInfo object to a file.
Output will be in the format of a flagfile.
NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile
from http://code.google.com/p/google-gflags
"""
out_file = open(filename, 'a')
out_file.write(self.FlagsIntoString())
out_file.close()
  def WriteHelpInXMLFormat(self, outfile=None):
    """Outputs flag documentation in XML format.
    NOTE: We use element names that are consistent with those used by
    the C++ command-line flag library, from
    http://code.google.com/p/google-gflags
    We also use a few new elements (e.g., <key>), but we do not
    interfere / overlap with existing XML elements used by the C++
    library. Please maintain this consistency.
    Args:
      outfile: File object we write to. Default None means sys.stdout.
    """
    outfile = outfile or sys.stdout
    outfile.write('<?xml version=\"1.0\"?>\n')
    outfile.write('<AllFlags>\n')
    indent = ' '
    _WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
                           indent)
    # The main module's docstring doubles as the usage text.
    usage_doc = sys.modules['__main__'].__doc__
    if not usage_doc:
      usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
    else:
      usage_doc = usage_doc.replace('%s', sys.argv[0])
    _WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)
    # Get list of key flags for the main module.
    key_flags = self._GetKeyFlagsForModule(_GetMainModule())
    # Sort flags by declaring module name and next by flag name.
    flags_by_module = self.FlagsByModuleDict()
    all_module_names = list(flags_by_module.keys())
    all_module_names.sort()
    for module_name in all_module_names:
      flag_list = [(f.name, f) for f in flags_by_module[module_name]]
      flag_list.sort()
      for unused_flag_name, flag in flag_list:
        # Key flags are marked in the XML so tools can distinguish them.
        is_key = flag in key_flags
        flag.WriteInfoInXMLFormat(outfile, module_name,
                                  is_key=is_key, indent=indent)
    outfile.write('</AllFlags>\n')
    outfile.flush()
def AddValidator(self, validator):
"""Register new flags validator to be checked.
Args:
validator: gflags_validators.Validator
Raises:
AttributeError: if validators work with a non-existing flag.
"""
for flag_name in validator.GetFlagsNames():
flag = self.FlagDict()[flag_name]
flag.validators.append(validator)
# end of FlagValues definition


# The global FlagValues instance: the DEFINE_* helpers register flags
# against this singleton, and client modules read parsed values from it.
FLAGS = FlagValues()
def _StrOrUnicode(value):
  """Converts value to a python string.

  Kept for backward compatibility: under Python 2 this fell back to
  unicode() on UnicodeEncodeError.  In Python 3 str() always succeeds,
  so the old except branch was dead code returning the same str(value).
  """
  return str(value)
def _MakeXMLSafe(s):
  """Escapes <, >, and & from s, and removes XML 1.0-illegal chars.

  Args:
    s: A string to sanitize.
  Returns:
    The sanitized string (always str, never bytes).
  """
  # Inline the escaping formerly done via cgi.escape(); the cgi module
  # was deprecated (PEP 594) and removed in Python 3.13.
  s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
  # Remove characters that cannot appear in an XML 1.0 document
  # (http://www.w3.org/TR/REC-xml/#charsets).
  #
  # NOTE: if there are problems with current solution, one may move to
  # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
  s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
  # Convert non-ascii characters to entities.  encode() yields bytes in
  # Python 3, so decode back to str (the original returned bytes here,
  # which corrupted the XML output of _WriteSimpleXMLElement).
  return s.encode('ascii', 'xmlcharrefreplace').decode('ascii')
def _WriteSimpleXMLElement(outfile, name, value, indent):
  """Writes a simple XML element.
  Args:
    outfile: File object we write the XML element to.
    name: A string, the name of XML element.
    value: A Python object, whose string representation will be used
      as the value of the XML element.
    indent: A string, prepended to each line of generated output.
  """
  value_str = _StrOrUnicode(value)
  # Booleans render lowercase ('true'/'false') to match the C++ flag library.
  if isinstance(value, bool):
    value_str = value_str.lower()
  outfile.write('%s<%s>%s</%s>\n'
                % (indent, name, _MakeXMLSafe(value_str), name))
class Flag(object):
  """Information about a command-line flag.

  'Flag' objects define the following fields:
    .name - the name for this flag
    .default - the default value for this flag
    .default_as_str - default value as repr'd string, e.g., "'true'" (or None)
    .value - the most recent parsed value of this flag; set by Parse()
    .help - a help string or None if no help is available
    .short_name - the single letter alias for this flag (or None)
    .boolean - if 'true', this flag does not accept arguments
    .present - true if this flag was parsed from command line flags.
    .parser - an ArgumentParser object
    .serializer - an ArgumentSerializer object
    .allow_override - the flag may be redefined without raising an error

  The only public method of a 'Flag' object is Parse(), but it is
  typically only called by a 'FlagValues' object.  The Parse() method is
  a thin wrapper around the 'ArgumentParser' Parse() method.  The parsed
  value is saved in .value, and the .present attribute is updated.  If
  this flag was already present, a FlagsError is raised.

  Parse() is also called during __init__ to parse the default value and
  initialize the .value attribute.  This enables other python modules to
  safely use flags even if the __main__ module neglects to parse the
  command line arguments.  The .present attribute is cleared after
  __init__ parsing.  If the default value is set to None, then the
  __init__ parsing step is skipped and the .value attribute is
  initialized to None.

  Note: The default value is also presented to the user in the help
  string, so it is important that it be a legal value for this flag.
  """

  def __init__(self, parser, serializer, name, default, help_string,
               short_name=None, boolean=0, allow_override=0):
    self.name = name
    if not help_string:
      # Guarantee .help is always a non-empty string for help output.
      help_string = '(no help available)'
    self.help = help_string
    self.short_name = short_name
    self.boolean = boolean
    self.present = 0
    self.parser = parser
    self.serializer = serializer
    self.allow_override = allow_override
    self.value = None
    self.validators = []
    # SetDefault() calls Unparse(), which parses the default value, so
    # .value and .default_as_str are also initialized here.
    self.SetDefault(default)

  def __hash__(self):
    # Identity-based hash, consistent with the identity-based __eq__.
    return hash(id(self))

  def __eq__(self, other):
    # Flags compare equal only to themselves.
    return self is other

  def __lt__(self, other):
    # Arbitrary but stable ordering (by id) so Flag objects are sortable.
    if isinstance(other, Flag):
      return id(self) < id(other)
    return NotImplemented

  def __GetParsedValueAsString(self, value):
    # Returns a repr'd string form of value for help text, or None.
    if value is None:
      return None
    if self.serializer:
      return repr(self.serializer.Serialize(value))
    if self.boolean:
      if value:
        return repr('true')
      else:
        return repr('false')
    return repr(_StrOrUnicode(value))

  def Parse(self, argument):
    """Parses argument with self.parser and stores the result in .value."""
    try:
      self.value = self.parser.Parse(argument)
    except ValueError as e:  # recast ValueError as IllegalFlagValue
      raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
    self.present += 1

  def Unparse(self):
    """Resets .value to the default and marks the flag as not present."""
    if self.default is None:
      self.value = None
    else:
      self.Parse(self.default)
    # Parse() incremented .present; a default is not "present on the
    # command line", so clear it.
    self.present = 0

  def Serialize(self):
    """Returns a command-line string that reproduces this flag's value."""
    if self.value is None:
      return ''
    if self.boolean:
      if self.value:
        return "--%s" % self.name
      else:
        return "--no%s" % self.name
    else:
      if not self.serializer:
        raise FlagsError("Serializer not present for flag %s" % self.name)
      return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))

  def SetDefault(self, value):
    """Changes the default value (and current value too) for this Flag."""
    # We can't allow a None override because it may end up not being
    # passed to C++ code when we're overriding C++ flags.  So we
    # cowardly bail out until someone fixes the semantics of trying to
    # pass None to a C++ flag.  See swig_flags.Init() for details on
    # this behavior.
    # TODO(olexiy): Users can directly call this method, bypassing all flags
    # validators (we don't have FlagValues here, so we can not check
    # validators).
    # The simplest solution I see is to make this method private.
    # Another approach would be to store reference to the corresponding
    # FlagValues with each flag, but this seems to be an overkill.
    if value is None and self.allow_override:
      raise DuplicateFlagCannotPropagateNoneToSwig(self.name)
    self.default = value
    self.Unparse()
    self.default_as_str = self.__GetParsedValueAsString(self.value)

  def Type(self):
    """Returns: a string that describes the type of this Flag."""
    # NOTE: we use strings, and not the types.*Type constants because
    # our flags can have more exotic types, e.g., 'comma separated list
    # of strings', 'whitespace separated list of strings', etc.
    return self.parser.Type()

  def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
    """Writes common info about this flag, in XML format.

    This is information that is relevant to all flags (e.g., name,
    meaning, etc.).  If you defined a flag that has some other pieces of
    info, then please override _WriteCustomInfoInXMLFormat.

    Please do NOT override this method.

    Args:
      outfile: File object we write to.
      module_name: A string, the name of the module that defines this flag.
      is_key: A boolean, True iff this flag is key for main module.
      indent: A string that is prepended to each generated line.
    """
    outfile.write(indent + '<flag>\n')
    inner_indent = indent + ' '
    if is_key:
      _WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
    _WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
    # Print flag features that are relevant for all flags.
    _WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
    if self.short_name:
      _WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
                             inner_indent)
    if self.help:
      _WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
    # The default flag value can either be represented as a string like on the
    # command line, or as a Python object.  We serialize this value in the
    # latter case in order to remain consistent.
    if self.serializer and not isinstance(self.default, str):
      default_serialized = self.serializer.Serialize(self.default)
    else:
      default_serialized = self.default
    _WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent)
    _WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
    _WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
    # Print extra flag features this flag may have.
    self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
    outfile.write(indent + '</flag>\n')

  def _WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Writes extra info about this flag, in XML format.

    "Extra" means "not already printed by WriteInfoInXMLFormat above."

    Args:
      outfile: File object we write to.
      indent: A string that is prepended to each generated line.
    """
    # Usually, the parser knows the extra details about the flag, so
    # we just forward the call to it.
    self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
# End of Flag definition
class _ArgumentParserCache(type):
"""Metaclass used to cache and share argument parsers among flags."""
_instances = {}
def __call__(mcs, *args, **kwargs):
"""Returns an instance of the argument parser cls.
This method overrides behavior of the __new__ methods in
all subclasses of ArgumentParser (inclusive). If an instance
for mcs with the same set of arguments exists, this instance is
returned, otherwise a new instance is created.
If any keyword arguments are defined, or the values in args
are not hashable, this method always returns a new instance of
cls.
Args:
args: Positional initializer arguments.
kwargs: Initializer keyword arguments.
Returns:
An instance of cls, shared or new.
"""
if kwargs:
return type.__call__(mcs, *args, **kwargs)
else:
instances = mcs._instances
key = (mcs,) + tuple(args)
try:
return instances[key]
except KeyError:
# No cache entry for key exists, create a new one.
return instances.setdefault(key, type.__call__(mcs, *args))
except TypeError:
# An object in args cannot be hashed, always return
# a new instance.
return type.__call__(mcs, *args)
class ArgumentParser(with_metaclass(_ArgumentParserCache, object)):
  """Base class used to parse and convert arguments.

  The Parse() method checks to make sure that the string argument is a
  legal value and convert it to a native type.  If the value cannot be
  converted, it should throw a 'ValueError' exception with a human
  readable explanation of why the value is illegal.

  Subclasses should also define a syntactic_help string which may be
  presented to the user to describe the form of the legal values.

  Argument parser classes must be stateless, since instances are cached
  and shared between flags.  Initializer arguments are allowed, but all
  member variables must be derived from initializer arguments only.
  """
  # Statelessness matters because _ArgumentParserCache shares one
  # instance among every flag built with the same constructor arguments.

  syntactic_help = ""

  def Parse(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument

  def Type(self):
    # Human-readable description of the parsed type, used in help/XML.
    return 'string'

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    # The base parser contributes no extra XML; subclasses may override.
    pass
class ArgumentSerializer(object):
  """Base class for generating string representations of a flag value."""

  def Serialize(self, value):
    # Default: the plain string form of the value.
    return _StrOrUnicode(value)
class ListSerializer(ArgumentSerializer):
  """Serializes a list of values into one delimiter-joined string."""

  def __init__(self, list_sep):
    # Separator placed between consecutive serialized items.
    self.list_sep = list_sep

  def Serialize(self, value):
    """Returns the items of value, stringified and joined by list_sep."""
    serialized_items = [_StrOrUnicode(item) for item in value]
    return self.list_sep.join(serialized_items)
# Flags validators
def RegisterValidator(flag_name,
                      checker,
                      message='Flag validation failed',
                      flag_values=FLAGS):
  """Adds a constraint, which will be enforced during program execution.

  The constraint is validated when flags are initially parsed, and after each
  change of the corresponding flag's value.

  Args:
    flag_name: string, name of the flag to be checked.
    checker: method to validate the flag.
      input - value of the corresponding flag (string, boolean, etc.
        This value will be passed to checker by the library).  See file's
        docstring for examples.
      output - Boolean.  Must return True if the validator constraint is
        satisfied; otherwise return False or raise
        gflags_validators.Error(desired_error_message).
    message: error text to be shown to the user if checker returns False.
      If checker raises gflags_validators.Error, message from the raised
      Error will be shown.
    flag_values: FlagValues object the flag is registered with.

  Raises:
    AttributeError: if flag_name is not registered as a valid flag name.
  """
  validator = gflags_validators.SimpleValidator(flag_name, checker, message)
  flag_values.AddValidator(validator)
def MarkFlagAsRequired(flag_name, flag_values=FLAGS):
  """Ensures that flag is not None during program execution.

  Registers a flag validator, which will follow usual validator rules.

  Args:
    flag_name: string, name of the flag
    flag_values: FlagValues object the flag is registered with.

  Raises:
    AttributeError: if flag_name is not registered as a valid flag name.
  """
  def _IsSet(value):
    # The flag is "required" in the sense that it must not remain None.
    return value is not None

  RegisterValidator(flag_name,
                    _IsSet,
                    message='Flag --%s must be specified.' % flag_name,
                    flag_values=flag_values)
def _RegisterBoundsValidatorIfNeeded(parser, name, flag_values):
  """Enforces lower and upper bounds for numeric flags.

  Args:
    parser: NumericParser (either FloatParser or IntegerParser).  Provides
      lower and upper bounds, and help text to display.
    name: string, name of the flag
    flag_values: FlagValues object the flag is registered with.
  """
  if parser.lower_bound is None and parser.upper_bound is None:
    # Unbounded parser: no validator needed.
    return

  def Checker(value):
    # None (flag unset) is always acceptable; bounds apply to real values.
    if value is not None and parser.IsOutsideBounds(value):
      raise gflags_validators.Error('%s is not %s' %
                                    (value, parser.syntactic_help))
    return True

  RegisterValidator(name,
                    Checker,
                    flag_values=flag_values)
# The DEFINE functions are explained in mode details in the module doc string.
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
           **args):
  """Registers a generic Flag object.

  NOTE: in the docstrings of all DEFINE* functions, "registers" is short
  for "creates a new flag and registers it".

  Auxiliary function: clients should use the specialized DEFINE_<type>
  function instead.

  Args:
    parser: ArgumentParser that is used to parse the flag arguments.
    name: A string, the flag name.
    default: The default value of the flag.
    help: A help string.
    flag_values: FlagValues object the flag will be registered with.
    serializer: ArgumentSerializer that serializes the flag value.
    args: Dictionary with extra keyword args that are passed to the
      Flag __init__.
  """
  flag = Flag(parser, serializer, name, default, help, **args)
  DEFINE_flag(flag, flag_values)
def DEFINE_flag(flag, flag_values=FLAGS):
  """Registers a 'Flag' object with a 'FlagValues' object.

  By default, the global FLAGS 'FlagValue' object is used.

  Typical users will use one of the more specialized DEFINE_xxx
  functions, such as DEFINE_string or DEFINE_integer.  But developers
  who need to create Flag objects themselves should use this function
  to register their flags.
  """
  # This goes through FlagValues.__setitem__, which performs the actual
  # registration (and duplicate handling).
  flag_values[flag.name] = flag
  if not isinstance(flag_values, FlagValues):
    # Some users pass funny values of flag_values (e.g., {}) in order to
    # avoid the flag registration (in the past, there used to be a
    # flag_values == FLAGS test here) and redefine flags with the same
    # name (e.g., debug).  To avoid breaking their code, we perform the
    # module bookkeeping only for real FlagValues objects.
    return
  # Tell flag_values who's defining the flag.
  module, module_name = _GetCallingModuleObjectAndName()
  flag_values._RegisterFlagByModule(module_name, flag)
  flag_values._RegisterFlagByModuleId(id(module), flag)
def _InternalDeclareKeyFlags(flag_names,
                             flag_values=FLAGS, key_flag_values=None):
  """Declares a flag as key for the calling module.

  Internal function.  User code should call DECLARE_key_flag or
  ADOPT_module_key_flags instead.

  Args:
    flag_names: A list of strings that are names of already-registered
      Flag objects.
    flag_values: A FlagValues object that the flags listed in
      flag_names have registered with (the value of the flag_values
      argument from the DEFINE_* calls that defined those flags).
      This should almost never need to be overridden.
    key_flag_values: A FlagValues object that (among possibly many
      other things) keeps track of the key flags for each module.
      Default None means "same as flag_values".  This should almost
      never need to be overridden.

  Raises:
    UnrecognizedFlagError: when we refer to a flag that was not
      defined yet.
  """
  key_flag_values = key_flag_values or flag_values
  calling_module = _GetCallingModule()
  for name in flag_names:
    if name not in flag_values:
      raise UnrecognizedFlagError(name)
    registered_flag = flag_values.FlagDict()[name]
    key_flag_values._RegisterKeyFlagForModule(calling_module, registered_flag)
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
  """Declares one flag as key to the current module.

  Key flags are flags that are deemed really important for a module.
  They are important when listing help messages; e.g., if the
  --helpshort command-line flag is used, then only the key flags of the
  main module are listed (instead of all flags, as in the case of
  --help).

  Sample usage:

    gflags.DECLARE_key_flag('flag_1')

  Args:
    flag_name: A string, the name of an already declared flag.
      (Redeclaring flags as key, including flags implicitly key
      because they were declared in this module, is a no-op.)
    flag_values: A FlagValues object.  This should almost never
      need to be overridden.
  """
  if flag_name in _SPECIAL_FLAGS:
    # Take care of the special flags, e.g., --flagfile, --undefok.
    # These flags are defined in _SPECIAL_FLAGS, and are treated
    # specially during flag parsing, taking precedence over the
    # user-defined flags.
    _InternalDeclareKeyFlags([flag_name],
                             flag_values=_SPECIAL_FLAGS,
                             key_flag_values=flag_values)
    return
  _InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
def ADOPT_module_key_flags(module, flag_values=FLAGS):
  """Declares that all flags key to a module are key to the current module.

  Args:
    module: A module object.
    flag_values: A FlagValues object.  This should almost never need
      to be overridden.

  Raises:
    FlagsError: When given an argument that is a module name (a
      string), instead of a module object.
  """
  # NOTE(salcianu): an even better test would be if not
  # isinstance(module, types.ModuleType) but I didn't want to import
  # types for such a tiny use.
  if isinstance(module, str):
    raise FlagsError('Received module name %s; expected a module object.'
                     % module)
  _InternalDeclareKeyFlags(
      [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
      flag_values=flag_values)
  # If module is this flag module, take _SPECIAL_FLAGS into account.
  if module == _GetThisModuleObjectAndName()[0]:
    _InternalDeclareKeyFlags(
        # As we associate flags with _GetCallingModuleObjectAndName(), the
        # special flags defined in this module are incorrectly registered with
        # a different module.  So, we can't use _GetKeyFlagsForModule.
        # Instead, we take all flags from _SPECIAL_FLAGS (a private
        # FlagValues, where no other module should register flags).
        [f.name for f in list(_SPECIAL_FLAGS.FlagDict().values())],
        flag_values=_SPECIAL_FLAGS,
        key_flag_values=flag_values)
#
# STRING FLAGS
#
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value can be any string."""
  # The base parser accepts anything; the base serializer stringifies it.
  DEFINE(ArgumentParser(), name, default, help, flag_values,
         ArgumentSerializer(), **args)
#
# BOOLEAN FLAGS
#
class BooleanParser(ArgumentParser):
  """Parser of boolean values."""

  def Convert(self, argument):
    """Converts the argument to a boolean; raises ValueError on errors.

    Accepts actual booleans, the ints 0/1, and (case-insensitively) the
    strings 'true'/'t'/'1' and 'false'/'f'/'0'.
    """
    if isinstance(argument, str):
      # Compute the lowercase form once instead of per membership test.
      lowered = argument.lower()
      if lowered in ('true', 't', '1'):
        return True
      if lowered in ('false', 'f', '0'):
        return False
    bool_argument = bool(argument)
    if argument == bool_argument:
      # The argument is a valid boolean (True, False, 0, or 1), and not just
      # something that always converts to bool (list, string, int, etc.).
      return bool_argument
    raise ValueError('Non-boolean argument to boolean flag', argument)

  def Parse(self, argument):
    """Returns the converted boolean value of argument."""
    return self.Convert(argument)

  def Type(self):
    return 'bool'
class BooleanFlag(Flag):
  """Basic boolean flag.

  Boolean flags do not take any arguments, and their value is either
  True (1) or False (0).  The false value is specified on the command
  line by prepending the word 'no' to either the long or the short flag
  name.

  For example, if a Boolean flag was created whose long name was
  'update' and whose short name was 'x', then this flag could be
  explicitly unset through either --noupdate or --nox.
  """

  def __init__(self, name, default, help, short_name=None, **args):
    p = BooleanParser()
    # boolean=1 marks the flag as taking no argument on the command line.
    Flag.__init__(self, p, None, name, default, help, short_name, 1, **args)
    if not self.help: self.help = "a boolean value"
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
  """Registers a boolean flag.

  Such a boolean flag does not take an argument.  If a user wants to
  specify a false value explicitly, the long option beginning with 'no'
  must be used: i.e. --noflag

  This flag will have a value of None, True or False.  None is possible
  if default=None and the user does not specify the flag on the command
  line.
  """
  DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)

# Match C++ API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean
class HelpFlag(BooleanFlag):
  """
  HelpFlag is a special boolean flag that prints usage information and
  raises a SystemExit exception if it is ever found in the command
  line arguments.  Note this is called with allow_override=1, so other
  apps can define their own --help flag, replacing this one, if they want.
  """

  def __init__(self):
    BooleanFlag.__init__(self, "help", 0, "show this help",
                         short_name="?", allow_override=1)

  def Parse(self, arg):
    # Only act when the flag was actually set (e.g. --help, not --nohelp).
    if arg:
      doc = sys.modules["__main__"].__doc__
      flags = str(FLAGS)
      # Fall back to a generic usage line if __main__ has no docstring.
      print(doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0]))
      if flags:
        print("flags:")
        print(flags)
      # Exit after printing help.
      sys.exit(1)
class HelpXMLFlag(BooleanFlag):
  """Similar to HelpFlag, but generates output in XML format."""

  def __init__(self):
    BooleanFlag.__init__(self, 'helpxml', False,
                         'like --help, but generates XML output',
                         allow_override=1)

  def Parse(self, arg):
    # Only act when the flag was actually set; print XML help and exit.
    if arg:
      FLAGS.WriteHelpInXMLFormat(sys.stdout)
      sys.exit(1)
class HelpshortFlag(BooleanFlag):
  """
  HelpshortFlag is a special boolean flag that prints usage
  information for the "main" module, and raises a SystemExit exception
  if it is ever found in the command line arguments.  Note this is
  called with allow_override=1, so other apps can define their own
  --helpshort flag, replacing this one, if they want.
  """

  def __init__(self):
    BooleanFlag.__init__(self, "helpshort", 0,
                         "show usage only for this module", allow_override=1)

  def Parse(self, arg):
    # Only act when the flag was actually set; print the main module's
    # help and exit.
    if arg:
      doc = sys.modules["__main__"].__doc__
      flags = FLAGS.MainModuleHelp()
      print(doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0]))
      if flags:
        print("flags:")
        print(flags)
      sys.exit(1)
#
# Numeric parser - base class for Integer and Float parsers
#
class NumericParser(ArgumentParser):
  """Parser of numeric values.

  Parsed value may be bounded to a given upper and lower bound.
  """
  # NOTE(review): lower_bound/upper_bound are assumed to be set by
  # subclasses' __init__ — this base class never defines them itself.

  def IsOutsideBounds(self, val):
    """Returns True iff val violates a configured bound."""
    if self.lower_bound is not None and val < self.lower_bound:
      return True
    if self.upper_bound is not None and val > self.upper_bound:
      return True
    return False

  def Parse(self, argument):
    """Converts argument and enforces the bounds; raises ValueError."""
    converted = self.Convert(argument)
    if self.IsOutsideBounds(converted):
      raise ValueError("%s is not %s" % (converted, self.syntactic_help))
    return converted

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Emits a <lower_bound>/<upper_bound> element for each set bound."""
    for tag, bound in (('lower_bound', self.lower_bound),
                       ('upper_bound', self.upper_bound)):
      if bound is not None:
        _WriteSimpleXMLElement(outfile, tag, bound, indent)

  def Convert(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument
# End of Numeric Parser
#
# FLOAT FLAGS
#
|
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = "a"
number_name = "number"
syntactic_help = " ".join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(FloatParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
elif lower_bound == 0:
sh = "a non-negative %s" % self.number_name
elif upper_bound == 0:
sh = "a non-positive %s" % self.number_name
elif upper_bound is not None:
sh = "%s <= %s" % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = "%s >= %s" % (self.number_name, lower_bound)
self.syntactic_help = sh
def Convert(self, argument):
"""Converts argument to a float; raises ValueError on errors."""
return float(argument)
def Type(self):
return 'float'
# End of FloatParser
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
                 flag_values=FLAGS, **args):
  """Registers a flag whose value must be a float.

  If lower_bound or upper_bound are set, then this flag must be
  within the given range.

  Args:
    name: A string, the flag name.
    default: The default value of the flag.
    help: A help string.
    lower_bound: float, minimum allowed value, or None for unbounded.
    upper_bound: float, maximum allowed value, or None for unbounded.
    flag_values: FlagValues object the flag will be registered with.
    args: Extra keyword args passed to Flag.__init__.
  """
  parser = FloatParser(lower_bound, upper_bound)
  serializer = ArgumentSerializer()
  DEFINE(parser, name, default, help, flag_values, serializer, **args)
  # Re-check the bounds on every later assignment too, not only at parse.
  _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# INTEGER FLAGS
#
class IntegerParser(NumericParser):
  """Parser of an integer value.

  Parsed value may be bounded to a given upper and lower bound.
  """
  number_article = "an"
  number_name = "integer"
  syntactic_help = " ".join((number_article, number_name))

  def __init__(self, lower_bound=None, upper_bound=None):
    super(IntegerParser, self).__init__()
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    # Build the most specific help phrase the bounds allow.
    sh = self.syntactic_help
    if lower_bound is not None and upper_bound is not None:
      sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
    elif lower_bound == 1:
      sh = "a positive %s" % self.number_name
    elif upper_bound == -1:
      sh = "a negative %s" % self.number_name
    elif lower_bound == 0:
      sh = "a non-negative %s" % self.number_name
    elif upper_bound == 0:
      sh = "a non-positive %s" % self.number_name
    elif upper_bound is not None:
      sh = "%s <= %s" % (self.number_name, upper_bound)
    elif lower_bound is not None:
      sh = "%s >= %s" % (self.number_name, lower_bound)
    self.syntactic_help = sh

  def Convert(self, argument):
    """Converts argument to an int; raises ValueError on errors.

    String arguments are parsed as decimal, or as hexadecimal when the
    (optionally signed) value starts with '0x' or '0X'.
    """
    __pychecker__ = 'no-returnvalues'
    if isinstance(argument, str):
      # The previous check only recognized a lowercase '0x' prefix and
      # missed signed hex values such as '-0x10'; look past an optional
      # sign and accept either case.
      magnitude = argument[1:] if argument[:1] in ('+', '-') else argument
      base = 16 if magnitude[:2].lower() == '0x' else 10
      return int(argument, base)
    else:
      return int(argument)

  def Type(self):
    return 'int'
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
                   flag_values=FLAGS, **args):
  """Registers a flag whose value must be an integer.

  If lower_bound, or upper_bound are set, then this flag must be
  within the given range.

  Args:
    name: A string, the flag name.
    default: The default value of the flag.
    help: A help string.
    lower_bound: int, minimum allowed value, or None for unbounded.
    upper_bound: int, maximum allowed value, or None for unbounded.
    flag_values: FlagValues object the flag will be registered with.
    args: Extra keyword args passed to Flag.__init__.
  """
  parser = IntegerParser(lower_bound, upper_bound)
  serializer = ArgumentSerializer()
  DEFINE(parser, name, default, help, flag_values, serializer, **args)
  # Re-check the bounds on every later assignment too, not only at parse.
  _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# ENUM FLAGS
#
class EnumParser(ArgumentParser):
  """Parser of a string enum value (a string value from a given set).

  If enum_values (see below) is not specified, any string is allowed.
  """

  def __init__(self, enum_values=None):
    super(EnumParser, self).__init__()
    # None (or empty) means "accept any string".
    self.enum_values = enum_values

  def Parse(self, argument):
    """Returns argument unchanged if allowed; raises ValueError otherwise."""
    if not self.enum_values:
      return argument
    if argument in self.enum_values:
      return argument
    raise ValueError("value should be one of <%s>" %
                     "|".join(self.enum_values))

  def Type(self):
    return 'string enum'
class EnumFlag(Flag):
  """Basic enum flag; its value can be any string from list of enum_values."""

  def __init__(self, name, default, help, enum_values=None,
               short_name=None, **args):
    enum_values = enum_values or []
    p = EnumParser(enum_values)
    g = ArgumentSerializer()
    Flag.__init__(self, p, g, name, default, help, short_name, **args)
    if not self.help: self.help = "an enum string"
    # Surface the set of legal values directly in the help text.
    self.help = "<%s>: %s" % ("|".join(enum_values), self.help)

  def _WriteCustomInfoInXMLFormat(self, outfile, indent):
    # Emit one <enum_value> element per legal value.
    for enum_value in self.parser.enum_values:
      _WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
                **args):
  """Registers a flag whose value can be any string from enum_values."""
  flag = EnumFlag(name, default, help, enum_values, **args)
  DEFINE_flag(flag, flag_values)
#
# LIST FLAGS
#
class BaseListParser(ArgumentParser):
  """Base class for a parser of lists of strings.

  To extend, inherit from this class; from the subclass __init__, call

    BaseListParser.__init__(self, token, name)

  where token is a character used to tokenize, and name is a description
  of the separator.
  """

  def __init__(self, token=None, name=None):
    assert name
    super(BaseListParser, self).__init__()
    self._token = token
    self._name = name
    self.syntactic_help = "a %s separated list" % self._name

  def Parse(self, argument):
    """Returns argument as a list of stripped, token-separated strings."""
    if isinstance(argument, list):
      # Already a list (e.g. a list-valued default): pass through.
      return argument
    if argument == '':
      return []
    return [piece.strip() for piece in argument.split(self._token)]

  def Type(self):
    return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
  """Parser for a comma-separated list of strings."""

  def __init__(self):
    BaseListParser.__init__(self, ',', 'comma')

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
    # Advertise the separator so tools reading the XML can split values.
    _WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
class WhitespaceSeparatedListParser(BaseListParser):
  """Parser for a whitespace-separated list of strings."""

  def __init__(self):
    # token=None makes str.split() split on any run of whitespace.
    BaseListParser.__init__(self, None, 'whitespace')

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
    # Emit one <list_separator> per whitespace character, in sorted order
    # so the output is deterministic.  (Previously the sorted list was
    # computed but never used: the loop iterated string.whitespace
    # directly, leaving the sort dead code.)
    separators = sorted(string.whitespace)
    for ws_char in separators:
      _WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent)
def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value is a comma-separated list of strings."""
  DEFINE(ListParser(), name, default, help, flag_values,
         ListSerializer(','), **args)
def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value is a whitespace-separated list of strings.

  Any whitespace can be used as a separator.
  """
  DEFINE(WhitespaceSeparatedListParser(), name, default, help, flag_values,
         ListSerializer(' '), **args)
#
# MULTI FLAGS
#
class MultiFlag(Flag):
  """A flag that can appear multiple time on the command-line.

  The value of such a flag is a list that contains the individual values
  from all the appearances of that flag on the command-line.

  See the __doc__ for Flag for most behavior of this class.  Only
  differences in behavior are described here:

    * The default value may be either a single value or a list of values.
      A single value is interpreted as the [value] singleton list.

    * The value of the flag is always a list, even if the option was
      only supplied once, and even if the default value is a single
      value
  """

  def __init__(self, *args, **kwargs):
    Flag.__init__(self, *args, **kwargs)
    self.help += ';\n repeat this option to specify a list of values'

  def Parse(self, arguments):
    """Parses one or more arguments with the installed parser.

    Args:
      arguments: a single argument or a list of arguments (typically a
        list of default values); a single argument is converted
        internally into a list containing one item.
    """
    if not isinstance(arguments, list):
      # Default value may be a list of values.  Most other arguments
      # will not be, so convert them into a single-item list to make
      # processing simpler below.
      arguments = [arguments]

    if self.present:
      # keep a backup reference to list of previously supplied option values
      values = self.value
    else:
      # "erase" the defaults with an empty list
      values = []

    for item in arguments:
      # have Flag superclass parse argument, overwriting self.value reference
      Flag.Parse(self, item)  # also increments self.present
      values.append(self.value)

    # put list of option values back in the 'value' attribute
    self.value = values

  def Serialize(self):
    """Serializes each stored value and joins them with spaces."""
    if not self.serializer:
      raise FlagsError("Serializer not present for flag %s" % self.name)
    if self.value is None:
      return ''

    s = ''

    multi_value = self.value

    # Flag.Serialize() reads self.value, so temporarily rebind it to each
    # individual item and restore the full list afterwards.
    for self.value in multi_value:
      if s: s += ' '
      s += Flag.Serialize(self)

    self.value = multi_value

    return s

  def Type(self):
    return 'multi ' + self.parser.Type()
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
                 **args):
  """Registers a generic MultiFlag that parses its args with a given parser.

  Auxiliary function.  Normal users should NOT use it directly.

  Developers who need to create their own 'Parser' classes for options
  which can appear multiple times can call this module function to
  register their flags.
  """
  flag = MultiFlag(parser, serializer, name, default, help, **args)
  DEFINE_flag(flag, flag_values)
def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value can be a list of any strings.

  Use the flag on the command line multiple times to place multiple
  string values into the list.  The 'default' may be a single string
  (which will be converted into a single-element list) or a list of
  strings.
  """
  DEFINE_multi(ArgumentParser(), ArgumentSerializer(), name, default, help,
               flag_values, **args)
def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
                     flag_values=FLAGS, **args):
  """Registers a flag whose value can be a list of arbitrary integers.

  Use the flag on the command line multiple times to place multiple
  integer values into the list.  The 'default' may be a single integer
  (which will be converted into a single-element list) or a list of
  integers.
  """
  DEFINE_multi(IntegerParser(lower_bound, upper_bound), ArgumentSerializer(),
               name, default, help, flag_values, **args)
def DEFINE_multi_float(name, default, help, lower_bound=None, upper_bound=None,
                       flag_values=FLAGS, **args):
  """Registers a flag whose value can be a list of arbitrary floats.

  Use the flag on the command line multiple times to place multiple
  float values into the list.  The 'default' may be a single float
  (which will be converted into a single-element list) or a list of
  floats.
  """
  DEFINE_multi(FloatParser(lower_bound, upper_bound), ArgumentSerializer(),
               name, default, help, flag_values, **args)
# Now register the flags that we want to exist in all applications.
# These are all defined with allow_override=1, so user-apps can use
# these flagnames for their own purposes, if they want.
DEFINE_flag(HelpFlag())
DEFINE_flag(HelpshortFlag())
DEFINE_flag(HelpXMLFlag())

# Define special flags here so that help may be generated for them.
# NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module.
_SPECIAL_FLAGS = FlagValues()

# --flagfile and --undefok live in their own private FlagValues registry;
# DECLARE_key_flag treats them specially, taking precedence over
# user-defined flags of the same name.
DEFINE_string(
    'flagfile', "",
    "Insert flag definitions from the given file into the command line.",
    _SPECIAL_FLAGS)
DEFINE_string(
    'undefok', "",
    "comma-separated list of flag names that it is okay to specify "
    "on the command line even if the program does not define a flag "
    "with that name. IMPORTANT: flags in this list that have "
"arguments MUST use the --flag=value format.", _SPECIAL_FLAGS) | class FloatParser(NumericParser):
"""Parser of floating point values. |
update.py | # -*- coding: utf-8 -*-
# Upside Travel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import boto3
import clamav
from common import AV_DEFINITION_PATH
from common import AV_DEFINITION_S3_BUCKET
from common import AV_DEFINITION_S3_PREFIX
from common import CLAMAVLIB_PATH
from common import get_timestamp
import shutil
def | (event, context):
# s3 = boto3.resource("s3")
s3_client = boto3.client("s3")
print("Script starting at %s\n" % (get_timestamp()))
for root, dirs, files in os.walk(AV_DEFINITION_PATH):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
# to_download = clamav.update_defs_from_s3(
# s3_client, AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX
# )
print("Skipping clamav definition download %s\n" % (get_timestamp()))
# for download in to_download.values():
# s3_path = download["s3_path"]
# local_path = download["local_path"]
# print("Downloading definition file %s from s3://%s" % (local_path, s3_path))
# s3.Bucket(AV_DEFINITION_S3_BUCKET).download_file(s3_path, local_path)
# print("Downloading definition file %s complete!" % (local_path))
clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
# If main.cvd gets updated (very rare), we will need to force freshclam
# to download the compressed version to keep file sizes down.
# The existence of main.cud is the trigger to know this has happened.
if os.path.exists(os.path.join(AV_DEFINITION_PATH, "main.cud")):
os.remove(os.path.join(AV_DEFINITION_PATH, "main.cud"))
if os.path.exists(os.path.join(AV_DEFINITION_PATH, "main.cvd")):
os.remove(os.path.join(AV_DEFINITION_PATH, "main.cvd"))
clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
clamav.upload_defs_to_s3(
s3_client, AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX, AV_DEFINITION_PATH
)
print("Script finished at %s\n" % get_timestamp())
| lambda_handler |
utils.py | """
Utilities Functions.
Copyright (c) 2014 NavPy Developers. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in
LICENSE.txt
"""
import numpy as _np
import sys
def input_check_Nx1(x):
    """Validate that ``x`` is an N x 1 (or 1 x N) array and flatten it.

    Returns the flattened 1-D array together with its element count N.
    A one-element 1-D input collapses to a plain scalar.

    Adhika Lie
    """
    arr = _np.atleast_1d(x)
    shape = _np.shape(arr)
    if len(shape) > 1:
        # Two-dimensional input: exactly one of the dimensions must be 1.
        if shape[0] != 1 and shape[1] != 1:
            raise ValueError('Not an N x 1 array')
        # Flatten to a 1-D array of the same total size.
        arr = arr.reshape(_np.size(arr))
    elif shape[0] == 1:
        # A single-element 1-D array collapses to a scalar.
        arr = arr[0]
    return arr, _np.size(arr)
def input_check_Nx3(x):
    """Validate that ``x`` is an N x 3 array and normalize its layout.

    A 3 x N input is transposed to N x 3, and a single row comes back as
    a 1-D array of length 3.  Returns the array and the row count N.

    Adhika Lie
    """
    arr = _np.atleast_2d(x)
    shape = _np.shape(arr)
    if len(shape) > 1:
        # Reject anything that is neither N x 3 nor 3 x N.
        if shape[0] != 3 and shape[1] != 3:
            raise ValueError('Not a N x 3 array')
        # Normalize a 3 x N layout to N x 3.
        if shape[1] != 3:
            arr = arr.T
        N = arr.shape[0]
        # A single row collapses to a 1-D, length-3 array.
        if arr.shape[0] == 1:
            arr = arr.reshape(arr.shape[1])
    return arr, N
def loadtxt2dic(filename):
    """Load a text file of key/value pairs into a dictionary.

    Usage notes:
    -Lines begining with '#' are treated as comments and skipped.
    -Blank lines are also skipped
    -Keys and values should be separated by '=' or ':', extra spaces are fine.
    -A line ending with ',' continues on the next line.
    -List values become numpy arrays; values that cannot be evaluated are
     kept as plain strings.

    Hamid M. (original)
    Adhika Lie

    WARNING: values are interpreted with eval(), which executes arbitrary
    Python code -- only load trusted files.
    """
    param = {}
    prev_line = ''
    # 'with' guarantees the file is closed even if parsing raises.
    with open(filename, 'r') as fid:
        for line in fid:
            # Join any pending continuation line, then trim whitespace.
            line = prev_line + line.strip()
            # Skip lines beginning with # or blank
            # (Python treats '' as False).
            if line.startswith('#') or line.startswith('\n') or (not line):
                continue
            # If line ends with a comma, it continues to the next line.
            if line.endswith(','):
                prev_line = line.strip()
                continue
            prev_line = ''
            # Drop any trailing inline comment, then split "key value" once.
            item = line.split('#', 1)[0].strip()
            item = item.replace(':', ' ').replace('=', ' ').split(None, 1)
            # Skip empty or value-less entries (the original code crashed
            # with IndexError on a bare key).
            if len(item) < 2:
                continue
            key, raw_value = item
            try:
                # Evaluate once and reuse (the original evaluated the same
                # text up to three times per entry).
                value = eval(raw_value)
                if isinstance(value, list):
                    value = _np.array(value)
                param[key] = value
            except (NameError, SyntaxError):
                # Unquoted words (e.g. "mode fast") are kept as strings
                # instead of raising.
                param[key] = raw_value
    return param
def ask_ok(prompt, retries=4, complaint='Yes or no, please!'):
    """Prompt the user for a 'yes' or 'no' response.

    Returns True for yes-like answers ('y', 'ye', 'yes', '1') and False
    for no-like answers ('n', 'no', 'nop', 'nope', '0'), case-insensitive.
    After ``retries`` invalid answers an IOError is raised.

    Taken from Python Documentation with small modifications
    http://docs.python.org/tutorial/controlflow.html
    Example:
    >>> ask_ok('Do you really want to quit?')

    Hamid M. May 2012
    """
    while True:
        # BUG FIX: raw_input() is Python 2 only and raises NameError on
        # Python 3; input() is the Python 3 equivalent.
        ok = input(prompt).lower()
        if ok in ('y', 'ye', 'yes', '1'):
            return True
        if ok in ('n', 'no', 'nop', 'nope', '0'):
            return False
        retries = retries - 1
        if retries < 0:
            raise IOError('refusenik user')
        print(complaint)
def status_update(i, drl, message, bar_length=20):
    """Render a carriage-return progress bar plus a short status message.

    Parameters
    ----------
    i : current position in the record count
    drl : total data record length
    message : short status line (a trailing newline is appended if missing)
    bar_length : number of characters making up the bar itself

    Adhika Lie
    """
    fraction = float(i) / drl
    filled = int(round(fraction * bar_length))
    bar = '#' * filled + ' ' * (bar_length - filled)
    text = message
    # Guarantee a non-empty message ends with a newline so the bar
    # redraws cleanly on the next call.
    if text and not text.endswith('\n'):
        text += '\n'
    sys.stdout.write("\r[%s] %3d%% :: %s" %
                     (bar, int(round(fraction * 100)), text))
    sys.stdout.flush()
model_bt_assembly_transform_definition_params.go | /*
* Onshape REST API
*
* The Onshape REST API consumed by all clients.
*
* API version: 1.113
* Contact: [email protected]
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package onshape
import (
"encoding/json"
)
// BTAssemblyTransformDefinitionParams struct for BTAssemblyTransformDefinitionParams
type BTAssemblyTransformDefinitionParams struct {
IsRelative *bool `json:"isRelative,omitempty"`
Occurrences *[]BTOccurrence74 `json:"occurrences,omitempty"`
Transform *[]float64 `json:"transform,omitempty"`
}
// NewBTAssemblyTransformDefinitionParams instantiates a new BTAssemblyTransformDefinitionParams object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTAssemblyTransformDefinitionParams() *BTAssemblyTransformDefinitionParams {
this := BTAssemblyTransformDefinitionParams{}
return &this
}
// NewBTAssemblyTransformDefinitionParamsWithDefaults instantiates a new BTAssemblyTransformDefinitionParams object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTAssemblyTransformDefinitionParamsWithDefaults() *BTAssemblyTransformDefinitionParams {
this := BTAssemblyTransformDefinitionParams{}
return &this
}
// GetIsRelative returns the IsRelative field value if set, zero value otherwise.
func (o *BTAssemblyTransformDefinitionParams) GetIsRelative() bool {
if o == nil || o.IsRelative == nil |
return *o.IsRelative
}
// GetIsRelativeOk returns a tuple with the IsRelative field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTAssemblyTransformDefinitionParams) GetIsRelativeOk() (*bool, bool) {
if o == nil || o.IsRelative == nil {
return nil, false
}
return o.IsRelative, true
}
// HasIsRelative returns a boolean if a field has been set.
func (o *BTAssemblyTransformDefinitionParams) HasIsRelative() bool {
if o != nil && o.IsRelative != nil {
return true
}
return false
}
// SetIsRelative gets a reference to the given bool and assigns it to the IsRelative field.
func (o *BTAssemblyTransformDefinitionParams) SetIsRelative(v bool) {
o.IsRelative = &v
}
// GetOccurrences returns the Occurrences field value if set, zero value otherwise.
func (o *BTAssemblyTransformDefinitionParams) GetOccurrences() []BTOccurrence74 {
if o == nil || o.Occurrences == nil {
var ret []BTOccurrence74
return ret
}
return *o.Occurrences
}
// GetOccurrencesOk returns a tuple with the Occurrences field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTAssemblyTransformDefinitionParams) GetOccurrencesOk() (*[]BTOccurrence74, bool) {
if o == nil || o.Occurrences == nil {
return nil, false
}
return o.Occurrences, true
}
// HasOccurrences returns a boolean if a field has been set.
func (o *BTAssemblyTransformDefinitionParams) HasOccurrences() bool {
if o != nil && o.Occurrences != nil {
return true
}
return false
}
// SetOccurrences gets a reference to the given []BTOccurrence74 and assigns it to the Occurrences field.
func (o *BTAssemblyTransformDefinitionParams) SetOccurrences(v []BTOccurrence74) {
o.Occurrences = &v
}
// GetTransform returns the Transform field value if set, zero value otherwise.
func (o *BTAssemblyTransformDefinitionParams) GetTransform() []float64 {
if o == nil || o.Transform == nil {
var ret []float64
return ret
}
return *o.Transform
}
// GetTransformOk returns a tuple with the Transform field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTAssemblyTransformDefinitionParams) GetTransformOk() (*[]float64, bool) {
if o == nil || o.Transform == nil {
return nil, false
}
return o.Transform, true
}
// HasTransform returns a boolean if a field has been set.
func (o *BTAssemblyTransformDefinitionParams) HasTransform() bool {
if o != nil && o.Transform != nil {
return true
}
return false
}
// SetTransform gets a reference to the given []float64 and assigns it to the Transform field.
func (o *BTAssemblyTransformDefinitionParams) SetTransform(v []float64) {
o.Transform = &v
}
func (o BTAssemblyTransformDefinitionParams) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.IsRelative != nil {
toSerialize["isRelative"] = o.IsRelative
}
if o.Occurrences != nil {
toSerialize["occurrences"] = o.Occurrences
}
if o.Transform != nil {
toSerialize["transform"] = o.Transform
}
return json.Marshal(toSerialize)
}
type NullableBTAssemblyTransformDefinitionParams struct {
value *BTAssemblyTransformDefinitionParams
isSet bool
}
func (v NullableBTAssemblyTransformDefinitionParams) Get() *BTAssemblyTransformDefinitionParams {
return v.value
}
func (v *NullableBTAssemblyTransformDefinitionParams) Set(val *BTAssemblyTransformDefinitionParams) {
v.value = val
v.isSet = true
}
func (v NullableBTAssemblyTransformDefinitionParams) IsSet() bool {
return v.isSet
}
func (v *NullableBTAssemblyTransformDefinitionParams) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableBTAssemblyTransformDefinitionParams(val *BTAssemblyTransformDefinitionParams) *NullableBTAssemblyTransformDefinitionParams {
return &NullableBTAssemblyTransformDefinitionParams{value: val, isSet: true}
}
func (v NullableBTAssemblyTransformDefinitionParams) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableBTAssemblyTransformDefinitionParams) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
| {
var ret bool
return ret
} |
deaths.py | """Deaths indicators."""
from etl.common import to_json_stat, write_to_file
from etl.config_deaths import deaths_cfg as cfg
from etlstat.extractor.extractor import xlsx
import json
import pandas as pd
def transform(df, periods, prefix=''):
    """Slice dataframe. Generate time period column.

    Builds a '<prefix>YYYY-MM' label from the 'Año' and 'Mes' columns,
    drops the originals, renames the label column back to 'Mes', and
    returns the last ``periods`` rows rounded to 2 decimals.

    df (dataframe): dataset
    periods (int): number of time periods
    prefix (str): prefix for time periods

    NOTE(review): mutates the caller's ``df`` in place (drop/rename use
    inplace=True) before returning the sliced frame.  Row access via
    ``df.loc[i, ...]`` assumes a default RangeIndex 0..len-1 — confirm
    against callers.
    """
    for i in range(0, len(df)):
        period1 = str(df.loc[i, 'Año'])
        # Zero-pad the month to two digits (e.g. '3' -> '03').
        period2 = '{:0>2}'.format(df.loc[i, 'Mes'])
        df.loc[i, 'period'] = prefix + period1 + '-' + period2
    df.drop(columns={'Año', 'Mes'}, axis=1, inplace=True)
    df.rename(columns={'period': 'Mes'}, inplace=True)
    df = df.tail(periods)
    df = df.round(2)
    return df
def replace_month(json_str):
"" | # Read input files
data = xlsx(cfg.path.input)
# Datasets
df_global = pd.DataFrame()
indicators = []
for key in cfg.series:
print(key)
variables = [
'Año', 'Mes',
cfg.series[key].variables[0],
cfg.series[key].moving_avg[0]]
if (len(cfg.series[key].variables) == 2):
variables.append(cfg.series[key].variables[1])
variables.append(cfg.series[key].moving_avg[1])
df = data[cfg.file]\
[cfg.series[key].sheet][variables].copy()
# Drop NA rows, if any
df.dropna(axis=0, how='all', inplace=True)
# Rename variables
df.rename(
columns={
cfg.series[key].variables[0]: 'Cantabria',
cfg.series[key].moving_avg[0]: 'Cantabria_MM'},
inplace=True)
if (len(cfg.series[key].variables) == 2):
df.rename(
columns={
cfg.series[key].variables[1]: 'España',
cfg.series[key].moving_avg[1]: 'España_MM'},
inplace=True)
# Remove .0 from Año and Mes
df['Año'] = df['Año'].astype(str).replace('\.0', '', regex=True)
df['Mes'] = df['Mes'].astype(str).replace('\.0', '', regex=True)
# Merge global dataset
df_cant = df[['Año', 'Mes', 'Cantabria']].copy()
df_cant = transform(df_cant, cfg.periods.global_deaths, 'Cantabria - ')
df_cant.set_index('Mes', inplace=True)
df_cant = df_cant.transpose()
df_cant.insert(0, 'Categoria', cfg.series[key].category)
df_cant[' - Indicadores'] = cfg.series[key].label
if (len(cfg.series[key].variables) == 2):
df_esp = df[['Año', 'Mes', 'España']].copy()
df_esp = transform(df_esp, cfg.periods.global_deaths, 'España - ')
df_esp.set_index('Mes', inplace=True)
df_esp = df_esp.transpose()
df_esp[' - Indicadores'] = cfg.series[key].label
df_cant = df_cant.merge(df_esp, on=' - Indicadores')
indicators.append(df_cant)
# Generate JSON-Stat dataset
df = transform(df, cfg.periods.deaths)
vars = ['Cantabria', 'Cantabria_MM']
if (len(cfg.series[key].variables) == 2):
vars.append('España')
vars.append('España_MM')
json_file = to_json_stat(
df,
['Mes'],
vars,
cfg.series[key].source)
json_obj = json.loads(json_file)
json_obj['dimension']['Variables']['category']['unit'] = \
cfg.series[key].unit
json_obj['note'] = cfg.series[key].note
json_file = json.dumps(json_obj)
json_file = replace_month(json_file)
write_to_file(json_file, cfg.path.output + cfg.series[key].json)
# Generate CSV global dataset
df_global = pd.concat(indicators, axis=0, verify_integrity=False)
df_global.to_csv(cfg.path.output + cfg.globals.csv, index=False)
print('\nEnd of process. Files generated successfully.')
| "Replace month number by its name."""
json_str = json_str.replace('-01"', '-Ene"')
json_str = json_str.replace('-02"', '-Feb"')
json_str = json_str.replace('-03"', '-Mar"')
json_str = json_str.replace('-04"', '-Abr"')
json_str = json_str.replace('-05"', '-May"')
json_str = json_str.replace('-06"', '-Jun"')
json_str = json_str.replace('-07"', '-Jul"')
json_str = json_str.replace('-08"', '-Ago"')
json_str = json_str.replace('-09"', '-Sep"')
json_str = json_str.replace('-10"', '-Oct"')
json_str = json_str.replace('-11"', '-Nov"')
json_str = json_str.replace('-12"', '-Dic"')
return json_str
|
apps.py | from django.apps import AppConfig
class | (AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'basic_api'
| BasicApiConfig |
main.go | package main
import (
"encoding/json"
"errors" | "fmt"
"os"
)
// extractChildlen returns the child value stored under key in the given
// raw JSON document.  The document must decode to a JSON object that
// contains the key; otherwise an error is returned.
//
// NOTE: the (misspelled) exported-style name is kept unchanged so
// existing callers continue to compile.
func extractChildlen(key string, raw []byte) (interface{}, error) {
	var root interface{}
	err := json.Unmarshal(raw, &root)
	if err != nil {
		// BUG FIX: return the error instead of panicking — the function
		// already has an error result for callers to inspect.
		return nil, err
	}
	n, ok := root.(map[string]interface{})
	if !ok {
		return nil, errors.New("invalid structure")
	}
	e, ok := n[key]
	if !ok {
		return nil, errors.New("invalid structure")
	}
	return e, nil
}
// main demonstrates extractChildlen: it pulls the "hoge" array out of a
// small inline JSON document and prints it.  Decode/lookup failures are
// reported on stderr and terminate the process with exit status 1.
func main() {
	jsonStr := []byte(`{
	"hoge": [
		"a", "b", "c"
	]
	}`)
	e, err := extractChildlen("hoge", jsonStr)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(e)
}
0005_auto_20191213_1623.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-12-13 21:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('website', '0004_presenter_profile_image_thumb'),
]
operations = [
migrations.CreateModel(
name='EmbeddedVideo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('video_id', models.CharField(max_length=50)),
('public', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='EmbedEngine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('html_template', models.TextField(help_text='Use {{ID}} which will get swapped in for the EmbeddedVideo.video_id.')),
('url_help', models.CharField(blank=True, help_text='Used to help the user figure out where the video_id is.', max_length=100)),
],
),
migrations.AlterField(
model_name='presenter',
name='profile_image_thumb',
field=models.ImageField(blank=True, editable=False, upload_to='thumbs'),
),
migrations.AddField(
model_name='embeddedvideo',
name='engine', | field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='website.EmbedEngine'),
),
migrations.AddField(
model_name='presentation',
name='video',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='website.EmbeddedVideo'),
),
] | |
packet.py | from typing import List, Optional
from .constants import HEADER_LENGTH, VALID_HEADER_TYPES_FESL, VALID_HEADER_TYPES_THEATER, \
VALID_HEADER_ERROR_INDICATORS, HEADER_BYTE_ORDER
from .exceptions import Error
class Packet:
    """Base class for a binary protocol packet: a fixed-size header
    followed by a body.

    Header bytes 8-11 hold the length of the entire packet (header +
    body), written MSB-first by set_length_indicators() and read back via
    HEADER_BYTE_ORDER (assumed to be 'big' — TODO confirm against
    constants).  Transaction-id handling is protocol specific and is
    implemented by the subclasses via set_tid()/get_tid().
    """
    # Raw packet header (HEADER_LENGTH bytes once built/validated).
    header: bytes
    # Raw packet body, including the trailing b'\n\x00' "tail" added by build().
    body: bytes
    def __init__(self, header: bytes = b'', body: bytes = b''):
        self.header = header
        self.body = body
    @classmethod
    def build(cls, header_stub: bytes, body_data: bytes, tid: Optional[int] = None):
        """
        Build and return a new packet from a given header stub (first 8 header bytes) and the given body data
        :param header_stub: First 8 bytes of the packet header
        :param body_data: Data to use as packet body
        :param tid: Transaction id for packet
        :return: New packet with valid length indicators
        """
        # Reserve 4 zero bytes for the length indicators (filled in below).
        header = header_stub + b'\x00\x00\x00\x00'
        # Add "tail" to body
        body = body_data + b'\n\x00'
        self = cls(header, body)
        # Update transaction id if present
        if tid is not None:
            self.set_tid(tid)
        # Update length indicators
        self.set_length_indicators()
        return self
    def set_tid(self, tid: int) -> None:
        # Protocol specific; overridden by subclasses (no-op here).
        pass
    def get_tid(self) -> int:
        # Protocol specific; overridden by subclasses (returns None here).
        pass
    def set_length_indicators(self) -> None:
        """
        Set/update length indicators in packet header
        """
        # Determine total length of packet
        packet_length = len(self.header) + len(self.body)
        # Deconstruct header bytes into bytearray
        header_array = bytearray(self.header)
        # Write the 4-byte length (MSB-first) into header bytes 8-11.
        # NOTE(review): bytes 9 and 10 are not masked with & 255, so a
        # packet of 2**16 bytes or more would raise ValueError on the
        # bytearray assignment — presumably packets stay small; confirm.
        header_array[8] = packet_length >> 24
        header_array[9] = packet_length >> 16
        header_array[10] = packet_length >> 8
        header_array[11] = packet_length & 255
        # Update header
        self.header = bytes(header_array)
    def indicated_length(self) -> int:
        # Header bytes 8-11 indicate the length of the entire packet
        # (header + body), as written by set_length_indicators().
        return self.bytes2int(self.header[8:12])
    def indicated_body_length(self) -> int:
        """
        Get length of packet body as indicated by header (total indicated length - header length)
        :return: Indicated and expected length of packet body
        """
        return self.indicated_length() - len(self.header)
    def validate(self) -> None:
        # Header first: the body check relies on the header length bytes.
        self.validate_header()
        self.validate_body()
    def get_data(self) -> bytes:
        """
        Get packet data (body without any trailing \x00 byte)
        """
        return self.body[:-1] if len(self.body) > 0 and self.body[-1] == 0 else self.body
    def get_data_lines(self) -> List[bytes]:
        """
        Get packet data split into lines
        """
        return self.get_data().split(b'\n')
    @staticmethod
    def bytes2int(b: bytes) -> int:
        """Interpret ``b`` as an unsigned integer using the protocol byte order."""
        return int.from_bytes(b, byteorder=HEADER_BYTE_ORDER)
    def __str__(self):
        return (self.header + self.body).__str__()
    def __bytes__(self):
        # Serialized wire representation: header immediately followed by body.
        return self.header + self.body
    def validate_header(self) -> None:
        """
        Make sure the header contains 12 bytes and indicates a non-zero
        total packet length (bytes 8-11).  Subclasses extend this with
        protocol specific checks.
        """
        if not len(self.header) == HEADER_LENGTH or self.bytes2int(self.header[8:12]) <= 0:
            raise Error('Packet header is not valid')
    def validate_body(self) -> None:
        # Validate indicated length matches total length of received data
        if self.indicated_length() != len(self.header) + len(self.body):
            raise Error('Received packet with invalid body')
class FeslPacket(Packet):
    """FESL protocol packet: the transaction id lives in header bytes 5-7."""
    def set_tid(self, tid: int) -> None:
        """
        Set/update the transaction id/packet counter in packet header
        (stored MSB-first in header bytes 5-7).
        """
        # Deconstruct header bytes into bytearray
        header_array = bytearray(self.header)
        # Update transaction id bytes
        header_array[5] = tid >> 16 & 255
        header_array[6] = tid >> 8 & 255
        header_array[7] = tid & 255
        self.header = bytes(header_array)
    def get_tid(self) -> int:
        """
        Get transaction id from packet header
        :return: transaction id as int
        """
        return self.bytes2int(self.header[5:8])
    def validate_header(self) -> None:
        """
        Any valid FESL header also
        starts with a valid type (e.g. "rank") which is
        followed by a valid packet count indicator (\x00 = ping packet, \x80 = single packet, \xb0 = multi packet)
        """
        # FIX: this explanation previously sat *after* the super() call as
        # a bare (dead) string expression; it is now a real docstring.
        super().validate_header()
        valid = self.header[:4] in VALID_HEADER_TYPES_FESL and self.header[4] in [0, 128, 176]
        if not valid:
            raise Error('Packet header is not valid')
class | (Packet):
def set_tid(self, tid: int) -> None:
"""
Set/update the transaction id/packet counter in packet body (requires re-calculation of length indicators)
"""
# Remove body "tail", add tid and add "tail" again
self.body = self.body[:-2] + b'\nTID=' + str(tid).encode('utf8') + b'\n\x00'
def get_tid(self) -> int:
"""
Get transaction id from packet body
:return: transaction id as int
"""
lines = self.get_data_lines()
tid_line = next((line for line in lines if b'TID=' in line), b'')
tid_bytes = tid_line[4:]
if not tid_bytes.isalnum():
return 0
return int(tid_bytes)
def validate_header(self) -> None:
super().validate_header()
"""
Any valid theater header also
and
starts with a valid type (e.g. "GDAT") which is
or
followed by \x00\x00\x00\x00 (4 zero bytes, indicating no-error/success)
followed by a valid 4-byte error indicator (Theater indicates errors in header, not body)
Theater error response packet headers are treated as valid here because we do need to read their body in order
to not leave bytes "on the line". Also, they are not invalid responses just because they indicate an error.
"""
valid = (self.header[:4] in VALID_HEADER_TYPES_THEATER and
(self.bytes2int(self.header[4:8]) == 0 or self.header[4:8] in VALID_HEADER_ERROR_INDICATORS))
if not valid:
raise Error('Packet header is not valid')
| TheaterPacket |
goroutine_channel.go | package main
import "fmt"
func | (a []int, c chan int) {
total := 0
for _, v := range a {
total += v
}
c <- total // send total to c
}
// main splits the slice in half, sums each half in its own goroutine,
// and prints both partial sums plus their total.  The two receives pick
// up whichever goroutine finishes first, so the order of x and y is
// nondeterministic, but x+y is always the full sum.
func main() {
	a := []int{7, 2, 8, -9, 4, 0}
	c := make(chan int)
	go sum(a[:len(a)/2], c)
	go sum(a[len(a)/2:], c)
	x, y := <-c, <-c // receive from c
	fmt.Println(x, y, x+y)
}
| sum |
test_app.py | import pytest
from app import create_app
@pytest.fixture
def request_header_secret():
return "dev"
@pytest.fixture
def request_body_positive():
return {"query": "I am having a great day!"}
@pytest.fixture
def request_body_negative():
return {"query": "I am feeling sad today"}
@pytest.fixture
def http_error_METHOD_NOT_ALLOWED():
return 405
@pytest.fixture
def http_error_BAD_REQUEST():
return 400
@pytest.fixture
def http_OK():
return 200
@pytest.fixture
def flask_client():
app = create_app()
with app.test_client() as client:
yield client
## TESTS
#########
# Index/ Health Check Test
def test_health_check(flask_client):
res = flask_client.get("/")
assert b"up & running" in res.data
## OK REQUESTS Tests
####################
def test_predict_positive(flask_client, http_OK, request_body_positive, request_header_secret):
res = flask_client.post("/predict", json=request_body_positive, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_OK
assert b"POSITIVE" in res.data
def test_predict_negative(flask_client, http_OK, request_body_negative, request_header_secret):
res = flask_client.post("/predict", json=request_body_negative, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_OK
assert b"NEGATIVE" in res.data
## BAD REQUESTS Tests
####################
def test_GET_instead_POST(flask_client, http_error_METHOD_NOT_ALLOWED, request_header_secret):
res = flask_client.get("/predict", json={"query": ""}, headers={"Secret-Key": request_header_secret}) |
## Body
def test_None_body(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json=None, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
def test_empty_body(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json={}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
## Query
def test_none_query(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json={"query": None}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
def test_empty_query(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json={"query": ""}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
def test_non_string_numerical(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json={"query": 456123}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
def test_non_string_object(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json={"query": ["I am happy"]}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST | assert res.status_code == http_error_METHOD_NOT_ALLOWED |
cmp.spec.ts | import { Cmp273Component } from './cmp';
describe('Cmp273Component', () => {
it('should add', () => {
expect(new Cmp273Component().add273(1)).toBe(274);
}); | }); |
|
index.js | // @flow
import universal from 'react-universal-component';
import { ErrorState, Loading } from '../../components';
| export default universal(() => import('./Locations'), { loading: Loading, error: ErrorState }); | |
field.go | package apierror
// Required creates a new RequiredFieldError
func Required(field string) RequiredFieldError {
return RequiredFieldError{field: field}
}
// RequiredFieldError represents a missing required field in HTTP request
type RequiredFieldError struct {
field string
}
// Error returns user friendly message for RequiredFieldError
func (f RequiredFieldError) Error() string {
return "field is required: " + f.field
}
// Custom creates a new CustomFieldError
func Custom(field string, customReason string) CustomFieldError |
// CustomFieldError represents a custom error for a field in HTTP request
type CustomFieldError struct {
field string
customReason string
}
// Error returns user friendly message for CustomFieldError
func (f CustomFieldError) Error() string {
return "field is " + f.customReason + ": " + f.field
}
| {
return CustomFieldError{field: field, customReason: customReason}
} |
sharedaccesssignature.py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
from azure.cosmosdb.table.common._common_conversion import (
_sign_string,
)
from azure.cosmosdb.table.common.sharedaccesssignature import (
SharedAccessSignature,
_SharedAccessHelper,
_QueryStringConstants,
)
from ._constants import X_MS_VERSION
class TableSharedAccessSignature(SharedAccessSignature):
'''
Provides a factory for creating file and share access
signature tokens with a common account name and account key. Users can either
use the factory or can construct the appropriate service and use the
generate_*_shared_access_signature method directly.
'''
def __init__(self, account_name, account_key):
'''
:param str account_name:
The storage account name used to generate the shared access signatures.
:param str account_key:
The access key to generate the shares access signatures.
'''
super(TableSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
def generate_table(self, table_name, permission=None,
expiry=None, start=None, id=None,
ip=None, protocol=None,
start_pk=None, start_rk=None,
end_pk=None, end_rk=None):
'''
Generates a shared access signature for the table.
Use the returned signature with the sas_token parameter of TableService.
:param str table_name:
Name of table.
:param TablePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_table_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.cosmosdb.table.common.models.Protocol` for possible values.
:param str start_pk:
The minimum partition key accessible with this shared access
signature. startpk must accompany startrk. Key values are inclusive.
If omitted, there is no lower bound on the table entities that can
be accessed.
:param str start_rk:
The minimum row key accessible with this shared access signature.
startpk must accompany startrk. Key values are inclusive. If
omitted, there is no lower bound on the table entities that can be
accessed.
:param str end_pk:
The maximum partition key accessible with this shared access
signature. endpk must accompany endrk. Key values are inclusive. If
omitted, there is no upper bound on the table entities that can be
accessed.
:param str end_rk:
The maximum row key accessible with this shared access signature. | sas = _TableSharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol, X_MS_VERSION)
sas.add_id(id)
sas.add_table_access_ranges(table_name, start_pk, start_rk, end_pk, end_rk)
# Table names must be signed lower case
resource_path = table_name.lower()
sas.add_resource_signature(self.account_name, self.account_key, 'table', resource_path)
return sas.get_token()
class _TableQueryStringConstants( _QueryStringConstants):
TABLE_NAME = 'tn'
class _TableSharedAccessHelper(_SharedAccessHelper):
def __init__(self):
self.query_dict = {}
def add_table_access_ranges(self, table_name, start_pk, start_rk,
end_pk, end_rk):
self._add_query(_TableQueryStringConstants.TABLE_NAME, table_name)
self._add_query(_TableQueryStringConstants.START_PK, start_pk)
self._add_query(_TableQueryStringConstants.START_RK, start_rk)
self._add_query(_TableQueryStringConstants.END_PK, end_pk)
self._add_query(_TableQueryStringConstants.END_RK, end_rk)
def add_resource_signature(self, account_name, account_key, service, path):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
if path[0] != '/':
path = '/' + path
canonicalized_resource = '/' + service + '/' + account_name + path + '\n'
# Form the string to sign from shared_access_policy and canonicalized
# resource. The order of values is important.
string_to_sign = \
(get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(_QueryStringConstants.SIGNED_START) +
get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
canonicalized_resource +
get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
get_value_to_append(_QueryStringConstants.SIGNED_IP) +
get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
string_to_sign += \
(get_value_to_append(_QueryStringConstants.START_PK) +
get_value_to_append(_QueryStringConstants.START_RK) +
get_value_to_append(_QueryStringConstants.END_PK) +
get_value_to_append(_QueryStringConstants.END_RK))
# remove the trailing newline
if string_to_sign[-1] == '\n':
string_to_sign = string_to_sign[:-1]
self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key, string_to_sign)) | endpk must accompany endrk. Key values are inclusive. If omitted,
there is no upper bound on the table entities that can be accessed.
''' |
ext.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Experimental extensions to `std` for Windows.
//!
//! For now, this module is limited to extracting handles, file
//! descriptors, and sockets, but its functionality will grow over
//! time.
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "io_ext",
reason = "organization may change slightly and the primitives \
provided may be tweaked")]
pub mod io {
use fs;
use libc;
use net;
use sys_common::AsInner;
#[allow(deprecated)]
use old_io;
/// Raw HANDLEs.
pub type Handle = libc::HANDLE;
/// Raw SOCKETs.
pub type Socket = libc::SOCKET;
/// Extract raw handles.
pub trait AsRawHandle {
/// Extract the raw handle, without taking any ownership.
fn as_raw_handle(&self) -> Handle;
}
#[allow(deprecated)]
impl AsRawHandle for old_io::fs::File {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle()
}
}
impl AsRawHandle for fs::File {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle().raw()
}
}
#[allow(deprecated)]
impl AsRawHandle for old_io::pipe::PipeStream {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle()
}
}
#[allow(deprecated)]
impl AsRawHandle for old_io::net::pipe::UnixStream {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle()
}
}
#[allow(deprecated)]
impl AsRawHandle for old_io::net::pipe::UnixListener {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle()
}
}
#[allow(deprecated)]
impl AsRawHandle for old_io::net::pipe::UnixAcceptor {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle()
}
}
/// Extract raw sockets.
pub trait AsRawSocket {
fn as_raw_socket(&self) -> Socket;
}
#[allow(deprecated)]
impl AsRawSocket for old_io::net::tcp::TcpStream {
fn as_raw_socket(&self) -> Socket {
self.as_inner().fd()
}
}
#[allow(deprecated)]
impl AsRawSocket for old_io::net::tcp::TcpListener {
fn as_raw_socket(&self) -> Socket {
self.as_inner().socket()
}
}
#[allow(deprecated)]
impl AsRawSocket for old_io::net::tcp::TcpAcceptor {
fn as_raw_socket(&self) -> Socket {
self.as_inner().socket()
}
}
#[allow(deprecated)]
impl AsRawSocket for old_io::net::udp::UdpSocket {
fn as_raw_socket(&self) -> Socket {
self.as_inner().fd()
}
}
impl AsRawSocket for net::TcpStream {
fn as_raw_socket(&self) -> Socket { *self.as_inner().socket().as_inner() }
}
impl AsRawSocket for net::TcpListener {
fn as_raw_socket(&self) -> Socket { *self.as_inner().socket().as_inner() }
}
impl AsRawSocket for net::UdpSocket {
fn as_raw_socket(&self) -> Socket { *self.as_inner().socket().as_inner() }
}
}
/// Windows-specific extensions to the primitives in the `std::ffi` module.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod ffi {
use ffi::{OsString, OsStr};
use sys::os_str::Buf;
use sys_common::wtf8::Wtf8Buf;
use sys_common::{FromInner, AsInner};
pub use sys_common::wtf8::EncodeWide;
/// Windows-specific extensions to `OsString`.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait OsStringExt {
/// Create an `OsString` from a potentially ill-formed UTF-16 slice of
/// 16-bit code units.
///
/// This is lossless: calling `.encode_wide()` on the resulting string
/// will always return the original code units.
#[stable(feature = "rust1", since = "1.0.0")]
fn from_wide(wide: &[u16]) -> Self;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl OsStringExt for OsString {
fn from_wide(wide: &[u16]) -> OsString {
FromInner::from_inner(Buf { inner: Wtf8Buf::from_wide(wide) })
}
}
/// Windows-specific extensions to `OsStr`.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait OsStrExt {
/// Re-encode an `OsStr` as a wide character sequence,
/// i.e. potentially ill-formed UTF-16.
///
/// This is lossless. Note that the encoding does not include a final
/// null.
#[stable(feature = "rust1", since = "1.0.0")]
fn encode_wide(&self) -> EncodeWide;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl OsStrExt for OsStr {
fn encode_wide(&self) -> EncodeWide {
self.as_inner().inner.encode_wide()
}
}
}
/// Windows-specific extensions for the primitives in `std::fs`
#[unstable(feature = "fs_ext", reason = "may require more thought/methods")]
pub mod fs {
use fs::OpenOptions;
use sys_common::AsInnerMut;
/// Windows-specific extensions to `OpenOptions`
pub trait OpenOptionsExt {
/// Override the `dwDesiredAccess` argument to the call to `CreateFile`
/// with the specified value.
fn desired_access(&mut self, access: i32) -> &mut Self;
/// Override the `dwCreationDisposition` argument to the call to
/// `CreateFile` with the specified value.
///
/// This will override any values of the standard `create` flags, for
/// example.
fn creation_disposition(&mut self, val: i32) -> &mut Self;
/// Override the `dwFlagsAndAttributes` argument to the call to
/// `CreateFile` with the specified value.
///
/// This will override any values of the standard flags on the
/// `OpenOptions` structure.
fn flags_and_attributes(&mut self, val: i32) -> &mut Self;
/// Override the `dwShareMode` argument to the call to `CreateFile` with
/// the specified value.
///
/// This will override any values of the standard flags on the
/// `OpenOptions` structure.
fn share_mode(&mut self, val: i32) -> &mut Self;
}
impl OpenOptionsExt for OpenOptions {
fn desired_access(&mut self, access: i32) -> &mut OpenOptions {
self.as_inner_mut().desired_access(access); self
}
fn creation_disposition(&mut self, access: i32) -> &mut OpenOptions {
self.as_inner_mut().creation_disposition(access); self
}
fn flags_and_attributes(&mut self, access: i32) -> &mut OpenOptions {
self.as_inner_mut().flags_and_attributes(access); self
}
fn share_mode(&mut self, access: i32) -> &mut OpenOptions {
self.as_inner_mut().share_mode(access); self
}
}
}
/// A prelude for conveniently writing platform-specific code.
///
/// Includes all extension traits, and some important type definitions. | pub use super::io::{Socket, Handle, AsRawSocket, AsRawHandle};
#[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")]
pub use super::ffi::{OsStrExt, OsStringExt};
#[doc(no_inline)]
pub use super::fs::OpenOptionsExt;
} | #[stable(feature = "rust1", since = "1.0.0")]
pub mod prelude {
#[doc(no_inline)] |
test_rotation.py | """
ckwg +31
Copyright 2016-2017 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for vital.types.Rotation class
"""
import ctypes
import math
import unittest
import nose.tools
import numpy
from vital.types import Rotation
def array_normalize(a, dtype=None):
a = numpy.asarray(a, dtype)
return a / numpy.linalg.norm(a)
class TestVitalRotation (unittest.TestCase):
def test_new_default(self):
# That these even construct
rot_d = Rotation(ctypes.c_double)
nose.tools.assert_equal(rot_d._ctype, ctypes.c_double)
nose.tools.assert_equal(rot_d._spec, 'd')
rot_f = Rotation(ctypes.c_float)
nose.tools.assert_equal(rot_f._ctype, ctypes.c_float)
nose.tools.assert_equal(rot_f._spec, 'f')
def test_eq(self):
# Identities should equal
r1 = Rotation(ctypes.c_double)
r2 = Rotation(ctypes.c_double)
nose.tools.assert_equal(r1, r2)
r1 = Rotation(ctypes.c_float)
r2 = Rotation(ctypes.c_float)
nose.tools.assert_equal(r1, r2)
r1 = Rotation(ctypes.c_double)
r2 = Rotation(ctypes.c_float)
# r2 should get converted into a double instance for checking
nose.tools.assert_equal(r1, r2)
r1 = Rotation.from_quaternion([1,2,3,4], ctype=ctypes.c_double)
r2 = Rotation.from_quaternion([1,2,3,4], ctype=ctypes.c_double)
nose.tools.assert_equal(r1, r2)
r1 = Rotation.from_quaternion([1,2,3,4], ctype=ctypes.c_double)
r2 = Rotation.from_quaternion([-1,-2,-3,-4], ctype=ctypes.c_double)
assert r1.angle_from(r2) < 1e-12
def test_to_matrix(self):
# Default value should be identity
rot_d = Rotation(ctypes.c_double)
numpy.testing.assert_array_equal(
rot_d.matrix(), numpy.eye(3)
)
rot_f = Rotation(ctypes.c_float)
numpy.testing.assert_array_equal(
rot_f.matrix(), numpy.eye(3)
)
def test_to_quaternion(self):
rot_d = Rotation(ctypes.c_double)
numpy.testing.assert_array_equal(rot_d.quaternion(),
[[0],
[0],
[0],
[1]])
rot_f = Rotation(ctypes.c_float)
numpy.testing.assert_array_equal(rot_f.quaternion(),
[[0],
[0],
[0],
[1]])
def test_to_axis_angle(self):
# expected identity: [0,0,1] and 0
ident_axis = [[0],
[0],
[1]]
ident_angle = 0
rot_d = Rotation(ctypes.c_double)
rot_f = Rotation(ctypes.c_float)
numpy.testing.assert_equal(rot_d.axis(), ident_axis)
nose.tools.assert_equal(rot_d.angle(), ident_angle)
numpy.testing.assert_equal(rot_f.axis(), ident_axis)
nose.tools.assert_equal(rot_f.angle(), ident_angle)
def test_to_rodrigues(self):
# rodrigues identity: [0,0,0]
ident_rod = [[0],
[0],
[0]]
rot_d = Rotation(ctypes.c_double)
rot_f = Rotation(ctypes.c_float)
rod = rot_d.rodrigues()
numpy.testing.assert_equal(rod, ident_rod)
rod = rot_f.rodrigues()
numpy.testing.assert_equal(rod, ident_rod)
def test_to_ypr(self):
# ypr identity: (pi/2, 0, pi)
ident_ypr = (math.pi / 2, 0, -math.pi)
ident_ypr_float = map(lambda v: ctypes.c_float(v).value, ident_ypr)
rot_d = Rotation(ctypes.c_double)
rot_f = Rotation(ctypes.c_float)
numpy.testing.assert_equal(
rot_d.yaw_pitch_roll(),
ident_ypr
)
numpy.testing.assert_equal(
rot_f.yaw_pitch_roll(),
ident_ypr_float
)
def test_from_quaternion(self):
q = array_normalize([[+2],
[-1],
[-3],
[+0]], float)
r = Rotation.from_quaternion(q)
numpy.testing.assert_equal(
r.quaternion(), q
)
def test_from_rodrigues(self):
rod_list_1 = [[0],
[0],
[0]]
r1 = Rotation.from_rodrigues(rod_list_1)
numpy.testing.assert_equal(r1.rodrigues(), rod_list_1)
# This one will get normalized by magnitude in rotation instance
# This vector's is less than 2*pi, so we should expect this vector to be
# returned as is.
rod2 = numpy.array([[ 2],
[ -1],
[0.5]])
nod2_normed = array_normalize(rod2)
print 'r2 2-norm:', numpy.linalg.norm(rod2)
print 'r2-normed:', nod2_normed
r2 = Rotation.from_rodrigues(rod2)
numpy.testing.assert_array_almost_equal(
r2.rodrigues(), rod2,
decimal=14, # 1e-14
)
def test_from_aa(self):
# Axis should come out of rotation normalized
angle = 0.8
axis = numpy.array([[-3],
[2],
[1]])
axis_norm = array_normalize(axis)
r = Rotation.from_axis_angle(axis, angle)
nose.tools.assert_equal(angle, r.angle())
numpy.testing.assert_equal(axis_norm, r.axis())
def test_from_ypr(self):
y = 1.2
p = 0.3
r = -1.0
# XXX
rot = Rotation.from_ypr(y, p, r)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(y, ry, 14)
nose.tools.assert_almost_equal(p, rp, 14)
nose.tools.assert_almost_equal(r, rr, 14)
# 0XX
rot = Rotation.from_ypr(0, p, r)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(0, ry, 14)
nose.tools.assert_almost_equal(p, rp, 14)
nose.tools.assert_almost_equal(r, rr, 14)
# X0X
rot = Rotation.from_ypr(y, 0, r)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(y, ry, 14)
nose.tools.assert_almost_equal(0, rp, 14)
nose.tools.assert_almost_equal(r, rr, 14)
# XX0
rot = Rotation.from_ypr(y, p, 0)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(y, ry, 14)
nose.tools.assert_almost_equal(p, rp, 14)
nose.tools.assert_almost_equal(0, rr, 14)
# 00X
rot = Rotation.from_ypr(0, 0, r)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(0, ry, 14)
nose.tools.assert_almost_equal(0, rp, 14)
nose.tools.assert_almost_equal(r, rr, 14)
# 0X0
rot = Rotation.from_ypr(0, p, 0)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(0, ry, 14)
nose.tools.assert_almost_equal(p, rp, 14)
nose.tools.assert_almost_equal(0, rr, 14)
# X00
rot = Rotation.from_ypr(y, 0, 0)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(y, ry, 14)
nose.tools.assert_almost_equal(0, rp, 14)
nose.tools.assert_almost_equal(0, rr, 14)
# 000
rot = Rotation.from_ypr(0, 0, 0)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(0, ry, 14)
nose.tools.assert_almost_equal(0, rp, 14)
nose.tools.assert_almost_equal(0, rr, 14)
def test_from_matrix(self):
# Create a non-identity matrix from a different constructor that we
# assume works
# Create new rotation with that matrix.
# New rotation to_matrix method should produce the same matrix
pre_r = Rotation.from_quaternion([[+2],
[-1],
[-3],
[+0]])
mat = pre_r.matrix()
r = Rotation.from_matrix(mat)
numpy.testing.assert_allclose(mat, r.matrix(), 1e-15)
def test_inverse(self):
# quaternion calc from:
# https://www.wolframalpha.com/input/?i=quaternion:+0%2B2i-j-3k&lk=3
r = Rotation.from_quaternion([[+2],
[-1],
[-3],
[+0]], ctype=ctypes.c_double)
r_inv = r.inverse()
e_inv = array_normalize([[-1/7.],
[+1/14.],
[+3/14.],
[0]])
numpy.testing.assert_allclose(
r_inv.quaternion(),
e_inv,
1e-15
)
r = Rotation.from_quaternion([[+2],
[-1],
[-3],
[+0]], ctype=ctypes.c_float)
r_inv = r.inverse()
numpy.testing.assert_allclose(
r_inv.quaternion(),
e_inv,
1e-7
)
def | (self):
# Normalize quaternaion vector.
expected_quat = array_normalize([[+2.],
[-1.],
[-3.],
[+0.]])
r_ident_d = Rotation(ctypes.c_double)
r_ident_f = Rotation(ctypes.c_float)
r_other_d = Rotation.from_quaternion(expected_quat, ctypes.c_double)
r_other_f = Rotation.from_quaternion(expected_quat, ctypes.c_float)
r_res_d = r_ident_d.compose(r_other_d)
nose.tools.assert_is_not(r_other_d, r_res_d)
numpy.testing.assert_equal(r_res_d, r_other_d)
numpy.testing.assert_equal(r_res_d.quaternion(), expected_quat)
r_res_f = r_ident_f.compose(r_other_f)
nose.tools.assert_is_not(r_other_f, r_res_f)
numpy.testing.assert_equal(r_res_f, r_other_f)
numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
1e-7)
# Should also work with multiply operator
r_res_d = r_ident_d * r_other_d
nose.tools.assert_is_not(r_other_d, r_res_d)
numpy.testing.assert_equal(r_res_d, r_other_d)
numpy.testing.assert_equal(r_res_d.quaternion(), expected_quat)
r_res_f = r_ident_f * r_other_f
nose.tools.assert_is_not(r_other_f, r_res_f)
numpy.testing.assert_equal(r_res_f, r_other_f)
numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
1e-7)
# Rotation of non-congruent types should be converted automatically
r_res_d = r_ident_d.compose(r_other_f)
nose.tools.assert_is_not(r_res_d, r_other_f)
numpy.testing.assert_allclose(r_res_d.quaternion(),
r_other_f.quaternion(),
1e-7)
numpy.testing.assert_allclose(r_res_d.quaternion(), expected_quat,
1e-7)
r_res_f = r_ident_f.compose(r_other_d)
nose.tools.assert_is_not(r_res_f, r_other_f)
numpy.testing.assert_allclose(r_res_f.quaternion(),
r_other_f.quaternion(),
1e-7)
numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
1e-7)
# Equality check between types should pass due to integrety resolution
# inside function.
r_res_d = r_ident_d * r_other_f
nose.tools.assert_is_not(r_res_d, r_other_f)
numpy.testing.assert_allclose(r_res_d.quaternion(),
r_other_f.quaternion(),
1e-7)
numpy.testing.assert_allclose(r_res_d.quaternion(), expected_quat,
1e-7)
r_res_f = r_ident_f * r_other_d
nose.tools.assert_is_not(r_res_f, r_other_f)
numpy.testing.assert_equal(r_res_f.quaternion(),
r_other_f.quaternion())
numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
1e-7)
def test_rotation_vector(self):
vec = [[1],
[0],
[0]]
vec_expected = [[0],
[1],
[0]]
r_axis = [[0],
[0],
[1]]
r_angle = math.pi / 2.
r = Rotation.from_axis_angle(r_axis, r_angle)
vec_rotated = r.rotate_vector(vec)
numpy.testing.assert_array_almost_equal(vec_expected, vec_rotated)
# should be able to multiply a rotation as a left-hand side arg with a
# 3x1 vector as the right-hand side arg
vec_rotated = r * vec
numpy.testing.assert_array_almost_equal(vec_expected, vec_rotated)
def test_interpolation(self):
x_d = Rotation.from_axis_angle([[1], [0], [0]], 0, ctypes.c_double)
y_d = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 2, ctypes.c_double)
r_d = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 4, ctypes.c_double)
x_f = Rotation.from_axis_angle([[1], [0], [0]], 0, ctypes.c_float)
y_f = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 2, ctypes.c_float)
r_f = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 4, ctypes.c_float)
z_d = Rotation.interpolate(x_d, y_d, 0.5)
z_f = Rotation.interpolate(x_f, y_f, 0.5)
nose.tools.assert_almost_equal((z_d.inverse() * r_d).angle(), 0, 14)
nose.tools.assert_almost_equal((z_f.inverse() * r_f).angle(), 0, 6)
# Should auto-convert different y-type into x's type for computation.
# Return should be of x's type.
z_d = Rotation.interpolate(x_d, y_f, 0.5)
nose.tools.assert_is(z_d._ctype, x_d._ctype)
nose.tools.assert_is_not(z_d._ctype, y_f._ctype)
nose.tools.assert_almost_equal((z_d.inverse() * r_d).angle(), 0, 14)
z_f = Rotation.interpolate(x_f, y_d, 0.5)
nose.tools.assert_is(z_f._ctype, x_f._ctype)
nose.tools.assert_is_not(z_f._ctype, y_d._ctype)
nose.tools.assert_almost_equal((z_f.inverse() * r_f).angle(), 0, 6)
def test_interpolated_rotations(self):
x = Rotation.from_axis_angle([[1], [0], [0]], 0)
a = math.pi / 2
y = Rotation.from_axis_angle([[0], [1], [0]], a)
i_list = Rotation.interpolated_rotations(x, y, 3)
nose.tools.assert_equal([i._ctype for i in i_list],
[ctypes.c_double] * 3)
i0_e_axis, i0_e_angle = [[0], [1], [0]], a * 0.25
i1_e_axis, i1_e_angle = [[0], [1], [0]], a * 0.50
i2_e_axis, i2_e_angle = [[0], [1], [0]], a * 0.75
numpy.testing.assert_almost_equal(i_list[0].axis(), i0_e_axis, 14)
numpy.testing.assert_almost_equal(i_list[0].angle(), i0_e_angle, 14)
numpy.testing.assert_almost_equal(i_list[1].axis(), i1_e_axis, 14)
numpy.testing.assert_almost_equal(i_list[1].angle(), i1_e_angle, 14)
numpy.testing.assert_almost_equal(i_list[2].axis(), i2_e_axis, 14)
numpy.testing.assert_almost_equal(i_list[2].angle(), i2_e_angle, 14)
# Mixed types
a = math.pi / 2
x = Rotation.from_axis_angle([[1], [0], [0]], 0, ctypes.c_float)
y = Rotation.from_axis_angle([[0], [1], [0]], a)
i_list = Rotation.interpolated_rotations(x, y, 3)
nose.tools.assert_equal([i._ctype for i in i_list],
[ctypes.c_float] * 3)
numpy.testing.assert_almost_equal(i_list[0].axis(), i0_e_axis, 14)
numpy.testing.assert_almost_equal(i_list[0].angle(), i0_e_angle, 6)
numpy.testing.assert_almost_equal(i_list[1].axis(), i1_e_axis, 14)
numpy.testing.assert_almost_equal(i_list[1].angle(), i1_e_angle, 6)
numpy.testing.assert_almost_equal(i_list[2].axis(), i2_e_axis, 14)
numpy.testing.assert_almost_equal(i_list[2].angle(), i2_e_angle, 6)
| test_compose |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.