filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
test/test_framework/util.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
VPUBD_PROC_WAIT_TIMEOUT = 60
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
#For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to Jan 1, 2014 + (201 * 10 * 60)
global MOCKTIME
MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
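# p2p_port() and rpc_port() below derive per-node ports from PortSeed.n so that
# concurrent test runs (each seeded with a unique integer) do not collide
# within the reserved PORT_RANGE.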
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1, timeout=60):
"""
Wait until everybody has the same tip
"""
while timeout > 0:
tips = [ x.getbestblockhash() for x in rpc_connections ]
if tips == [ tips[0] ]*len(tips):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Block sync failed")
def sync_mempools(rpc_connections, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
vpubd_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "vpub.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("server=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_auth_pair(n):
return 'rpcuser' + str(n), 'rpcpass' + str(n)
def rpc_url(i, rpchost=None):
rpc_u, rpc_p = rpc_auth_pair(i)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
def wait_for_vpubd_start(process, url, i):
'''
Wait for vpubd to start. This means that RPC is accessible and fully initialized.
Raise an exception if vpubd exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('vpubd exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir, num_nodes):
"""
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache
"""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join('cache', 'node'+str(i))):
create_cache = True
break
if create_cache:
#find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run vpubds:
for i in range(MAX_NODES):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("VPUBD", "vpubd"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
vpubd_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: vpubd started, waiting for RPC to come up")
wait_for_vpubd_start(vpubd_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: RPC succesfully started")
rpcs = []
for i in range(MAX_NODES):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to " + rpc_url(i) + "\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in vpub.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a vpubd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("VPUBD", "vpubd")
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-mocktime="+str(get_mocktime()), "-regtest", "-sporkkey=923EhWh2bJHynX6d4Tqt2Q75bhTDCT1b4kff3qzDKDZHZ6pkQs7"]
if extra_args is not None: args.extend(extra_args)
vpubd_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: phroed started, waiting for RPC to come up")
url = rpc_url(i, rpchost)
wait_for_vpubd_start(vpubd_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: RPC succesfully started")
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start multiple vpubds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
if binary is None: binary = [ None for _ in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
vpubd_processes[i].wait(timeout=VPUBD_PROC_WAIT_TIMEOUT)
del vpubd_processes[i]
def stop_nodes(nodes):
for node in nodes:
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
del nodes[:] # Emptying array closes connections as a side effect
wait_vpubds()
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_vpubds():
# Wait for all vpubds to cleanly exit
for vpubd in vpubd_processes.values():
vpubd.wait(timeout=VPUBD_PROC_WAIT_TIMEOUT)
vpubd_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kB / 1000
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:"+e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
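# Mining int(0.5*count)+101 blocks leaves roughly count/2 mature coinbase
# outputs; each existing utxo is then split into two outputs until at least
# "count" confirmed utxos exist.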
def create_confirmed_utxos(fee, node, count):
node.setgenerate(True, int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.setgenerate(True, 1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in range (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in range(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
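# Splice the OP_RETURN outputs into the raw hex: the first 92 hex chars
# (46 bytes) cover the version, input count and the single unsigned input;
# the byte skipped at [92:94] is the original output count, which "txouts"
# replaces with its own count (0x81).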
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
|
[] |
[] |
[
"PYTHON_DEBUG",
"VPUBD"
] |
[]
|
["PYTHON_DEBUG", "VPUBD"]
|
python
| 2 | 0 | |
test/extended/util/test.go
|
package util
import (
"context"
"flag"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/gomega"
"k8s.io/klog/v2"
kapiv1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
kclientset "k8s.io/client-go/kubernetes"
rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/generated"
// this appears to inexplicably auto-register global flags.
_ "k8s.io/kubernetes/test/e2e/storage/drivers"
projectv1 "github.com/openshift/api/project/v1"
securityv1client "github.com/openshift/client-go/security/clientset/versioned"
"github.com/openshift/origin/pkg/version"
)
var (
reportFileName string
syntheticSuite string
quiet bool
)
var TestContext *e2e.TestContextType = &e2e.TestContext
func InitStandardFlags() {
e2e.RegisterCommonFlags(flag.CommandLine)
e2e.RegisterClusterFlags(flag.CommandLine)
// replaced by a bare import above.
//e2e.RegisterStorageFlags()
}
func InitTest(dryRun bool) error {
InitDefaultEnvironmentVariables()
// interpret synthetic input in `--ginkgo.focus` and/or `--ginkgo.skip`
ginkgo.BeforeEach(checkSyntheticInput)
TestContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false"
TestContext.VerifyServiceAccount = true
testfiles.AddFileSource(testfiles.BindataFileSource{
Asset: generated.Asset,
AssetNames: generated.AssetNames,
})
TestContext.KubectlPath = "kubectl"
TestContext.KubeConfig = KubeConfigPath()
os.Setenv("KUBECONFIG", TestContext.KubeConfig)
// "debian" is used when not set. At least GlusterFS tests need "custom".
// (There is no option for "rhel" or "centos".)
TestContext.NodeOSDistro = "custom"
TestContext.MasterOSDistro = "custom"
// load and set the host variable for kubectl
if !dryRun {
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: TestContext.KubeConfig}, &clientcmd.ConfigOverrides{})
cfg, err := clientConfig.ClientConfig()
if err != nil {
return err
}
TestContext.Host = cfg.Host
}
reportFileName = os.Getenv("TEST_REPORT_FILE_NAME")
if reportFileName == "" {
reportFileName = "junit"
}
quiet = os.Getenv("TEST_OUTPUT_QUIET") == "true"
// Ensure that Kube tests run privileged (like they do upstream)
TestContext.CreateTestingNS = createTestingNS
klog.V(2).Infof("Extended test version %s", version.Get().String())
return nil
}
func ExecuteTest(t ginkgo.GinkgoTestingT, suite string) {
var r []ginkgo.Reporter
if dir := os.Getenv("TEST_REPORT_DIR"); len(dir) > 0 {
TestContext.ReportDir = dir
}
if TestContext.ReportDir != "" {
if err := os.MkdirAll(TestContext.ReportDir, 0755); err != nil {
klog.Errorf("Failed creating report directory: %v", err)
}
defer e2e.CoreDump(TestContext.ReportDir)
}
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = "Skipped"
}
gomega.RegisterFailHandler(ginkgo.Fail)
if TestContext.ReportDir != "" {
r = append(r, reporters.NewJUnitReporter(path.Join(TestContext.ReportDir, fmt.Sprintf("%s_%02d.xml", reportFileName, config.GinkgoConfig.ParallelNode))))
}
WithCleanup(func() {
if quiet {
r = append(r, NewSimpleReporter())
ginkgo.RunSpecsWithCustomReporters(t, suite, r)
} else {
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, suite, r)
}
})
}
var testsStarted bool
// requiresTestStart indicates this code should never be called from within init() or
// Ginkgo test definition.
//
// We explicitly prevent Run() from being called outside of a test because
// test initialization may be expensive. Tests should not vary their definition
// based on a cluster; they should be static in definition. Always use framework.Skipf()
// if your test should not be run based on a dynamic condition of the cluster.
func requiresTestStart() {
if !testsStarted {
panic("May only be called from within a test case")
}
}
// WithCleanup instructs utility methods to move out of dry run mode so there are no side
// effects due to package initialization of Ginkgo tests, and then after the function
// completes cleans up any artifacts created by this project.
func WithCleanup(fn func()) {
testsStarted = true
// Initialize the fixture directory. If we were the ones to initialize it, set the env
// var so that child processes inherit this directory and take responsibility for
// cleaning it up after we exit.
fixtureDir, init := fixtureDirectory()
if init {
os.Setenv("OS_TEST_FIXTURE_DIR", fixtureDir)
defer func() {
os.Setenv("OS_TEST_FIXTURE_DIR", "")
os.RemoveAll(fixtureDir)
}()
}
fn()
}
// InitDefaultEnvironmentVariables makes sure certain required env vars are available
// in the case that extended tests are invoked directly via calls to ginkgo/extended.test
func InitDefaultEnvironmentVariables() {
if ad := os.Getenv("ARTIFACT_DIR"); len(strings.TrimSpace(ad)) == 0 {
os.Setenv("ARTIFACT_DIR", filepath.Join(os.TempDir(), "artifacts"))
}
}
// isGoModulePath returns true if the packagePath reported by reflection is within a
// module and given module path. When go mod is in use, module and modulePath are not
// contiguous as they were in older golang versions with vendoring, so naive contains
// tests fail.
//
// historically: ".../vendor/k8s.io/kubernetes/test/e2e"
// go.mod: "k8s.io/[email protected]/test/e2e"
//
func isGoModulePath(packagePath, module, modulePath string) bool {
return regexp.MustCompile(fmt.Sprintf(`\b%s(@[^/]*|)/%s\b`, regexp.QuoteMeta(module), regexp.QuoteMeta(modulePath))).MatchString(packagePath)
}
func isOriginTest() bool {
return isGoModulePath(ginkgo.CurrentGinkgoTestDescription().FileName, "github.com/openshift/origin", "test")
}
func isKubernetesE2ETest() bool {
return isGoModulePath(ginkgo.CurrentGinkgoTestDescription().FileName, "k8s.io/kubernetes", "test/e2e")
}
func testNameContains(name string) bool {
return strings.Contains(ginkgo.CurrentGinkgoTestDescription().FullTestText, name)
}
func skipTestNamespaceCustomization() bool {
return testNameContains("should always delete fast") || testNameContains("should delete fast enough")
}
// createTestingNS ensures that kubernetes e2e tests have their service accounts in the privileged and anyuid SCCs
func createTestingNS(baseName string, c kclientset.Interface, labels map[string]string) (*kapiv1.Namespace, error) {
if !strings.HasPrefix(baseName, "e2e-") {
baseName = "e2e-" + baseName
}
ns, err := e2e.CreateTestingNS(baseName, c, labels)
if err != nil {
return ns, err
}
// Add anyuid and privileged permissions for upstream tests
if strings.HasPrefix(baseName, "e2e-k8s-") || (isKubernetesE2ETest() && !skipTestNamespaceCustomization()) {
clientConfig, err := GetClientConfig(KubeConfigPath())
if err != nil {
return ns, err
}
securityClient, err := securityv1client.NewForConfig(clientConfig)
if err != nil {
return ns, err
}
e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
// add the "privileged" scc to ensure pods that explicitly
// request extra capabilities are not rejected
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "privileged")
// add the "anyuid" scc to ensure pods that don't specify a
// uid don't get forced into a range (mimics upstream
// behavior)
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "anyuid")
// add the "hostmount-anyuid" scc to ensure pods using hostPath
// can execute tests
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "hostmount-anyuid")
// The intra-pod test requires that the service account have
// permission to retrieve service endpoints.
rbacClient, err := rbacv1client.NewForConfig(clientConfig)
if err != nil {
return ns, err
}
addRoleToE2EServiceAccounts(rbacClient, []kapiv1.Namespace{*ns}, "view")
// in practice too many kube tests ignore scheduling constraints
allowAllNodeScheduling(c, ns.Name)
}
return ns, err
}
// checkSyntheticInput selects tests based on synthetic skips or focuses
func checkSyntheticInput() {
checkSuiteSkips()
}
// checkSuiteSkips ensures Origin/Kubernetes synthetic skip labels are applied
// DEPRECATED: remove in a future release
func checkSuiteSkips() {
switch {
case isOriginTest():
if strings.Contains(config.GinkgoConfig.SkipString, "Synthetic Origin") {
ginkgo.Skip("skipping all openshift/origin tests")
}
case isKubernetesE2ETest():
if strings.Contains(config.GinkgoConfig.SkipString, "Synthetic Kubernetes") {
ginkgo.Skip("skipping all k8s.io/kubernetes tests")
}
}
}
var longRetry = wait.Backoff{Steps: 100}
// allowAllNodeScheduling sets the annotation on namespace that allows all nodes to be scheduled onto.
func allowAllNodeScheduling(c kclientset.Interface, namespace string) {
err := retry.RetryOnConflict(longRetry, func() error {
ns, err := c.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{})
if err != nil {
return err
}
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
ns.Annotations[projectv1.ProjectNodeSelector] = ""
_, err = c.CoreV1().Namespaces().Update(context.Background(), ns, metav1.UpdateOptions{})
return err
})
if err != nil {
FatalErr(err)
}
}
func addE2EServiceAccountsToSCC(securityClient securityv1client.Interface, namespaces []kapiv1.Namespace, sccName string) {
// Because updates can race, we need to set the backoff retries to be > than the number of possible
// parallel jobs starting at once. Set very high to allow future high parallelism.
err := retry.RetryOnConflict(longRetry, func() error {
scc, err := securityClient.SecurityV1().SecurityContextConstraints().Get(context.Background(), sccName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
return nil
}
return err
}
for _, ns := range namespaces {
if isE2ENamespace(ns.Name) {
scc.Groups = append(scc.Groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
}
}
if _, err := securityClient.SecurityV1().SecurityContextConstraints().Update(context.Background(), scc, metav1.UpdateOptions{}); err != nil {
return err
}
return nil
})
if err != nil {
FatalErr(err)
}
}
func isE2ENamespace(ns string) bool {
return true
//return strings.HasPrefix(ns, "e2e-") ||
// strings.HasPrefix(ns, "aggregator-") ||
// strings.HasPrefix(ns, "csi-") ||
// strings.HasPrefix(ns, "deployment-") ||
// strings.HasPrefix(ns, "disruption-") ||
// strings.HasPrefix(ns, "gc-") ||
// strings.HasPrefix(ns, "kubectl-") ||
// strings.HasPrefix(ns, "proxy-") ||
// strings.HasPrefix(ns, "provisioning-") ||
// strings.HasPrefix(ns, "statefulset-") ||
// strings.HasPrefix(ns, "services-")
}
func addRoleToE2EServiceAccounts(rbacClient rbacv1client.RbacV1Interface, namespaces []kapiv1.Namespace, roleName string) {
err := retry.RetryOnConflict(longRetry, func() error {
for _, ns := range namespaces {
if isE2ENamespace(ns.Name) && ns.Status.Phase != kapiv1.NamespaceTerminating {
_, err := rbacClient.RoleBindings(ns.Name).Create(context.Background(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{GenerateName: "default-" + roleName, Namespace: ns.Name},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: roleName,
},
Subjects: []rbacv1.Subject{
{Name: "default", Namespace: ns.Name, Kind: rbacv1.ServiceAccountKind},
},
}, metav1.CreateOptions{})
if err != nil {
e2e.Logf("Warning: Failed to add role to e2e service account: %v", err)
}
}
}
return nil
})
if err != nil {
FatalErr(err)
}
}
|
[
"\"DELETE_NAMESPACE\"",
"\"TEST_REPORT_FILE_NAME\"",
"\"TEST_OUTPUT_QUIET\"",
"\"TEST_REPORT_DIR\"",
"\"ARTIFACT_DIR\""
] |
[] |
[
"DELETE_NAMESPACE",
"TEST_REPORT_DIR",
"TEST_REPORT_FILE_NAME",
"TEST_OUTPUT_QUIET",
"ARTIFACT_DIR"
] |
[]
|
["DELETE_NAMESPACE", "TEST_REPORT_DIR", "TEST_REPORT_FILE_NAME", "TEST_OUTPUT_QUIET", "ARTIFACT_DIR"]
|
go
| 5 | 0 | |
examples/tci/v20190318/SubmitTraditionalClassTask.go
|
package main
import (
"fmt"
"github.com/liucy1983/tencentcloud-sdk-go/tencentcloud/common"
"github.com/liucy1983/tencentcloud-sdk-go/tencentcloud/common/errors"
"github.com/liucy1983/tencentcloud-sdk-go/tencentcloud/common/profile"
tci "github.com/liucy1983/tencentcloud-sdk-go/tencentcloud/tci/v20190318"
)
func main() {
credential := common.NewCredential(
// os.Getenv("TENCENTCLOUD_SECRET_ID"),
// os.Getenv("TENCENTCLOUD_SECRET_KEY"),
"", "",
)
cpf := profile.NewClientProfile()
cpf.HttpProfile.ReqMethod = "POST"
cpf.HttpProfile.ReqTimeout = 30
cpf.HttpProfile.Endpoint = "tci.tencentcloudapi.com"
client, _ := tci.NewClient(credential, "ap-guangzhou", cpf)
req := tci.NewSubmitTraditionalClassTaskRequest()
req.FileContent = common.StringPtr("https://edu-test-1253131631.cos.ap-guangzhou.myqcloud.com/aieduautotest/autotest_vedio.mp4")
req.FileType = common.StringPtr("vod_url")
req.LibrarySet = common.StringPtrs([]string{"library_15603955264181591716"})
// Call the target API through the client object, passing in the request object
response, err := client.SubmitTraditionalClassTask(req)
// Handle errors
fmt.Println(err)
if _, ok := err.(*errors.TencentCloudSDKError); ok {
fmt.Printf("An API error has returned: %s", err)
return
}
// A non-SDK error: fail directly. Real code can add other handling here.
if err != nil {
panic(err)
}
// Print the returned JSON string
fmt.Printf("%s", response.ToJsonString())
}
|
[
"\"TENCENTCLOUD_SECRET_ID\"",
"\"TENCENTCLOUD_SECRET_KEY\""
] |
[] |
[
"TENCENTCLOUD_SECRET_ID",
"TENCENTCLOUD_SECRET_KEY"
] |
[]
|
["TENCENTCLOUD_SECRET_ID", "TENCENTCLOUD_SECRET_KEY"]
|
go
| 2 | 0 | |
ssolib/auth.go
|
package ssolib
import (
"errors"
"html/template"
"net/http"
"os"
"strconv"
"github.com/RangelReale/osin"
"github.com/go-sql-driver/mysql"
"github.com/mijia/sweb/log"
"github.com/mijia/sweb/server"
"golang.org/x/net/context"
"github.com/laincloud/sso/ssolib/models/app"
"github.com/laincloud/sso/ssolib/models/iuser"
"github.com/laincloud/sso/ssolib/models/oauth2"
)
var loginTemplate *template.Template
func init() {
templatesPath := os.Getenv("TEMPLATES_PATH")
if templatesPath == "" {
templatesPath = "./templates"
}
loginTemplate = template.Must(template.ParseFiles(templatesPath + "/login.html"))
}
// var loginTemplate = template.Must(template.New("login").Parse(`
// <html><body>
// <p>{{.ClientName}} is requesting your permission to access your information</p>
// {{if .Scopes}}
// Requested permissions:
// <ul>
// {{range .Scopes}}
// <li>{{.}}</li>
// {{end}}
// </ul>
// {{end}}
// <form action="{{.FormURL}}" method="POST">
// Login: <input type="text" name="login" /><br/>
// Password: <input type="password" name="password" /><br/>
// <input type="submit" />
// {{if .Err }}<div class="error">{{.Err}}</div>{{end}}
// </form>
// </body></html>
// `))
// Why not use strings.Split directly? Because it can only handle a single-space delimiter.
var (
reverse = func(s *Server, route string, params ...interface{}) string {
return s.Reverse(route, params...)
}
)
type loginTemplateContext struct {
ClientName string
FormURL string
Err error
Scopes []string
}
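// AuthorizationEndpoint handles the OAuth2 authorization request: it renders the
// login page and, on POST, verifies the submitted credentials and finishes the
// authorize request for the "code", "token", or "token id_token" response types.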
func (s *Server) AuthorizationEndpoint(ctx context.Context, w http.ResponseWriter, r *http.Request) context.Context {
log.Debugf("sso_debug: sso_oauth_auth api begin.")
oauth2p := getOAuth2Provider(ctx)
resp := oauth2p.NewResponse()
defer func() {
if resp.IsError && resp.InternalError != nil {
log.Error(resp.InternalError)
}
resp.Close()
}()
// response_type must include either "code" or "token"; when response_type includes "token", it may also include "id_token".
// In other words, only three response_type combinations are currently handled:
// "token" "code" "token id_token"
r.ParseForm()
oidc := false
res_type := osin.AuthorizeRequestType(r.Form.Get("response_type"))
if IsAuthorizeRequestTypeEqual(res_type, TOKEN_IDTOKEN) {
oidc = true
// The TOKEN_IDTOKEN flow shares logic with the plain "token" flow, so pass "token" to osin here to avoid duplicating code.
r.Form.Set("response_type", string(osin.TOKEN))
res_type = osin.TOKEN
}
ar := oauth2p.HandleAuthorizeRequest(resp, r)
if ar == nil {
http.Error(w, "Page not found", http.StatusNotFound)
return ctx
}
mctx := getModelContext(ctx)
var tmplContextErr error
if r.Method == "POST" {
login, password := r.FormValue("login"), r.FormValue("password")
log.Debugf("sso_debug: sso_oauth_auth api load info from db begin.")
ub := getUserBackend(ctx)
if s.queryUser { // for detailed login errors, e.g. "no such user"
u, err := ub.GetUserByFeature(login)
log.Debug(u)
if err != nil {
log.Debugf("sso_debug: sso_oauth_auth api load user info from db fail.")
if err == iuser.ErrUserNotFound {
tmplContextErr = errors.New("No such user")
} else {
if mysqlError, ok := err.(*mysql.MySQLError); ok {
if mysqlError.Number == 1267 {
// for "Illegal mix of collations (latin1_swedish_ci,IMPLICIT) and (utf8_general_ci,COERCIBLE) for operation '='"
log.Info(err.Error())
tmplContextErr = errors.New("No such user")
} else {
panic(err)
}
} else {
panic(err)
}
}
} else if ok, _ := ub.AuthPassword(u.GetSub(), password); ok {
if res_type == osin.CODE {
ar.UserData = oauth2.AuthorizeUserData{UserId: u.GetId()}
} else if res_type == osin.TOKEN {
ar.UserData = oauth2.AccessUserData{UserId: u.GetId()}
} else {
panic("unknown response_type and osin didn't handle it")
}
ar.Authorized = true
oauth2p.FinishAuthorizeRequest(resp, r, ar)
if oidc {
setIDTokenInResponseOutput(ctx, resp, r.Form.Get("client_id"),
u.GetId(), r.Form.Get("nonce"), resp.Output["access_token"].(string))
}
osin.OutputJSON(resp, w, r)
log.Debugf("sso_debug: sso_oauth_auth api load info from db end.")
return ctx
} else {
tmplContextErr = errors.New("incorrect password")
}
log.Debugf("sso_debug: sso_oauth_auth api load info from db end.")
} else { // only gives "no such user or incorrect password "
if ok, u, err := ub.AuthPasswordByFeature(login, password); ok {
if res_type == osin.CODE {
ar.UserData = oauth2.AuthorizeUserData{UserId: u.GetId()}
} else if res_type == osin.TOKEN {
ar.UserData = oauth2.AccessUserData{UserId: u.GetId()}
} else {
panic("unknown response_type and osin didn't handle it")
}
ar.Authorized = true
oauth2p.FinishAuthorizeRequest(resp, r, ar)
if oidc {
setIDTokenInResponseOutput(ctx, resp, r.Form.Get("client_id"),
u.GetId(), r.Form.Get("nonce"), resp.Output["access_token"].(string))
}
osin.OutputJSON(resp, w, r)
log.Debugf("sso_debug: sso_oauth_auth api load info from db end.")
return ctx
} else {
log.Debug(err)
tmplContextErr = errors.New("user does not exist or password is incorrect")
}
}
}
appIdString := ar.Client.GetId()
appId, err := strconv.Atoi(appIdString)
if err != nil {
http.Error(w, "Client not found", http.StatusNotFound)
return ctx
}
log.Debugf("sso_debug: sso_oauth_auth api begin load app info from db.")
ap, err := app.GetApp(mctx, appId)
if err != nil {
if err == app.ErrAppNotFound {
http.Error(w, "Client not found", http.StatusNotFound)
} else {
http.Error(w, "Error when verify client", http.StatusInternalServerError)
}
return ctx
}
log.Debugf("sso_debug: sso_oauth_auth api end load app info from db.")
tmplContext := loginTemplateContext{
ClientName: ap.FullName,
FormURL: reverse(s, "PostLoginForm") + "?" + r.URL.RawQuery,
Err: tmplContextErr,
Scopes: split(ar.Scope),
}
s.renderHtmlTemplate(w, loginTemplate, tmplContext)
log.Debugf("sso_debug: sso_oauth_auth api end.")
return ctx
}
type AuthenticateWare struct {
}
func NewAuthenticateMiddleware() server.Middleware {
return &AuthenticateWare{}
}
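// ServeHTTP resolves the access token carried in the Authorization header or the
// access_token form value, looks up the corresponding user and scope, and stores
// them in the request context for downstream handlers.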
func (aw *AuthenticateWare) ServeHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, next server.Handler) context.Context {
log.Debugf("sso_debug: sso_server_http api begin")
if _, ok := r.Header["Authorization"]; !ok && r.FormValue("access_token") == "" {
// No auth info
return next(ctx, w, r)
}
user, scope := func() (iuser.User, string) {
oauth2p := getOAuth2Provider(ctx)
resp := oauth2p.NewResponse()
defer resp.Close()
log.Debugf("sso_debug: sso_server_http parse request begin")
ir := oauth2p.HandleInfoRequest(resp, r)
if ir == nil {
return nil, ""
}
userData, ok := ir.AccessData.UserData.(oauth2.AccessUserData)
if !ok {
panic("Load userinfo failed")
}
log.Debugf("sso_debug: sso_server_http parse request end")
log.Debugf("sso_debug: sso_server_http get user from db begin")
ub := getUserBackend(ctx)
user, err := ub.GetUser(userData.UserId)
if err != nil {
panic(err)
}
log.Debugf("sso_debug: sso_server_http get user from db end")
return user, ir.AccessData.Scope
}()
if user != nil {
ctx = context.WithValue(ctx, "user", user)
ctx = context.WithValue(ctx, "scope", split(scope))
}
log.Debugf("sso_debug: sso_server_http api end")
return next(ctx, w, r)
}
func requireLogin(ctx context.Context) error {
user := getCurrentUser(ctx)
if user == nil {
return errors.New("require login in")
}
return nil
}
func requireScope(ctx context.Context, scope string) error {
err := requireLogin(ctx)
if err != nil {
return err
}
scopes := getScope(ctx)
if scopes != nil {
for _, s := range scopes {
if s == scope {
return nil
}
}
}
return errors.New("require scope: " + scope)
}
|
[
"\"TEMPLATES_PATH\""
] |
[] |
[
"TEMPLATES_PATH"
] |
[]
|
["TEMPLATES_PATH"]
|
go
| 1 | 0 | |
lib/gh.py
|
import os
from github import Github
def create_issue(title, body):
try:
github_client = Github(os.getenv("GITHUB_ACCESS_TOKEN"))
repo = github_client.get_repo(os.getenv("GITHUB_REPO"))
label = repo.get_label("bug")
repo.create_issue(title=title, body=body, labels=[label])
except Exception as e:
print("Error when creating GitHub issue (check your token): {}".format(e))
|
[] |
[] |
[
"GITHUB_ACCESS_TOKEN",
"GITHUB_REPO"
] |
[]
|
["GITHUB_ACCESS_TOKEN", "GITHUB_REPO"]
|
python
| 2 | 0 | |
examples/text_summarization/prophetnet/evaluate/gigaword/bs_pyrouge.py
|
from __future__ import print_function, unicode_literals, division
import codecs
import logging
import os
import platform
import re
from functools import partial
from subprocess import check_output
from tempfile import mkdtemp
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
from pyrouge.utils import log
from pyrouge.utils.file_utils import verify_dir
REMAP = {
"-lrb-": "(",
"-rrb-": ")",
"-lcb-": "{",
"-rcb-": "}",
"-lsb-": "[",
"-rsb-": "]",
"``": '"',
"''": '"'
}
def clean(x):
return re.sub(r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''",
lambda m: REMAP.get(m.group()), x)
class DirectoryProcessor:
@staticmethod
def process(input_dir, output_dir, function):
"""
Apply function to all files in input_dir and save the resulting output
files in output_dir.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = log.get_global_console_logger()
logger.info("Processing files in {}.".format(input_dir))
input_file_names = os.listdir(input_dir)
for input_file_name in input_file_names:
input_file = os.path.join(input_dir, input_file_name)
with codecs.open(input_file, "r", encoding="UTF-8") as f:
input_string = f.read()
output_string = function(input_string)
output_file = os.path.join(output_dir, input_file_name)
with codecs.open(output_file, "w", encoding="UTF-8") as f:
f.write(clean(output_string.lower()))
logger.info("Saved processed files to {}.".format(output_dir))
class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_output)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, temp_dir=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
self.temp_dir = temp_dir
self.log = log.get_global_console_logger()
self.log.setLevel(logging.WARNING)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
def sent_split_to_string(s):
return "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(input_dir, output_dir,
Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containing one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(
i=i, text=sent) for i, sent in enumerate(
sentences, start=1)
]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(
title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir,
system_filename_pattern,
model_dir,
model_filename_pattern,
config_file_path,
system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = [model_filename_pattern.replace('#ID#', id)]
# model_filenames = Rouge155.__get_model_filenames_for_id(
# id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception("Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id, system_dir, system_filename, model_dir,
model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp(dir=self.temp_dir)
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern, self._model_dir,
self._model_filename_pattern, self._config_file, system_id)
self.log.info("Written ROUGE configuration to {}".format(
self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
self.log.info("Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self,
system_id=1,
split_sentences=False,
rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
# 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verify the presence of ROUGE-1.5.5.pl and the data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception("ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(task_id, system_id, system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = [
"<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)
]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir,
model_elems=model_elems,
peer_root=system_dir,
peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp(dir=self.temp_dir)
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info("Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir,
new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
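            # Default ROUGE-1.5.5 options: -e <data dir>, -c 95 (confidence interval),
            # -m (stemming), -r 1000 (bootstrap resamples), -n 2 (up to ROUGE-2),
            # -a (evaluate all systems).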
options = [
'-e',
self._data_dir,
'-c',
95,
# '-2',
# '-1',
# '-U',
'-m',
# '-v',
'-r',
1000,
'-n',
2,
# '-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
if __name__ == "__main__":
import argparse
from utils.argparsers import rouge_path_parser
parser = argparse.ArgumentParser(parents=[rouge_path_parser])
args = parser.parse_args()
rouge = Rouge155(args.rouge_home)
rouge.save_home_dir()
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
cmd/build-go-service/main.go
|
// Public Domain (-) 2018-present, The Elko Authors.
// See the Elko UNLICENSE file for details.
package main
import (
"os"
"path/filepath"
"runtime"
"strings"
"github.com/tav/golly/log"
"github.com/tav/golly/optparse"
"github.com/tav/golly/process"
)
var (
gobin string
pkgPath string
pkgPaths []string
srcPath string
)
func initGo() {
goroot := runtime.GOROOT()
gobin = filepath.Join(goroot, "bin", "go")
gopath := os.Getenv("GOPATH")
paths := strings.Split(gopath, string(os.PathListSeparator))
if len(paths) == 0 {
log.Fatalf("Invalid value for the GOPATH environment variable: %q", gopath)
}
gopath = paths[0]
if gopath == "" {
log.Fatalf("Invalid value for the GOPATH environment variable: %q", gopath)
}
osArch := runtime.GOOS + "_" + runtime.GOARCH
pkgPath = filepath.Join(gopath, "pkg", osArch)
pkgPaths = []string{
filepath.Join(goroot, "pkg", osArch), pkgPath,
}
srcPath = filepath.Join(gopath, "src")
}
func main() {
opts := optparse.New("Usage: build-go-service [OPTIONS] PATH\n")
opts.SetVersion("0.0.1")
goimports := opts.Flags("-g", "--goimports").Label("FILE").String(
"Path to the .goimports file")
installDeps := opts.Flags("-i", "--install-deps").Bool(
"Install the dependencies specified by the .goimports file")
output := opts.Flags("-o", "--output").Label("FILE").String(
"Path to output the generated binary")
os.Args[0] = "build-go-service"
args := opts.Parse(os.Args)
initGo()
if *installDeps {
Install(*goimports)
} else if len(args) == 0 {
opts.PrintUsage()
process.Exit(1)
} else {
Build(args[0], *output, *goimports)
}
process.Exit(0)
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
sample-operators/mysql-schema/src/main/java/io/javaoperatorsdk/operator/sample/MySQLSchemaReconciler.java
|
package io.javaoperatorsdk.operator.sample;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.fabric8.kubernetes.api.model.Secret;
import io.javaoperatorsdk.operator.api.reconciler.Context;
import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
import io.javaoperatorsdk.operator.api.reconciler.ErrorStatusHandler;
import io.javaoperatorsdk.operator.api.reconciler.ErrorStatusUpdateControl;
import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
import io.javaoperatorsdk.operator.api.reconciler.dependent.Dependent;
import io.javaoperatorsdk.operator.sample.dependent.SchemaDependentResource;
import io.javaoperatorsdk.operator.sample.dependent.SecretDependentResource;
import io.javaoperatorsdk.operator.sample.schema.Schema;
import static io.javaoperatorsdk.operator.sample.dependent.SchemaDependentResource.decode;
import static io.javaoperatorsdk.operator.sample.dependent.SecretDependentResource.MYSQL_SECRET_USERNAME;
import static java.lang.String.format;
@ControllerConfiguration(
dependents = {
@Dependent(type = SecretDependentResource.class),
@Dependent(type = SchemaDependentResource.class, name = SchemaDependentResource.NAME)
})
public class MySQLSchemaReconciler
implements Reconciler<MySQLSchema>, ErrorStatusHandler<MySQLSchema> {
static final Logger log = LoggerFactory.getLogger(MySQLSchemaReconciler.class);
public MySQLSchemaReconciler() {}
@Override
public UpdateControl<MySQLSchema> reconcile(MySQLSchema schema, Context<MySQLSchema> context) {
// we only need to update the status if we just built the schema, i.e. when it's present in the
// context
Secret secret = context.getSecondaryResource(Secret.class).orElseThrow();
return context.getSecondaryResource(Schema.class, SchemaDependentResource.NAME).map(s -> {
updateStatusPojo(schema, s, secret.getMetadata().getName(),
decode(secret.getData().get(MYSQL_SECRET_USERNAME)));
log.info("Schema {} created - updating CR status", s.getName());
return UpdateControl.patchStatus(schema);
}).orElse(UpdateControl.noUpdate());
}
@Override
public ErrorStatusUpdateControl<MySQLSchema> updateErrorStatus(MySQLSchema schema,
Context<MySQLSchema> context,
Exception e) {
SchemaStatus status = new SchemaStatus();
status.setUrl(null);
status.setUserName(null);
status.setSecretName(null);
status.setStatus("ERROR: " + e.getMessage());
schema.setStatus(status);
return ErrorStatusUpdateControl.updateStatus(schema);
}
private void updateStatusPojo(MySQLSchema mySQLSchema, Schema schema, String secretName,
String userName) {
SchemaStatus status = new SchemaStatus();
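    // Build the JDBC URL from the MYSQL_HOST environment variable and the schema name.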
status.setUrl(
format(
"jdbc:mysql://%1$s/%2$s",
System.getenv("MYSQL_HOST"), schema.getName()));
status.setUserName(userName);
status.setSecretName(secretName);
status.setStatus("CREATED");
mySQLSchema.setStatus(status);
}
}
|
[
"\"MYSQL_HOST\""
] |
[] |
[
"MYSQL_HOST"
] |
[]
|
["MYSQL_HOST"]
|
java
| 1 | 0 | |
vendor/github.com/hashicorp/vault/physical/couchdb/couchdb_test.go
|
package couchdb
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
"time"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/physical"
dockertest "gopkg.in/ory-am/dockertest.v3"
)
func TestCouchDBBackend(t *testing.T) {
cleanup, endpoint, username, password := prepareCouchdbDBTestContainer(t)
defer cleanup()
logger := logging.NewVaultLogger(log.Debug)
b, err := NewCouchDBBackend(map[string]string{
"endpoint": endpoint,
"username": username,
"password": password,
}, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
}
func TestTransactionalCouchDBBackend(t *testing.T) {
cleanup, endpoint, username, password := prepareCouchdbDBTestContainer(t)
defer cleanup()
logger := logging.NewVaultLogger(log.Debug)
b, err := NewTransactionalCouchDBBackend(map[string]string{
"endpoint": endpoint,
"username": username,
"password": password,
}, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
}
func prepareCouchdbDBTestContainer(t *testing.T) (cleanup func(), retAddress, username, password string) {
	// If the COUCHDB_ENDPOINT environment variable is set, assume the caller wants to
	// target a real CouchDB instance instead of a local Docker container.
if os.Getenv("COUCHDB_ENDPOINT") != "" {
return func() {}, os.Getenv("COUCHDB_ENDPOINT"), os.Getenv("COUCHDB_USERNAME"), os.Getenv("COUCHDB_PASSWORD")
}
pool, err := dockertest.NewPool("")
if err != nil {
t.Fatalf("Failed to connect to docker: %s", err)
}
resource, err := pool.Run("couchdb", "1.6", []string{})
if err != nil {
t.Fatalf("Could not start local DynamoDB: %s", err)
}
retAddress = "http://localhost:" + resource.GetPort("5984/tcp")
cleanup = func() {
err := pool.Purge(resource)
if err != nil {
t.Fatalf("Failed to cleanup local DynamoDB: %s", err)
}
}
// exponential backoff-retry, because the couchDB may not be able to accept
// connections yet
if err := pool.Retry(func() error {
var err error
resp, err := http.Get(retAddress)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("expected couchdb to return status code 200, got (%s) instead.", resp.Status)
}
return nil
}); err != nil {
t.Fatalf("Could not connect to docker: %s", err)
}
dbName := fmt.Sprintf("vault-test-%d", time.Now().Unix())
{
req, err := http.NewRequest("PUT", fmt.Sprintf("%s/%s", retAddress, dbName), nil)
if err != nil {
t.Fatalf("Could not create create database request: %q", err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("Could not create database: %q", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
bs, _ := ioutil.ReadAll(resp.Body)
t.Fatalf("Failed to create database: %s %s\n", resp.Status, string(bs))
}
}
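	// Create the "admin" user (password "admin"); a fresh CouchDB container starts in "admin party" mode.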
{
req, err := http.NewRequest("PUT", fmt.Sprintf("%s/_config/admins/admin", retAddress), strings.NewReader(`"admin"`))
if err != nil {
t.Fatalf("Could not create admin user request: %q", err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("Could not create admin user: %q", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bs, _ := ioutil.ReadAll(resp.Body)
t.Fatalf("Failed to create admin user: %s %s\n", resp.Status, string(bs))
}
}
return cleanup, retAddress + "/" + dbName, "admin", "admin"
}
|
[
"\"COUCHDB_ENDPOINT\"",
"\"COUCHDB_ENDPOINT\"",
"\"COUCHDB_USERNAME\"",
"\"COUCHDB_PASSWORD\""
] |
[] |
[
"COUCHDB_USERNAME",
"COUCHDB_PASSWORD",
"COUCHDB_ENDPOINT"
] |
[]
|
["COUCHDB_USERNAME", "COUCHDB_PASSWORD", "COUCHDB_ENDPOINT"]
|
go
| 3 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('..'))
import sphinx_rtd_theme
from builtins import str
import re
import subprocess
import os
# -- Project information -----------------------------------------------------
project = u'NVIDIA DALI'
copyright = u'2018, NVIDIA Corporation'
author = u'NVIDIA Corporation'
version_long = u'0.0.0'
with open("../VERSION") as f:
version_long = f.readline()
version_short = re.match(r'^[\d]+\.[\d]+', version_long).group(0)
git_sha = os.getenv("GIT_SHA")
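# Fall back to the short hash reported by git, or a dummy value, when GIT_SHA is not set.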
if not git_sha:
try:
git_sha = subprocess.check_output(["git", "log", "--pretty=format:'%h'", "-n1"]).decode('ascii').replace("'","").strip()
except:
git_sha = u'0000000'
git_sha = git_sha[:7] if len(git_sha) > 7 else git_sha
version = str(version_long + u"-" + git_sha)
# The full version, including alpha/beta/rc tags
release = str(version_long)
# generate table of supported operators and their devices
subprocess.call(["python", "supported_op_devices.py", "op_inclusion"])
# hack: version is used for html creation, so put the version picker
# link here as well:
version = version + """<br/>
Version select: <select onChange="window.location.href = this.value" onFocus="this.selectedIndex = -1">
<option value="https://docs.nvidia.com/deeplearning/sdk/dali-developer-guide/">Current release</option>
<option value="https://docs.nvidia.com/deeplearning/sdk/dali-master-branch-user-guide/docs/">master (unstable)</option>
<option value="https://docs.nvidia.com/deeplearning/sdk/dali-archived/index.html">Older releases</option>
</select>"""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.ifconfig',
'sphinx.ext.extlinks',
'nbsphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'canonical_url': 'https://docs.nvidia.com/deeplearning/sdk/dali-developer-guide/',
'collapse_navigation': False,
'display_version': True,
'logo_only': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NVIDIADALIdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NVIDIADALI.tex', u'NVIDIA DALI Documentation',
u'NVIDIA Corporation', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nvidiadali', u'NVIDIA DALI Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NVIDIADALI', u'NVIDIA DALI Documentation',
author, 'NVIDIADALI', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
extlinks = {'issue': ('https://github.com/NVIDIA/DALI/issues/%s',
'issue '),
'fileref': ('https://github.com/NVIDIA/DALI/tree/' + (git_sha if git_sha != u'0000000' else "master") + '/%s', ''),}
|
[] |
[] |
[
"GIT_SHA"
] |
[]
|
["GIT_SHA"]
|
python
| 1 | 0 | |
tests/test_mag_orms.py
|
import pytest
import unittest
import os
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from ai_research.mag.mag_orm import Base
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
class TestMag(unittest.TestCase):
"""Check that the MAG ORM works as expected"""
engine = create_engine(os.getenv("test_postgresdb"))
Session = sessionmaker(engine)
def setUp(self):
"""Create the temporary table"""
Base.metadata.create_all(self.engine)
def tearDown(self):
"""Drop the temporary table"""
Base.metadata.drop_all(self.engine)
def test_build(self):
pass
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[
"test_postgresdb"
] |
[]
|
["test_postgresdb"]
|
python
| 1 | 0 | |
cmd/siad/daemon.go
|
package main
import (
"errors"
"fmt"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
"gitlab.com/NebulousLabs/Sia/build"
"gitlab.com/NebulousLabs/Sia/crypto"
"gitlab.com/NebulousLabs/Sia/modules"
"gitlab.com/NebulousLabs/Sia/profile"
mnemonics "gitlab.com/NebulousLabs/entropy-mnemonics"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
)
// passwordPrompt securely reads a password from stdin.
func passwordPrompt(prompt string) (string, error) {
fmt.Print(prompt)
pw, err := terminal.ReadPassword(int(syscall.Stdin))
fmt.Println()
return string(pw), err
}
// verifyAPISecurity checks that the security values are consistent with a
// sane, secure system.
func verifyAPISecurity(config Config) error {
// Make sure that only the loopback address is allowed unless the
// --disable-api-security flag has been used.
if !config.Siad.AllowAPIBind {
addr := modules.NetAddress(config.Siad.APIaddr)
if !addr.IsLoopback() {
if addr.Host() == "" {
return fmt.Errorf("a blank host will listen on all interfaces, did you mean localhost:%v?\nyou must pass --disable-api-security to bind Siad to a non-localhost address", addr.Port())
}
return errors.New("you must pass --disable-api-security to bind Siad to a non-localhost address")
}
return nil
}
// If the --disable-api-security flag is used, enforce that
// --authenticate-api must also be used.
if config.Siad.AllowAPIBind && !config.Siad.AuthenticateAPI {
return errors.New("cannot use --disable-api-security without setting an api password")
}
return nil
}
// processNetAddr adds a ':' to a bare integer, so that it is a proper port
// number.
func processNetAddr(addr string) string {
_, err := strconv.Atoi(addr)
if err == nil {
return ":" + addr
}
return addr
}
// processModules makes the modules string lowercase to make checking if a
// module in the string easier, and returns an error if the string contains an
// invalid module character.
func processModules(modules string) (string, error) {
modules = strings.ToLower(modules)
validModules := "cghmrtwe"
invalidModules := modules
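	// Strip each valid module letter once; anything left over is unrecognized or duplicated.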
for _, m := range validModules {
invalidModules = strings.Replace(invalidModules, string(m), "", 1)
}
if len(invalidModules) > 0 {
return "", errors.New("Unable to parse --modules flag, unrecognized or duplicate modules: " + invalidModules)
}
return modules, nil
}
// processProfileFlags checks that the flags given for profiling are valid.
func processProfileFlags(profile string) (string, error) {
profile = strings.ToLower(profile)
validProfiles := "cmt"
invalidProfiles := profile
for _, p := range validProfiles {
invalidProfiles = strings.Replace(invalidProfiles, string(p), "", 1)
}
if len(invalidProfiles) > 0 {
return "", errors.New("Unable to parse --profile flags, unrecognized or duplicate flags: " + invalidProfiles)
}
return profile, nil
}
// processConfig checks the configuration values and performs cleanup on
// incorrect-but-allowed values.
func processConfig(config Config) (Config, error) {
var err1, err2 error
config.Siad.APIaddr = processNetAddr(config.Siad.APIaddr)
config.Siad.RPCaddr = processNetAddr(config.Siad.RPCaddr)
config.Siad.HostAddr = processNetAddr(config.Siad.HostAddr)
config.Siad.Modules, err1 = processModules(config.Siad.Modules)
config.Siad.Profile, err2 = processProfileFlags(config.Siad.Profile)
err3 := verifyAPISecurity(config)
err := build.JoinErrors([]error{err1, err2, err3}, ", and ")
if err != nil {
return Config{}, err
}
return config, nil
}
// unlockWallet is called on siad startup and attempts to automatically
// unlock the wallet with the given password string.
func unlockWallet(w modules.Wallet, password string) error {
var validKeys []crypto.TwofishKey
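	// The password may be a mnemonic seed phrase, so derive candidate keys from every supported dictionary.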
dicts := []mnemonics.DictionaryID{"english", "german", "japanese"}
for _, dict := range dicts {
seed, err := modules.StringToSeed(password, dict)
if err != nil {
continue
}
validKeys = append(validKeys, crypto.TwofishKey(crypto.HashObject(seed)))
}
validKeys = append(validKeys, crypto.TwofishKey(crypto.HashObject(password)))
for _, key := range validKeys {
if err := w.Unlock(key); err == nil {
return nil
}
}
return modules.ErrBadEncryptionKey
}
// startDaemon uses the config parameters to initialize Sia modules and start
// siad.
func startDaemon(config Config) (err error) {
if config.Siad.AuthenticateAPI {
password := os.Getenv("SIA_API_PASSWORD")
if password != "" {
fmt.Println("Using SIA_API_PASSWORD environment variable")
config.APIPassword = password
} else {
// Prompt user for API password.
config.APIPassword, err = passwordPrompt("Enter API password: ")
if err != nil {
return err
}
if config.APIPassword == "" {
return errors.New("password cannot be blank")
}
}
}
// Print the siad Version and GitRevision
fmt.Println("Sia Daemon v" + build.Version)
if build.GitRevision == "" {
fmt.Println("WARN: compiled without build commit or version. To compile correctly, please use the makefile")
} else {
fmt.Println("Git Revision " + build.GitRevision)
}
// Install a signal handler that will catch exceptions thrown by mmap'd
// files.
// NOTE: ideally we would catch SIGSEGV here too, since that signal can
// also be thrown by an mmap I/O error. However, SIGSEGV can occur under
// other circumstances as well, and in those cases, we will want a full
// stack trace.
mmapChan := make(chan os.Signal, 1)
signal.Notify(mmapChan, syscall.SIGBUS)
go func() {
<-mmapChan
fmt.Println("A fatal I/O exception (SIGBUS) has occurred.")
fmt.Println("Please check your disk for errors.")
os.Exit(1)
}()
// Print a startup message.
fmt.Println("Loading...")
loadStart := time.Now()
srv, err := NewServer(config)
if err != nil {
return err
}
errChan := make(chan error)
go func() {
errChan <- srv.Serve()
}()
err = srv.loadModules()
if err != nil {
return err
}
// listen for kill signals
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, os.Interrupt, os.Kill, syscall.SIGTERM)
// Print a 'startup complete' message.
startupTime := time.Since(loadStart)
fmt.Println("Finished loading in", startupTime.Seconds(), "seconds")
// wait for Serve to return or for kill signal to be caught
err = func() error {
select {
case err := <-errChan:
return err
case <-sigChan:
fmt.Println("\rCaught stop signal, quitting...")
return srv.Close()
}
}()
if err != nil {
build.Critical(err)
}
return nil
}
// startDaemonCmd is a passthrough function for startDaemon.
func startDaemonCmd(cmd *cobra.Command, _ []string) {
var profileCPU, profileMem, profileTrace bool
profileCPU = strings.Contains(globalConfig.Siad.Profile, "c")
profileMem = strings.Contains(globalConfig.Siad.Profile, "m")
profileTrace = strings.Contains(globalConfig.Siad.Profile, "t")
if build.DEBUG {
profileCPU = true
profileMem = true
}
if profileCPU || profileMem || profileTrace {
go profile.StartContinuousProfile(globalConfig.Siad.ProfileDir, profileCPU, profileMem, profileTrace)
}
// Start siad. startDaemon will only return when it is shutting down.
err := startDaemon(globalConfig)
if err != nil {
die(err)
}
	// Daemon seems to have closed cleanly. Print a 'closed' message.
fmt.Println("Shutdown complete.")
}
|
[
"\"SIA_API_PASSWORD\""
] |
[] |
[
"SIA_API_PASSWORD"
] |
[]
|
["SIA_API_PASSWORD"]
|
go
| 1 | 0 | |
src/main/java/com/erudika/scoold/utils/SlackNotifier.java
|
package com.erudika.scoold.utils;
import com.erudika.para.utils.Config;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.DeserializationFeature;
public class SlackNotifier {
private static final Logger logger = LoggerFactory.getLogger(SlackNotifier.class);
private static final ExecutorService EXECUTOR = Executors.newFixedThreadPool(Config.EXECUTOR_THREADS);
public SlackNotifier() {
}
public boolean sendNotification(final String email, final String message) {
if (email == null) {
return false;
}
asyncExecute(new Runnable() {
public void run() {
try {
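                    // Three-step flow: look up the Slack user by email, open a direct-message
                    // conversation with them, then post the notification message.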
Client client = Client.create();
ObjectMapper mapper = new ObjectMapper();
mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
WebResource webResource = client
.resource("https://slack.com/api/users.lookupByEmail");
webResource.header("Authorization", "Bearer " + System.getenv("SLACK_APP_DEVOPS"));
webResource = webResource.queryParam("token", System.getenv("SLACK_APP_DEVOPS"));
webResource = webResource.queryParam("email", email);
ClientResponse response = webResource.type("application/x-www-form-urlencoded")
.get(ClientResponse.class);
String responseStr = response.getEntity(String.class);
SlackUser user = mapper.readValue(responseStr, SlackUser.class);
webResource = client.resource("https://slack.com/api/conversations.open");
webResource.header("Authorization", "Bearer " + System.getenv("BOT_TOKEN_ASKBOT"));
webResource = webResource.queryParam("token", System.getenv("BOT_TOKEN_ASKBOT"));
webResource = webResource.queryParam("users", user.user.id);
response = webResource.type("application/x-www-form-urlencoded")
.post(ClientResponse.class);
responseStr = response.getEntity(String.class);
SlackConversation conv = mapper.readValue(responseStr, SlackConversation.class);
webResource = client.resource("https://slack.com/api/chat.postMessage");
webResource.header("Authorization", "Bearer " + System.getenv("BOT_TOKEN_ASKBOT"));
webResource = webResource.queryParam("token", System.getenv("BOT_TOKEN_ASKBOT"));
webResource = webResource.queryParam("channel", conv.channel.id);
webResource = webResource.queryParam("text",message);
webResource = webResource.queryParam("as_user", "true");
response = webResource.type("application/x-www-form-urlencoded")
.post(ClientResponse.class);
} catch (Exception ex) {
logger.error("Slack Notification failed. {}", ex.getMessage());
}
}
});
return true;
}
private void asyncExecute(Runnable runnable) {
if (runnable != null) {
try {
EXECUTOR.execute(runnable);
} catch (RejectedExecutionException ex) {
logger.warn(ex.getMessage());
try {
runnable.run();
} catch (Exception e) {
logger.error(null, e);
}
}
}
}
static class SlackUser{
public SlackUserUser user;
}
static class SlackUserUser{
public String id;
}
static class SlackConversation{
public SlackChannel channel;
}
static class SlackChannel{
public String id;
}
}
|
[
"\"SLACK_APP_DEVOPS\"",
"\"SLACK_APP_DEVOPS\"",
"\"BOT_TOKEN_ASKBOT\"",
"\"BOT_TOKEN_ASKBOT\"",
"\"BOT_TOKEN_ASKBOT\"",
"\"BOT_TOKEN_ASKBOT\""
] |
[] |
[
"SLACK_APP_DEVOPS",
"BOT_TOKEN_ASKBOT"
] |
[]
|
["SLACK_APP_DEVOPS", "BOT_TOKEN_ASKBOT"]
|
java
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'restaurant_api.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
Study_2021/Study_Old/TacoCloud/.mvn/wrapper/MavenWrapperDownloader.java
|
/*
* Copyright 2007-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.net.*;
import java.io.*;
import java.nio.channels.*;
import java.util.Properties;
public class MavenWrapperDownloader {
private static final String WRAPPER_VERSION = "0.5.6";
/**
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
*/
private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+ WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
/**
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
* use instead of the default one.
*/
private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
".mvn/wrapper/maven-wrapper.properties";
/**
* Path where the maven-wrapper.jar will be saved to.
*/
private static final String MAVEN_WRAPPER_JAR_PATH =
".mvn/wrapper/maven-wrapper.jar";
/**
* Name of the property which should be used to override the default download url for the wrapper.
*/
private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
public static void main(String args[]) {
System.out.println("- Downloader started");
File baseDirectory = new File(args[0]);
System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
// If the maven-wrapper.properties exists, read it and check if it contains a custom
// wrapperUrl parameter.
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
String url = DEFAULT_DOWNLOAD_URL;
if (mavenWrapperPropertyFile.exists()) {
FileInputStream mavenWrapperPropertyFileInputStream = null;
try {
mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
Properties mavenWrapperProperties = new Properties();
mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
} catch (IOException e) {
System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
} finally {
try {
if (mavenWrapperPropertyFileInputStream != null) {
mavenWrapperPropertyFileInputStream.close();
}
} catch (IOException e) {
// Ignore ...
}
}
}
System.out.println("- Downloading from: " + url);
File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
if (!outputFile.getParentFile().exists()) {
if (!outputFile.getParentFile().mkdirs()) {
System.out.println(
"- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
}
}
System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
try {
downloadFileFromURL(url, outputFile);
System.out.println("Done");
System.exit(0);
} catch (Throwable e) {
System.out.println("- Error downloading");
e.printStackTrace();
System.exit(1);
}
}
private static void downloadFileFromURL(String urlString, File destination) throws Exception {
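        // If MVNW_USERNAME and MVNW_PASSWORD are both set, use them for HTTP basic authentication.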
if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
String username = System.getenv("MVNW_USERNAME");
char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
Authenticator.setDefault(new Authenticator() {
@Override
protected PasswordAuthentication getPasswordAuthentication() {
return new PasswordAuthentication(username, password);
}
});
}
URL website = new URL(urlString);
ReadableByteChannel rbc;
rbc = Channels.newChannel(website.openStream());
FileOutputStream fos = new FileOutputStream(destination);
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
fos.close();
rbc.close();
}
}
|
[
"\"MVNW_USERNAME\"",
"\"MVNW_PASSWORD\"",
"\"MVNW_USERNAME\"",
"\"MVNW_PASSWORD\""
] |
[] |
[
"MVNW_USERNAME",
"MVNW_PASSWORD"
] |
[]
|
["MVNW_USERNAME", "MVNW_PASSWORD"]
|
java
| 2 | 0 | |
checkr_test.go
|
package checkr
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func LoadKey(t *testing.T) string {
key := os.Getenv("CHECKR_KEY")
require.NotEmpty(t, key, "Env var 'CHECKR_KEY' must be set for testing")
return key
}
func TestClient(t *testing.T) {
// Test User-set API URL
url := "http://localhost"
c := NewClient("key", url)
assert.Equal(t, url, c.HostURL, "User given URL should be set on client")
}
|
[
"\"CHECKR_KEY\""
] |
[] |
[
"CHECKR_KEY"
] |
[]
|
["CHECKR_KEY"]
|
go
| 1 | 0 | |
pkg/daemon/gateway.go
|
package daemon
import (
"context"
"fmt"
"net"
"os"
"os/exec"
"strings"
"github.com/alauda/felix/ipsets"
kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
"github.com/kubeovn/kube-ovn/pkg/ovs"
"github.com/kubeovn/kube-ovn/pkg/util"
"github.com/vishvananda/netlink"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog"
)
const (
SubnetSet = "subnets"
SubnetNatSet = "subnets-nat"
LocalPodSet = "local-pod-ip-nat"
OtherNodeSet = "other-node"
IPSetPrefix = "ovn"
)
func (c *Controller) runGateway() {
if err := c.setIPSet(); err != nil {
klog.Errorf("failed to set gw ipsets")
}
if err := c.setIptables(); err != nil {
klog.Errorf("failed to set gw iptables")
}
if err := c.setGatewayBandwidth(); err != nil {
klog.Errorf("failed to set gw bandwidth, %v", err)
}
if err := c.setICGateway(); err != nil {
klog.Errorf("failed to set ic gateway, %v", err)
}
if err := c.setExGateway(); err != nil {
klog.Errorf("failed to set ex gateway, %v", err)
}
c.appendMssRule()
}
func (c *Controller) setIPSet() error {
protocols := make([]string, 2)
if c.protocol == kubeovnv1.ProtocolDual {
protocols[0] = kubeovnv1.ProtocolIPv4
protocols[1] = kubeovnv1.ProtocolIPv6
} else {
protocols[0] = c.protocol
}
for _, protocol := range protocols {
if c.ipset[protocol] == nil {
continue
}
subnets, err := c.getSubnetsCIDR(protocol)
if err != nil {
klog.Errorf("get subnets failed, %+v", err)
return err
}
localPodIPs, err := c.getLocalPodIPsNeedNAT(protocol)
if err != nil {
klog.Errorf("get local pod ips failed, %+v", err)
return err
}
subnetsNeedNat, err := c.getSubnetsNeedNAT(protocol)
if err != nil {
klog.Errorf("get need nat subnets failed, %+v", err)
return err
}
otherNode, err := c.getOtherNodes(protocol)
if err != nil {
klog.Errorf("failed to get node, %+v", err)
return err
}
c.ipset[protocol].AddOrReplaceIPSet(ipsets.IPSetMetadata{
MaxSize: 1048576,
SetID: SubnetSet,
Type: ipsets.IPSetTypeHashNet,
}, subnets)
c.ipset[protocol].AddOrReplaceIPSet(ipsets.IPSetMetadata{
MaxSize: 1048576,
SetID: LocalPodSet,
Type: ipsets.IPSetTypeHashIP,
}, localPodIPs)
c.ipset[protocol].AddOrReplaceIPSet(ipsets.IPSetMetadata{
MaxSize: 1048576,
SetID: SubnetNatSet,
Type: ipsets.IPSetTypeHashNet,
}, subnetsNeedNat)
c.ipset[protocol].AddOrReplaceIPSet(ipsets.IPSetMetadata{
MaxSize: 1048576,
SetID: OtherNodeSet,
Type: ipsets.IPSetTypeHashNet,
}, otherNode)
c.ipset[protocol].ApplyUpdates()
}
return nil
}
func (c *Controller) addIPSetMembers(setID, subnet, ip string) error {
podSubnet, err := c.subnetsLister.Get(subnet)
if err != nil {
klog.Errorf("get subnet %s failed, %+v", subnet, err)
return err
}
if !podSubnet.Spec.NatOutgoing ||
podSubnet.Spec.Vpc != util.DefaultVpc ||
podSubnet.Spec.GatewayType != kubeovnv1.GWDistributedType {
return nil
}
podIPs := strings.Split(ip, ",")
if protocol := util.CheckProtocol(ip); protocol == kubeovnv1.ProtocolDual {
c.ipset[kubeovnv1.ProtocolIPv4].AddMembers(setID, []string{podIPs[0]})
c.ipset[kubeovnv1.ProtocolIPv6].AddMembers(setID, []string{podIPs[1]})
c.ipset[kubeovnv1.ProtocolIPv4].ApplyUpdates()
c.ipset[kubeovnv1.ProtocolIPv6].ApplyUpdates()
} else {
c.ipset[protocol].AddMembers(setID, []string{podIPs[0]})
c.ipset[protocol].ApplyUpdates()
}
return nil
}
func (c *Controller) removeIPSetMembers(setID, subnet, ip string) error {
if subnet == "" || ip == "" {
return nil
}
podSubnet, err := c.subnetsLister.Get(subnet)
if err != nil {
klog.Errorf("get subnet %s failed, %+v", subnet, err)
return err
}
if !podSubnet.Spec.NatOutgoing ||
podSubnet.Spec.Vpc != util.DefaultVpc ||
podSubnet.Spec.GatewayType != kubeovnv1.GWDistributedType {
return nil
}
podIPs := strings.Split(ip, ",")
if protocol := util.CheckProtocol(ip); protocol == kubeovnv1.ProtocolDual {
c.ipset[kubeovnv1.ProtocolIPv4].RemoveMembers(setID, []string{podIPs[0]})
c.ipset[kubeovnv1.ProtocolIPv6].RemoveMembers(setID, []string{podIPs[1]})
c.ipset[kubeovnv1.ProtocolIPv4].ApplyUpdates()
c.ipset[kubeovnv1.ProtocolIPv6].ApplyUpdates()
} else {
c.ipset[protocol].RemoveMembers(setID, []string{podIPs[0]})
c.ipset[protocol].ApplyUpdates()
}
return nil
}
func (c *Controller) setIptables() error {
klog.V(3).Infoln("start to set up iptables")
node, err := c.nodesLister.Get(c.config.NodeName)
if err != nil {
klog.Errorf("failed to get node %s, %v", c.config.NodeName, err)
return err
}
hostIP := util.GetNodeInternalIP(*node)
var (
v4Rules = []util.IPTableRule{
// Prevent performing Masquerade on external traffic which arrives from a Node that owns the Pod/Subnet IP
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set ! --match-set ovn40subnets src -m set ! --match-set ovn40other-node src -m set --match-set ovn40local-pod-ip-nat dst -j RETURN`, " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set ! --match-set ovn40subnets src -m set ! --match-set ovn40other-node src -m set --match-set ovn40subnets-nat dst -j RETURN`, " ")},
// NAT if pod/subnet to external address
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set --match-set ovn40local-pod-ip-nat src -m set ! --match-set ovn40subnets dst -j MASQUERADE`, " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set --match-set ovn40subnets-nat src -m set ! --match-set ovn40subnets dst -j MASQUERADE`, " ")},
// masq traffic from hostport/nodeport
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(fmt.Sprintf(`-o ovn0 ! -s %s -j MASQUERADE`, hostIP), " ")},
// Input Accept
{Table: "filter", Chain: "FORWARD", Rule: strings.Split(`-m set --match-set ovn40subnets src -j ACCEPT`, " ")},
{Table: "filter", Chain: "FORWARD", Rule: strings.Split(`-m set --match-set ovn40subnets dst -j ACCEPT`, " ")},
// Forward Accept
{Table: "filter", Chain: "INPUT", Rule: strings.Split(`-m set --match-set ovn40subnets src -j ACCEPT`, " ")},
{Table: "filter", Chain: "INPUT", Rule: strings.Split(`-m set --match-set ovn40subnets dst -j ACCEPT`, " ")},
}
v6Rules = []util.IPTableRule{
// Prevent performing Masquerade on external traffic which arrives from a Node that owns the Pod/Subnet IP
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set ! --match-set ovn60subnets src -m set ! --match-set ovn60other-node src -m set --match-set ovn60local-pod-ip-nat dst -j RETURN`, " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set ! --match-set ovn60subnets src -m set ! --match-set ovn60other-node src -m set --match-set ovn60subnets-nat dst -j RETURN`, " ")},
// NAT if pod/subnet to external address
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set --match-set ovn60local-pod-ip-nat src -m set ! --match-set ovn60subnets dst -j MASQUERADE`, " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set --match-set ovn60subnets-nat src -m set ! --match-set ovn60subnets dst -j MASQUERADE`, " ")},
// masq traffic from hostport/nodeport
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(fmt.Sprintf(`-o ovn0 ! -s %s -j MASQUERADE`, hostIP), " ")},
// Input Accept
{Table: "filter", Chain: "FORWARD", Rule: strings.Split(`-m set --match-set ovn60subnets src -j ACCEPT`, " ")},
{Table: "filter", Chain: "FORWARD", Rule: strings.Split(`-m set --match-set ovn60subnets dst -j ACCEPT`, " ")},
// Forward Accept
{Table: "filter", Chain: "INPUT", Rule: strings.Split(`-m set --match-set ovn60subnets src -j ACCEPT`, " ")},
{Table: "filter", Chain: "INPUT", Rule: strings.Split(`-m set --match-set ovn60subnets dst -j ACCEPT`, " ")},
}
)
protocols := make([]string, 2)
if c.protocol == kubeovnv1.ProtocolDual {
protocols[0] = kubeovnv1.ProtocolIPv4
protocols[1] = kubeovnv1.ProtocolIPv6
} else {
protocols[0] = c.protocol
}
for _, protocol := range protocols {
if c.iptable[protocol] == nil {
continue
}
var iptableRules []util.IPTableRule
if protocol == kubeovnv1.ProtocolIPv4 {
iptableRules = v4Rules
} else {
iptableRules = v6Rules
}
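		// Rules are inserted at position 1 below, so the NAT rules are reversed here to
		// preserve their intended relative ordering in the chain.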
iptableRules[0], iptableRules[1], iptableRules[3], iptableRules[4] =
iptableRules[4], iptableRules[3], iptableRules[1], iptableRules[0]
for _, iptRule := range iptableRules {
if strings.Contains(strings.Join(iptRule.Rule, " "), "ovn0") && protocol != util.CheckProtocol(hostIP) {
klog.V(3).Infof("ignore check iptable rule, protocol %v, hostIP %v", protocol, hostIP)
continue
}
exists, err := c.iptable[protocol].Exists(iptRule.Table, iptRule.Chain, iptRule.Rule...)
if err != nil {
klog.Errorf("check iptable rule exist failed, %+v", err)
return err
}
if !exists {
klog.Infof("iptables rules %s not exist, recreate iptables rules", strings.Join(iptRule.Rule, " "))
if err := c.iptable[protocol].Insert(iptRule.Table, iptRule.Chain, 1, iptRule.Rule...); err != nil {
klog.Errorf("insert iptable rule %s failed, %+v", strings.Join(iptRule.Rule, " "), err)
return err
}
}
}
}
return nil
}
func (c *Controller) setGatewayBandwidth() error {
node, err := c.config.KubeClient.CoreV1().Nodes().Get(context.Background(), c.config.NodeName, metav1.GetOptions{})
if err != nil {
klog.Errorf("failed to get node, %v", err)
return err
}
ingress, egress := node.Annotations[util.IngressRateAnnotation], node.Annotations[util.EgressRateAnnotation]
ifaceId := fmt.Sprintf("node-%s", c.config.NodeName)
return ovs.SetInterfaceBandwidth(ifaceId, egress, ingress)
}
func (c *Controller) setICGateway() error {
node, err := c.config.KubeClient.CoreV1().Nodes().Get(context.Background(), c.config.NodeName, metav1.GetOptions{})
if err != nil {
klog.Errorf("failed to get node, %v", err)
return err
}
enable := node.Labels[util.ICGatewayLabel]
if enable == "true" {
icEnabled, err := ovs.Exec(ovs.IfExists, "get", "open", ".", "external_ids:ovn-is-interconn")
if err != nil {
return fmt.Errorf("failed to get if ic enabled, %v", err)
}
if strings.Trim(icEnabled, "\"") != "true" {
if _, err := ovs.Exec("set", "open", ".", "external_ids:ovn-is-interconn=true"); err != nil {
return fmt.Errorf("failed to enable ic gateway, %v", err)
}
output, err := exec.Command("/usr/share/ovn/scripts/ovn-ctl", "restart_controller").CombinedOutput()
if err != nil {
return fmt.Errorf("failed to restart ovn-controller, %v, %q", err, output)
}
}
} else {
if _, err := ovs.Exec("set", "open", ".", "external_ids:ovn-is-interconn=false"); err != nil {
return fmt.Errorf("failed to disable ic gateway, %v", err)
}
}
return nil
}
func (c *Controller) setExGateway() error {
node, err := c.config.KubeClient.CoreV1().Nodes().Get(context.Background(), c.config.NodeName, metav1.GetOptions{})
if err != nil {
klog.Errorf("failed to get node, %v", err)
return err
}
enable := node.Labels[util.ExGatewayLabel]
if enable == "true" {
cm, err := c.config.KubeClient.CoreV1().ConfigMaps("kube-system").Get(context.Background(), util.ExternalGatewayConfig, metav1.GetOptions{})
if err != nil {
klog.Errorf("failed to get ovn-external-gw-config, %v", err)
return err
}
link, err := netlink.LinkByName(cm.Data["external-gw-nic"])
if err != nil {
klog.Errorf("failed to get nic %s, %v", cm.Data["external-gw-nic"], err)
return err
}
if err := netlink.LinkSetUp(link); err != nil {
klog.Errorf("failed to set gateway nic %s up, %v", cm.Data["external-gw-nic"], err)
return err
}
if _, err := ovs.Exec(
ovs.MayExist, "add-br", "br-external", "--",
ovs.MayExist, "add-port", "br-external", cm.Data["external-gw-nic"],
); err != nil {
return fmt.Errorf("failed to enable external gateway, %v", err)
}
output, err := ovs.Exec(ovs.IfExists, "get", "open", ".", "external-ids:ovn-bridge-mappings")
if err != nil {
return fmt.Errorf("failed to get external-ids, %v", err)
}
bridgeMappings := "external:br-external"
if output != "" && !util.IsStringIn(bridgeMappings, strings.Split(output, ",")) {
bridgeMappings = fmt.Sprintf("%s,%s", output, bridgeMappings)
}
output, err = ovs.Exec("set", "open", ".", fmt.Sprintf("external-ids:ovn-bridge-mappings=%s", bridgeMappings))
if err != nil {
return fmt.Errorf("failed to set bridg-mappings, %v: %q", err, output)
}
} else {
if _, err := ovs.Exec(
ovs.IfExists, "del-br", "br-external"); err != nil {
return fmt.Errorf("failed to disable external gateway, %v", err)
}
}
return nil
}
func (c *Controller) getLocalPodIPsNeedNAT(protocol string) ([]string, error) {
var localPodIPs []string
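	// KUBE_NODE_NAME identifies the node this daemon runs on; only pods scheduled here are considered.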
hostname := os.Getenv("KUBE_NODE_NAME")
allPods, err := c.podsLister.List(labels.Everything())
if err != nil {
klog.Errorf("list pods failed, %+v", err)
return nil, err
}
for _, pod := range allPods {
if pod.Spec.HostNetwork ||
pod.Status.PodIP == "" ||
pod.Annotations[util.LogicalSwitchAnnotation] == "" {
continue
}
subnet, err := c.subnetsLister.Get(pod.Annotations[util.LogicalSwitchAnnotation])
if err != nil {
klog.Errorf("get subnet %s failed, %+v", pod.Annotations[util.LogicalSwitchAnnotation], err)
continue
}
nsGWType := subnet.Spec.GatewayType
nsGWNat := subnet.Spec.NatOutgoing
if nsGWNat &&
subnet.Spec.Vpc == util.DefaultVpc &&
nsGWType == kubeovnv1.GWDistributedType &&
pod.Spec.NodeName == hostname {
if len(pod.Status.PodIPs) == 2 && protocol == kubeovnv1.ProtocolIPv6 {
localPodIPs = append(localPodIPs, pod.Status.PodIPs[1].IP)
} else if util.CheckProtocol(pod.Status.PodIP) == protocol {
localPodIPs = append(localPodIPs, pod.Status.PodIP)
}
}
}
klog.V(3).Infof("local pod ips %v", localPodIPs)
return localPodIPs, nil
}
func (c *Controller) getSubnetsNeedNAT(protocol string) ([]string, error) {
var subnetsNeedNat []string
subnets, err := c.subnetsLister.List(labels.Everything())
if err != nil {
klog.Errorf("list subnets failed, %v", err)
return nil, err
}
for _, subnet := range subnets {
if subnet.Spec.Vpc == util.DefaultVpc &&
subnet.Spec.GatewayType == kubeovnv1.GWCentralizedType &&
util.GatewayContains(subnet.Spec.GatewayNode, c.config.NodeName) &&
(subnet.Spec.Protocol == kubeovnv1.ProtocolDual || subnet.Spec.Protocol == protocol) &&
subnet.Spec.NatOutgoing {
cidrBlock := getCidrByProtocol(subnet.Spec.CIDRBlock, protocol)
subnetsNeedNat = append(subnetsNeedNat, cidrBlock)
}
}
return subnetsNeedNat, nil
}
func (c *Controller) getSubnetsCIDR(protocol string) ([]string, error) {
subnets, err := c.subnetsLister.List(labels.Everything())
if err != nil {
klog.Error("failed to list subnets")
return nil, err
}
ret := make([]string, 0, len(subnets)+3)
if c.config.NodeLocalDNSIP != "" && net.ParseIP(c.config.NodeLocalDNSIP) != nil && util.CheckProtocol(c.config.NodeLocalDNSIP) == protocol {
ret = append(ret, c.config.NodeLocalDNSIP)
}
for _, sip := range strings.Split(c.config.ServiceClusterIPRange, ",") {
if util.CheckProtocol(sip) == protocol {
ret = append(ret, sip)
}
}
for _, subnet := range subnets {
if subnet.Spec.Vpc == util.DefaultVpc {
cidrBlock := getCidrByProtocol(subnet.Spec.CIDRBlock, protocol)
ret = append(ret, cidrBlock)
}
}
return ret, nil
}
func (c *Controller) getOtherNodes(protocol string) ([]string, error) {
nodes, err := c.nodesLister.List(labels.Everything())
if err != nil {
klog.Error("failed to list nodes")
return nil, err
}
ret := make([]string, 0, len(nodes)-1)
for _, node := range nodes {
if node.Name == c.config.NodeName {
continue
}
for _, addr := range node.Status.Addresses {
if addr.Type == v1.NodeInternalIP {
if util.CheckProtocol(addr.Address) == protocol {
ret = append(ret, addr.Address)
}
}
}
}
return ret, nil
}
// Generally, the MTU of the pod interface is set to 1400. In special cases, however, a pod
// (e.g. Docker-in-Docker) may bring the docker0 interface, whose MTU is 1500, into the pod.
// The application in the pod then calculates the TCP MSS from docker0's MTU and initiates a
// connection. When the peer responds, the Linux host's kernel sends an ICMP "fragmentation
// needed" message, which the peer does not support, so the communication fails.
func (c *Controller) appendMssRule() {
if c.config.Iface != "" && c.config.MSS > 0 {
rule := fmt.Sprintf("-p tcp --tcp-flags SYN,RST SYN -o %s -j TCPMSS --set-mss %d", c.config.Iface, c.config.MSS)
MssMangleRule := util.IPTableRule{
Table: "mangle",
Chain: "POSTROUTING",
Rule: strings.Split(rule, " "),
}
switch c.protocol {
case kubeovnv1.ProtocolIPv4:
c.updateMssRuleByProtocol(c.protocol, MssMangleRule)
case kubeovnv1.ProtocolIPv6:
c.updateMssRuleByProtocol(c.protocol, MssMangleRule)
case kubeovnv1.ProtocolDual:
c.updateMssRuleByProtocol(kubeovnv1.ProtocolIPv4, MssMangleRule)
c.updateMssRuleByProtocol(kubeovnv1.ProtocolIPv6, MssMangleRule)
}
}
}
func (c *Controller) updateMssRuleByProtocol(protocol string, MssMangleRule util.IPTableRule) {
exists, err := c.iptable[protocol].Exists(MssMangleRule.Table, MssMangleRule.Chain, MssMangleRule.Rule...)
if err != nil {
klog.Errorf("check iptable rule %v failed, %+v", MssMangleRule.Rule, err)
return
}
if !exists {
klog.Infof("iptables rules %s not exist, append iptables rules", strings.Join(MssMangleRule.Rule, " "))
if err := c.iptable[protocol].Append(MssMangleRule.Table, MssMangleRule.Chain, MssMangleRule.Rule...); err != nil {
klog.Errorf("append iptable rule %v failed, %+v", MssMangleRule.Rule, err)
return
}
}
}
func getCidrByProtocol(cidr, protocol string) string {
var cidrStr string
if util.CheckProtocol(cidr) == kubeovnv1.ProtocolDual {
cidrBlocks := strings.Split(cidr, ",")
if protocol == kubeovnv1.ProtocolIPv4 {
cidrStr = cidrBlocks[0]
} else if protocol == kubeovnv1.ProtocolIPv6 {
cidrStr = cidrBlocks[1]
}
} else {
cidrStr = cidr
}
return cidrStr
}
|
[
"\"KUBE_NODE_NAME\""
] |
[] |
[
"KUBE_NODE_NAME"
] |
[]
|
["KUBE_NODE_NAME"]
|
go
| 1 | 0 | |
modules/pymol/plugins/installation.py
|
'''
PyMOL Plugins Engine, Installation Routines
(c) 2011-2012 Thomas Holder, PyMOL OS Fellow
License: BSD-2-Clause
'''
import os
# supported file types for installation. Do not support pyc and pyo binaries,
# we want text files that can be parsed for metadata.
zip_extensions = ['zip', 'tar.gz']
supported_extensions = ['py'] + zip_extensions
class InstallationCancelled(Exception):
pass
class BadInstallationFile(Exception):
pass
def get_default_user_plugin_path():
'''
User plugin directory defaults to ~/.pymol/startup on Linux and to
    %APPDATA%\\pymol\\startup on Windows.
'''
if 'APPDATA' in os.environ:
return os.path.join(os.environ['APPDATA'], 'pymol', 'startup')
return os.path.expanduser('~/.pymol/startup')
def is_writable(dirname):
'''
Return True if directory is writable.
'''
path = os.path.join(dirname, '__check_writable')
try:
f = open(path, 'wb')
f.close()
os.remove(path)
return True
except (IOError, OSError):
return False
def cmp_version(v1, v2):
'''
Compares two version strings. An empty version string is always considered
smaller than a non-empty version string.
Uses distutils.version.StrictVersion to evaluate non-empty version strings.
'''
if v1 == v2:
return 0
if v1 == '':
return -1
if v2 == '':
return 1
try:
        from distutils.version import StrictVersion as Version
        # cmp() was removed in Python 3; emulate it with comparison operators.
        a, b = Version(v1), Version(v2)
        return (a > b) - (a < b)
except:
print(' Warning: Version parsing failed for', v1, 'and/or', v2)
return 0
def get_name_and_ext(ofile):
'''
Given a filename, return module name and file extension.
Examples:
foo-1.0.py -> ('foo', 'py')
/foo/bar.tar.gz -> ('bar', 'tar.gz')
'''
import re
basename = os.path.basename(ofile)
pattern = r'(\w+).*\.(%s)$' % '|'.join(supported_extensions)
m = re.match(pattern, basename, re.IGNORECASE)
if m is None:
raise BadInstallationFile('Not a valid plugin filename (%s).' % (basename))
return m.group(1), m.group(2).lower()
def check_valid_name(name):
'''
Check if "name" is a valid python module name.
'''
if '.' in name:
raise BadInstallationFile('name must not contain dots (%s).' % repr(name))
def extract_zipfile(ofile, ext):
'''
Extract zip file to temporary directory
'''
if ext == 'zip':
import zipfile
zf = zipfile.ZipFile(ofile)
else:
import tarfile
zf = tarfile.open(ofile)
zf.namelist = zf.getnames
# make sure pathnames are not absolute
cwd = os.getcwd()
namelist = zf.namelist()
for f in namelist:
f = os.path.normpath(f)
if not os.path.abspath(f).startswith(cwd):
raise BadInstallationFile('ZIP file contains absolute path names')
# analyse structure
namedict = dict()
for f in namelist:
x = namedict
for part in f.split('/'): # even on windows this is a forward slash (not os.sep)
if part != '':
x = x.setdefault(part, {})
if len(namedict) == 0:
raise BadInstallationFile('Archive empty.')
# case 1: zip/<name>/__init__.py
names = [(name,)
for name in namedict
if '__init__.py' in namedict[name]]
if len(names) == 0:
# case 2: zip/<name>-<version>/<name>/__init__.py
names = [(pname, name)
for (pname, pdict) in namedict.items()
for name in pdict
if '__init__.py' in pdict[name]]
if len(names) == 0:
raise BadInstallationFile('Missing __init__.py')
if len(names) > 1:
# filter out "tests" directory
names = [n for n in names if n[-1] != 'tests']
if len(names) > 1:
raise BadInstallationFile('Archive must contain a single package.')
check_valid_name(names[0][-1])
# extract
import tempfile
tempdir = tempfile.mkdtemp()
zf.extractall(tempdir)
return tempdir, names[0]
def get_plugdir(parent=None):
'''
Get plugin directory, ask user if startup path has more than one entry
'''
from . import get_startup_path
plugdirs = get_startup_path()
if len(plugdirs) == 1:
return plugdirs[0]
import sys
if 'pmg_qt.mimic_tk' in sys.modules:
from pymol.Qt import QtWidgets
value, result = QtWidgets.QInputDialog.getItem(None,
'Select plugin directory',
'In which directory should the plugin be installed?', plugdirs)
return value if result else ''
dialog_selection = []
def plugdir_callback(result):
if result == 'OK':
dialog_selection[:] = dialog.getcurselection()
dialog.destroy()
import Pmw
dialog = Pmw.SelectionDialog(parent, title='Select plugin directory',
buttons = ('OK', 'Cancel'), defaultbutton='OK',
scrolledlist_labelpos='n',
label_text='In which directory should the plugin be installed?',
scrolledlist_items=plugdirs,
command=plugdir_callback)
dialog.component('scrolledlist').selection_set(0)
# wait for dialog to be closed
dialog.wait_window()
if not dialog_selection:
return ''
return dialog_selection[0]
def installPluginFromFile(ofile, parent=None, plugdir=None):
'''
Install plugin from file.
Takes python (.py) files and archives which contain a python module.
'''
import shutil
from . import startup, PluginInfo
from . import get_startup_path, set_startup_path, pref_get
from .legacysupport import tkMessageBox, get_tk_focused
if parent is None:
parent = get_tk_focused()
showinfo = tkMessageBox.showinfo
askyesno = tkMessageBox.askyesno
plugdirs = get_startup_path()
if not plugdir:
plugdir = get_plugdir()
if not plugdir:
return
if not is_writable(plugdir):
user_plugdir = get_default_user_plugin_path()
if not askyesno('Warning',
'Unable to write to the plugin directory.\n'
'Should a user plugin directory be created at\n' + user_plugdir + '?',
parent=parent):
showinfo('Error', 'Installation aborted', parent=parent)
return
if not os.path.exists(user_plugdir):
try:
os.makedirs(user_plugdir)
except OSError:
showinfo('Error', 'Could not create user plugin directory', parent=parent)
return
plugdir = user_plugdir
if plugdir not in plugdirs:
set_startup_path([plugdir] + get_startup_path(True))
def remove_if_exists(pathname, ask):
'''
Remove existing plugin files before reinstallation. Will not remove
files if installing into different startup directory.
'''
if not os.path.exists(pathname):
return
is_dir = os.path.isdir(pathname)
if ask:
if is_dir:
msg = 'Directory "%s" already exists, overwrite?' % pathname
else:
msg = 'File "%s" already exists, overwrite?' % pathname
if not tkMessageBox.askyesno('Confirm', msg, parent=parent):
raise InstallationCancelled('will not overwrite "%s"' % pathname)
if is_dir:
shutil.rmtree(pathname)
else:
os.remove(pathname)
def check_reinstall(name, pathname):
from . import plugins
if name not in plugins:
remove_if_exists(pathname, True)
return
v_installed = plugins[name].get_version()
v_new = PluginInfo(name, ofile).get_version()
c = cmp_version(v_new, v_installed)
if c > 0:
msg = 'An older version (%s) of this plugin is already installed. Install version %s now?' % (v_installed, v_new)
elif c == 0:
msg = 'Plugin already installed. Reinstall?'
else:
msg = 'A newer version (%s) of this plugin is already installed. Install anyway?' % (v_installed)
if not tkMessageBox.askokcancel('Confirm', msg, parent=parent):
raise InstallationCancelled
remove_if_exists(pathname, False)
name = "unknown" # fallback for error message
temppathnames = []
try:
name, ext = get_name_and_ext(ofile)
if ext in zip_extensions:
# import archive
tempdir, dirnames = extract_zipfile(ofile, ext)
temppathnames.append((tempdir, 1))
# install
name = dirnames[-1]
odir = os.path.join(tempdir, *dirnames)
ofile = os.path.join(odir, '__init__.py')
mod_dir = os.path.join(plugdir, name)
check_reinstall(name, mod_dir)
check_valid_name(name)
shutil.copytree(odir, mod_dir)
mod_file = os.path.join(mod_dir, '__init__.py')
elif name == '__init__':
# import directory
odir = os.path.dirname(ofile)
name = os.path.basename(odir)
mod_dir = os.path.join(plugdir, name)
check_reinstall(name, mod_dir)
check_valid_name(name)
shutil.copytree(odir, mod_dir)
mod_file = os.path.join(mod_dir, '__init__.py')
elif ext == 'py':
# import python file
mod_file = os.path.join(plugdir, name + '.py')
check_reinstall(name, mod_file)
check_valid_name(name)
shutil.copy(ofile, mod_file)
else:
raise UserWarning('this should never happen')
except InstallationCancelled:
showinfo('Info', 'Installation cancelled', parent=parent)
return
except Exception as e:
if pref_get('verbose', False):
import traceback
traceback.print_exc()
msg = 'Unable to install plugin "{}".\n{}'.format(name, e)
showinfo('Error', msg, parent=parent)
return
finally:
for (pathname, is_dir) in temppathnames:
if is_dir:
shutil.rmtree(pathname)
else:
os.remove(pathname)
prefix = startup.__name__
info = PluginInfo(name, mod_file, prefix + '.' + name)
if info.load(force=1):
showinfo('Success', 'Plugin "%s" has been installed.' % name, parent=parent)
else:
showinfo('Error', 'Plugin "%s" has been installed but initialization failed.' % name, parent=parent)
if info.get_citation_required():
if askyesno('Citation Required', 'This plugin requires citation. Show information now?'
'\n\n(You can always get this information from the Plugin Manager, click the "Info" button there)',
parent=parent):
from .managergui import plugin_info_dialog
plugin_info_dialog(parent, info)
# vi:expandtab:smarttab:sw=4
 | environment: [] | variablearg: [] | constarg: ["APPDATA"] | variableargjson: [] | constargjson: ["APPDATA"] | lang: python | constargcount: 1 | variableargcount: 0 |
docs/source/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, NVIDIA CORPORATION.
#
# pygdf documentation build configuration file, created by
# sphinx-quickstart on Wed May 3 10:59:22 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'numpydoc',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
]
ipython_mplbackend = 'str'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'cusignal'
copyright = '2019-2020, NVIDIA'
author = 'NVIDIA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.12'
# The full version, including alpha/beta/rc tags.
release = '0.12.0a'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
# only import and set the theme if we're building docs locally
# otherwise, readthedocs.org uses their theme by default,
# so no need to specify it
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cusignaldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cusignal.tex', 'cusignal Documentation',
'Continuum Analytics', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cusignal', 'cusignal Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cusignal', 'cusignal Documentation',
author, 'cusignal', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# Config numpydoc
numpydoc_show_inherited_class_members = False
numpydoc_class_members_toctree = False
def setup(app):
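    # register the custom stylesheet; on newer Sphinx (>= 1.8) the equivalent call is app.add_css_file('params.css')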
app.add_stylesheet('params.css')
 | environment: [] | variablearg: [] | constarg: ["READTHEDOCS"] | variableargjson: [] | constargjson: ["READTHEDOCS"] | lang: python | constargcount: 1 | variableargcount: 0 |
src/aat/java/uk/gov/hmcts/ccd/datastore/befta/DataStoreTestAutomationAdapter.java
|
package uk.gov.hmcts.ccd.datastore.befta;
import io.cucumber.java.Before;
import io.cucumber.java.Scenario;
import org.junit.AssumptionViolatedException;
import uk.gov.hmcts.befta.BeftaTestDataLoader;
import uk.gov.hmcts.befta.DefaultTestAutomationAdapter;
import uk.gov.hmcts.befta.dse.ccd.DataLoaderToDefinitionStore;
import uk.gov.hmcts.befta.exception.FunctionalTestException;
import uk.gov.hmcts.befta.player.BackEndFunctionalTestScenarioContext;
import uk.gov.hmcts.befta.util.BeftaUtils;
import uk.gov.hmcts.befta.util.EnvironmentVariableUtils;
import uk.gov.hmcts.befta.util.ReflectionUtils;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import static java.util.Optional.ofNullable;
import static uk.gov.hmcts.ccd.datastore.util.CaseIdHelper.hypheniseACaseId;
public class DataStoreTestAutomationAdapter extends DefaultTestAutomationAdapter {
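    // Maps a scenario's data-file tag (e.g. "S-123") to a unique string generated once per scenario run.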
private static Map<String, String> uniqueStringsPerTestData = new ConcurrentHashMap<>();
@Before("@elasticsearch")
public void skipElasticSearchTestsIfNotEnabled() {
if (!ofNullable(System.getenv("ELASTIC_SEARCH_FTA_ENABLED")).map(Boolean::valueOf).orElse(false)) {
throw new AssumptionViolatedException("Elastic Search not Enabled");
}
}
@Before
public void createUID(Scenario scenario) {
String tag = getDataFileTag(scenario);
String uid = tag + UUID.randomUUID().toString();
uniqueStringsPerTestData.put(tag,uid);
}
private synchronized String getDataFileTag(Scenario scenario) {
return scenario.getSourceTagNames().stream()
.filter(t -> t.startsWith("@S-"))
.findFirst()
.map(t -> t.substring(1))
.map(Object::toString)
.orElse("error cant find tag");
}
@Override
protected BeftaTestDataLoader buildTestDataLoader() {
return new DataLoaderToDefinitionStore(this,
DataLoaderToDefinitionStore.VALID_CCD_TEST_DEFINITIONS_PATH) {
@Override
protected void createRoleAssignment(String resource, String filename) {
// Do not create role assignments.
BeftaUtils.defaultLog("Will NOT create role assignments!");
}
};
}
@Override
public Object calculateCustomValue(BackEndFunctionalTestScenarioContext scenarioContext, Object key) {
String docAmUrl = EnvironmentVariableUtils.getRequiredVariable("CASE_DOCUMENT_AM_URL");
if (key.toString().startsWith("caseIdAsIntegerFrom")) {
String childContext = key.toString().replace("caseIdAsIntegerFrom_","");
try {
return (long) ReflectionUtils.deepGetFieldInObject(scenarioContext,"childContexts." + childContext
+ ".testData.actualResponse.body.id");
} catch (Exception e) {
throw new FunctionalTestException("Problem getting case id as long", e);
}
} else if (key.toString().startsWith("caseIdAsStringFrom")) {
String childContext = key.toString().replace("caseIdAsStringFrom_","");
try {
long longRef = (long) ReflectionUtils.deepGetFieldInObject(
scenarioContext,"childContexts." + childContext + ".testData.actualResponse.body.id");
return Long.toString(longRef);
} catch (Exception e) {
throw new FunctionalTestException("Problem getting case id as long", e);
}
} else if (key.toString().startsWith("HyphenisedCaseIdFromCaseCreation")) {
String childContext = key.toString().replace("HyphenisedCaseIdFromCaseCreation_","");
try {
long longRef = (long) ReflectionUtils.deepGetFieldInObject(
scenarioContext,"childContexts." + childContext + ".testData.actualResponse.body.id");
String result = hypheniseACaseId(Long.toString(longRef));
return result;
} catch (Exception e) {
throw new FunctionalTestException("Problem getting case id as long", e);
}
} else if (key.toString().startsWith("orgsAssignedUsers")) {
// extract args from key
// 0 - path to context holding organisationIdentifier
// 1 - (Optional) path to context holding previous value to use (otherwise: use 0)
// 2 - (Optional) amount to increment previous value by (otherwise: don't increment)
List<String> args = Arrays.asList(key.toString().replace("orgsAssignedUsers_","").split("\\|"));
String organisationIdentifierContextPath = args.get(0);
String previousValueContextPath = args.size() > 1 ? args.get(1) : null;
int incrementBy = args.size() > 2 ? Integer.parseInt(args.get(2)) : 0;
return calculateOrganisationsAssignedUsersPropertyWithValue(scenarioContext,
organisationIdentifierContextPath,
previousValueContextPath,
incrementBy);
} else if (key.toString().equals("UniqueString")) {
String scenarioTag;
try {
scenarioTag = scenarioContext.getParentContext().getCurrentScenarioTag();
} catch (NullPointerException e) {
scenarioTag = scenarioContext.getCurrentScenarioTag();
}
return uniqueStringsPerTestData.get(scenarioTag);
} else if (key.toString().startsWith("approximately ")) {
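            // accept the actual Content-Length when it is within roughly 10% of the expected size; otherwise return the expected value so the mismatch is visible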
try {
String actualSizeFromHeaderStr = (String) ReflectionUtils.deepGetFieldInObject(scenarioContext,
"testData.actualResponse.headers.Content-Length");
String expectedSizeStr = key.toString().replace("approximately ", "");
int actualSize = Integer.parseInt(actualSizeFromHeaderStr);
int expectedSize = Integer.parseInt(expectedSizeStr);
if (Math.abs(actualSize - expectedSize) < (actualSize * 10 / 100)) {
return actualSizeFromHeaderStr;
}
return expectedSize;
} catch (Exception e) {
throw new FunctionalTestException("Problem checking acceptable response payload: ", e);
}
} else if (key.toString().startsWith("contains ")) {
try {
String actualValueStr = (String) ReflectionUtils.deepGetFieldInObject(scenarioContext,
"testData.actualResponse.body.__plainTextValue__");
String expectedValueStr = key.toString().replace("contains ", "");
if (actualValueStr.contains(expectedValueStr)) {
return actualValueStr;
}
return "expectedValueStr " + expectedValueStr + " not present in response ";
} catch (Exception e) {
throw new FunctionalTestException("Problem checking acceptable response payload: ", e);
}
} else if (key.equals("documentIdInTheResponse")) {
try {
String href = (String) ReflectionUtils
.deepGetFieldInObject(scenarioContext,
"testData.actualResponse.body.documents[0]._links.self.href");
return href.substring(href.length() - 36);
} catch (Exception exception) {
return "Error extracting the Document Id";
}
} else if (key.toString().equalsIgnoreCase("validSelfLink")) {
try {
String self = (String) ReflectionUtils.deepGetFieldInObject(scenarioContext,
"testData.actualResponse.body.documents[0]._links.self.href");
BeftaUtils.defaultLog("Self: " + self);
if (self != null && self.startsWith(docAmUrl + "/cases/documents/")) {
return self;
}
return docAmUrl + "/cases/documents/<a document id>";
} catch (Exception e) {
throw new FunctionalTestException("Couldn't get self link from response field", e);
}
} else if (key.toString().equalsIgnoreCase("validBinaryLink")) {
try {
String binary = (String) ReflectionUtils.deepGetFieldInObject(scenarioContext,
"testData.actualResponse.body.documents[0]._links.binary.href");
BeftaUtils.defaultLog("Binary: " + binary);
if (binary != null && binary.startsWith(docAmUrl + "/cases/documents/") && binary.endsWith("/binary")) {
return binary;
}
return docAmUrl + "/cases/documents/<a document id>/binary";
} catch (Exception e) {
throw new FunctionalTestException("Couldn't get binary link from response field", e);
}
}
return super.calculateCustomValue(scenarioContext, key);
}
private boolean elasticSearchFunctionalTestsEnabled() {
return ofNullable(System.getenv("ELASTIC_SEARCH_ENABLED")).map(Boolean::valueOf).orElse(false);
}
private Map<String, Object> calculateOrganisationsAssignedUsersPropertyWithValue(
BackEndFunctionalTestScenarioContext scenarioContext,
String organisationIdentifierContextPath,
String previousValueContextPath,
int incrementBy) {
String organisationIdentifierFieldPath = organisationIdentifierContextPath
+ ".testData.actualResponse.body.organisationIdentifier";
try {
String organisationIdentifier = ReflectionUtils.deepGetFieldInObject(scenarioContext,
organisationIdentifierFieldPath).toString();
String propertyName = "orgs_assigned_users." + organisationIdentifier;
int value = incrementBy; // default
// if path to previous value supplied : read it
if (previousValueContextPath != null) {
String previousValueFieldPath = previousValueContextPath
+ ".testData.actualResponse.body.supplementary_data."
+ propertyName.replace(".", "\\.");
Object previousValue = ReflectionUtils.deepGetFieldInObject(scenarioContext, previousValueFieldPath);
if (previousValue != null) {
value = Integer.parseInt(previousValue.toString()) + incrementBy; // and increment
}
}
return Collections.singletonMap(propertyName, value);
} catch (Exception e) {
throw new FunctionalTestException("Problem generating 'orgs_assigned_users' supplementary data property.",
e);
}
}
}
 | environment: ["\"ELASTIC_SEARCH_FTA_ENABLED\"", "\"ELASTIC_SEARCH_ENABLED\""] | variablearg: [] | constarg: ["ELASTIC_SEARCH_FTA_ENABLED", "ELASTIC_SEARCH_ENABLED"] | variableargjson: [] | constargjson: ["ELASTIC_SEARCH_FTA_ENABLED", "ELASTIC_SEARCH_ENABLED"] | lang: java | constargcount: 2 | variableargcount: 0 |
test/integration/framework/etcd.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file is forked from k8s.io/kubernetes/test/integration/framework/.
package framework
import (
"context"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"k8s.io/klog"
)
var etcdURL = ""
const installEtcd = `
Cannot find etcd, cannot run integration tests
Please download etcd and ensure it is somewhere in the PATH.
See tools/get-kube-binaries.sh
`
// getEtcdPath returns a path to an etcd executable.
func getEtcdPath() (string, error) {
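	// Prefer the etcd binary from Bazel runfiles (RUNFILES_DIR); fall back to whatever "etcd" is on the PATH.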
bazelPath := filepath.Join(os.Getenv("RUNFILES_DIR"), "com_coreos_etcd/etcd")
p, err := exec.LookPath(bazelPath)
if err == nil {
return p, nil
}
return exec.LookPath("etcd")
}
// getAvailablePort returns a TCP port that is available for binding.
func getAvailablePort() (int, error) {
l, err := net.Listen("tcp", ":0")
if err != nil {
return 0, fmt.Errorf("could not bind to a port: %v", err)
}
// It is possible but unlikely that someone else will bind this port before we
// get a chance to use it.
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
}
// startEtcd executes an etcd instance. The returned function will signal the
// etcd process and wait for it to exit.
func startEtcd() (func(), error) {
etcdPath, err := getEtcdPath()
if err != nil {
fmt.Fprintf(os.Stderr, installEtcd)
return nil, fmt.Errorf("could not find etcd in PATH: %v", err)
}
etcdPort, err := getAvailablePort()
if err != nil {
return nil, fmt.Errorf("could not get a port: %v", err)
}
etcdURL = fmt.Sprintf("http://127.0.0.1:%d", etcdPort)
klog.Infof("starting etcd on %s", etcdURL)
etcdDataDir, err := ioutil.TempDir(os.TempDir(), "integration_test_etcd_data")
if err != nil {
return nil, fmt.Errorf("unable to make temp etcd data dir: %v", err)
}
klog.Infof("storing etcd data in: %v", etcdDataDir)
ctx, cancel := context.WithCancel(context.Background())
cmd := exec.CommandContext(
ctx,
etcdPath,
"--data-dir", etcdDataDir,
"--listen-client-urls", etcdURL,
"--advertise-client-urls", etcdURL,
"--listen-peer-urls", "http://127.0.0.1:0",
)
// Uncomment these to see etcd output in test logs.
//cmd.Stdout = os.Stdout
//cmd.Stderr = os.Stderr
stop := func() {
cancel()
err := cmd.Wait()
klog.Infof("etcd exit status: %v", err)
err = os.RemoveAll(etcdDataDir)
if err != nil {
klog.Warningf("error during etcd cleanup: %v", err)
}
}
if err := cmd.Start(); err != nil {
return nil, fmt.Errorf("failed to run etcd: %v", err)
}
return stop, nil
}
// EtcdURL returns the URL to connect to the etcd instance launched for this
// test environment. This can be shared as long as each component uses a unique
// key prefix.
func EtcdURL() string {
return etcdURL
}
 | environment: ["\"RUNFILES_DIR\""] | variablearg: [] | constarg: ["RUNFILES_DIR"] | variableargjson: [] | constargjson: ["RUNFILES_DIR"] | lang: go | constargcount: 1 | variableargcount: 0 |
go/tracing/init.go
|
package tracing
import (
"fmt"
"io"
"os"
log "github.com/sirupsen/logrus"
opentracing "github.com/opentracing/opentracing-go"
config "github.com/uber/jaeger-client-go/config"
)
var (
Enabled bool = false
Tracer opentracing.Tracer
gCloser io.Closer
)
func init() {
endpoint := os.Getenv("JAEGER_ENDPOINT")
if endpoint == "" {
return
}
InitTracer("di_store")
log.Infof("init tracer with endpoint %s", endpoint)
}
func Close() {
	// guard against Close() being called when tracing was never initialized
	if gCloser != nil {
		gCloser.Close()
	}
}
func InitTracer(service string) (opentracing.Tracer, io.Closer) {
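	// config.FromEnv() builds the Jaeger configuration from the standard JAEGER_* environment variables (endpoint, agent host, sampler, ...)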
envConfig, err := config.FromEnv()
if err != nil {
panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
}
envConfig.ServiceName = service
envConfig.Sampler.Type = "const"
envConfig.Sampler.Param = 1
envConfig.Reporter.LogSpans = true
tracer, closer, err := envConfig.NewTracer(config.Logger(&jaegerLogger{}))
if err != nil {
panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
}
opentracing.SetGlobalTracer(tracer)
Enabled = true
Tracer = tracer
gCloser = closer
return tracer, closer
}
type jaegerLogger struct{}
func (l *jaegerLogger) Error(msg string) {
log.Error(msg)
}
func (l *jaegerLogger) Infof(msg string, args ...interface{}) {
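	// forward jaeger's info-level messages at debug level to keep normal logs quiet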
log.Debugf(msg, args...)
}
 | environment: ["\"JAEGER_ENDPOINT\""] | variablearg: [] | constarg: ["JAEGER_ENDPOINT"] | variableargjson: [] | constargjson: ["JAEGER_ENDPOINT"] | lang: go | constargcount: 1 | variableargcount: 0 |
qa/rpc-tests/rpcbind_test.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# TODO extend this test from the test framework (like all other tests)
import tempfile
import traceback
from test_framework.util import *
from test_framework.netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
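    # get_bind_addrs() reports addresses hex-encoded, so convert the expected addresses the same way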
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
    nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)  # standalone test, single node ('self' is not defined here)
try:
pid = kazugoldd_processes[0].pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
finally:
stop_nodes(nodes)
wait_kazugoldds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
'''
    Start a node with rpcallowip set, and request getinfo
at a non-localhost IP.
'''
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
    nodes = start_nodes(1, tmpdir, [base_args])  # standalone test, single node ('self' is not defined here)
try:
# connect to node through non-loopback interface
url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
node = get_rpc_proxy(url, 1)
node.getinfo()
finally:
node = None # make sure connection will be garbage collected and closed
stop_nodes(nodes)
wait_kazugoldds()
def run_test(tmpdir):
assert(sys.platform.startswith('linux')) # due to OS-specific network stats queries, this test works only on Linux
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
assert(not 'This test requires at least one non-loopback IPv4 interface')
print("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
run_bind_test(tmpdir, None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
try:
run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
assert(not 'Connection not denied by rpcallowip as expected')
except ValueError:
pass
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave kazugoldds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
                      help="Source directory containing kazugoldd/kazugold-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
run_test(options.tmpdir)
success = True
except AssertionError as e:
        print("Assertion failed: " + str(e))
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
wait_kazugoldds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
 | environment: [] | variablearg: [] | constarg: ["PATH"] | variableargjson: [] | constargjson: ["PATH"] | lang: python | constargcount: 1 | variableargcount: 0 |
legacy-tests/src/test/java/dev/morphia/TestBase.java
|
package dev.morphia;
import com.antwerkz.bottlerocket.BottleRocket;
import com.antwerkz.bottlerocket.clusters.MongoCluster;
import com.antwerkz.bottlerocket.clusters.ReplicaSet;
import com.github.zafarkhaja.semver.Version;
import com.mongodb.MongoClientSettings;
import com.mongodb.MongoClientSettings.Builder;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoCursor;
import com.mongodb.client.MongoDatabase;
import dev.morphia.mapping.MappedClass;
import dev.morphia.mapping.Mapper;
import dev.morphia.mapping.MapperOptions;
import dev.morphia.query.DefaultQueryFactory;
import org.apache.commons.io.FileUtils;
import org.bson.Document;
import org.junit.After;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.Random;
import static java.lang.String.format;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
@SuppressWarnings("WeakerAccess")
public abstract class TestBase {
protected static final String TEST_DB_NAME = "morphia_test";
private static final Logger LOG = LoggerFactory.getLogger(TestBase.class);
private static final MapperOptions mapperOptions = MapperOptions.DEFAULT;
private static MongoClient mongoClient;
private final MongoDatabase database;
private final Datastore ds;
protected TestBase() {
this.database = getMongoClient().getDatabase(TEST_DB_NAME);
this.ds = Morphia.createDatastore(getMongoClient(), database.getName());
ds.setQueryFactory(new DefaultQueryFactory());
}
static void startMongo() {
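        // The MONGODB environment variable (e.g. "4.0.12") selects the server version; otherwise BottleRocket's default is used.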
Builder builder = MongoClientSettings.builder();
try {
builder.uuidRepresentation(mapperOptions.getUuidRepresentation());
} catch (Exception ignored) {
// not a 4.0 driver
}
String mongodb = System.getenv("MONGODB");
File mongodbRoot = new File("target/mongo");
int port = new Random().nextInt(20000) + 30000;
try {
FileUtils.deleteDirectory(mongodbRoot);
} catch (IOException e) {
throw new RuntimeException(e.getMessage(), e);
}
Version version = mongodb != null ? Version.valueOf(mongodb) : BottleRocket.DEFAULT_VERSION;
final MongoCluster cluster = ReplicaSet.builder()
                                               .version(version)
.baseDir(mongodbRoot)
.port(port)
.size(version.lessThan(Version.forIntegers(4)) ? 3 : 1)
.build();
cluster.start();
mongoClient = cluster.getClient(builder);
}
public MongoDatabase getDatabase() {
return database;
}
public Datastore getDs() {
return ds;
}
public Mapper getMapper() {
return getDs().getMapper();
}
public MongoClient getMongoClient() {
if (mongoClient == null) {
startMongo();
}
return mongoClient;
}
public boolean isReplicaSet() {
return runIsMaster().get("setName") != null;
}
@Before
public void setUp() {
cleanup();
installSampleData();
}
@After
public void tearDown() {
cleanup();
}
protected void assertDocumentEquals(final Object expected, final Object actual) {
assertDocumentEquals("", expected, actual);
}
protected void checkMinServerVersion(final double version) {
assumeTrue(serverIsAtLeastVersion(version));
}
protected void cleanup() {
MongoDatabase db = getDatabase();
db.listCollectionNames().forEach(s -> {
if (!s.equals("zipcodes") && !s.startsWith("system.")) {
db.getCollection(s).drop();
}
});
}
protected int count(final MongoCursor<?> cursor) {
int count = 0;
while (cursor.hasNext()) {
cursor.next();
count++;
}
return count;
}
protected int count(final Iterator<?> iterator) {
int count = 0;
while (iterator.hasNext()) {
count++;
iterator.next();
}
return count;
}
protected MongoCollection<Document> getDocumentCollection(final Class<?> type) {
return getDatabase().getCollection(getMappedClass(type).getCollectionName());
}
protected List<Document> getIndexInfo(final Class<?> clazz) {
return getMapper().getCollection(clazz).listIndexes().into(new ArrayList<>());
}
protected MappedClass getMappedClass(final Class<?> aClass) {
Mapper mapper = getMapper();
mapper.map(aClass);
return mapper.getMappedClass(aClass);
}
protected double getServerVersion() {
String version = (String) getMongoClient()
.getDatabase("admin")
.runCommand(new Document("serverStatus", 1))
.get("version");
return Double.parseDouble(version.substring(0, 3));
}
/**
* @param version must be a major version, e.g. 1.8, 2,0, 2.2
* @return true if server is at least specified version
*/
protected boolean serverIsAtLeastVersion(final double version) {
return getServerVersion() >= version;
}
protected String toString(final Document document) {
return document.toJson(getMapper().getCodecRegistry().get(Document.class));
}
@SuppressWarnings({"rawtypes", "unchecked"})
private void assertDocumentEquals(final String path, final Object expected, final Object actual) {
assertSameNullity(path, expected, actual);
if (expected == null) {
return;
}
assertSameType(path, expected, actual);
if (expected instanceof Document) {
for (final Entry<String, Object> entry : ((Document) expected).entrySet()) {
final String key = entry.getKey();
Object expectedValue = entry.getValue();
Object actualValue = ((Document) actual).get(key);
assertDocumentEquals(path + "." + key, expectedValue, actualValue);
}
} else if (expected instanceof List) {
List list = (List) expected;
List copy = new ArrayList<>((List) actual);
Object o;
for (int i = 0; i < list.size(); i++) {
o = list.get(i);
boolean found = false;
final Iterator other = copy.iterator();
while (!found && other.hasNext()) {
try {
String newPath = format("%s[%d]", path, i);
assertDocumentEquals(newPath, o, other.next());
other.remove();
found = true;
} catch (AssertionError ignore) {
}
}
if (!found) {
fail(format("mismatch found at %s", path));
}
}
} else {
assertEquals(format("mismatch found at %s:%n%s", path, expected, actual), expected, actual);
}
}
private void assertSameNullity(final String path, final Object expected, final Object actual) {
if (expected == null && actual != null
|| actual == null && expected != null) {
assertEquals(format("mismatch found at %s:%n%s", path, expected, actual), expected, actual);
}
}
private void assertSameType(final String path, final Object expected, final Object actual) {
if (expected instanceof List && actual instanceof List) {
return;
}
if (!expected.getClass().equals(actual.getClass())) {
assertEquals(format("mismatch found at %s:%n%s", path, expected, actual), expected, actual);
}
}
private void download(final URL url, final File file) throws IOException {
LOG.info("Downloading zip data set to " + file);
try (InputStream inputStream = url.openStream(); FileOutputStream outputStream = new FileOutputStream(file)) {
byte[] read = new byte[49152];
int count;
while ((count = inputStream.read(read)) != -1) {
outputStream.write(read, 0, count);
}
}
}
private void installSampleData() {
File file = new File("zips.json");
try {
if (!file.exists()) {
file = new File("target/zips.json");
if (!file.exists()) {
download(new URL("https://media.mongodb.org/zips.json"), file);
}
}
MongoCollection<Document> zips = getDatabase().getCollection("zipcodes");
if (zips.countDocuments() == 0) {
LOG.info("Installing sample data");
MongoCollection<Document> zipcodes = getDatabase().getCollection("zipcodes");
Files.lines(file.toPath())
.forEach(l -> zipcodes.insertOne(Document.parse(l)));
}
} catch (Exception e) {
e.printStackTrace();
}
assumeTrue("Failed to process media files", file.exists());
}
private Document runIsMaster() {
return mongoClient.getDatabase("admin")
.runCommand(new Document("ismaster", 1));
}
}
 | environment: ["\"MONGODB\""] | variablearg: [] | constarg: ["MONGODB"] | variableargjson: [] | constargjson: ["MONGODB"] | lang: java | constargcount: 1 | variableargcount: 0 |
addressdemo/addressdemo/wsgi.py
|
"""
WSGI config for addressdemo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "addressdemo.settings")
application = get_wsgi_application()
 | environment: [] | variablearg: [] | constarg: [] | variableargjson: [] | constargjson: [] | lang: python | constargcount: 0 | variableargcount: 0 |
cb-spider/cloud-control-manager/cloud-driver/drivers/openstack/main/Test_Resources.go
|
package main
import (
"fmt"
cblog "github.com/cloud-barista/cb-log"
osdrv "github.com/cloud-barista/cb-spider/cloud-control-manager/cloud-driver/drivers/openstack"
"github.com/cloud-barista/cb-spider/cloud-control-manager/cloud-driver/drivers/openstack/connect"
osrs "github.com/cloud-barista/cb-spider/cloud-control-manager/cloud-driver/drivers/openstack/resources"
idrv "github.com/cloud-barista/cb-spider/cloud-control-manager/cloud-driver/interfaces"
irs "github.com/cloud-barista/cb-spider/cloud-control-manager/cloud-driver/interfaces/resources"
"github.com/davecgh/go-spew/spew"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
"io/ioutil"
"os"
)
var cblogger *logrus.Logger
func init() {
// cblog is a global variable.
cblogger = cblog.GetLogger("CB-SPIDER")
}
func testImageHandler(config Config) {
resourceHandler, err := getResourceHandler("image")
if err != nil {
panic(err)
}
imageHandler := resourceHandler.(irs.ImageHandler)
cblogger.Info("Test ImageHandler")
cblogger.Info("1. ListImage()")
cblogger.Info("2. GetImage()")
cblogger.Info("3. CreateImage()")
cblogger.Info("4. DeleteImage()")
cblogger.Info("5. Exit")
imageId := irs.IID{
SystemId: "c14a9728-eb03-4813-9e1a-8f57fe62b4fb", // Ubuntu 16.04
}
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListImage() ...")
if list, err := imageHandler.ListImage(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListImage()")
case 2:
cblogger.Info("Start GetImage() ...")
if imageInfo, err := imageHandler.GetImage(imageId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(imageInfo)
}
cblogger.Info("Finish GetImage()")
case 3:
cblogger.Info("Start CreateImage() ...")
reqInfo := irs.ImageReqInfo{
IId: irs.IID{
NameId: config.Openstack.Image.Name,
},
}
image, err := imageHandler.CreateImage(reqInfo)
if err != nil {
cblogger.Error(err)
}
imageId = image.IId
cblogger.Info("Finish CreateImage()")
case 4:
cblogger.Info("Start DeleteImage() ...")
if ok, err := imageHandler.DeleteImage(imageId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteImage()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}
func testVPCHandler(config Config) {
resourceHandler, err := getResourceHandler("vpc")
if err != nil {
cblogger.Error(err)
}
vpcHandler := resourceHandler.(irs.VPCHandler)
cblogger.Info("Test VPCHandler")
cblogger.Info("1. ListVPC()")
cblogger.Info("2. GetVPC()")
cblogger.Info("3. CreateVPC()")
cblogger.Info("4. DeleteVPC()")
cblogger.Info("5. Exit")
vpcId := irs.IID{NameId: "CB-VNet2", SystemId: "fa517cc1-7d4a-4a6f-a0be-ff77761152b5"}
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListVPC() ...")
if list, err := vpcHandler.ListVPC(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListVPC()")
case 2:
cblogger.Info("Start GetVPC() ...")
if vNetInfo, err := vpcHandler.GetVPC(vpcId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNetInfo)
}
cblogger.Info("Finish GetVPC()")
case 3:
cblogger.Info("Start CreateVPC() ...")
reqInfo := irs.VPCReqInfo{
IId: vpcId,
SubnetInfoList: []irs.SubnetInfo{
{
IId: irs.IID{
NameId: vpcId.NameId + "-subnet-1",
},
IPv4_CIDR: "180.0.10.0/24",
},
{
IId: irs.IID{
NameId: vpcId.NameId + "-subnet-2",
},
IPv4_CIDR: "180.0.20.0/24",
},
},
}
vpcInfo, err := vpcHandler.CreateVPC(reqInfo)
if err != nil {
cblogger.Error(err)
}
vpcId = vpcInfo.IId
spew.Dump(vpcInfo)
cblogger.Info("Finish CreateVPC()")
case 4:
cblogger.Info("Start DeleteVPC() ...")
if ok, err := vpcHandler.DeleteVPC(vpcId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteVPC()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}
func testKeyPairHandler(config Config) {
resourceHandler, err := getResourceHandler("keypair")
if err != nil {
cblogger.Error(err)
}
keyPairHandler := resourceHandler.(irs.KeyPairHandler)
cblogger.Info("Test KeyPairHandler")
cblogger.Info("1. ListKey()")
cblogger.Info("2. GetKey()")
cblogger.Info("3. CreateKey()")
cblogger.Info("4. DeleteKey()")
cblogger.Info("5. Exit")
keypairIId := irs.IID{
NameId: "CB-Keypair",
}
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListKey() ...")
if keyPairList, err := keyPairHandler.ListKey(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(keyPairList)
}
cblogger.Info("Finish ListKey()")
case 2:
cblogger.Info("Start GetKey() ...")
if keyPairInfo, err := keyPairHandler.GetKey(keypairIId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(keyPairInfo)
}
cblogger.Info("Finish GetKey()")
case 3:
cblogger.Info("Start CreateKey() ...")
reqInfo := irs.KeyPairReqInfo{
IId: keypairIId,
}
if keyInfo, err := keyPairHandler.CreateKey(reqInfo); err != nil {
cblogger.Error(err)
} else {
keypairIId = keyInfo.IId
spew.Dump(keyInfo)
}
cblogger.Info("Finish CreateKey()")
case 4:
cblogger.Info("Start DeleteKey() ...")
if ok, err := keyPairHandler.DeleteKey(keypairIId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteKey()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}
/*func testPublicIPHanlder(config Config) {
resourceHandler, err := getResourceHandler("publicip")
if err != nil {
cblogger.Error(err)
}
publicIPHandler := resourceHandler.(irs.PublicIPHandler)
cblogger.Info("Test PublicIPHandler")
cblogger.Info("1. ListPublicIP()")
cblogger.Info("2. GetPublicIP()")
cblogger.Info("3. CreatePublicIP()")
cblogger.Info("4. DeletePublicIP()")
cblogger.Info("5. Exit")
var publicIPId string
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListPublicIP() ...")
if publicList, err := publicIPHandler.ListPublicIP(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(publicList)
}
cblogger.Info("Finish ListPublicIP()")
case 2:
cblogger.Info("Start GetPublicIP() ...")
if publicInfo, err := publicIPHandler.GetPublicIP(publicIPId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(publicInfo)
}
cblogger.Info("Finish GetPublicIP()")
case 3:
cblogger.Info("Start CreatePublicIP() ...")
reqInfo := irs.PublicIPReqInfo{}
if publicIP, err := publicIPHandler.CreatePublicIP(reqInfo); err != nil {
cblogger.Error(err)
} else {
publicIPId = publicIP.Name
spew.Dump(publicIP)
}
cblogger.Info("Finish CreatePublicIP()")
case 4:
cblogger.Info("Start DeletePublicIP() ...")
if ok, err := publicIPHandler.DeletePublicIP(publicIPId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeletePublicIP()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}*/
func testSecurityHandler(config Config) {
resourceHandler, err := getResourceHandler("security")
if err != nil {
cblogger.Error(err)
}
securityHandler := resourceHandler.(irs.SecurityHandler)
cblogger.Info("Test SecurityHandler")
cblogger.Info("1. ListSecurity()")
cblogger.Info("2. GetSecurity()")
cblogger.Info("3. CreateSecurity()")
cblogger.Info("4. DeleteSecurity()")
cblogger.Info("5. Exit")
securityGroupIId := irs.IID{
NameId: "CB-SecGroup",
}
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListSecurity() ...")
if securityList, err := securityHandler.ListSecurity(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(securityList)
}
cblogger.Info("Finish ListSecurity()")
case 2:
cblogger.Info("Start GetSecurity() ...")
if secInfo, err := securityHandler.GetSecurity(securityGroupIId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(secInfo)
}
cblogger.Info("Finish GetSecurity()")
case 3:
cblogger.Info("Start CreateSecurity() ...")
reqInfo := irs.SecurityReqInfo{
IId: irs.IID{
NameId: securityGroupIId.NameId,
},
SecurityRules: &[]irs.SecurityRuleInfo{
{
FromPort: "22",
ToPort: "22",
IPProtocol: "TCP",
Direction: "inbound",
},
{
FromPort: "3306",
ToPort: "3306",
IPProtocol: "TCP",
Direction: "outbound",
},
{
IPProtocol: "ICMP",
Direction: "outbound",
},
},
}
if securityInfo, err := securityHandler.CreateSecurity(reqInfo); err != nil {
cblogger.Error(err)
} else {
spew.Dump(securityInfo)
securityGroupIId = securityInfo.IId
}
cblogger.Info("Finish CreateSecurity()")
case 4:
cblogger.Info("Start DeleteSecurity() ...")
if ok, err := securityHandler.DeleteSecurity(securityGroupIId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteSecurity()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}
/*func testVNetworkHandler(config Config) {
resourceHandler, err := getResourceHandler("vnetwork")
if err != nil {
cblogger.Error(err)
}
vNetworkHandler := resourceHandler.(irs.VNetworkHandler)
cblogger.Info("Test VNetworkHandler")
cblogger.Info("1. ListVNetwork()")
cblogger.Info("2. GetVNetwork()")
cblogger.Info("3. CreateVNetwork()")
cblogger.Info("4. DeleteVNetwork()")
cblogger.Info("5. Exit")
vNetWorkName := "CB-VNet"
var vNetworkId string
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListVNetwork() ...")
if list, err := vNetworkHandler.ListVNetwork(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListVNetwork()")
case 2:
cblogger.Info("Start GetVNetwork() ...")
if vNetInfo, err := vNetworkHandler.GetVNetwork(vNetworkId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNetInfo)
}
cblogger.Info("Finish GetVNetwork()")
case 3:
cblogger.Info("Start CreateVNetwork() ...")
reqInfo := irs.VNetworkReqInfo{
Name: vNetWorkName,
}
if vNetworkInfo, err := vNetworkHandler.CreateVNetwork(reqInfo); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNetworkInfo)
vNetworkId = vNetworkInfo.Id
}
cblogger.Info("Finish CreateVNetwork()")
case 4:
cblogger.Info("Start DeleteVNetwork() ...")
if ok, err := vNetworkHandler.DeleteVNetwork(vNetworkId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteVNetwork()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}*/
/*func testVNicHandler(config Config) {
resourceHandler, err := getResourceHandler("vnic")
if err != nil {
cblogger.Error(err)
}
vNicHandler := resourceHandler.(irs.VNicHandler)
cblogger.Info("Test VNicHandler")
cblogger.Info("1. ListVNic()")
cblogger.Info("2. GetVNic()")
cblogger.Info("3. CreateVNic()")
cblogger.Info("4. DeleteVNic()")
cblogger.Info("5. Exit")
vNicName := "CB-VNic"
var vNicId string
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListVNic() ...")
if List, err := vNicHandler.ListVNic(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(List)
}
cblogger.Info("Finish ListVNic()")
case 2:
cblogger.Info("Start GetVNic() ...")
if vNicInfo, err := vNicHandler.GetVNic(vNicId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNicInfo)
}
cblogger.Info("Finish GetVNic()")
case 3:
cblogger.Info("Start CreateVNic() ...")
				//todo : map to a port
reqInfo := irs.VNicReqInfo{
Name: vNicName,
VNetId: "fe284dbf-e9f4-4add-a03f-9249cc30a2ac",
SecurityGroupIds: []string{"34585b5e-5ea8-49b5-b38b-0d395689c994", "6d4085c1-e915-487d-9e83-7a5b64f27237"},
//SubnetId: "fe284dbf-e9f4-4add-a03f-9249cc30a2ac",
}
if vNicInfo, err := vNicHandler.CreateVNic(reqInfo); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNicInfo)
vNicId = vNicInfo.Id
}
cblogger.Info("Finish CreateVNic()")
case 4:
cblogger.Info("Start DeleteVNic() ...")
if ok, err := vNicHandler.DeleteVNic(vNicId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteVNic()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}*/
func testRouterHandler(config Config) {
resourceHandler, err := getResourceHandler("router")
if err != nil {
cblogger.Error(err)
}
routerHandler := resourceHandler.(osrs.OpenStackRouterHandler)
cblogger.Info("Test RouterHandler")
cblogger.Info("1. ListRouter()")
cblogger.Info("2. GetRouter()")
cblogger.Info("3. CreateRouter()")
cblogger.Info("4. DeleteRouter()")
cblogger.Info("5. AddInterface()")
cblogger.Info("6. DeleteInterface()")
cblogger.Info("7. Exit")
var routerId string
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListRouter() ...")
routerHandler.ListRouter()
cblogger.Info("Finish ListRouter()")
case 2:
cblogger.Info("Start GetRouter() ...")
routerHandler.GetRouter(routerId)
cblogger.Info("Finish GetRouter()")
case 3:
cblogger.Info("Start CreateRouter() ...")
reqInfo := osrs.RouterReqInfo{
Name: config.Openstack.Router.Name,
GateWayId: config.Openstack.Router.GateWayId,
AdminStateUp: config.Openstack.Router.AdminStateUp,
}
router, err := routerHandler.CreateRouter(reqInfo)
if err != nil {
cblogger.Error(err)
}
routerId = router.Id
cblogger.Info("Finish CreateRouter()")
case 4:
cblogger.Info("Start DeleteRouter() ...")
routerHandler.DeleteRouter(routerId)
cblogger.Info("Finish DeleteRouter()")
case 5:
cblogger.Info("Start AddInterface() ...")
reqInfo := osrs.InterfaceReqInfo{
SubnetId: config.Openstack.Subnet.Id,
RouterId: routerId,
}
_, err := routerHandler.AddInterface(reqInfo)
if err != nil {
cblogger.Error(err)
}
cblogger.Info("Finish AddInterface()")
case 6:
cblogger.Info("Start DeleteInterface() ...")
_, err := routerHandler.DeleteInterface(routerId, config.Openstack.Subnet.Id)
if err != nil {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteInterface()")
case 7:
cblogger.Info("Exit")
break Loop
}
}
}
}
func testVMSpecHandler(config Config) {
resourceHandler, err := getResourceHandler("vmspec")
if err != nil {
panic(err)
}
vmSpecHandler := resourceHandler.(irs.VMSpecHandler)
cblogger.Info("Test VMSpecHandler")
cblogger.Info("1. ListVMSpec()")
cblogger.Info("2. GetVMSpec()")
cblogger.Info("3. ListOrgVMSpec()")
cblogger.Info("4. GetOrgVMSpec()")
cblogger.Info("5. Exit")
var vmSpecId string
vmSpecId = "babo"
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
region := config.Openstack.Region
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListVMSpec() ...")
if list, err := vmSpecHandler.ListVMSpec(region); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListVMSpec()")
case 2:
cblogger.Info("Start GetVMSpec() ...")
if vmSpecInfo, err := vmSpecHandler.GetVMSpec(region, vmSpecId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vmSpecInfo)
}
cblogger.Info("Finish GetVMSpec()")
case 3:
cblogger.Info("Start ListOrgVMSpec() ...")
if listStr, err := vmSpecHandler.ListOrgVMSpec(region); err != nil {
cblogger.Error(err)
} else {
fmt.Println(listStr)
}
cblogger.Info("Finish ListOrgVMSpec()")
case 4:
cblogger.Info("Start GetOrgVMSpec() ...")
if vmSpecStr, err := vmSpecHandler.GetOrgVMSpec(region, vmSpecId); err != nil {
cblogger.Error(err)
} else {
fmt.Println(vmSpecStr)
}
cblogger.Info("Finish GetOrgVMSpec()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}
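// getResourceHandler builds an OpenStack cloud connection from config.yaml and returns the handler for the requested resource type.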
func getResourceHandler(resourceType string) (interface{}, error) {
var cloudDriver idrv.CloudDriver
cloudDriver = new(osdrv.OpenStackDriver)
config := readConfigFile()
connectionInfo := idrv.ConnectionInfo{
CredentialInfo: idrv.CredentialInfo{
IdentityEndpoint: config.Openstack.IdentityEndpoint,
Username: config.Openstack.Username,
Password: config.Openstack.Password,
DomainName: config.Openstack.DomainName,
ProjectID: config.Openstack.ProjectID,
},
RegionInfo: idrv.RegionInfo{
Region: config.Openstack.Region,
},
}
cloudConnection, _ := cloudDriver.ConnectCloud(connectionInfo)
var resourceHandler interface{}
var err error
switch resourceType {
case "image":
resourceHandler, err = cloudConnection.CreateImageHandler()
case "keypair":
resourceHandler, err = cloudConnection.CreateKeyPairHandler()
//case "publicip":
// resourceHandler, err = cloudConnection.CreatePublicIPHandler()
case "security":
resourceHandler, err = cloudConnection.CreateSecurityHandler()
//case "vnetwork":
// resourceHandler, err = cloudConnection.CreateVNetworkHandler()
case "vpc":
resourceHandler, err = cloudConnection.CreateVPCHandler()
//case "vnic":
// resourceHandler, err = cloudConnection.CreateVNicHandler()
case "router":
osDriver := osdrv.OpenStackDriver{}
cloudConn, err := osDriver.ConnectCloud(connectionInfo)
if err != nil {
cblogger.Error(err)
}
osCloudConn := cloudConn.(*connect.OpenStackCloudConnection)
resourceHandler = osrs.OpenStackRouterHandler{Client: osCloudConn.NetworkClient}
case "vmspec":
resourceHandler, err = cloudConnection.CreateVMSpecHandler()
}
if err != nil {
return nil, err
}
return resourceHandler, nil
}
func showTestHandlerInfo() {
cblogger.Info("==========================================================")
cblogger.Info("[Test ResourceHandler]")
cblogger.Info("1. ImageHandler")
cblogger.Info("2. KeyPairHandler")
//cblogger.Info("3. PublicIPHandler")
cblogger.Info("4. SecurityHandler")
cblogger.Info("5. VPCHandler")
//cblogger.Info("6. VNicHandler")
cblogger.Info("7. RouterHandler")
cblogger.Info("8. VMSpecHandler")
cblogger.Info("9. Exit")
cblogger.Info("==========================================================")
}
func main() {
	showTestHandlerInfo()      // print the ResourceHandler test menu
	config := readConfigFile() // load config.yaml
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
testImageHandler(config)
showTestHandlerInfo()
case 2:
testKeyPairHandler(config)
showTestHandlerInfo()
case 3:
//testPublicIPHanlder(config)
//showTestHandlerInfo()
case 4:
testSecurityHandler(config)
showTestHandlerInfo()
case 5:
//testVNetworkHandler(config)
testVPCHandler(config)
showTestHandlerInfo()
case 6:
//testVNicHandler(config)
//showTestHandlerInfo()
case 7:
testRouterHandler(config)
showTestHandlerInfo()
case 8:
testVMSpecHandler(config)
showTestHandlerInfo()
case 9:
cblogger.Info("Exit Test ResourceHandler Program")
break Loop
}
}
}
}
type Config struct {
Openstack struct {
DomainName string `yaml:"domain_name"`
IdentityEndpoint string `yaml:"identity_endpoint"`
Password string `yaml:"password"`
ProjectID string `yaml:"project_id"`
Username string `yaml:"username"`
Region string `yaml:"region"`
VMName string `yaml:"vm_name"`
ImageId string `yaml:"image_id"`
FlavorId string `yaml:"flavor_id"`
NetworkId string `yaml:"network_id"`
SecurityGroups string `yaml:"security_groups"`
KeypairName string `yaml:"keypair_name"`
ServerId string `yaml:"server_id"`
Image struct {
Name string `yaml:"name"`
} `yaml:"image_info"`
KeyPair struct {
Name string `yaml:"name"`
} `yaml:"keypair_info"`
PublicIP struct {
Name string `yaml:"name"`
} `yaml:"public_info"`
SecurityGroup struct {
Name string `yaml:"name"`
} `yaml:"security_group_info"`
VirtualNetwork struct {
Name string `yaml:"name"`
} `yaml:"vnet_info"`
Subnet struct {
Id string `yaml:"id"`
} `yaml:"subnet_info"`
Router struct {
Name string `yaml:"name"`
GateWayId string `yaml:"gateway_id"`
AdminStateUp bool `yaml:"adminstatup"`
} `yaml:"router_info"`
} `yaml:"openstack"`
}
func readConfigFile() Config {
	// Read the project root path from the environment
rootPath := os.Getenv("CBSPIDER_PATH")
data, err := ioutil.ReadFile(rootPath + "/conf/config.yaml")
if err != nil {
cblogger.Error(err)
}
var config Config
err = yaml.Unmarshal(data, &config)
if err != nil {
cblogger.Error(err)
}
return config
}
|
[
"\"CBSPIDER_PATH\""
] |
[] |
[
"CBSPIDER_PATH"
] |
[]
|
["CBSPIDER_PATH"]
|
go
| 1 | 0 | |
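The CB-Spider test driver above resolves its config.yaml location from the CBSPIDER_PATH environment variable and only logs read/parse failures before returning a possibly empty Config. Below is a minimal, self-contained sketch of the same env-var-plus-YAML pattern with hard failures instead; the exampleConfig struct, the gopkg.in/yaml.v2 import path and the file name are illustrative assumptions, not the driver's actual code.
package main
import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"gopkg.in/yaml.v2" // assumed YAML package; the driver above only shows the "yaml" alias
)
// exampleConfig is a hypothetical, trimmed-down stand-in for the driver's Config struct.
type exampleConfig struct {
	Openstack struct {
		IdentityEndpoint string `yaml:"identity_endpoint"`
		Region           string `yaml:"region"`
	} `yaml:"openstack"`
}
func main() {
	// Resolve the project root from an environment variable, as readConfigFile does with CBSPIDER_PATH.
	rootPath := os.Getenv("CBSPIDER_PATH")
	if rootPath == "" {
		log.Fatal("CBSPIDER_PATH is not set")
	}
	data, err := ioutil.ReadFile(rootPath + "/conf/config.yaml")
	if err != nil {
		log.Fatalf("cannot read config: %v", err)
	}
	var cfg exampleConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		log.Fatalf("cannot parse config: %v", err)
	}
	fmt.Println("region:", cfg.Openstack.Region)
}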
pkg/client/informers/externalversions/factory.go
|
/*
Copyright 2020 The Volcano Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
versioned "volcano.sh/volcano/pkg/client/clientset/versioned"
batch "volcano.sh/volcano/pkg/client/informers/externalversions/batch"
bus "volcano.sh/volcano/pkg/client/informers/externalversions/bus"
internalinterfaces "volcano.sh/volcano/pkg/client/informers/externalversions/internalinterfaces"
scheduling "volcano.sh/volcano/pkg/client/informers/externalversions/scheduling"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client versioned.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
customResync: make(map[reflect.Type]time.Duration),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
go informer.Run(stopCh)
f.startedInformers[informerType] = true
}
}
}
// WaitForCacheSync waits for the caches of all started informers to sync.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
informers := func() map[reflect.Type]cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informers := map[reflect.Type]cache.SharedIndexInformer{}
for informerType, informer := range f.informers {
if f.startedInformers[informerType] {
informers[informerType] = informer
}
}
return informers
}()
res := map[reflect.Type]bool{}
for informType, informer := range informers {
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
return res
}
// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
f.informers[informerType] = informer
return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Batch() batch.Interface
Bus() bus.Interface
Scheduling() scheduling.Interface
}
func (f *sharedInformerFactory) Batch() batch.Interface {
return batch.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Bus() bus.Interface {
return bus.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Scheduling() scheduling.Interface {
return scheduling.New(f, f.namespace, f.tweakListOptions)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
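The generated factory above follows the usual client-go informer-factory lifecycle: construct it with options, request the informers you need (which registers them through InformerFor), then Start and WaitForCacheSync. A rough usage sketch under stated assumptions: the in-cluster config, the volcano-system namespace, the batch.volcano.sh/v1alpha1 jobs GroupVersionResource and the GenericInformer accessor are illustrative guesses taken from standard generated layouts, not from the file above.
package main
import (
	"time"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
	"k8s.io/klog"
	versioned "volcano.sh/volcano/pkg/client/clientset/versioned"
	informers "volcano.sh/volcano/pkg/client/informers/externalversions"
)
func main() {
	cfg, err := rest.InClusterConfig() // assumes the process runs inside a cluster
	if err != nil {
		klog.Fatalf("cannot build rest config: %v", err)
	}
	client, err := versioned.NewForConfig(cfg)
	if err != nil {
		klog.Fatalf("cannot build clientset: %v", err)
	}
	// Construct the factory with a 30s default resync, limited to one namespace.
	factory := informers.NewSharedInformerFactoryWithOptions(
		client,
		30*time.Second,
		informers.WithNamespace("volcano-system"),
	)
	// Requesting an informer registers it with the factory so Start/WaitForCacheSync pick it up.
	gvr := schema.GroupVersionResource{Group: "batch.volcano.sh", Version: "v1alpha1", Resource: "jobs"}
	generic, err := factory.ForResource(gvr)
	if err != nil {
		klog.Fatalf("unknown resource %v: %v", gvr, err)
	}
	informer := generic.Informer()
	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	for typ, synced := range factory.WaitForCacheSync(stopCh) {
		if !synced {
			klog.Fatalf("cache %v failed to sync", typ)
		}
	}
	klog.Infof("caches synced; job store currently holds %d objects", len(informer.GetStore().List()))
}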
demo/ding_calendar/ding_calendar.go
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"time"
dd "github.com/hugozhu/godingtalk"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
calendar "google.golang.org/api/calendar/v3"
)
var c *dd.DingTalkClient
var calendarId string
var staffId string
var timezone string
func init() {
c = dd.NewDingTalkClient(os.Getenv("corpid"), os.Getenv("corpsecret"))
calendarId = os.Getenv("calendar_id")
staffId = os.Getenv("staff_id")
timezone = os.Getenv("timezone")
if timezone == "" {
timezone = "Asia/Shanghai"
}
err := c.RefreshAccessToken()
if err != nil {
panic(err)
}
}
// Retrieves a token, saves the token, then returns the generated client.
func getClient(config *oauth2.Config) *http.Client {
// The file token.json stores the user's access and refresh tokens, and is
// created automatically when the authorization flow completes for the first
// time.
tokFile := "token.json"
tok, err := tokenFromFile(tokFile)
if err != nil {
tok = getTokenFromWeb(config)
saveToken(tokFile, tok)
}
return config.Client(context.Background(), tok)
}
// Requests a token from the web, then returns the retrieved token.
func getTokenFromWeb(config *oauth2.Config) *oauth2.Token {
authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
fmt.Printf("Go to the following link in your browser then type the "+
"authorization code: \n%v\n", authURL)
var authCode string
if _, err := fmt.Scan(&authCode); err != nil {
log.Fatalf("Unable to read authorization code: %v", err)
}
tok, err := config.Exchange(context.TODO(), authCode)
if err != nil {
log.Fatalf("Unable to retrieve token from web: %v", err)
}
return tok
}
// Retrieves a token from a local file.
func tokenFromFile(file string) (*oauth2.Token, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
tok := &oauth2.Token{}
err = json.NewDecoder(f).Decode(tok)
return tok, err
}
// Saves a token to a file path.
func saveToken(path string, token *oauth2.Token) {
fmt.Printf("Saving credential file to: %s\n", path)
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
log.Fatalf("Unable to cache oauth token: %v", err)
}
defer f.Close()
json.NewEncoder(f).Encode(token)
}
func eventsFromFile(file string) (map[string]dd.Event, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
var cache map[string]dd.Event
err = json.NewDecoder(f).Decode(&cache)
return cache, err
}
func saveEvents(path string, cache map[string]dd.Event) {
fmt.Printf("Saving events map to: %s\n", path)
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
log.Fatalf("Unable to cache oauth token: %v", err)
}
defer f.Close()
json.NewEncoder(f).Encode(cache)
}
func main() {
b, err := ioutil.ReadFile("credentials.json")
if err != nil {
log.Fatalf("Unable to read client secret file: %v", err)
}
// If modifying these scopes, delete your previously saved token.json.
config, err := google.ConfigFromJSON(b, calendar.CalendarScope)
if err != nil {
log.Fatalf("Unable to parse client secret file to config: %v", err)
}
client := getClient(config)
srv, err := calendar.New(client)
if err != nil {
log.Fatalf("Unable to retrieve Calendar client: %v", err)
}
from := time.Now()
to := time.Now().AddDate(0, 0, 1)
log.Println(from.Format("2006-01-02") + " " + to.Format("2006-01-02"))
events, _ := c.ListEvents(staffId, from, to)
cache, _ := eventsFromFile("events.json")
if cache == nil {
cache = make(map[string]dd.Event)
}
for _, event := range events {
log.Println(event.Summary)
if _, exist := cache[event.Id]; !exist {
googleEvent := &calendar.Event{
Summary: event.Summary,
Location: event.Location,
Description: event.Description,
Start: &calendar.EventDateTime{
DateTime: event.Start.DateTime,
TimeZone: timezone,
},
End: &calendar.EventDateTime{
DateTime: event.End.DateTime,
TimeZone: timezone,
},
}
cache[event.Id] = event
// log.Println(srv, googleEvent)
googleEvent, err = srv.Events.Insert(calendarId, googleEvent).Do()
if err != nil {
log.Fatalf("Unable to create event. %v\n", err)
}
fmt.Printf("Event created: %s\n", googleEvent.HtmlLink)
}
}
saveEvents("events.json", cache)
}
|
[
"\"corpid\"",
"\"corpsecret\"",
"\"calendar_id\"",
"\"staff_id\"",
"\"timezone\""
] |
[] |
[
"staff_id",
"corpid",
"corpsecret",
"timezone",
"calendar_id"
] |
[]
|
["staff_id", "corpid", "corpsecret", "timezone", "calendar_id"]
|
go
| 5 | 0 | |
subnet/kube/kube.go
|
// Copyright 2016 flannel authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"time"
"github.com/flannel-io/flannel/pkg/ip"
"github.com/flannel-io/flannel/subnet"
"golang.org/x/net/context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
log "k8s.io/klog"
)
var (
ErrUnimplemented = errors.New("unimplemented")
)
const (
resyncPeriod = 5 * time.Minute
nodeControllerSyncTimeout = 10 * time.Minute
)
type kubeSubnetManager struct {
enableIPv4 bool
enableIPv6 bool
annotations annotations
client clientset.Interface
nodeName string
nodeStore listers.NodeLister
nodeController cache.Controller
subnetConf *subnet.Config
events chan subnet.Event
setNodeNetworkUnavailable bool
}
func NewSubnetManager(ctx context.Context, apiUrl, kubeconfig, prefix, netConfPath string, setNodeNetworkUnavailable bool) (subnet.Manager, error) {
var cfg *rest.Config
var err error
	// Try to build the kubernetes config from a master URL or a kubeconfig filepath. If neither masterUrl
	// nor kubeconfigPath is passed in, we fall back to inClusterConfig. If inClusterConfig fails,
	// we fall back to the default config.
cfg, err = clientcmd.BuildConfigFromFlags(apiUrl, kubeconfig)
if err != nil {
return nil, fmt.Errorf("fail to create kubernetes config: %v", err)
}
c, err := clientset.NewForConfig(cfg)
if err != nil {
return nil, fmt.Errorf("unable to initialize client: %v", err)
}
// The kube subnet mgr needs to know the k8s node name that it's running on so it can annotate it.
// If we're running as a pod then the POD_NAME and POD_NAMESPACE will be populated and can be used to find the node
// name. Otherwise, the environment variable NODE_NAME can be passed in.
nodeName := os.Getenv("NODE_NAME")
if nodeName == "" {
podName := os.Getenv("POD_NAME")
podNamespace := os.Getenv("POD_NAMESPACE")
if podName == "" || podNamespace == "" {
return nil, fmt.Errorf("env variables POD_NAME and POD_NAMESPACE must be set")
}
pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error retrieving pod spec for '%s/%s': %v", podNamespace, podName, err)
}
nodeName = pod.Spec.NodeName
if nodeName == "" {
return nil, fmt.Errorf("node name not present in pod spec '%s/%s'", podNamespace, podName)
}
}
netConf, err := ioutil.ReadFile(netConfPath)
if err != nil {
return nil, fmt.Errorf("failed to read net conf: %v", err)
}
sc, err := subnet.ParseConfig(string(netConf))
if err != nil {
return nil, fmt.Errorf("error parsing subnet config: %s", err)
}
sm, err := newKubeSubnetManager(ctx, c, sc, nodeName, prefix)
if err != nil {
return nil, fmt.Errorf("error creating network manager: %s", err)
}
sm.setNodeNetworkUnavailable = setNodeNetworkUnavailable
go sm.Run(context.Background())
log.Infof("Waiting %s for node controller to sync", nodeControllerSyncTimeout)
err = wait.Poll(time.Second, nodeControllerSyncTimeout, func() (bool, error) {
return sm.nodeController.HasSynced(), nil
})
if err != nil {
return nil, fmt.Errorf("error waiting for nodeController to sync state: %v", err)
}
log.Infof("Node controller sync successful")
return sm, nil
}
func newKubeSubnetManager(ctx context.Context, c clientset.Interface, sc *subnet.Config, nodeName, prefix string) (*kubeSubnetManager, error) {
var err error
var ksm kubeSubnetManager
ksm.annotations, err = newAnnotations(prefix)
if err != nil {
return nil, err
}
ksm.enableIPv4 = sc.EnableIPv4
ksm.enableIPv6 = sc.EnableIPv6
ksm.client = c
ksm.nodeName = nodeName
ksm.subnetConf = sc
ksm.events = make(chan subnet.Event, 5000)
indexer, controller := cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return ksm.client.CoreV1().Nodes().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return ksm.client.CoreV1().Nodes().Watch(ctx, options)
},
},
&v1.Node{},
resyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ksm.handleAddLeaseEvent(subnet.EventAdded, obj)
},
UpdateFunc: ksm.handleUpdateLeaseEvent,
DeleteFunc: func(obj interface{}) {
_, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *api.Node here and we need to handle that correctly.
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Infof("Error received unexpected object: %v", obj)
return
}
node, ok := deletedState.Obj.(*v1.Node)
if !ok {
log.Infof("Error deletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
obj = node
}
ksm.handleAddLeaseEvent(subnet.EventRemoved, obj)
},
},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
ksm.nodeController = controller
ksm.nodeStore = listers.NewNodeLister(indexer)
return &ksm, nil
}
func (ksm *kubeSubnetManager) handleAddLeaseEvent(et subnet.EventType, obj interface{}) {
n := obj.(*v1.Node)
if s, ok := n.Annotations[ksm.annotations.SubnetKubeManaged]; !ok || s != "true" {
return
}
l, err := ksm.nodeToLease(*n)
if err != nil {
log.Infof("Error turning node %q to lease: %v", n.ObjectMeta.Name, err)
return
}
ksm.events <- subnet.Event{Type: et, Lease: l}
}
func (ksm *kubeSubnetManager) handleUpdateLeaseEvent(oldObj, newObj interface{}) {
o := oldObj.(*v1.Node)
n := newObj.(*v1.Node)
if s, ok := n.Annotations[ksm.annotations.SubnetKubeManaged]; !ok || s != "true" {
return
}
var changed = true
if ksm.enableIPv4 && o.Annotations[ksm.annotations.BackendData] == n.Annotations[ksm.annotations.BackendData] &&
o.Annotations[ksm.annotations.BackendType] == n.Annotations[ksm.annotations.BackendType] &&
o.Annotations[ksm.annotations.BackendPublicIP] == n.Annotations[ksm.annotations.BackendPublicIP] {
changed = false
}
if ksm.enableIPv6 && o.Annotations[ksm.annotations.BackendV6Data] == n.Annotations[ksm.annotations.BackendV6Data] &&
o.Annotations[ksm.annotations.BackendType] == n.Annotations[ksm.annotations.BackendType] &&
o.Annotations[ksm.annotations.BackendPublicIPv6] == n.Annotations[ksm.annotations.BackendPublicIPv6] {
changed = false
}
if !changed {
return // No change to lease
}
l, err := ksm.nodeToLease(*n)
if err != nil {
log.Infof("Error turning node %q to lease: %v", n.ObjectMeta.Name, err)
return
}
ksm.events <- subnet.Event{Type: subnet.EventAdded, Lease: l}
}
func (ksm *kubeSubnetManager) GetNetworkConfig(ctx context.Context) (*subnet.Config, error) {
return ksm.subnetConf, nil
}
func (ksm *kubeSubnetManager) AcquireLease(ctx context.Context, attrs *subnet.LeaseAttrs) (*subnet.Lease, error) {
cachedNode, err := ksm.nodeStore.Get(ksm.nodeName)
if err != nil {
return nil, err
}
n := cachedNode.DeepCopy()
if n.Spec.PodCIDR == "" {
return nil, fmt.Errorf("node %q pod cidr not assigned", ksm.nodeName)
}
var bd, v6Bd []byte
bd, err = attrs.BackendData.MarshalJSON()
if err != nil {
return nil, err
}
v6Bd, err = attrs.BackendV6Data.MarshalJSON()
if err != nil {
return nil, err
}
var cidr, ipv6Cidr *net.IPNet
_, cidr, err = net.ParseCIDR(n.Spec.PodCIDR)
if err != nil {
return nil, err
}
for _, podCidr := range n.Spec.PodCIDRs {
_, parseCidr, err := net.ParseCIDR(podCidr)
if err != nil {
return nil, err
}
if len(parseCidr.IP) == net.IPv6len {
ipv6Cidr = parseCidr
break
}
}
if (n.Annotations[ksm.annotations.BackendData] != string(bd) ||
n.Annotations[ksm.annotations.BackendType] != attrs.BackendType ||
n.Annotations[ksm.annotations.BackendPublicIP] != attrs.PublicIP.String() ||
n.Annotations[ksm.annotations.SubnetKubeManaged] != "true" ||
(n.Annotations[ksm.annotations.BackendPublicIPOverwrite] != "" && n.Annotations[ksm.annotations.BackendPublicIPOverwrite] != attrs.PublicIP.String())) ||
(attrs.PublicIPv6 != nil &&
(n.Annotations[ksm.annotations.BackendV6Data] != string(v6Bd) ||
n.Annotations[ksm.annotations.BackendType] != attrs.BackendType ||
n.Annotations[ksm.annotations.BackendPublicIPv6] != attrs.PublicIPv6.String() ||
n.Annotations[ksm.annotations.SubnetKubeManaged] != "true" ||
(n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite] != "" && n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite] != attrs.PublicIPv6.String()))) {
n.Annotations[ksm.annotations.BackendType] = attrs.BackendType
		//TODO - only vxlan and host-gw backends support dual stack now.
if (attrs.BackendType == "vxlan" && string(bd) != "null") || (attrs.BackendType == "wireguard" && string(bd) != "null") || attrs.BackendType != "vxlan" {
n.Annotations[ksm.annotations.BackendData] = string(bd)
if n.Annotations[ksm.annotations.BackendPublicIPOverwrite] != "" {
if n.Annotations[ksm.annotations.BackendPublicIP] != n.Annotations[ksm.annotations.BackendPublicIPOverwrite] {
log.Infof("Overriding public ip with '%s' from node annotation '%s'",
n.Annotations[ksm.annotations.BackendPublicIPOverwrite],
ksm.annotations.BackendPublicIPOverwrite)
n.Annotations[ksm.annotations.BackendPublicIP] = n.Annotations[ksm.annotations.BackendPublicIPOverwrite]
}
} else {
n.Annotations[ksm.annotations.BackendPublicIP] = attrs.PublicIP.String()
}
}
if (attrs.BackendType == "vxlan" && string(v6Bd) != "null") || (attrs.BackendType == "wireguard" && string(v6Bd) != "null" && attrs.PublicIPv6 != nil) || (attrs.BackendType == "host-gw" && attrs.PublicIPv6 != nil) {
n.Annotations[ksm.annotations.BackendV6Data] = string(v6Bd)
if n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite] != "" {
if n.Annotations[ksm.annotations.BackendPublicIPv6] != n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite] {
log.Infof("Overriding public ipv6 with '%s' from node annotation '%s'",
n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite],
ksm.annotations.BackendPublicIPv6Overwrite)
n.Annotations[ksm.annotations.BackendPublicIPv6] = n.Annotations[ksm.annotations.BackendPublicIPv6Overwrite]
}
} else {
n.Annotations[ksm.annotations.BackendPublicIPv6] = attrs.PublicIPv6.String()
}
}
n.Annotations[ksm.annotations.SubnetKubeManaged] = "true"
oldData, err := json.Marshal(cachedNode)
if err != nil {
return nil, err
}
newData, err := json.Marshal(n)
if err != nil {
return nil, err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return nil, fmt.Errorf("failed to create patch for node %q: %v", ksm.nodeName, err)
}
_, err = ksm.client.CoreV1().Nodes().Patch(ctx, ksm.nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
if err != nil {
return nil, err
}
}
if ksm.setNodeNetworkUnavailable {
log.Infoln("Setting NodeNetworkUnavailable")
err = ksm.setNodeNetworkUnavailableFalse(ctx)
if err != nil {
log.Errorf("Unable to set NodeNetworkUnavailable to False for %q: %v", ksm.nodeName, err)
}
} else {
log.Infoln("Skip setting NodeNetworkUnavailable")
}
lease := &subnet.Lease{
Attrs: *attrs,
Expiration: time.Now().Add(24 * time.Hour),
}
if cidr != nil && ksm.enableIPv4 {
lease.Subnet = ip.FromIPNet(cidr)
}
if ipv6Cidr != nil {
lease.IPv6Subnet = ip.FromIP6Net(ipv6Cidr)
}
//TODO - only vxlan, host-gw and wireguard backends support dual stack now.
if attrs.BackendType != "vxlan" && attrs.BackendType != "host-gw" && attrs.BackendType != "wireguard" {
lease.EnableIPv4 = true
lease.EnableIPv6 = false
}
return lease, nil
}
func (ksm *kubeSubnetManager) WatchLeases(ctx context.Context, cursor interface{}) (subnet.LeaseWatchResult, error) {
select {
case event := <-ksm.events:
return subnet.LeaseWatchResult{
Events: []subnet.Event{event},
}, nil
case <-ctx.Done():
return subnet.LeaseWatchResult{}, context.Canceled
}
}
func (ksm *kubeSubnetManager) Run(ctx context.Context) {
log.Infof("Starting kube subnet manager")
ksm.nodeController.Run(ctx.Done())
}
func (ksm *kubeSubnetManager) nodeToLease(n v1.Node) (l subnet.Lease, err error) {
if ksm.enableIPv4 {
l.Attrs.PublicIP, err = ip.ParseIP4(n.Annotations[ksm.annotations.BackendPublicIP])
if err != nil {
return l, err
}
l.Attrs.BackendData = json.RawMessage(n.Annotations[ksm.annotations.BackendData])
_, cidr, err := net.ParseCIDR(n.Spec.PodCIDR)
if err != nil {
return l, err
}
l.Subnet = ip.FromIPNet(cidr)
l.EnableIPv4 = ksm.enableIPv4
}
if ksm.enableIPv6 {
l.Attrs.PublicIPv6, err = ip.ParseIP6(n.Annotations[ksm.annotations.BackendPublicIPv6])
if err != nil {
return l, err
}
l.Attrs.BackendV6Data = json.RawMessage(n.Annotations[ksm.annotations.BackendV6Data])
ipv6Cidr := new(net.IPNet)
log.Infof("Creating the node lease for IPv6. This is the n.Spec.PodCIDRs: %v", n.Spec.PodCIDRs)
for _, podCidr := range n.Spec.PodCIDRs {
_, parseCidr, err := net.ParseCIDR(podCidr)
if err != nil {
return l, err
}
if len(parseCidr.IP) == net.IPv6len {
ipv6Cidr = parseCidr
break
}
}
l.IPv6Subnet = ip.FromIP6Net(ipv6Cidr)
l.EnableIPv6 = ksm.enableIPv6
}
l.Attrs.BackendType = n.Annotations[ksm.annotations.BackendType]
return l, nil
}
// RenewLease: unimplemented
func (ksm *kubeSubnetManager) RenewLease(ctx context.Context, lease *subnet.Lease) error {
return ErrUnimplemented
}
func (ksm *kubeSubnetManager) WatchLease(ctx context.Context, sn ip.IP4Net, sn6 ip.IP6Net, cursor interface{}) (subnet.LeaseWatchResult, error) {
return subnet.LeaseWatchResult{}, ErrUnimplemented
}
func (ksm *kubeSubnetManager) Name() string {
return fmt.Sprintf("Kubernetes Subnet Manager - %s", ksm.nodeName)
}
// Set Kubernetes NodeNetworkUnavailable to false when starting
// https://kubernetes.io/docs/concepts/architecture/nodes/#condition
func (ksm *kubeSubnetManager) setNodeNetworkUnavailableFalse(ctx context.Context) error {
condition := v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionFalse,
Reason: "FlannelIsUp",
Message: "Flannel is running on this node",
LastTransitionTime: metav1.Now(),
LastHeartbeatTime: metav1.Now(),
}
raw, err := json.Marshal(&[]v1.NodeCondition{condition})
if err != nil {
return err
}
patch := []byte(fmt.Sprintf(`{"status":{"conditions":%s}}`, raw))
_, err = ksm.client.CoreV1().Nodes().PatchStatus(ctx, ksm.nodeName, patch)
return err
}
|
[
"\"NODE_NAME\"",
"\"POD_NAME\"",
"\"POD_NAMESPACE\""
] |
[] |
[
"POD_NAMESPACE",
"NODE_NAME",
"POD_NAME"
] |
[]
|
["POD_NAMESPACE", "NODE_NAME", "POD_NAME"]
|
go
| 3 | 0 | |
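AcquireLease above updates node annotations with a two-way strategic merge patch instead of a full Update, so only the changed fields are sent and concurrent changes to the rest of the node are left alone. A condensed sketch of just that patch flow follows; the helper name, the single-annotation mutation and the plain Get (rather than the node lister) are illustrative choices, not flannel's actual code.
package example
import (
	"context"
	"encoding/json"
	"fmt"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	clientset "k8s.io/client-go/kubernetes"
)
// patchNodeAnnotation sets a single annotation on a node via a two-way strategic merge patch,
// so only the delta between the original and the modified object reaches the API server.
func patchNodeAnnotation(ctx context.Context, c clientset.Interface, nodeName, key, value string) error {
	node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	modified := node.DeepCopy()
	if modified.Annotations == nil {
		modified.Annotations = map[string]string{}
	}
	modified.Annotations[key] = value
	oldData, err := json.Marshal(node)
	if err != nil {
		return err
	}
	newData, err := json.Marshal(modified)
	if err != nil {
		return err
	}
	patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
	}
	_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	return err
}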
examples/currency/history/historicRatesForACurrency/main.go
|
package main
import (
"fmt"
"os"
"go.m3o.com/currency"
)
// Returns the historic rates for a currency on a given date
func main() {
currencyService := currency.NewCurrencyService(os.Getenv("M3O_API_TOKEN"))
	rsp, err := currencyService.History(&currency.HistoryRequest{
Code: "USD",
Date: "2021-05-30",
})
fmt.Println(rsp, err)
}
|
[
"\"M3O_API_TOKEN\""
] |
[] |
[
"M3O_API_TOKEN"
] |
[]
|
["M3O_API_TOKEN"]
|
go
| 1 | 0 | |
integ/integ_test.go
|
package main
import (
"bytes"
"context"
"encoding/json"
"fmt"
"go/build"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"strconv"
"testing"
"time"
"github.com/insomniacslk/dublin-traceroute/go/dublintraceroute/results"
"github.com/stretchr/testify/require"
)
// WARNING: this test is meant to run on CI, don't run it on your production
// machines or it will mess up your iptables rules.
const NfQueueNum int64 = 101
var (
one = 1
)
var (
// TODO detect this at start-up
needSudo = true
defaultDubTrTimeout = 10 * time.Second
)
// isCI returns true if the environment is a CI like Travis-CI or CircleCI,
// false otherwise.
func isCI() bool {
return os.Getenv("CI") == "true"
}
func setup() {
if isCI() {
cl := []string{"iptables", "-A", "OUTPUT", "-p", "udp", "--dport", "33434:33634", "-d", "8.8.8.8", "-j", "NFQUEUE", "--queue-num", strconv.FormatInt(NfQueueNum, 10)}
if needSudo {
cl = append([]string{"sudo"}, cl...)
}
if err := exec.Command(cl[0], cl[1:]...).Run(); err != nil {
log.Panicf("Failed to run iptables: %v", err)
}
}
}
func shutdown() {
// nothing to do here
}
func TestMain(m *testing.M) {
setup()
code := m.Run()
shutdown()
os.Exit(code)
}
type testConfig struct {
// timeout for dublin-traceroute
timeout time.Duration
// arguments to routest
configFile string
// arguments to dublin-traceroute
paths *int
minTTL *int
maxTTL *int
srcPort *int
dstPort *int
delay *int
target string
}
func runWithConfig(cfg testConfig) ([]byte, []byte, error) {
	// validate the config
if cfg.timeout <= 0 {
cfg.timeout = defaultDubTrTimeout
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// run routest
riCmd := exec.Command("go", "install", "github.com/insomniacslk/dublin-traceroute/go/dublintraceroute/cmd/routest")
riCmd.Stdout, riCmd.Stderr = os.Stdout, os.Stderr
if err := riCmd.Run(); err != nil {
return nil, nil, fmt.Errorf("Cannot install routest: %v", err)
}
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
}
cl := []string{path.Join(gopath, "bin/routest"), "-i", "lo", "-c", cfg.configFile, "-q", strconv.FormatInt(NfQueueNum, 10)}
if needSudo {
cl = append([]string{"sudo"}, cl...)
}
rCmd := exec.CommandContext(ctx, cl[0], cl[1:]...)
rCmd.Stdout, rCmd.Stderr = os.Stdout, os.Stderr
defer func() {
_ = rCmd.Process.Kill()
}()
go func() {
if err := rCmd.Run(); err != nil {
log.Printf("Error returned from command %+v: %v", rCmd, err)
}
}()
// wait a second to give routest time to start
// TODO do something better than waiting
time.Sleep(time.Second)
// run dublin-traceroute
errCh := make(chan error, 1)
traceFile := "trace.json"
cl = []string{}
if needSudo {
cl = append([]string{"sudo"}, cl...)
}
cl = append(cl, "../build/dublin-traceroute")
if cfg.paths != nil {
cl = append(cl, "-n", strconv.FormatInt(int64(*cfg.paths), 10))
}
if cfg.minTTL != nil {
cl = append(cl, "-t", strconv.FormatInt(int64(*cfg.minTTL), 10))
}
if cfg.maxTTL != nil {
cl = append(cl, "-T", strconv.FormatInt(int64(*cfg.maxTTL), 10))
}
if cfg.srcPort != nil {
cl = append(cl, "-s", strconv.FormatInt(int64(*cfg.srcPort), 10))
}
if cfg.dstPort != nil {
cl = append(cl, "-d", strconv.FormatInt(int64(*cfg.dstPort), 10))
}
if cfg.delay != nil {
cl = append(cl, "-D", strconv.FormatInt(int64(*cfg.delay), 10))
}
cl = append(cl, "-o", traceFile)
cl = append(cl, cfg.target)
dCmd := exec.CommandContext(ctx, cl[0], cl[1:]...)
var outWriter bytes.Buffer
dCmd.Stdout, dCmd.Stderr = &outWriter, os.Stderr
go func() {
errCh <- dCmd.Run()
}()
select {
case err := <-errCh:
if err != nil {
return nil, nil, fmt.Errorf("failed call to dublin-traceroute: %v", err)
}
break
case <-time.After(cfg.timeout):
return nil, nil, fmt.Errorf("dublin-traceroute timed out after %s", cfg.timeout)
}
trace, err := ioutil.ReadFile(traceFile)
if err != nil {
return nil, nil, fmt.Errorf("Cannot read trace file %s: %v", traceFile, err)
}
return outWriter.Bytes(), trace, nil
}
func requireEqualResults(t *testing.T, got, want *results.Results) {
for wantK, wantV := range want.Flows {
require.Contains(t, got.Flows, wantK)
gotV := got.Flows[wantK]
require.Equal(t, len(wantV), len(gotV))
for idx := 0; idx < len(wantV); idx++ {
wantReply, gotReply := wantV[idx], gotV[idx]
// skip FlowHash, Name, NatID
require.Equal(t, wantReply.IsLast, gotReply.IsLast)
// accept 20 msec of difference
require.InDelta(t, wantReply.RttUsec, gotReply.RttUsec, 20000.)
// match Sent packet, ignoring Timestamp, IP.SrcIP
require.NotNil(t, gotReply.Sent, "Sent is nil")
require.NotNil(t, gotReply.Sent.IP, "Sent.IP not nil")
require.Equal(t, wantReply.Sent.IP.DstIP, gotReply.Sent.IP.DstIP, "Sent.IP.DstIP")
require.Equal(t, wantReply.Sent.IP.ID, gotReply.Sent.IP.ID, "Sent.IP.ID")
require.Equal(t, wantReply.Sent.IP.TTL, gotReply.Sent.IP.TTL, "Sent.IP.TTL")
require.Equal(t, wantReply.Sent.UDP, gotReply.Sent.UDP, "Sent.UDP")
// ICMP should be nil
require.Nil(t, gotReply.Sent.ICMP, "Sent.ICMP not nil")
// match Received packet, ignoring Timestamp, IP.DstIP, IP.ID,
// IP.TTL
require.NotNil(t, gotReply.Received, "Received is nil")
require.NotNil(t, gotReply.Received.IP, "Received.IP is nil")
require.Equal(t, wantReply.Received.IP.SrcIP, gotReply.Received.IP.SrcIP, "Received.IP.SrcIP")
// UDP should be nil
require.Equal(t, wantReply.Received.UDP, gotReply.Received.UDP, "Received.UDP")
require.Nil(t, gotReply.Received.UDP, "Received.UDP is not nil")
require.Equal(t, wantReply.Received.ICMP, gotReply.Received.ICMP, "Received.ICMP")
require.Equal(t, wantReply.ZeroTTLForwardingBug, gotReply.ZeroTTLForwardingBug, "ZeroTTLForwardingBug")
}
}
}
func TestGoogleDNSOnePath(t *testing.T) {
wantJSON, err := ioutil.ReadFile("test_data/want_8.8.8.8_one_path.json")
require.NoError(t, err)
c := testConfig{
configFile: "test_data/config_8.8.8.8_one_path.json",
paths: &one,
target: "8.8.8.8",
}
_, gotJSON, err := runWithConfig(c)
require.NoError(t, err)
var want, got results.Results
err = json.Unmarshal(wantJSON, &want)
require.NoError(t, err)
err = json.Unmarshal(gotJSON, &got)
require.NoError(t, err)
require.Equal(t, len(want.Flows), len(got.Flows))
requireEqualResults(t, &got, &want)
}
|
[
"\"CI\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH",
"CI"
] |
[]
|
["GOPATH", "CI"]
|
go
| 2 | 0 | |
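runWithConfig above bounds dublin-traceroute's runtime by running the command in a goroutine and racing its error channel against time.After, with the context cancel killing the child on timeout. A stripped-down, self-contained sketch of that pattern; the echo command and the 5-second deadline are placeholders.
package main
import (
	"context"
	"fmt"
	"os/exec"
	"time"
)
// runWithTimeout runs a command and gives up after the supplied deadline; the deferred
// cancel kills the child process when the timeout branch wins the select.
func runWithTimeout(timeout time.Duration, name string, args ...string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cmd := exec.CommandContext(ctx, name, args...)
	errCh := make(chan error, 1)
	go func() { errCh <- cmd.Run() }()
	select {
	case err := <-errCh:
		return err // finished (successfully or not) before the deadline
	case <-time.After(timeout):
		return fmt.Errorf("%s timed out after %s", name, timeout)
	}
}
func main() {
	if err := runWithTimeout(5*time.Second, "echo", "hello"); err != nil {
		fmt.Println("error:", err)
	}
}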
vendor/github.com/rkt/rkt/tests/rkt_app_sandbox_test.go
|
// Copyright 2016 The rkt Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Disabled on kvm due to https://github.com/rkt/rkt/issues/3382
// +build !fly,!kvm
package main
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/coreos/gexpect"
"github.com/rkt/rkt/tests/testutils"
)
// TestAppSandboxOneApp is a basic test for `rkt app` sandbox.
// It starts the sandbox, adds one app, starts it, and removes it.
func TestAppSandboxAddStartRemove(t *testing.T) {
testSandbox(t, func(ctx *testutils.RktRunCtx, child *gexpect.ExpectSubprocess, podUUID string) {
actionTimeout := 30 * time.Second
imageName := "coreos.com/rkt-inspect/hello"
appName := "hello-app"
msg := "HelloFromAppInSandbox"
aciHello := patchTestACI("rkt-inspect-hello.aci", "--name="+imageName, "--exec=/inspect --print-msg="+msg)
defer os.Remove(aciHello)
combinedOutput(t, ctx.ExecCmd("fetch", "--insecure-options=image", aciHello))
combinedOutput(t, ctx.ExecCmd("app", "add", "--debug", podUUID, imageName, "--name="+appName))
combinedOutput(t, ctx.ExecCmd("app", "start", "--debug", podUUID, "--app="+appName))
if err := expectTimeoutWithOutput(child, msg, actionTimeout); err != nil {
t.Fatalf("Expected %q but not found: %v", msg, err)
}
combinedOutput(t, ctx.ExecCmd("app", "rm", "--debug", podUUID, "--app="+appName))
out := combinedOutput(t, ctx.ExecCmd("app", "list", "--no-legend", podUUID))
if out != "\n" {
t.Errorf("unexpected output %q", out)
return
}
})
}
// TestAppSandboxMultipleApps tests multiple apps in a sandbox:
// one that exits successfully, one that exits with an error, and one that keeps running.
func TestAppSandboxMultipleApps(t *testing.T) {
testSandbox(t, func(ctx *testutils.RktRunCtx, child *gexpect.ExpectSubprocess, podUUID string) {
actionTimeout := 30 * time.Second
type app struct {
name, image, exec, aci string
}
apps := []app{
{
name: "winner",
image: "coreos.com/rkt-inspect/success",
exec: "/inspect -print-msg=SUCCESS",
aci: "rkt-inspect-success.aci",
},
{
name: "loser",
image: "coreos.com/rkt-inspect/fail",
exec: "/inspect -print-msg=FAILED -exit-code=12",
aci: "rkt-inspect-loser.aci",
},
{
name: "sleeper",
image: "coreos.com/rkt-inspect/sleep",
exec: "/inspect -print-msg=SLEEP -sleep=120",
aci: "rkt-inspect-sleep.aci",
},
}
// create, fetch, add, and start all apps in the sandbox
for _, app := range apps {
aci := patchTestACI(app.aci, "--name="+app.image, "--exec="+app.exec)
defer os.Remove(aci)
combinedOutput(t, ctx.ExecCmd("fetch", "--insecure-options=image", aci))
combinedOutput(t, ctx.ExecCmd("app", "add", "--debug", podUUID, app.image, "--name="+app.name))
combinedOutput(t, ctx.ExecCmd("app", "start", "--debug", podUUID, "--app="+app.name))
}
// check for app output messages
for _, msg := range []string{
"SUCCESS",
"FAILED",
"SLEEP",
} {
if err := expectTimeoutWithOutput(child, msg, actionTimeout); err != nil {
t.Fatalf("Expected %q but not found: %v", msg, err)
}
}
// total retry timeout: 10s
r := retry{
n: 20,
t: 500 * time.Millisecond,
}
// assert `rkt app list` for the apps
if err := r.Retry(func() error {
got := combinedOutput(t, ctx.ExecCmd("app", "list", "--no-legend", podUUID))
if strings.Contains(got, "winner\texited") &&
strings.Contains(got, "loser\texited") &&
strings.Contains(got, "sleeper\trunning") {
return nil
}
return fmt.Errorf("unexpected result, got %q", got)
}); err != nil {
t.Error(err)
return
}
// assert `rkt app status` for the apps
for _, app := range []struct {
name string
checkExitCode bool
exitCode int
state string
}{
{
name: "winner",
checkExitCode: true,
exitCode: 0,
state: "exited",
},
{
name: "loser",
checkExitCode: true,
exitCode: 12,
state: "exited",
},
{
name: "sleeper",
state: "running",
},
} {
if err := r.Retry(func() error {
got := combinedOutput(t, ctx.ExecCmd("app", "status", podUUID, "--app="+app.name))
ok := true
if app.checkExitCode {
ok = ok && strings.Contains(got, "exit_code="+strconv.Itoa(app.exitCode))
}
ok = ok && strings.Contains(got, "state="+app.state)
if !ok {
return fmt.Errorf("unexpected result, got %q", got)
}
return nil
}); err != nil {
t.Error(err)
return
}
}
// remove all apps
for _, app := range apps {
combinedOutput(t, ctx.ExecCmd("app", "rm", "--debug", podUUID, "--app="+app.name))
}
// assert empty `rkt app list`, no need for retrying,
// as after removal no leftovers are expected to be present
got := combinedOutput(t, ctx.ExecCmd("app", "list", "--no-legend", podUUID))
if got != "\n" {
t.Errorf("unexpected result, got %q", got)
return
}
})
}
// TestAppSandboxRestart tests multiple apps in a sandbox and restarts one of them.
func TestAppSandboxRestart(t *testing.T) {
testSandbox(t, func(ctx *testutils.RktRunCtx, child *gexpect.ExpectSubprocess, podUUID string) {
type app struct {
name, image, exec, aci string
}
apps := []app{
{
name: "app1",
image: "coreos.com/rkt-inspect/app1",
exec: "/inspect -sleep=120",
aci: "rkt-inspect-app1.aci",
},
{
name: "app2",
image: "coreos.com/rkt-inspect/app1",
exec: "/inspect -sleep=120",
aci: "rkt-inspect-app1.aci",
},
}
// create, fetch, add, and start all apps in the sandbox
for _, app := range apps {
aci := patchTestACI(app.aci, "--name="+app.image, "--exec="+app.exec)
defer os.Remove(aci)
combinedOutput(t, ctx.ExecCmd("fetch", "--insecure-options=image", aci))
combinedOutput(t, ctx.ExecCmd("app", "add", "--debug", podUUID, app.image, "--name="+app.name))
combinedOutput(t, ctx.ExecCmd("app", "start", "--debug", podUUID, "--app="+app.name))
}
// total retry timeout: 10s
r := retry{
n: 20,
t: 500 * time.Millisecond,
}
// assert `rkt app list` for the apps
if err := r.Retry(func() error {
got := combinedOutput(t, ctx.ExecCmd("app", "list", "--no-legend", podUUID))
if strings.Contains(got, "app1\trunning") &&
strings.Contains(got, "app2\trunning") {
return nil
}
return fmt.Errorf("unexpected result, got %q", got)
}); err != nil {
t.Error(err)
return
}
assertStatus := func(name, status string) error {
return r.Retry(func() error {
got := combinedOutput(t, ctx.ExecCmd("app", "status", podUUID, "--app="+name))
if !strings.Contains(got, status) {
return fmt.Errorf("unexpected result, got %q", got)
}
return nil
})
}
// assert `rkt app status` for the apps
for _, app := range apps {
if err := assertStatus(app.name, "state=running"); err != nil {
t.Error(err)
return
}
}
// stop app1
combinedOutput(t, ctx.ExecCmd("app", "stop", podUUID, "--app=app1"))
// assert `rkt app status` for the apps
for _, app := range []struct {
name string
status string
}{
{
name: "app1",
status: "state=exited",
},
{
name: "app2",
status: "state=running",
},
} {
if err := assertStatus(app.name, app.status); err != nil {
t.Error(err)
return
}
}
// assert `rkt app list` for the apps
if err := r.Retry(func() error {
got := combinedOutput(t, ctx.ExecCmd("app", "list", "--no-legend", podUUID))
if strings.Contains(got, "app1\texited") &&
strings.Contains(got, "app2\trunning") {
return nil
}
return fmt.Errorf("unexpected result, got %q", got)
}); err != nil {
t.Error(err)
return
}
// start app1
combinedOutput(t, ctx.ExecCmd("app", "start", podUUID, "--app=app1"))
// assert `rkt app status` for the apps
for _, app := range []struct {
name string
status string
}{
{
name: "app1",
status: "state=running",
},
{
name: "app2",
status: "state=running",
},
} {
if err := assertStatus(app.name, app.status); err != nil {
t.Error(err)
return
}
}
// assert `rkt app list` for the apps
if err := r.Retry(func() error {
got := combinedOutput(t, ctx.ExecCmd("app", "list", "--no-legend", podUUID))
if strings.Contains(got, "app1\trunning") &&
strings.Contains(got, "app2\trunning") {
return nil
}
return fmt.Errorf("unexpected result, got %q", got)
}); err != nil {
t.Error(err)
return
}
})
}
func TestAppSandboxMount(t *testing.T) {
	// this test has to be skipped on semaphore for now,
	// because it uses an outdated kernel that hinders mount propagation,
	// which makes this test fail.
if os.Getenv("SEMAPHORE") == "true" {
t.Skip("skipped on semaphore")
}
mntSrcDir := mustTempDir("rkt-mount-test-")
defer os.RemoveAll(mntSrcDir)
mntSrcFile := filepath.Join(mntSrcDir, "test")
if err := ioutil.WriteFile(mntSrcFile, []byte("content"), 0666); err != nil {
t.Fatalf("Cannot write file: %v", err)
}
testSandbox(t, func(ctx *testutils.RktRunCtx, child *gexpect.ExpectSubprocess, podUUID string) {
aci := patchTestACI(
"rkt-inspect-mounter.aci",
"--name=coreos.com/rkt-inspect/mounter",
"--exec=/inspect -read-file",
)
defer os.Remove(aci)
combinedOutput(t, ctx.ExecCmd("fetch", "--insecure-options=image", aci))
for _, tt := range []struct {
mntTarget string
expectedFile string
}{
{
mntTarget: "/dir2",
expectedFile: "/dir2/test",
},
{
mntTarget: "/dir1/link_rel_dir2",
expectedFile: "/dir2/test",
},
{
mntTarget: "/dir1/link_abs_dir2",
expectedFile: "/dir2/test",
},
{
mntTarget: "/dir1/link_abs_root/notexists",
expectedFile: "/notexists/test",
},
{
mntTarget: "/../../../../../../../../notexists",
expectedFile: "/notexists/test",
},
{
mntTarget: "../../../../../../../../notexists",
expectedFile: "/notexists/test",
},
} {
combinedOutput(t, ctx.ExecCmd(
"app", "add", "--debug", podUUID,
"coreos.com/rkt-inspect/mounter",
"--name=mounter",
"--environment=FILE="+tt.expectedFile,
"--mnt-volume=name=test,kind=host,source="+mntSrcDir+",target="+tt.mntTarget,
))
combinedOutput(t, ctx.ExecCmd("app", "start", "--debug", podUUID, "--app=mounter"))
if err := expectTimeoutWithOutput(child, "content", 10*time.Second); err != nil {
t.Fatalf("Expected \"content\" but not found: %v", err)
}
combinedOutput(t, ctx.ExecCmd("app", "rm", "--debug", podUUID, "--app=mounter"))
}
})
}
func testSandbox(t *testing.T, testFunc func(*testutils.RktRunCtx, *gexpect.ExpectSubprocess, string)) {
if err := os.Setenv("RKT_EXPERIMENT_APP", "true"); err != nil {
panic(err)
}
defer os.Unsetenv("RKT_EXPERIMENT_APP")
if err := os.Setenv("RKT_EXPERIMENT_ATTACH", "true"); err != nil {
panic(err)
}
defer os.Unsetenv("RKT_EXPERIMENT_ATTACH")
tmpDir := mustTempDir("rkt-test-cri-")
uuidFile := filepath.Join(tmpDir, "uuid")
defer os.RemoveAll(tmpDir)
ctx := testutils.NewRktRunCtx()
defer ctx.Cleanup()
rkt := ctx.Cmd() + " app sandbox --uuid-file-save=" + uuidFile
child := spawnOrFail(t, rkt)
// wait for the sandbox to start
podUUID, err := waitPodReady(ctx, t, uuidFile, 30*time.Second)
if err != nil {
t.Fatal(err)
}
testFunc(ctx, child, podUUID)
// assert that the pod is still running
got := combinedOutput(t, ctx.ExecCmd("status", podUUID))
if !strings.Contains(got, "state=running") {
t.Errorf("unexpected result, got %q", got)
return
}
combinedOutput(t, ctx.ExecCmd("stop", podUUID))
waitOrFail(t, child, 0)
}
|
[
"\"SEMAPHORE\""
] |
[] |
[
"SEMAPHORE"
] |
[]
|
["SEMAPHORE"]
|
go
| 1 | 0 | |
internal/todo/handler/handler.go
|
package handler
import (
"context"
"encoding/json"
"errors"
"net/http"
"os"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/go-chi/cors"
"github.com/go-chi/jwtauth/v5"
mm "github.com/manabie-com/togo/internal/pkg/middleware"
"github.com/manabie-com/togo/internal/todo/domain"
s "github.com/manabie-com/togo/internal/todo/service"
log "github.com/sirupsen/logrus"
)
var tokenAuth *jwtauth.JWTAuth
type TodoRepositoryList struct {
UserRepo domain.UserRepository
TaskRepo domain.TaskRepository
}
func init() {
tokenAuth = jwtauth.New("HS256", []byte(os.Getenv("JWT_KEY")), nil)
}
func NewTodoHandler(todoRepo TodoRepositoryList) http.Handler {
r := chi.NewRouter()
// Http log
logger := log.New()
logger.Formatter = &log.JSONFormatter{
DisableTimestamp: true,
}
r.Use(middleware.RequestID)
r.Use(middleware.RealIP)
r.Use(mm.NewStructuredLogger(logger))
r.Use(middleware.Recoverer)
r.Use(middleware.Compress(5))
r.Use(middleware.Timeout(60 * time.Second))
r.Use(middleware.Heartbeat("/ping"))
r.Use(cors.Handler(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token"},
ExposedHeaders: []string{"Link"},
AllowCredentials: false,
MaxAge: 300,
}))
// Protected routes
r.Group(func(r chi.Router) {
// Seek, verify and validate JWT tokens
r.Use(jwtauth.Verifier(tokenAuth))
r.Use(jwtauth.Authenticator)
taskService := s.NewTaskService(todoRepo.TaskRepo)
taskHandler := NewTaskHandler(taskService)
r.Get("/tasks", taskHandler.ListTask)
r.Post("/tasks", taskHandler.CreateTask)
})
r.Group(func(r chi.Router) {
// Public routes
authService := s.NewAuthService(todoRepo.UserRepo)
authHandler := NewAuthHandler(authService)
r.Post("/login", authHandler.Login)
})
return r
}
type AppHandler struct{}
func (h *AppHandler) getUserIDFromCtx(ctx context.Context) (int, error) {
_, claims, err := jwtauth.FromContext(ctx)
if err != nil {
return 0, err
}
userID, ok := claims["userID"].(float64)
if !ok {
return 0, errors.New("unable to parse user id")
}
return int(userID), nil
}
func (h *AppHandler) responseError(w http.ResponseWriter, statusCode int, msg string) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(statusCode)
json.NewEncoder(w).Encode(map[string]string{
"error": msg,
})
}
|
[
"\"JWT_KEY\""
] |
[] |
[
"JWT_KEY"
] |
[]
|
["JWT_KEY"]
|
go
| 1 | 0 | |
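getUserIDFromCtx above expects a userID claim and reads it back as a float64, which is how JSON numbers decode into interface{}. A small sketch of issuing a compatible token with the same go-chi/jwtauth v5 package; the integer user id and the printed header are illustrative, and Encode's three return values follow jwtauth v5's documented signature rather than anything shown in the handler code above.
package main
import (
	"fmt"
	"log"
	"os"
	"github.com/go-chi/jwtauth/v5"
)
func main() {
	secret := os.Getenv("JWT_KEY") // same variable the handler package reads in init()
	if secret == "" {
		log.Fatal("JWT_KEY is not set")
	}
	tokenAuth := jwtauth.New("HS256", []byte(secret), nil)
	// userID travels as a JSON number, which is why the handler reads it back as float64.
	_, tokenString, err := tokenAuth.Encode(map[string]interface{}{"userID": 42})
	if err != nil {
		log.Fatalf("cannot encode token: %v", err)
	}
	fmt.Println("Authorization: BEARER " + tokenString)
}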
cmd/go/appengine_gomod/main_test.go
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
gcp "github.com/GoogleCloudPlatform/buildpacks/pkg/gcpbuildpack"
"github.com/buildpacks/libcnb"
)
func TestDetect(t *testing.T) {
testCases := []struct {
name string
files map[string]string
env []string
want int
}{
{
name: "go.mod and buildable undefined",
files: map[string]string{
"go.mod": "",
},
env: []string{},
want: 0,
},
{
name: "no go.mod",
files: map[string]string{},
env: []string{},
want: 100,
},
{
name: "buildable defined",
files: map[string]string{
"go.mod": "",
},
env: []string{
"GOOGLE_BUILDABLE=./main",
},
want: 100,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
gcp.TestDetect(t, detectFn, tc.name, tc.files, tc.env, tc.want)
})
}
}
func TestMainPath(t *testing.T) {
testCases := []struct {
name string
stagerFileContents string
gaeMainEnv string
want string
}{
{
name: "no stagerfile and an empty env var",
gaeMainEnv: "",
want: "",
},
{
name: "stagerfile with main directory and an empty env var",
stagerFileContents: "maindir",
gaeMainEnv: "",
want: "maindir",
},
{
name: "no stagerfile and a non-empty env var",
gaeMainEnv: "anothermaindir",
want: "anothermaindir",
},
{
name: "stagerfile with main directory and a non-empty env var",
stagerFileContents: "maindir",
gaeMainEnv: "anothermaindir",
want: "anothermaindir",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
dir, err := ioutil.TempDir("", "TestMainPath-")
if err != nil {
t.Fatalf("Creating temporary directory: %v", err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Fatalf("Unable to remove test directory %q", dir)
}
}()
ctx := gcp.NewContextForTests(libcnb.BuildpackInfo{}, dir)
if tc.stagerFileContents != "" {
if err = ioutil.WriteFile(filepath.Join(dir, "_main-package-path"), []byte(tc.stagerFileContents), 0755); err != nil {
t.Fatalf("Creating file in temporary directory: %v", err)
}
}
oldEnv := os.Getenv("GAE_YAML_MAIN")
if err = os.Setenv("GAE_YAML_MAIN", tc.gaeMainEnv); err != nil {
t.Fatalf("Setting environment variable GAE_YAML_MAIN to %q", tc.gaeMainEnv)
}
defer func() {
if err := os.Setenv("GAE_YAML_MAIN", oldEnv); err != nil {
t.Fatalf("Unable to reset the env var GAE_YAML_MAIN to %q", oldEnv)
}
}()
if got := mainPath(ctx); got != tc.want {
t.Errorf("mainPath() = %q, want %q", got, tc.want)
}
})
}
}
func TestCleanMainPathNoError(t *testing.T) {
testCases := []struct {
str string
want string
}{
{
str: ".",
want: ".",
},
{
str: " . ",
want: ".",
},
{
str: "./dir/..",
want: ".",
},
{
str: "./dir1/dir2/..",
want: "dir1",
},
{
str: "./dir1///dir2",
want: "dir1/dir2",
},
{
str: "dir1///dir2",
want: "dir1/dir2",
},
{
str: "dir1",
want: "dir1",
},
{
str: "dir1/../dir2",
want: "dir2",
},
}
for _, tc := range testCases {
t.Run(tc.str, func(t *testing.T) {
if got, err := cleanMainPath(tc.str); err != nil {
t.Errorf("cleanMainPath(%q) returns error: %v", tc.str, err)
} else if got != tc.want {
t.Errorf("cleanMainPath(%q) = %q, want %q", tc.str, got, tc.want)
}
})
}
}
func TestCleanMainPathWantError(t *testing.T) {
testCases := []string{
"/.",
"/somedir",
"./..",
"../dir1",
"../dir1/dir2",
"dir1/../../dir2",
}
for _, tc := range testCases {
t.Run(tc, func(t *testing.T) {
if got, err := cleanMainPath(tc); err == nil {
t.Errorf("cleanMainPath(%q) = %q, expected error", tc, got)
}
})
}
}
|
[
"\"GAE_YAML_MAIN\""
] |
[] |
[
"GAE_YAML_MAIN"
] |
[]
|
["GAE_YAML_MAIN"]
|
go
| 1 | 0 | |
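TestMainPath above saves GAE_YAML_MAIN, overrides it for the test case, and restores it in a deferred function. A reusable sketch of that save/override/restore pattern using t.Cleanup (Go 1.14+); the helper name and the demo test are illustrative, not part of the buildpack code, and the file would live in a _test.go file.
package example
import (
	"os"
	"testing"
)
// setEnvForTest overrides an environment variable for one test and restores
// the previous state (including "unset") when the test finishes.
func setEnvForTest(t *testing.T, key, value string) {
	t.Helper()
	old, had := os.LookupEnv(key)
	if err := os.Setenv(key, value); err != nil {
		t.Fatalf("setting %s: %v", key, err)
	}
	t.Cleanup(func() {
		if had {
			os.Setenv(key, old)
		} else {
			os.Unsetenv(key)
		}
	})
}
func TestWithOverriddenEnv(t *testing.T) {
	setEnvForTest(t, "GAE_YAML_MAIN", "anothermaindir")
	if got := os.Getenv("GAE_YAML_MAIN"); got != "anothermaindir" {
		t.Errorf("GAE_YAML_MAIN = %q, want %q", got, "anothermaindir")
	}
}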
pkg/kafka/pubsub_test.go
|
package kafka_test
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ThreeDotsLabs/watermill"
"github.com/ThreeDotsLabs/watermill-kafka/v2/pkg/kafka"
"github.com/ThreeDotsLabs/watermill/message"
"github.com/ThreeDotsLabs/watermill/message/subscriber"
"github.com/ThreeDotsLabs/watermill/pubsub/tests"
)
func kafkaBrokers() []string {
brokers := os.Getenv("WATERMILL_TEST_KAFKA_BROKERS")
if brokers != "" {
return strings.Split(brokers, ",")
}
return []string{"localhost:9091", "localhost:9092", "localhost:9093", "localhost:9094", "localhost:9095"}
}
func newPubSub(t *testing.T, marshaler kafka.MarshalerUnmarshaler, consumerGroup string) (*kafka.Publisher, *kafka.Subscriber) {
logger := watermill.NewStdLogger(true, true)
var err error
var publisher *kafka.Publisher
retriesLeft := 5
for {
publisher, err = kafka.NewPublisher(kafka.PublisherConfig{
Brokers: kafkaBrokers(),
Marshaler: marshaler,
}, logger)
if err == nil || retriesLeft == 0 {
break
}
retriesLeft--
fmt.Printf("cannot create kafka Publisher: %s, retrying (%d retries left)", err, retriesLeft)
time.Sleep(time.Second * 2)
}
require.NoError(t, err)
saramaConfig := kafka.DefaultSaramaSubscriberConfig()
saramaConfig.Consumer.Offsets.Initial = sarama.OffsetOldest
saramaConfig.Admin.Timeout = time.Second * 30
saramaConfig.Producer.RequiredAcks = sarama.WaitForAll
saramaConfig.ChannelBufferSize = 10240
saramaConfig.Consumer.Group.Heartbeat.Interval = time.Millisecond * 500
saramaConfig.Consumer.Group.Rebalance.Timeout = time.Second * 3
var subscriber *kafka.Subscriber
retriesLeft = 5
for {
subscriber, err = kafka.NewSubscriber(
kafka.SubscriberConfig{
Brokers: kafkaBrokers(),
Unmarshaler: marshaler,
OverwriteSaramaConfig: saramaConfig,
ConsumerGroup: consumerGroup,
InitializeTopicDetails: &sarama.TopicDetail{
NumPartitions: 8,
ReplicationFactor: 1,
},
},
logger,
)
if err == nil || retriesLeft == 0 {
break
}
retriesLeft--
fmt.Printf("cannot create kafka Subscriber: %s, retrying (%d retries left)", err, retriesLeft)
time.Sleep(time.Second * 2)
}
require.NoError(t, err)
return publisher, subscriber
}
func generatePartitionKey(topic string, msg *message.Message) (string, error) {
return msg.Metadata.Get("partition_key"), nil
}
func createPubSubWithConsumerGroup(t *testing.T, consumerGroup string) (message.Publisher, message.Subscriber) {
return newPubSub(t, kafka.DefaultMarshaler{}, consumerGroup)
}
func createPubSub(t *testing.T) (message.Publisher, message.Subscriber) {
	return createPubSubWithConsumerGroup(t, "test")
}
func createPartitionedPubSub(t *testing.T) (message.Publisher, message.Subscriber) {
return newPubSub(t, kafka.NewWithPartitioningMarshaler(generatePartitionKey), "test")
}
func createNoGroupPubSub(t *testing.T) (message.Publisher, message.Subscriber) {
return newPubSub(t, kafka.DefaultMarshaler{}, "")
}
func TestPublishSubscribe(t *testing.T) {
features := tests.Features{
ConsumerGroups: true,
ExactlyOnceDelivery: false,
GuaranteedOrder: false,
Persistent: true,
}
tests.TestPubSub(
t,
features,
createPubSub,
		createPubSubWithConsumerGroup,
)
}
func TestPublishSubscribe_ordered(t *testing.T) {
if testing.Short() {
t.Skip("skipping long tests")
}
tests.TestPubSub(
t,
tests.Features{
ConsumerGroups: true,
ExactlyOnceDelivery: false,
GuaranteedOrder: true,
Persistent: true,
},
createPartitionedPubSub,
		createPubSubWithConsumerGroup,
)
}
func TestNoGroupSubscriber(t *testing.T) {
if testing.Short() {
t.Skip("skipping long tests")
}
tests.TestPubSub(
t,
tests.Features{
ConsumerGroups: false,
ExactlyOnceDelivery: false,
GuaranteedOrder: false,
Persistent: true,
NewSubscriberReceivesOldMessages: true,
},
createNoGroupPubSub,
nil,
)
}
func TestCtxValues(t *testing.T) {
pub, sub := newPubSub(t, kafka.DefaultMarshaler{}, "")
topicName := "topic_" + watermill.NewUUID()
var messagesToPublish []*message.Message
for i := 0; i < 20; i++ {
id := watermill.NewUUID()
messagesToPublish = append(messagesToPublish, message.NewMessage(id, nil))
}
err := pub.Publish(topicName, messagesToPublish...)
require.NoError(t, err, "cannot publish message")
messages, err := sub.Subscribe(context.Background(), topicName)
require.NoError(t, err)
receivedMessages, all := subscriber.BulkReadWithDeduplication(messages, len(messagesToPublish), time.Second*10)
require.True(t, all)
expectedPartitionsOffsets := map[int32]int64{}
for _, msg := range receivedMessages {
partition, ok := kafka.MessagePartitionFromCtx(msg.Context())
assert.True(t, ok)
messagePartitionOffset, ok := kafka.MessagePartitionOffsetFromCtx(msg.Context())
assert.True(t, ok)
kafkaMsgTimestamp, ok := kafka.MessageTimestampFromCtx(msg.Context())
assert.True(t, ok)
assert.NotZero(t, kafkaMsgTimestamp)
if expectedPartitionsOffsets[partition] <= messagePartitionOffset {
// kafka partition offset is offset of the last message + 1
expectedPartitionsOffsets[partition] = messagePartitionOffset + 1
}
}
assert.NotEmpty(t, expectedPartitionsOffsets)
offsets, err := sub.PartitionOffset(topicName)
require.NoError(t, err)
assert.NotEmpty(t, offsets)
assert.EqualValues(t, expectedPartitionsOffsets, offsets)
require.NoError(t, pub.Close())
}
|
[
"\"WATERMILL_TEST_KAFKA_BROKERS\""
] |
[] |
[
"WATERMILL_TEST_KAFKA_BROKERS"
] |
[]
|
["WATERMILL_TEST_KAFKA_BROKERS"]
|
go
| 1 | 0 | |
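generatePartitionKey above derives the Kafka partition key from a partition_key metadata entry, which is what gives the ordered test its per-key ordering guarantee. A hedged sketch of a publisher using the same partitioning marshaler; the broker address, topic name and payload are placeholders, not values from the test file.
package main
import (
	"log"
	"github.com/ThreeDotsLabs/watermill"
	"github.com/ThreeDotsLabs/watermill-kafka/v2/pkg/kafka"
	"github.com/ThreeDotsLabs/watermill/message"
)
func main() {
	logger := watermill.NewStdLogger(false, false)
	// Messages carrying the same partition_key land on the same partition, preserving their relative order.
	marshaler := kafka.NewWithPartitioningMarshaler(func(topic string, msg *message.Message) (string, error) {
		return msg.Metadata.Get("partition_key"), nil
	})
	publisher, err := kafka.NewPublisher(kafka.PublisherConfig{
		Brokers:   []string{"localhost:9092"},
		Marshaler: marshaler,
	}, logger)
	if err != nil {
		log.Fatalf("cannot create publisher: %v", err)
	}
	defer publisher.Close()
	msg := message.NewMessage(watermill.NewUUID(), []byte(`{"order_id": 1}`))
	msg.Metadata.Set("partition_key", "customer-42")
	if err := publisher.Publish("orders", msg); err != nil {
		log.Fatalf("cannot publish: %v", err)
	}
}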
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
try:
# noinspection PyUnresolvedReferences
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
# This allows easy placement of apps within the interior
# dit_helpdesk directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, "dit_helpdesk"))
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
examples/demo/multistroke/main.py
|
#!/usr/bin/env python
'''
Multistroke Recognition Database Demonstration
==============================================
This application records gestures and attempts to match them. You should
see a black drawing surface with some buttons across the bottom. As you
make a gesture on the drawing surface, the gesture will be added to
the history and a match will be attempted. If you go to the history tab,
name the gesture, and add it to the database, then similar gestures in the
future will be recognized. You can load and save databases of gestures
in .kg files.
This demonstration code spans many files, with this being the primary file.
The information pop-up ('No match') comes from the file helpers.py.
The history pane is managed in the file historymanager.py and described
in the file historymanager.kv. The database pane and storage are managed in
the file gesturedatabase.py and described in the file gesturedatabase.kv.
The general logic of the sliders and buttons is in the file
settings.py and described in settings.kv, but the actual settings pane is
described in the file multistroke.kv and managed from this file.
'''
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.gesturesurface import GestureSurface
from kivy.uix.screenmanager import ScreenManager, Screen, SlideTransition
from kivy.uix.label import Label
from kivy.multistroke import Recognizer
# Local libraries
from historymanager import GestureHistoryManager
from gesturedatabase import GestureDatabase
from settings import MultistrokeSettingsContainer
class MainMenu(GridLayout):
pass
class MultistrokeAppSettings(MultistrokeSettingsContainer):
pass
class MultistrokeApp(App):
def goto_database_screen(self, *l):
self.database.import_gdb()
self.manager.current = 'database'
def handle_gesture_cleanup(self, surface, g, *l):
if hasattr(g, '_result_label'):
surface.remove_widget(g._result_label)
def handle_gesture_discard(self, surface, g, *l):
# Don't bother creating Label if it's not going to be drawn
if surface.draw_timeout == 0:
return
text = '[b]Discarded:[/b] Not enough input'
g._result_label = Label(text=text, markup=True, size_hint=(None, None),
center=(g.bbox['minx'], g.bbox['miny']))
self.surface.add_widget(g._result_label)
def handle_gesture_complete(self, surface, g, *l):
result = self.recognizer.recognize(g.get_vectors())
result._gesture_obj = g
result.bind(on_complete=self.handle_recognize_complete)
def handle_recognize_complete(self, result, *l):
self.history.add_recognizer_result(result)
# Don't bother creating Label if it's not going to be drawn
if self.surface.draw_timeout == 0:
return
best = result.best
if best['name'] is None:
text = '[b]No match[/b]'
else:
text = 'Name: [b]%s[/b]\nScore: [b]%f[/b]\nDistance: [b]%f[/b]' % (
best['name'], best['score'], best['dist'])
g = result._gesture_obj
g._result_label = Label(text=text, markup=True, size_hint=(None, None),
center=(g.bbox['minx'], g.bbox['miny']))
self.surface.add_widget(g._result_label)
def build(self):
# Setting NoTransition breaks the "history" screen! Possibly related
# to some inexplicable rendering bugs on my particular system
self.manager = ScreenManager(transition=SlideTransition(
duration=.15))
self.recognizer = Recognizer()
# Setup the GestureSurface and bindings to our Recognizer
surface = GestureSurface(line_width=2, draw_bbox=True,
use_random_color=True)
surface_screen = Screen(name='surface')
surface_screen.add_widget(surface)
self.manager.add_widget(surface_screen)
surface.bind(on_gesture_discard=self.handle_gesture_discard)
surface.bind(on_gesture_complete=self.handle_gesture_complete)
surface.bind(on_gesture_cleanup=self.handle_gesture_cleanup)
self.surface = surface
# History is the list of gestures drawn on the surface
history = GestureHistoryManager()
history_screen = Screen(name='history')
history_screen.add_widget(history)
self.history = history
self.manager.add_widget(history_screen)
# Database is the list of gesture templates in Recognizer
database = GestureDatabase(recognizer=self.recognizer)
database_screen = Screen(name='database')
database_screen.add_widget(database)
self.database = database
self.manager.add_widget(database_screen)
# Settings screen
app_settings = MultistrokeAppSettings()
ids = app_settings.ids
ids.max_strokes.bind(value=surface.setter('max_strokes'))
ids.temporal_win.bind(value=surface.setter('temporal_window'))
ids.timeout.bind(value=surface.setter('draw_timeout'))
ids.line_width.bind(value=surface.setter('line_width'))
ids.draw_bbox.bind(value=surface.setter('draw_bbox'))
ids.use_random_color.bind(value=surface.setter('use_random_color'))
settings_screen = Screen(name='settings')
settings_screen.add_widget(app_settings)
self.manager.add_widget(settings_screen)
# Wrap in a gridlayout so the main menu is always visible
layout = GridLayout(cols=1)
layout.add_widget(self.manager)
layout.add_widget(MainMenu())
return layout
if __name__ in ('__main__', '__android__'):
MultistrokeApp().run()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
main.go
|
package main
import (
"encoding/json"
"flag"
"fmt"
"net/url"
"os"
"github.com/justone/simpleslack"
)
func main() {
token := flag.String("t", os.Getenv("SLACK_TOKEN"), "slack token")
undo := flag.Bool("u", false, "undo the away")
minutes := flag.String("d", "120", "how long to DND")
emoji := flag.String("e", ":computer:", "emoji to set for status")
message := flag.String("m", "Busy, head's down.", "message to set for status")
flag.Parse()
sc := simpleslack.Client{*token}
if *undo {
errCheck(setStatus(sc, "", ""))
errCheck(setPresence(sc, "auto"))
errCheck(clearDND(sc))
} else {
errCheck(setStatus(sc, *emoji, *message))
errCheck(setPresence(sc, "away"))
errCheck(setDND(sc, *minutes))
}
}
func errCheck(err error) {
if err != nil {
fmt.Println(err)
os.Exit(-1)
}
}
func setStatus(sc simpleslack.Client, emoji, message string) error {
encodedStatus, _ := json.Marshal(map[string]string{
"status_emoji": emoji,
"status_text": message,
})
_, err := sc.Post("users.profile.set", url.Values{"profile": {string(encodedStatus)}})
return err
}
func setPresence(sc simpleslack.Client, presence string) error {
_, err := sc.Post("users.setPresence", url.Values{"presence": {presence}})
return err
}
func setDND(sc simpleslack.Client, minutes string) error {
_, err := sc.Post("dnd.setSnooze", url.Values{"num_minutes": {minutes}})
return err
}
func clearDND(sc simpleslack.Client) error {
_, err := sc.Post("dnd.endSnooze", url.Values{})
return err
}
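// Illustrative usage, not part of the original file; the token is a placeholder:
//
//	SLACK_TOKEN=xoxp-placeholder go run main.go -d 60 -e ":no_entry:" -m "In a meeting"
//	go run main.go -u   // clear the status, presence and DND snooze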
|
[
"\"SLACK_TOKEN\""
] |
[] |
[
"SLACK_TOKEN"
] |
[]
|
["SLACK_TOKEN"]
|
go
| 1 | 0 | |
setup.py
|
# Copyright (C) 2020 Łukasz Langa
from setuptools import setup
import sys
import os
assert sys.version_info >= (3, 6, 0), "black requires Python 3.6+"
from pathlib import Path # noqa E402
CURRENT_DIR = Path(__file__).parent
sys.path.insert(0, str(CURRENT_DIR)) # for setuptools.build_meta
def get_long_description() -> str:
return (
(CURRENT_DIR / "README.md").read_text(encoding="utf8")
+ "\n\n"
+ (CURRENT_DIR / "CHANGES.md").read_text(encoding="utf8")
)
USE_MYPYC = False
# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
if len(sys.argv) > 1 and sys.argv[1] == "--use-mypyc":
sys.argv.pop(1)
USE_MYPYC = True
if os.getenv("BLACK_USE_MYPYC", None) == "1":
USE_MYPYC = True
if USE_MYPYC:
mypyc_targets = [
"src/black/__init__.py",
"src/blib2to3/pytree.py",
"src/blib2to3/pygram.py",
"src/blib2to3/pgen2/parse.py",
"src/blib2to3/pgen2/grammar.py",
"src/blib2to3/pgen2/token.py",
"src/blib2to3/pgen2/driver.py",
"src/blib2to3/pgen2/pgen.py",
]
from mypyc.build import mypycify
opt_level = os.getenv("MYPYC_OPT_LEVEL", "3")
ext_modules = mypycify(mypyc_targets, opt_level=opt_level)
else:
ext_modules = []
setup(
name="tan",
use_scm_version={
"write_to": "src/_black_version.py",
"write_to_template": 'version = "{version}"\n',
},
description="The compromising code formatter.",
long_description=get_long_description(),
long_description_content_type="text/markdown",
keywords="automation formatter yapf autopep8 pyfmt gofmt rustfmt",
author="Łukasz Langa",
author_email="[email protected]",
url="https://github.com/jleclanche/tan",
project_urls={"Changelog": "https://github.com/psf/black/blob/master/CHANGES.md"},
license="MIT",
py_modules=["_black_version"],
ext_modules=ext_modules,
packages=["blackd", "black", "blib2to3", "blib2to3.pgen2", "black_primer"],
package_dir={"": "src"},
package_data={"blib2to3": ["*.txt"], "black": ["py.typed"]},
python_requires=">=3.6",
zip_safe=False,
install_requires=[
"click>=7.1.2",
"appdirs",
"toml>=0.10.1",
"typed-ast>=1.4.0",
"regex>=2020.1.8",
"pathspec>=0.6, <1",
"dataclasses>=0.6; python_version < '3.7'",
"typing_extensions>=3.7.4",
"mypy_extensions>=0.4.3",
],
extras_require={
"d": ["aiohttp>=3.3.2", "aiohttp-cors"],
"colorama": ["colorama>=0.4.3"],
},
test_suite="tests.test_black",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
],
entry_points={
"console_scripts": [
"tan=black:patched_main",
"tand=blackd:patched_main [d]",
"tan-primer=black_primer.cli:main",
]
},
)
|
[] |
[] |
[
"MYPYC_OPT_LEVEL",
"BLACK_USE_MYPYC"
] |
[]
|
["MYPYC_OPT_LEVEL", "BLACK_USE_MYPYC"]
|
python
| 2 | 0 | |
pkg/git/repository_test.go
|
package git
import (
"io/ioutil"
"net/url"
"os"
"path"
"sort"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/rhd-gitops-example/services/test"
)
const testRepository = "https://github.com/bigkevmcd/env-staging.git"
func TestRepoName(t *testing.T) {
nameTests := []struct {
url string
wantName string
wantErr string
}{
{testRepository, "env-staging", ""},
{"https://github.com/bigkevmcd", "", "could not identify repo: https://github.com/bigkevmcd"},
}
for _, tt := range nameTests {
n, err := repoName(tt.url)
if tt.wantName != n {
t.Errorf("repoName(%s) got name %s, want %s", tt.url, n, tt.wantName)
continue
}
if tt.wantErr != "" && err != nil && err.Error() != tt.wantErr {
t.Errorf("repoName(%s) got error %s, want %s", tt.url, err, tt.wantErr)
}
}
}
// If the directory provided doesn't exist e.g. "~/.promotion/cache" then the
// directory is created during the clone process.
func TestCloneCreatesDirectory(t *testing.T) {
tempDir, cleanup := makeTempDir(t)
defer cleanup()
r, err := NewRepository(testRepository, path.Join(tempDir, "path"))
assertNoError(t, err)
err = r.Clone()
assertNoError(t, err)
contents, err := ioutil.ReadFile(path.Join(tempDir, "path", "env-staging/service-a/deployment.txt"))
assertNoError(t, err)
want := "This is the staging version of this file.\n"
if diff := cmp.Diff(want, string(contents)); diff != "" {
t.Fatalf("failed to read file: %s", diff)
}
}
func TestClone(t *testing.T) {
r, cleanup := cloneTestRepository(t)
defer cleanup()
contents, err := ioutil.ReadFile(path.Join(r.LocalPath, "env-staging/service-a/deployment.txt"))
assertNoError(t, err)
want := "This is the staging version of this file.\n"
if diff := cmp.Diff(want, string(contents)); diff != "" {
t.Fatalf("failed to read file: %s", diff)
}
}
func TestCheckout(t *testing.T) {
r, cleanup := cloneTestRepository(t)
defer cleanup()
err := r.Checkout("updated-version")
assertNoError(t, err)
contents, err := ioutil.ReadFile(path.Join(r.LocalPath, "env-staging/service-a/deployment.txt"))
assertNoError(t, err)
want := "This is an updated version of this file.\n"
if diff := cmp.Diff(want, string(contents)); diff != "" {
t.Fatalf("failed to read file: %s", diff)
}
}
func TestWalk(t *testing.T) {
r, cleanup := cloneTestRepository(t)
defer cleanup()
visited := []string{}
err := r.Walk("service-a", func(prefix, name string) error {
visited = append(visited, name)
return nil
})
assertNoError(t, err)
want := []string{"service-a/deployment.txt", "service-a/files/myfile.txt"}
if diff := cmp.Diff(want, visited); diff != "" {
t.Fatalf("failed to read file: %s", diff)
}
}
func TestWriteFile(t *testing.T) {
r, cleanup := cloneTestRepository(t)
defer cleanup()
err := r.WriteFile(strings.NewReader("this is some text"), "service-a/new-file.txt")
assertNoError(t, err)
visited := []string{}
err = r.Walk("service-a", func(prefix, name string) error {
visited = append(visited, name)
return nil
})
assertNoError(t, err)
sort.Strings(visited)
want := []string{"service-a/deployment.txt", "service-a/files/myfile.txt", "service-a/new-file.txt"}
if diff := cmp.Diff(want, visited); diff != "" {
t.Fatalf("failed to read file: %s", diff)
}
}
func TestCopyFile(t *testing.T) {
r, cleanup := cloneTestRepository(t)
defer cleanup()
tmpfile, err := ioutil.TempFile("", "source")
assertNoError(t, err)
defer os.Remove(tmpfile.Name())
content := []byte(`test content`)
_, err = tmpfile.Write(content)
assertNoError(t, err)
err = tmpfile.Close()
assertNoError(t, err)
err = r.CopyFile(tmpfile.Name(), "service-a/copy/copied.txt")
assertNoError(t, err)
visited := []string{}
err = r.Walk("service-a", func(prefix, name string) error {
visited = append(visited, name)
return nil
})
assertNoError(t, err)
sort.Strings(visited)
want := []string{"service-a/copy/copied.txt", "service-a/deployment.txt", "service-a/files/myfile.txt"}
if diff := cmp.Diff(want, visited); diff != "" {
t.Fatalf("failed to read file: %s", diff)
}
}
func TestCopyWithMissingSource(t *testing.T) {
r, cleanup := cloneTestRepository(t)
defer cleanup()
tempDir, cleanup := makeTempDir(t)
defer cleanup()
err := r.CopyFile(path.Join(tempDir, "unknown.txt"), "service-a/copy/copied.txt")
test.AssertErrorMatch(t, "failed to get permissions for existing file.*stat.*no such file or directory", err)
}
func TestStageFiles(t *testing.T) {
r, cleanup := cloneTestRepository(t)
defer cleanup()
err := r.WriteFile(strings.NewReader("this is some text"), "service-a/new-file.txt")
assertNoError(t, err)
err = r.StageFiles("service-a/new-file.txt")
assertNoError(t, err)
out := assertExecGit(t, r.repoPath("service-a"), "status", "--porcelain")
want := "A service-a/new-file.txt\n"
if diff := cmp.Diff(want, string(out)); diff != "" {
t.Fatalf("file status not modified: %s", diff)
}
}
// The output of the git log -n looks like this:
// commit c88ebbcdef14604aed32f20166582be2762348fd (HEAD -> master)
// Author: Git User <[email protected]>
// Date: Wed Mar 18 11:48:20 2020 +0000
//
// testing.
func TestCommit(t *testing.T) {
r, cleanup := cloneTestRepository(t)
defer cleanup()
err := r.WriteFile(strings.NewReader("this is some text"), "service-a/new-file.txt")
assertNoError(t, err)
err = r.StageFiles("service-a/new-file.txt")
assertNoError(t, err)
err = r.Commit("this is a test commit", &Author{Name: "Test User", Email: "[email protected]"})
assertNoError(t, err)
out := strings.Split(string(assertExecGit(t, r.repoPath("service-a"), "log", "-n", "1")), "\n")
want := []string{"Author: Test User <[email protected]>", " this is a test commit"}
if diff := cmp.Diff(want, out, cmpopts.IgnoreSliceElements(func(s string) bool {
return strings.HasPrefix(s, "commit") || strings.HasPrefix(s, "Date:") || s == ""
})); diff != "" {
t.Fatalf("file commit match failed: %s", diff)
}
}
func TestPush(t *testing.T) {
if authToken() == "" {
t.Skip("no auth token to push the branch upstream")
}
r, cleanup := cloneTestRepository(t)
defer cleanup()
err := r.CheckoutAndCreate("my-new-branch")
assertNoError(t, err)
err = r.WriteFile(strings.NewReader("this is some text"), "service-a/new-file.txt")
assertNoError(t, err)
err = r.StageFiles("service-a/new-file.txt")
assertNoError(t, err)
err = r.Commit("this is a test commit", &Author{Name: "Test User", Email: "[email protected]"})
assertNoError(t, err)
err = r.Push("my-new-branch")
assertNoError(t, err)
}
func cloneTestRepository(t *testing.T) (*Repository, func()) {
tempDir, cleanup := makeTempDir(t)
r, err := NewRepository(authenticatedURL(t), tempDir)
assertNoError(t, err)
err = r.Clone()
assertNoError(t, err)
return r, cleanup
}
func authenticatedURL(t *testing.T) string {
t.Helper()
parsed, err := url.Parse(testRepository)
if err != nil {
t.Fatalf("failed to parse git repo url %v: %w", testRepository, err)
}
parsed.User = url.UserPassword("promotion", authToken())
return parsed.String()
}
func makeTempDir(t *testing.T) (string, func()) {
t.Helper()
dir, err := ioutil.TempDir(os.TempDir(), "promote")
assertNoError(t, err)
return dir, func() {
err := os.RemoveAll(dir)
assertNoError(t, err)
}
}
func assertExecGit(t *testing.T, gitPath string, args ...string) []byte {
t.Helper()
out, err := execGit(gitPath, nil, args...)
if err != nil {
t.Fatalf("assertExecGit failed: %s (%s)", err, out)
}
return out
}
func assertNoError(t *testing.T, err error) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
func authToken() string {
return os.Getenv("TEST_GITHUB_TOKEN")
}
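// Illustrative, not part of the original tests: TestPush is skipped unless a
// token is available, e.g.
//
//	TEST_GITHUB_TOKEN=placeholder-token go test ./pkg/git/ -run TestPush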
|
[
"\"TEST_GITHUB_TOKEN\""
] |
[] |
[
"TEST_GITHUB_TOKEN"
] |
[]
|
["TEST_GITHUB_TOKEN"]
|
go
| 1 | 0 | |
server/storage/rethink_realdb_test.go
|
// +build rethinkdb
// Uses a real RethinkDB connection for testing purposes
package storage
import (
"os"
"testing"
"github.com/docker/go-connections/tlsconfig"
"github.com/stretchr/testify/require"
"github.com/theupdateframework/notary/storage/rethinkdb"
"github.com/theupdateframework/notary/tuf/data"
"gopkg.in/dancannon/gorethink.v3"
)
var tlsOpts = tlsconfig.Options{InsecureSkipVerify: true, ExclusiveRootPools: true}
func rethinkSessionSetup(t *testing.T) (*gorethink.Session, string) {
// Get the Rethink connection string from an environment variable
rethinkSource := os.Getenv("DBURL")
require.NotEqual(t, "", rethinkSource)
sess, err := rethinkdb.AdminConnection(tlsOpts, rethinkSource)
require.NoError(t, err)
return sess, rethinkSource
}
func rethinkDBSetup(t *testing.T) (RethinkDB, func()) {
session, _ := rethinkSessionSetup(t)
dbName := "servertestdb"
var cleanup = func() { gorethink.DBDrop(dbName).Exec(session) }
cleanup()
require.NoError(t, rethinkdb.SetupDB(session, dbName, []rethinkdb.Table{
TUFFilesRethinkTable,
ChangeRethinkTable,
}))
return NewRethinkDBStorage(dbName, "", "", session), cleanup
}
func TestRethinkBootstrapSetsUsernamePassword(t *testing.T) {
adminSession, source := rethinkSessionSetup(t)
dbname, username, password := "servertestdb", "testuser", "testpassword"
otherDB, otherUser, otherPass := "otherservertestdb", "otheruser", "otherpassword"
// create a separate user with access to a different DB
require.NoError(t, rethinkdb.SetupDB(adminSession, otherDB, nil))
defer gorethink.DBDrop(otherDB).Exec(adminSession)
require.NoError(t, rethinkdb.CreateAndGrantDBUser(adminSession, otherDB, otherUser, otherPass))
// Bootstrap
s := NewRethinkDBStorage(dbname, username, password, adminSession)
require.NoError(t, s.Bootstrap())
defer gorethink.DBDrop(dbname).Exec(adminSession)
// A user with an invalid password cannot connect to rethink DB at all
_, err := rethinkdb.UserConnection(tlsOpts, source, username, "wrongpass")
require.Error(t, err)
// the other user cannot access rethink, causing health checks to fail
userSession, err := rethinkdb.UserConnection(tlsOpts, source, otherUser, otherPass)
require.NoError(t, err)
s = NewRethinkDBStorage(dbname, otherUser, otherPass, userSession)
_, _, err = s.GetCurrent("gun", data.CanonicalRootRole)
require.Error(t, err)
require.IsType(t, gorethink.RQLRuntimeError{}, err)
require.Error(t, s.CheckHealth())
// our user can access the DB though
userSession, err = rethinkdb.UserConnection(tlsOpts, source, username, password)
require.NoError(t, err)
s = NewRethinkDBStorage(dbname, username, password, userSession)
_, _, err = s.GetCurrent("gun", data.CanonicalRootRole)
require.Error(t, err)
require.IsType(t, ErrNotFound{}, err)
require.NoError(t, s.CheckHealth())
}
func TestRethinkCheckHealth(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t)
defer cleanup()
// sanity check - all tables present - health check passes
require.NoError(t, dbStore.CheckHealth())
// if the DB is unreachable, health check fails
require.NoError(t, dbStore.sess.Close())
require.Error(t, dbStore.CheckHealth())
// if the connection is reopened, health check succeeds
require.NoError(t, dbStore.sess.Reconnect())
require.NoError(t, dbStore.CheckHealth())
// only one table existing causes health check to fail
require.NoError(t, gorethink.DB(dbStore.dbName).TableDrop(TUFFilesRethinkTable.Name).Exec(dbStore.sess))
require.Error(t, dbStore.CheckHealth())
// No DB, health check fails
cleanup()
require.Error(t, dbStore.CheckHealth())
}
// UpdateCurrent will add a new TUF file if no previous version of that gun and role existed.
func TestRethinkUpdateCurrentEmpty(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t)
defer cleanup()
testUpdateCurrentEmptyStore(t, dbStore)
}
// UpdateCurrent will add a new TUF file if the version is higher than previous, but fail
// if the version already exists in the DB
func TestRethinkUpdateCurrentVersionCheckOldVersionExists(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t)
defer cleanup()
testUpdateCurrentVersionCheck(t, dbStore, true)
}
// UpdateCurrent will successfully add a new (higher) version of an existing TUF file,
// but will return an error if the to-be-added version does not exist in the DB, but
// is older than an existing version in the DB.
func TestRethinkUpdateCurrentVersionCheckOldVersionNotExist(t *testing.T) {
t.Skip("Currently rethink only errors if the previous version exists - it doesn't check for strictly increasing")
dbStore, cleanup := rethinkDBSetup(t)
defer cleanup()
testUpdateCurrentVersionCheck(t, dbStore, false)
}
func TestRethinkGetVersion(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t)
defer cleanup()
testGetVersion(t, dbStore)
}
// UpdateMany succeeds if the updates do not conflict with each other or with what's
// already in the DB
func TestRethinkUpdateManyNoConflicts(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t)
defer cleanup()
testUpdateManyNoConflicts(t, dbStore)
}
// UpdateMany does not insert any rows (or at least rolls them back) if there
// are any conflicts.
func TestRethinkUpdateManyConflictRollback(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t)
defer cleanup()
testUpdateManyConflictRollback(t, dbStore)
}
// Delete will remove all TUF metadata, all versions, associated with a gun
func TestRethinkDeleteSuccess(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t)
defer cleanup()
testDeleteSuccess(t, dbStore)
}
func TestRethinkTUFMetaStoreGetCurrent(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t)
defer cleanup()
testTUFMetaStoreGetCurrent(t, dbStore)
}
func TestRethinkDBGetChanges(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t)
defer cleanup()
testGetChanges(t, dbStore)
}
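// Illustrative, not part of the original file: these tests build only with the
// "rethinkdb" tag and need a reachable database, e.g.
//
//	DBURL=localhost:28015 go test -tags rethinkdb ./server/storage/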
|
[
"\"DBURL\""
] |
[] |
[
"DBURL"
] |
[]
|
["DBURL"]
|
go
| 1 | 0 | |
src/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fragment.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
scripts/get_maps.py
|
import ecrlgcm.environment
import xarray as xr
import numpy as np
import xesmf as xe
import sys
import os
import glob
import warnings
import argparse
warnings.filterwarnings("ignore")
parser=argparse.ArgumentParser(description="Download paleo-continent maps")
args=parser.parse_args()
zip_file = 'Scotese_Wright_2018_Maps_1-88_6minX6min_PaleoDEMS_nc.zip'
data_source = f'https://zenodo.org/record/5460860/files/{zip_file}'
#zip_file = 'Scotese_Wright_2018_Maps_1-88_1degX1deg_PaleoDEMS_nc.zip'
#data_source = f'http://www.earthbyte.org/webdav/ftp/Data_Collections/Scotese_Wright_2018_PaleoDEM/{zip_file}'
cmd = f'rm -rf {os.environ["RAW_TOPO_DIR"]}'
cmd += f'; wget {data_source}'
cmd += f'; unzip {zip_file}'
cmd += f'; mv {zip_file[:-len(".zip")]} {os.environ["RAW_TOPO_DIR"]}'  # drop the .zip suffix
cmd += f'; rm {zip_file}'
os.system(cmd)
|
[] |
[] |
[
"RAW_TOPO_DIR"
] |
[]
|
["RAW_TOPO_DIR"]
|
python
| 1 | 0 | |
config/config.go
|
package config
import (
"github.com/vrischmann/envconfig"
"log"
"os"
)
const envFileName = ".env"
const devEnv = "dev"
// New returns the settings from the environment.
func New() *Config {
cfg := &Config{}
err := envconfig.Init(cfg)
if err != nil {
log.Print(err)
}
return cfg
}
func IsDev() bool {
return os.Getenv("SERVICE_ENV") == devEnv
}
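// Illustrative usage, not part of the original file: with SERVICE_ENV=dev set,
// IsDev reports true, so callers can branch on it, e.g.
//
//	if config.IsDev() {
//		log.Print("running with development settings")
//	}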
|
[
"\"SERVICE_ENV\""
] |
[] |
[
"SERVICE_ENV"
] |
[]
|
["SERVICE_ENV"]
|
go
| 1 | 0 | |
tests/contrib/google/test_google.py
|
import json
import logging
import os
from pathlib import Path
import httpretty
import pytest
from snsary.contrib.google import BigQueryOutput
@pytest.fixture()
def credentials_path():
return f"{Path(__file__).parent}/credentials.json"
@pytest.fixture
@httpretty.activate(allow_net_connect=False)
def big_query(mocker, credentials_path):
mocker.patch.dict(os.environ, {
'GOOGLE_APPLICATION_CREDENTIALS': credentials_path,
})
# body obtained by inspecting code and errors from returning an empty JSON response
httpretty.register_uri(
httpretty.POST,
'https://oauth2.googleapis.com/token',
body=json.dumps({"access_token": "1234", "expires_in": 33})
)
# body obtained by inspecting code and errors from returning an empty JSON response
httpretty.register_uri(
httpretty.GET,
(
'https://bigquery.googleapis.com/bigquery' +
'/v2/projects/test-project/datasets/snsary/tables/readings?prettyPrint=false'
),
body=json.dumps({
"tableReference": {"tableId": "1234", "projectId": "1234", "datasetId": "1234"}
})
)
return BigQueryOutput(retry_deadline=0)
@httpretty.activate(allow_net_connect=False)
def test_publish_batch(
mocker,
reading,
big_query
):
mocker.patch('platform.node', return_value='snsary')
httpretty.register_uri(
httpretty.POST,
(
'https://bigquery.googleapis.com/bigquery' +
'/v2/projects/1234/datasets/1234/tables/1234/insertAll?prettyPrint=false'
),
)
big_query.publish_batch([reading])
request = httpretty.last_request()
assert b'"host": "snsary"' in request.body
assert b'"metric": "myreading"' in request.body
assert b'"sensor": "mysensor"' in request.body
assert b'"timestamp": "2022-04-23T20:25:46+00:00"' in request.body
assert b'"value": 123' in request.body
@httpretty.activate(allow_net_connect=False)
def test_publish_batch_error(big_query):
httpretty.register_uri(
httpretty.POST,
(
'https://bigquery.googleapis.com/bigquery' +
'/v2/projects/1234/datasets/1234/tables/1234/insertAll?prettyPrint=false'
),
status=500
)
with pytest.raises(Exception) as excinfo:
big_query.publish_batch([])
assert 'Deadline of 0.0s exceeded' in str(excinfo.value)
@httpretty.activate(allow_net_connect=False)
def test_publish_batch_invalid(caplog, big_query):
caplog.set_level(logging.ERROR)
httpretty.register_uri(
httpretty.POST,
(
'https://bigquery.googleapis.com/bigquery' +
'/v2/projects/1234/datasets/1234/tables/1234/insertAll?prettyPrint=false'
),
body=json.dumps({
'insertErrors': [{'index': 0, 'errors': [{'message': 'no such field: abc.'}]}]
})
)
big_query.publish_batch([])
assert 'Error inserting row' in caplog.text
assert 'no such field' in caplog.text
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tensorflow/python/kernel_tests/conv_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
from tensorflow.contrib import layers
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
Relative to inception, each depth value is `depth // shrink`.
Args:
shrink: Factor to shrink each depth value by relative to Inception.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the convolution
parameters of Inception layers.
"""
input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
[4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
[4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
[4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
[4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
[4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
[4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
[4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
[4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
[4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
[4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
[4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
[4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
[4, 147, 147, 24]]
filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
[1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
[1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
[1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
[3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
[3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
[3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
[1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
[1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
[3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
[1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
[3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
[1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
[1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
[3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
[1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
[3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
[1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
[1, 1, 24, 64]]
out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
[4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
[4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
[4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
[4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
[4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
[4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
[4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
[4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
[4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
[4, 147, 147, 64]]
strides = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1
]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
for f in filter_sizes:
f[2] //= shrink
f[3] //= shrink
for o in out_sizes:
o[3] //= shrink
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, VALID, VALID
]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def NHWCToNCHW(input_tensor):
"""Convert the input from NHWC format to NCHW.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 3, 1, 2])
else:
return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]
def NCHWToNHWC(input_tensor):
"""Convert the input from NCHW format to NHWC.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 2, 3, 1])
else:
return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCHW" format is only supported on CUDA.
test_configs += [("NCHW", True)]
return test_configs
class Conv2DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu and not test_util.CudaSupportsHalfMatMulAndConv():
return [dtypes.float32]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float32, dtypes.float16]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, strides,
padding, data_format, dtype, use_gpu):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
strides: Stride: [col_stride, row_stride]
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
use_gpu: True if the operations should be run on GPU
Returns:
Symbolic tensor value that can be used to execute the computation
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
strides = [1] + strides + [1]
if data_format == "NCHW":
t1 = NHWCToNCHW(t1)
strides = NHWCToNCHW(strides)
conv = nn_ops.conv2d(
t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
return conv
def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that CPU and GPU produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
def _SetupVal(data_format, use_gpu):
with self.test_session(use_gpu=use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = NHWCToNCHW(t1)
strides = NHWCToNCHW(strides)
conv = nn_ops.conv2d(
t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
return conv
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
with self.test_session() as sess:
values = sess.run(tensors)
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-5, atol=1e-5)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides, padding,
expected):
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
data_format,
dtype,
use_gpu=use_gpu)
tensors.append(result)
with self.test_session() as sess:
values = sess.run(tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
print("expected = ", expected)
print("actual = ", value)
tol = 1e-5
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(expected, np.ravel(value), atol=tol, rtol=tol)
self.assertShapeEqual(value, conv)
def testConv2D1x1Filter(self):
expected_output = [
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
def testConv2DEmpty(self):
expected_output = []
self._VerifyValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
def testConv2D2x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
def testConv2D1x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
def testConv2D2x2FilterStride2(self):
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="VALID",
expected=expected_output)
def testConv2D2x2FilterStride2Same(self):
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="SAME",
expected=expected_output)
def testConv2D2x2FilterStride1x2(self):
expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
self._VerifyValues(
tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output)
def testConv2DKernelSmallerThanStrideValid(self):
expected_output = [65, 95, 275, 305]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="VALID",
expected=expected_output)
def testConv2DKernelSmallerThanStrideSame(self):
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 9, 11])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="SAME",
expected=[44, 28, 41, 16])
def testConv2DKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
padding="VALID",
expected=[50, 60])
# TODO this currently fails.
#self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
# filter_in_sizes=[2, 2, 1, 1],
# strides=[4, 4], padding="SAME",
# expected=[72, 112, 392, 432])
# Testing for backprops
def _RunAndVerifyBackpropInput(self, input_sizes, filter_sizes, output_sizes,
strides, padding, expected, data_format,
use_gpu, err):
total_output_size = 1
total_filter_size = 1
for s in output_sizes:
total_output_size *= s
for s in filter_sizes:
total_filter_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_filter_size + 1)]
x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = NHWCToNCHW(input_sizes)
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + strides + [1]
if data_format == "NCHW":
t2 = NHWCToNCHW(t2)
strides = NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_input(
t0, t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
# "values" consists of two tensors for two backprops
value = sess.run(conv)
self.assertShapeEqual(value, conv)
print("expected = ", expected)
print("actual = ", value)
self.assertArrayNear(expected, value.flatten(), err)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
new_input_sizes = NHWCToNCHW(input_sizes)
else:
new_input_sizes = input_sizes
t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t2 = NHWCToNCHW(t2)
strides = NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
ret = conv.eval()
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth3ValidBackpropInput(self):
expected_output = [
14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0
]
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
expected_output = [
1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
16.0, 15.0, 20.0, 18.0, 24.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
expected_output = [
1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
# Testing for backprops
def _RunAndVerifyBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
strides, padding, expected, data_format,
use_gpu):
total_input_size = 1
total_output_size = 1
for s in input_sizes:
total_input_size *= s
for s in output_sizes:
total_output_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x0 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
explicit_strides = [1] + strides + [1]
if data_format == "NCHW":
t0 = NHWCToNCHW(t0)
t2 = NHWCToNCHW(t2)
explicit_strides = NHWCToNCHW(explicit_strides)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=explicit_strides,
padding=padding,
data_format=data_format)
value = sess.run(conv)
self.assertShapeEqual(value, conv)
print("expected = ", expected)
print("actual = ", value)
self.assertArrayNear(expected, value.flatten(), 1e-5)
def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t0 = NHWCToNCHW(t0)
t2 = NHWCToNCHW(t2)
strides = NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
ret = conv.eval()
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
def testConv2D2x2Depth1ValidBackpropFilter(self):
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
def testConv2D2x2Depth3ValidBackpropFilter(self):
expected = [
17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,
117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,
120.0, 153.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
# Gradient checkers
def ConstructAndTestGradient(self, batch, input_rows, input_cols, filter_rows,
filter_cols, in_depth, out_depth, stride_rows,
stride_cols, padding, test_input, data_format,
use_gpu):
input_shape = [batch, input_rows, input_cols, in_depth]
filter_shape = [filter_rows, filter_cols, in_depth, out_depth]
# TODO(yangke): re-factor the computation of output shape.
if padding == "VALID":
output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
else:
output_rows = (input_rows + stride_rows - 1) // stride_rows
output_cols = (input_cols + stride_cols - 1) // stride_cols
output_shape = [batch, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
# Conv2DGrad functions are not compiled for double due to
# a problem in the way Eigen's Conv2DGrad works for double.
# So we disable the DOUBLE path. We should re-enable this
# when double support returns for CPU and/or GPU.
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.test_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=dtype, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=dtype, name="filter")
strides = [1, stride_rows, stride_cols, 1]
if data_format == "NCHW":
new_input_tensor = NHWCToNCHW(input_tensor)
strides = NHWCToNCHW(strides)
else:
new_input_tensor = input_tensor
conv = nn_ops.conv2d(
new_input_tensor,
filter_tensor,
strides,
padding,
data_format=data_format,
name="conv")
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
self.assertEqual(output_shape, conv.get_shape())
if test_input:
jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,
input_shape,
conv,
output_shape)
else:
jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
filter_shape,
conv,
output_shape)
if dtype == dtypes.float32:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16 theoretical gradients to fp32 theoretical gradients,
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
print("conv_2d gradient error = ", err)
self.assertLess(err, 0.002)
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=3,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
# Incorrect filter shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
strides=[1, 1, 1, 1],
padding="SAME")
# Depth mismatch.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[4, 4, 2, 2]),
strides=[1, 1, 1, 1],
padding="SAME")
def testOpEdgeCases(self):
with self.test_session() as sess:
# Illegal strides.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[2, 1, 1, 1],
padding="SAME"))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 2],
padding="SAME"))
# Filter larger than input.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[20, 21, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[21, 20, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
# This is only a very simple test. More comprehensive tests live in
# //learning/dist_belief/experimental/brain_compatibility/conv_nn_test.py
# where we compare the numeric results of the depthwise conv op with the
# depthwise weighted sum transformer in dist_belief.
class DepthwiseConv2DTest(test.TestCase):
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session() as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output)
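# A small NumPy sketch (illustrative only, not part of the original test) that
# reproduces expected_output above by hand: for each of the two VALID output
# positions it applies the 2x2 filter slice of every input channel and depth
# multiplier, mirroring the arithmetic spelled out in the comment.
def _manual_depthwise_conv2d_check():
    x = np.arange(1, 13, dtype=np.float32).reshape(1, 2, 3, 2)   # NHWC input
    w = np.arange(1, 17, dtype=np.float32).reshape(2, 2, 2, 2)   # rows, cols, in_depth, multiplier
    out = np.zeros((1, 1, 2, 4), dtype=np.float32)
    for col in range(2):                    # two VALID positions along the width
        patch = x[0, 0:2, col:col + 2, :]   # (filter_rows, filter_cols, in_depth)
        for c in range(2):                  # input channel
            for m in range(2):              # depth multiplier
                out[0, 0, col, c * 2 + m] = np.sum(patch[:, :, c] * w[:, :, c, m])
    return out.ravel()  # -> [196. 216. 272. 296. 252. 280. 344. 376.]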
class SeparableConv2DTest(test.TestCase):
def _InitValues(self, sizes):
"""Initializes values for input tensors.
Args:
sizes: Tensor dimensions.
Returns:
Tensor initialized to values.
"""
total_size = 1
for s in sizes:
total_size *= s
x = [f * 0.5 for f in range(1, total_size + 1)]
return constant_op.constant(x, shape=sizes)
def _VerifyValues(self, tensor_in_sizes, depthwise_filter_in_sizes,
pointwise_filter_in_sizes, stride, padding, expected):
"""Verifies the output values of the separable convolution function.
Args:
tensor_in_sizes: Input tensor dimensions.
depthwise_filter_in_sizes: Depthwise filter tensor dimensions.
pointwise_filter_in_sizes: Pointwise filter tensor dimensions.
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
with self.test_session() as sess:
t1 = self._InitValues(tensor_in_sizes)
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)
f2 = self._InitValues(pointwise_filter_in_sizes)
conv = nn_impl.separable_conv2d(
t1, f1, f2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testSeparableConv2D(self):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7].
# Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).
expected_output = [
6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 7],
stride=1,
padding="SAME",
expected=expected_output)
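# A hedged sketch of the decomposition that separable_conv2d performs: the same
# values can be obtained by running the depthwise convolution first and then a
# 1x1 pointwise convolution over its output. The helper below is illustrative
# only and reuses the ops already imported by this file.
def _manual_separable_conv2d(t1, f1, f2, stride, padding):
    depthwise = nn_impl.depthwise_conv2d(
        t1, f1, strides=[1, stride, stride, 1], padding=padding)
    # Pointwise step: a 1x1 convolution that mixes the expanded channels.
    return nn_ops.conv2d(depthwise, f2, strides=[1, 1, 1, 1], padding="SAME")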
def testSeparableConv2DEqualInputOutputDepth(self):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].
# Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).
expected_output = [
5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,
8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,
10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,
11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,
14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,
17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,
17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,
20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,
24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,
5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,
6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,
1923.75, 2007.0, 2090.25, 2173.5
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 6],
stride=1,
padding="SAME",
expected=expected_output)
def testSeparableConv2DIllegalCases(self):
# Output depth less than input depth.
with self.assertRaisesRegexp(
ValueError,
"Refusing to perform an overparameterized separable convolution"):
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 5],
stride=1,
padding="SAME",
expected=None)
class DeepConv2DTest(test.TestCase):
def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that DeepConv2D and Conv2D produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
with self.test_session(use_gpu=False) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)
os.environ["TF_USE_DEEP_CONV2D"] = "0"
values_expect = sess.run([conv])
os.environ["TF_USE_DEEP_CONV2D"] = "1"
values_test = sess.run([conv])
self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)
def _RunTestCases(self, conv_strides, padding):
input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],
[2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]
filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],
[3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]
for input_shape, filter_shape in zip(input_sizes, filter_sizes):
self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)
def testConv2D3x3FilterStride1x1Valid(self):
self._RunTestCases([1, 1], "VALID")
def testConv2D3x3FilterStride1x1Same(self):
self._RunTestCases([1, 1], "SAME")
class Conv2DBenchmark(test.Benchmark):
def benchmarkGPUConvStackFirst(self):
# Benchmark the first iteration of a conv-net with many identical conv
# operations.
if not test.is_gpu_available():
return
with ops.Graph().as_default(), session_lib.Session() as session:
batch_size = 1
timesteps = 600
features = 1
inputs = random_ops.random_uniform(
[batch_size, 1, timesteps, features], seed=1234)
num_outputs_list = [512] * 40 + [1]
kernel_w = 3
x = inputs
for num_outputs in num_outputs_list:
x = layers.convolution2d(x, num_outputs, [1, kernel_w])
outputs = x
variables.global_variables_initializer().run()
num_iterations = 4
for iter_index in xrange(num_iterations):
start = time.time()
session.run(outputs)
wall_time = time.time() - start
self.report_benchmark(
name="conv_stack_iter_%d" % iter_index, wall_time=wall_time)
print("conv_stack_iter_%d: %.4f" % (iter_index, wall_time))
def GetInceptionFwdTest(input_size, filter_size, stride, padding):
def Test(self):
tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride,
padding))
self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)
return Test
def GetInceptionBackInputTest(input_size, filter_size, output_size, stride,
padding):
def Test(self):
tf_logging.info("Testing InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
self._CompareBackpropInput(input_size, filter_size, output_size,
[stride, stride], padding)
return Test
def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,
padding):
def Test(self):
tf_logging.info("Testing InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
self._CompareBackFilter(input_size, filter_size, output_size, strides,
padding)
return Test
if __name__ == "__main__":
for index, (input_size_, filter_size_, output_size_, stride_,
padding_) in enumerate(GetShrunkInceptionShapes()):
setattr(Conv2DTest, "testInceptionFwd_" + str(index),
GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_))
setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
GetInceptionBackInputTest(input_size_, filter_size_, output_size_,
stride_, padding_))
setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
GetInceptionBackFilterTest(input_size_, filter_size_, output_size_,
[stride_, stride_], padding_))
test.main()
|
[] |
[] |
[
"TF_USE_DEEP_CONV2D"
] |
[]
|
["TF_USE_DEEP_CONV2D"]
|
python
| 1 | 0 | |
iota/modules/Vizio/VizioController.py
|
import requests
import json
import os
import urllib3
from wakeonlan import send_magic_packet
# REFERENCE: https://github.com/exiva/Vizio_SmartCast_API
class VizioController(object):
def __init__(self):
# Vizio's API is completely insecure, but it's also local only, so...
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self.auth_token = os.environ['VIZIO_AUTH_TOKEN']
self.ip = os.environ['VIZIO_IP_ADDRESS']
self.port = os.environ['VIZIO_PORT']
self.mac = os.environ['VIZIO_MAC_ADDRESS']
self.power_keys = {'off': 0, 'on': 1, 'toggle': 2}
self.headers = {
'Content-Type': 'application/json',
'AUTH': self.auth_token
}
def _build_url(self, parts: list) -> str:
return f'https://{self.ip}:{self.port}/{"/".join(parts)}'
def _call(self, method: str, parts: list, body={}) -> requests.Response:
try:
if method == 'GET':
response = requests.get(
url=self._build_url(parts),
headers=self.headers,
verify=False
)
elif method == 'PUT':
response = requests.put(
url=self._build_url(parts),
headers=self.headers,
data=json.dumps(body),
verify=False
)
return response
except requests.exceptions.ConnectionError as e:
print("ERROR: Couldn't connect to Vizio TV")
print(e)  # the class defines no log_exception helper, so print the error directly
return None
def _get_power_state(self) -> requests.Response:
return self._call('GET', ['state', 'device', 'power_mode'])
def _power_key(self, state: str) -> requests.Response:
body = {
'KEYLIST': [{
'CODESET': 11,
'CODE': self.power_keys[state],
'ACTION': 'KEYPRESS'
}]
}
return self._call('PUT', ['key_command', ''], body)
def turn_on(self):
send_magic_packet(self.mac)
self._power_key(state='on')
def turn_off(self):
self._power_key(state='off')
def toggle_power(self):
self._power_key(state='toggle')
def _get_all_input_names(self) -> list:
response = self._call(
'GET',
['menu_native', 'dynamic', 'tv_settings', 'devices', 'name_input']
)
if response and response.status_code == 200:
return [item['NAME'] for item in response.json()['ITEMS']]
else:
return []
def _get_current_input(self) -> dict:
response = self._call('GET', [
'menu_native', 'dynamic', 'tv_settings', 'devices', 'current_input'
])
if response.status_code == 200:
input = response.json()['ITEMS'][0]
return {'value': input['VALUE'], 'hash': input['HASHVAL']}
else:
return {}
def switch_input(self, input_name: str) -> requests.Response:
if input_name not in self._get_all_input_names():
return None
current = self._get_current_input()
if 'hash' not in current.keys():
return None
return self._call(
method='PUT',
parts=[
'menu_native',
'dynamic',
'tv_settings',
'devices',
'current_input'
],
body={
'REQUEST': 'MODIFY',
'VALUE': input_name,
'HASHVAL': current['hash']
}
)
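# Example usage (a sketch; assumes the VIZIO_* environment variables above are
# exported and the TV is reachable on the local network; 'HDMI-1' is a
# hypothetical input name):
#
#   controller = VizioController()
#   controller.turn_on()                        # wake-on-LAN + power key press
#   print(controller._get_all_input_names())    # list the named inputs
#   controller.switch_input('HDMI-1')
#   controller.turn_off()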
|
[] |
[] |
[
"VIZIO_PORT",
"VIZIO_MAC_ADDRESS",
"VIZIO_AUTH_TOKEN",
"VIZIO_IP_ADDRESS"
] |
[]
|
["VIZIO_PORT", "VIZIO_MAC_ADDRESS", "VIZIO_AUTH_TOKEN", "VIZIO_IP_ADDRESS"]
|
python
| 4 | 0 | |
examples/golang/main.go
|
package main
import (
"context"
"fmt"
"os"
"runtime/pprof"
"github.com/pyroscope-io/client/pyroscope"
)
//go:noinline
func work(n int) {
// revive:disable:empty-block this is fine because this is an example app, not real production code
for i := 0; i < n; i++ {
}
fmt.Printf("work\n")
// revive:enable:empty-block
}
func fastFunction(c context.Context) {
pyroscope.TagWrapper(c, pyroscope.Labels("function", "fast"), func(c context.Context) {
work(20000000)
})
}
func slowFunction(c context.Context) {
// standard pprof.Do wrappers work as well
pprof.Do(c, pprof.Labels("function", "slow"), func(c context.Context) {
work(80000000)
})
}
func main() {
serverAddress := os.Getenv("PYROSCOPE_SERVER_ADDRESS")
if serverAddress == "" {
serverAddress = "http://localhost:4040"
}
pyroscope.Start(pyroscope.Config{
ApplicationName: "simple.golang.app",
ServerAddress: serverAddress,
Logger: pyroscope.StandardLogger,
})
pyroscope.TagWrapper(context.Background(), pyroscope.Labels("foo", "bar"), func(c context.Context) {
for {
fastFunction(c)
slowFunction(c)
}
})
}
|
[
"\"PYROSCOPE_SERVER_ADDRESS\""
] |
[] |
[
"PYROSCOPE_SERVER_ADDRESS"
] |
[]
|
["PYROSCOPE_SERVER_ADDRESS"]
|
go
| 1 | 0 | |
cmd/cli.go
|
// Copyright 2016 Wercker Holding BV
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"text/template"
"time"
"k8s.io/apimachinery/pkg/labels"
homedir "github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/wercker/stern/stern"
"github.com/fatih/color"
)
const version = "1.11.0"
type Options struct {
container string
excludeContainer string
containerState string
timestamps bool
since time.Duration
context string
namespace string
kubeConfig string
exclude []string
include []string
allNamespaces bool
selector string
tail int64
color string
version bool
completion string
template string
output string
}
var opts = &Options{
container: ".*",
containerState: "running",
tail: -1,
color: "auto",
template: "",
output: "default",
}
func Run() {
cmd := &cobra.Command{}
cmd.Use = "stern pod-query"
cmd.Short = "Tail multiple pods and containers from Kubernetes"
cmd.Flags().StringVarP(&opts.container, "container", "c", opts.container, "Container name when multiple containers in pod")
cmd.Flags().StringVarP(&opts.excludeContainer, "exclude-container", "E", opts.excludeContainer, "Exclude a Container name")
cmd.Flags().StringVar(&opts.containerState, "container-state", opts.containerState, "If present, tail containers with status in running, waiting or terminated. Defaults to running.")
cmd.Flags().BoolVarP(&opts.timestamps, "timestamps", "t", opts.timestamps, "Print timestamps")
cmd.Flags().DurationVarP(&opts.since, "since", "s", opts.since, "Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to 48h.")
cmd.Flags().StringVar(&opts.context, "context", opts.context, "Kubernetes context to use. Defaults to the current context configured in kubeconfig.")
cmd.Flags().StringVarP(&opts.namespace, "namespace", "n", opts.namespace, "Kubernetes namespace to use. Defaults to the namespace configured in the Kubernetes context.")
cmd.Flags().StringVar(&opts.kubeConfig, "kubeconfig", opts.kubeConfig, "Path to kubeconfig file to use")
cmd.Flags().StringVar(&opts.kubeConfig, "kube-config", opts.kubeConfig, "Path to kubeconfig file to use")
cmd.Flags().MarkDeprecated("kube-config", "Use --kubeconfig instead.")
cmd.Flags().StringSliceVarP(&opts.exclude, "exclude", "e", opts.exclude, "Regex of log lines to exclude")
cmd.Flags().StringSliceVarP(&opts.include, "include", "i", opts.include, "Regex of log lines to include")
cmd.Flags().BoolVar(&opts.allNamespaces, "all-namespaces", opts.allNamespaces, "If present, tail across all namespaces. A specific namespace is ignored even if specified with --namespace.")
cmd.Flags().StringVarP(&opts.selector, "selector", "l", opts.selector, "Selector (label query) to filter on. If present, defaults to \".*\" for the pod-query.")
cmd.Flags().Int64Var(&opts.tail, "tail", opts.tail, "The number of lines from the end of the logs to show. Defaults to -1, showing all logs.")
cmd.Flags().StringVar(&opts.color, "color", opts.color, "Color output. Can be 'always', 'never', or 'auto'")
cmd.Flags().BoolVarP(&opts.version, "version", "v", opts.version, "Print the version and exit")
cmd.Flags().StringVar(&opts.completion, "completion", opts.completion, "Outputs stern command-line completion code for the specified shell. Can be 'bash' or 'zsh'")
cmd.Flags().StringVar(&opts.template, "template", opts.template, "Template to use for log lines, leave empty to use --output flag")
cmd.Flags().StringVarP(&opts.output, "output", "o", opts.output, "Specify predefined template. Currently support: [default, raw, json]")
// Specify custom bash completion function
cmd.BashCompletionFunction = bash_completion_func
for name, completion := range bash_completion_flags {
if cmd.Flag(name) != nil {
if cmd.Flag(name).Annotations == nil {
cmd.Flag(name).Annotations = map[string][]string{}
}
cmd.Flag(name).Annotations[cobra.BashCompCustom] = append(
cmd.Flag(name).Annotations[cobra.BashCompCustom],
completion,
)
}
}
cmd.RunE = func(cmd *cobra.Command, args []string) error {
if opts.version {
fmt.Printf("stern version %s\n", version)
return nil
}
if opts.completion != "" {
return runCompletion(opts.completion, cmd)
}
narg := len(args)
if (narg > 1) || (narg == 0 && opts.selector == "") {
return cmd.Help()
}
config, err := parseConfig(args)
if err != nil {
log.Println(err)
os.Exit(2)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
err = stern.Run(ctx, config)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
return nil
}
if err := cmd.Execute(); err != nil {
log.Fatal(err)
}
}
func parseConfig(args []string) (*stern.Config, error) {
type VotiroLog struct {
Timestamp string
Level string
MessageTemplate string
}
kubeConfig, err := getKubeConfig()
if err != nil {
return nil, err
}
var podQuery string
if len(args) == 0 {
podQuery = ".*"
} else {
podQuery = args[0]
}
pod, err := regexp.Compile(podQuery)
if err != nil {
return nil, errors.Wrap(err, "failed to compile regular expression from query")
}
container, err := regexp.Compile(opts.container)
if err != nil {
return nil, errors.Wrap(err, "failed to compile regular expression for container query")
}
var excludeContainer *regexp.Regexp
if opts.excludeContainer != "" {
excludeContainer, err = regexp.Compile(opts.excludeContainer)
if err != nil {
return nil, errors.Wrap(err, "failed to compile regular expression for exclude container query")
}
}
var exclude []*regexp.Regexp
for _, ex := range opts.exclude {
rex, err := regexp.Compile(ex)
if err != nil {
return nil, errors.Wrap(err, "failed to compile regular expression for exclusion filter")
}
exclude = append(exclude, rex)
}
var include []*regexp.Regexp
for _, inc := range opts.include {
rin, err := regexp.Compile(inc)
if err != nil {
return nil, errors.Wrap(err, "failed to compile regular expression for inclusion filter")
}
include = append(include, rin)
}
containerState, err := stern.NewContainerState(opts.containerState)
if err != nil {
return nil, err
}
var labelSelector labels.Selector
selector := opts.selector
if selector == "" {
labelSelector = labels.Everything()
} else {
labelSelector, err = labels.Parse(selector)
if err != nil {
return nil, errors.Wrap(err, "failed to parse selector as label selector")
}
}
var tailLines *int64
if opts.tail != -1 {
tailLines = &opts.tail
}
colorFlag := opts.color
if colorFlag == "always" {
color.NoColor = false
} else if colorFlag == "never" {
color.NoColor = true
} else if colorFlag != "auto" {
return nil, errors.New("color should be one of 'always', 'never', or 'auto'")
}
t := opts.template
if t == "" {
switch opts.output {
case "default":
if color.NoColor {
t = "{{.PodName}} {{.ContainerName}} {{.Message}}"
if opts.allNamespaces {
t = fmt.Sprintf("{{.Namespace}} %s", t)
}
} else {
t = "{{color .PodColor .PodName}} {{color .ContainerColor .ContainerName}} {{.Message}}"
if opts.allNamespaces {
t = fmt.Sprintf("{{color .PodColor .Namespace}} %s", t)
}
}
case "raw":
t = "{{.Message}}"
case "json":
t = "{{json .}}\n"
}
}
funs := map[string]interface{}{
"json": func(in interface{}) (string, error) {
b, err := json.Marshal(in)
if err != nil {
return "", err
}
return string(b), nil
},
"color": func(color color.Color, text string) string {
return color.SprintFunc()(text)
},
"votiro": func(in interface{}) (string, error) {
var votiroLog VotiroLog
json.Unmarshal([]byte(in), &votiroLog)
return votiroLog, nil
},
}
template, err := template.New("log").Funcs(funs).Parse(t)
if err != nil {
return nil, errors.Wrap(err, "unable to parse template")
}
if opts.since == 0 {
opts.since = 48 * time.Hour
}
return &stern.Config{
KubeConfig: kubeConfig,
PodQuery: pod,
ContainerQuery: container,
ExcludeContainerQuery: excludeContainer,
ContainerState: containerState,
Exclude: exclude,
Include: include,
Timestamps: opts.timestamps,
Since: opts.since,
ContextName: opts.context,
Namespace: opts.namespace,
AllNamespaces: opts.allNamespaces,
LabelSelector: labelSelector,
TailLines: tailLines,
Template: template,
}, nil
}
func getKubeConfig() (string, error) {
var kubeconfig string
if kubeconfig = opts.kubeConfig; kubeconfig != "" {
return kubeconfig, nil
}
if kubeconfig = os.Getenv("KUBECONFIG"); kubeconfig != "" {
return kubeconfig, nil
}
// kubernetes requires an absolute path
home, err := homedir.Dir()
if err != nil {
return "", errors.Wrap(err, "failed to get user home directory")
}
kubeconfig = filepath.Join(home, ".kube/config")
return kubeconfig, nil
}
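// A minimal sketch (not part of stern) of how the template assembled in
// parseConfig renders a line with the "json" helper; the logLine struct and
// its fields are assumptions used purely for illustration.
func exampleRenderJSONTemplate() error {
	type logLine struct {
		PodName string `json:"podName"`
		Message string `json:"message"`
	}
	funs := map[string]interface{}{
		"json": func(in interface{}) (string, error) {
			b, err := json.Marshal(in)
			if err != nil {
				return "", err
			}
			return string(b), nil
		},
	}
	tmpl, err := template.New("log").Funcs(funs).Parse("{{json .}}\n")
	if err != nil {
		return err
	}
	// Prints: {"podName":"web-1","message":"hello"}
	return tmpl.Execute(os.Stdout, logLine{PodName: "web-1", Message: "hello"})
}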
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
example/service/ecr/deleteRepository/deleteRepository.go
|
//go:build example
// +build example
package main
import (
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecr"
)
const DEFAULT_AWS_REGION = "us-east-1"
// This example deletes an ECR Repository
//
// Usage:
// AWS_REGION=us-east-1 go run -tags example deleteECRRepository.go <repo_name>
func main() {
config := &aws.Config{Region: aws.String(getAwsRegion())}
svc := ecr.New(session.New(), config)
repoName := getRepoNameArg()
if repoName == "" {
printUsageAndExit1()
}
input := &ecr.DeleteRepositoryInput{
Force: aws.Bool(false),
RepositoryName: aws.String(repoName),
}
output, err := svc.DeleteRepository(input)
if err != nil {
fmt.Printf("\nError deleting the repo %v in region %v\n%v\n", repoName, aws.StringValue(config.Region), err.Error())
os.Exit(1)
}
fmt.Printf("\nECR Repository \"%v\" deleted successfully!\n\nAWS Output:\n%v", repoName, output)
}
// Print correct usage and exit the program with code 1
func printUsageAndExit1() {
fmt.Println("\nUsage: AWS_REGION=us-east-1 go run -tags example deleteECRRepository.go <repo_name>")
os.Exit(1)
}
// Try get the repo name from the first argument
func getRepoNameArg() string {
if len(os.Args) < 2 {
return ""
}
firstArg := os.Args[1]
return firstArg
}
// Returns the aws region from env var or default region defined in DEFAULT_AWS_REGION constant
func getAwsRegion() string {
awsRegion := os.Getenv("AWS_REGION")
if awsRegion != "" {
return awsRegion
}
return DEFAULT_AWS_REGION
}
|
[
"\"AWS_REGION\""
] |
[] |
[
"AWS_REGION"
] |
[]
|
["AWS_REGION"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bookings.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
sparse_operation_kit/unit_test/test_scripts/tf1/test_sparse_emb_demo.py
|
"""
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import sys, os
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../../../")))
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import sparse_operation_kit as sok
import tensorflow as tf
import utils
from sparse_models import SOKDemo, TFDemo
from test_dense_emb_demo import check_saved_embedding_variables
import strategy_wrapper
import numpy as np
def get_sok_results(args, init_tensors, *random_samples):
if args.distributed_tool == "onedevice":
strategy = strategy_wrapper.OneDeviceStrategy()
elif args.distributed_tool == "horovod":
import horovod.tensorflow as hvd
hvd.init()
strategy = strategy_wrapper.HorovodStrategy()
else:
raise ValueError(f"{args.distributed_tool} is not supported.")
with strategy.scope():
sok_init_op = sok.Init(global_batch_size=args.global_batch_size)
sok_sparse_demo = SOKDemo(max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu,
embedding_vec_size=args.embedding_vec_size,
combiner=args.combiner,
slot_num=args.slot_num,
max_nnz=args.max_nnz,
use_hashtable=args.use_hashtable,
num_of_dense_layers=0)
emb_opt = utils.get_embedding_optimizer(args.optimizer)(learning_rate=0.1)
dense_opt = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)
sok_saver = sok.Saver()
restore_op = list()
for i, embedding_layer in enumerate(sok_sparse_demo.embedding_layers):
control_inputs = [restore_op[-1]] if restore_op else None
with tf.control_dependencies(control_inputs):
if args.restore_params:
filepath = r"./embedding_variables"
op = sok_saver.restore_from_file(embedding_layer.embedding_variable, filepath)
else:
op = sok_saver.load_embedding_values(embedding_layer.embedding_variable, init_tensors[i])
restore_op.append(op)
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction="none")
def _replica_loss(labels, logits):
loss = loss_fn(labels, logits)
return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)
def _train_step(inputs, labels, training):
def _step_fn(inputs, labels):
logit, embedding_vector = sok_sparse_demo(inputs, training=training)
loss = _replica_loss(labels, logit)
emb_var, other_var = sok.split_embedding_variable_from_others(sok_sparse_demo.trainable_variables)
grads = tf.gradients(loss, emb_var + other_var, colocate_gradients_with_ops=True,
unconnected_gradients=tf.UnconnectedGradients.NONE)
emb_grads, other_grads = grads[:len(emb_var)], grads[len(emb_var):]
if "plugin" in args.optimizer:
emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))
else:
with sok.OptimizerScope(emb_var):
emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))
with tf.control_dependencies([*emb_grads]):
# in case NCCL runs concurrently via SOK and horovod
other_grads = strategy.reduce("sum", other_grads)
other_train_op = dense_opt.apply_gradients(zip(other_grads, other_var))
with tf.control_dependencies([emb_train_op, other_train_op]):
total_loss = strategy.reduce("sum", loss)
total_loss = tf.identity(total_loss)
return total_loss, embedding_vector
return strategy.run(_step_fn, inputs, labels)
replica_batch_size = args.global_batch_size // args.gpu_num
dataset = utils.tf_dataset(*random_samples, batchsize=replica_batch_size,
to_sparse_tensor=True, repeat=1)
train_iterator = dataset.make_initializable_iterator()
iterator_init = train_iterator.initializer
inputs, labels = train_iterator.get_next()
graph_results = _train_step(inputs, labels, training=True)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
if "plugin" in args.optimizer:
init_op = tf.group(init_op, emb_opt.initializer)
save_op = list()
for i, embedding_layer in enumerate(sok_sparse_demo.embedding_layers):
control_inputs = [save_op[-1]] if save_op else None
with tf.control_dependencies(control_inputs):
if args.save_params:
filepath = r"./embedding_variables/"
utils.try_make_dirs(filepath)
op = sok_saver.dump_to_file(embedding_layer.embedding_variable, filepath)
else:
op = tf.constant(1.0)
save_op.append(op)
sok_results = list()
with tf.Session() as sess:
sess.run(sok_init_op)
sess.run([init_op, iterator_init])
sess.run(restore_op)
sess.graph.finalize()
for step in range(args.iter_num):
loss_v, emb_vector_v = sess.run([*graph_results])
print("*" * 80)
print(f"Step: {step}, loss: {loss_v}, embedding_vector:\n{emb_vector_v}")
sok_results.append(emb_vector_v)
sess.run(save_op)
name = list()
for embedding_layer in sok_sparse_demo.embedding_layers:
name.append(embedding_layer.embedding_variable.m_var_name)
return sok_results, name
def get_tf_results(args, init_tensors, *random_samples):
graph = tf.Graph()
with graph.as_default():
tf_sparse_demo = TFDemo(vocabulary_size=args.max_vocabulary_size_per_gpu * args.gpu_num,
embedding_vec_size=args.embedding_vec_size,
combiner=args.combiner,
slot_num=args.slot_num,
max_nnz=args.max_nnz,
use_hashtable=args.use_hashtable,
num_of_dense_layers=0)
optimizer = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def _train_step(inputs, labels, training):
logit, embedding_vector = tf_sparse_demo(inputs, training=training)
loss = loss_fn(labels, logit)
grads = tf.gradients(loss, tf_sparse_demo.trainable_variables,
colocate_gradients_with_ops=True,
unconnected_gradients=tf.UnconnectedGradients.NONE)
train_op = optimizer.apply_gradients(zip(grads, tf_sparse_demo.trainable_variables))
with tf.control_dependencies([train_op]):
loss = tf.identity(loss)
return loss, embedding_vector
dataset = utils.tf_dataset(*random_samples, batchsize=args.global_batch_size,
to_sparse_tensor=True, repeat=1)
train_iterator = dataset.make_initializable_iterator()
iterator_init = train_iterator.initializer
inputs, labels = train_iterator.get_next()
graph_results = _train_step(inputs, labels, training=True)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
restore_op = list()
for i, embedding_weight in enumerate(tf_sparse_demo.embedding_weights):
restore_op.append(embedding_weight.assign(tf.concat(init_tensors[i], axis=0)))
emb_values = list()
for embedding_weight in tf_sparse_demo.embedding_weights:
if args.save_params:
filepath = r"./embedding_variables/"
utils.try_make_dirs(filepath)
emb_values.append(embedding_weight.read_value())
else:
emb_values = tf.constant(1.0)
tf_results = list()
with tf.Session(graph=graph) as sess:
sess.run([init_op, iterator_init])
sess.run(restore_op)
sess.graph.finalize()
for step in range(args.iter_num):
loss_v, emb_vector_v = sess.run([*graph_results])
print("*" * 80)
print(f"step: {step}, loss: {loss_v}, embedding_vector:\n{emb_vector_v}")
tf_results.append(emb_vector_v)
emb_values_v = sess.run(emb_values)
if args.save_params:
for i, value in enumerate(emb_values_v):
utils.save_to_file(os.path.join(filepath, r"tf_variable_" + str(i) + r".file"),
value)
name = list()
for embedding_weight in tf_sparse_demo.embedding_weights:
name.append(embedding_weight.name)
return tf_results, name
def compare_sparse_emb_sok_with_tf(args):
if args.global_batch_size % args.gpu_num != 0:
raise ValueError(f"global_batch_size: {args.global_batch_size} is not divisible "
f"by gpu_num: {args.gpu_num}")
if args.use_hashtable:
vocabulary_size = args.max_vocabulary_size_per_gpu * args.gpu_num
else:
vocabulary_size = args.max_vocabulary_size_per_gpu
if args.generate_new_datas:
replica_batch_size = args.global_batch_size // args.gpu_num
random_samples = utils.generate_random_samples(num_of_samples=replica_batch_size * args.iter_num,
vocabulary_size=vocabulary_size,
slot_num=sum(args.slot_num),
max_nnz=args.max_nnz,
use_sparse_mask=True)
utils.save_to_file(r"./random_samples_" + str(args.rank_idx) + r".file", *random_samples)
else:
random_samples = utils.restore_from_file(r"./random_samples_" + str(args.rank_idx) + r".file")
if args.restore_params:
filepath = r"./embedding_variables"
# because we already checked the variable consistency when saving,
# we can directly use the TF Variable file to initialize
# both TF's Variable and SOK's Variable
init_tensors = list()
for i in range(len(args.slot_num)):
tf_values_filename = os.path.join(filepath, r"tf_variable_" + str(i) + r".file")
init_tensors.append(utils.restore_from_file(tf_values_filename))
else:
init_tensors = list()
for i in range(len(args.slot_num)):
init_tensors.append(utils.get_ones_tensor(max_vocab_size_per_gpu=args.max_vocabulary_size_per_gpu,
embedding_vec_size=args.embedding_vec_size[i],
num=args.gpu_num))
sok_results, variable_names = get_sok_results(args, init_tensors, *random_samples)
utils.save_to_file(r"./sok_embedding_vectors_" + str(args.rank_idx) + r".file", *sok_results)
if args.rank_idx != 0:
return
# aggregate dataset from different worker
dataset_filenames = [r"./random_samples_" + str(rank_idx) + r".file"
for rank_idx in range(args.rank_size)]
random_samples_total = [list() for _ in range(args.iter_num)]
random_labels_total = [list() for _ in range(args.iter_num)]
local_batch_size = args.global_batch_size // args.gpu_num
for rank_idx in range(args.rank_size):
samples, labels = utils.restore_from_file(dataset_filenames[rank_idx])
for i in range(args.iter_num):
random_samples_total[i].extend(samples[i * local_batch_size : (i + 1) * local_batch_size])
random_labels_total[i].extend(labels[i * local_batch_size : (i + 1) * local_batch_size])
random_samples_total = np.concatenate(random_samples_total, axis=0)
random_labels_total = np.concatenate(random_labels_total, axis=0)
tf_results, _ = get_tf_results(args, init_tensors, random_samples_total, random_labels_total)
# aggregate sok forward results from different worker
sok_results_filenames = [r"./sok_embedding_vectors_" + str(rank_idx) + r".file"
for rank_idx in range(args.rank_size)]
sok_results_total = list()
for filename in sok_results_filenames:
sok_results = utils.restore_from_file(filename)
sok_results_total.append(sok_results)
if len(sok_results_total[0]) != len(tf_results):
raise ValueError("The length of sok results is not equal to that of tensorflow.")
if len(sok_results) != args.iter_num:
raise ValueError("The length of embedding vectors: %d is not equal to iteration number: %d."
%(len(sok_results), args.iter_num))
rtol, atol = 1e-3, 1e-3
if args.restore_params:
rtol, atol = rtol * 10, atol * 10
if args.distributed_tool == "horovod":
rtol, atol = rtol * 10, atol * 10
for i in range(args.iter_num):
sok_vector = np.concatenate([sok_results_total[rank_idx][i]
for rank_idx in range(args.rank_size)], axis=0)
allclose = np.allclose(sok_vector, tf_results[i], rtol=rtol, atol=atol)
if not allclose:
raise ValueError(f"\n{sok_vector} \nis not near to \n{tf_results[i]} \nat rtol={rtol}, atol={atol}")
print(f"\n[INFO]: For {len(args.slot_num)} Sparse Embedding layer, using {args.gpu_num} GPUs + {args.optimizer} optimizer, "
f"using hashtable? {args.use_hashtable}, combiner = {args.combiner}, the embedding vectors"
f" obtained from sok and tf are consistent for {args.iter_num} iterations.")
if args.save_params:
check_saved_embedding_variables(args, variable_names,
use_hashtable=args.use_hashtable, gpu_num=args.gpu_num)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_num", type=int, required=False, default=1)
parser.add_argument("--distributed_tool", type=str, required=False,
choices=["horovod", "onedevice"], default="onedevice")
parser.add_argument("--iter_num", type=int, required=False, default=50)
parser.add_argument("--max_vocabulary_size_per_gpu", type=int,
required=False, default=1024)
parser.add_argument("--combiner", type=str, required=False, default="sum",
choices=["sum", "mean"])
parser.add_argument("--slot_num", type=int, nargs="+",
help="the number of feature fileds",
required=False, default=1)
parser.add_argument("--max_nnz", type=int,
help="the maximum of valid inputs",
required=False, default=1)
parser.add_argument("--embedding_vec_size", type=int, nargs="+",
required=False, default=1)
parser.add_argument("--global_batch_size", type=int, required=False,
default=16)
parser.add_argument("--optimizer", type=str, required=False,
default="adam", choices=["plugin_adam", "adam", "sgd", "compat_adam"])
parser.add_argument("--generate_new_datas", type=int, choices=[0, 1],
required=False, default=1)
parser.add_argument("--save_params", type=int, choices=[0, 1],
required=False, default=1)
parser.add_argument("--restore_params", type=int, choices=[0, 1],
required=False, default=0)
parser.add_argument("--use_hashtable", type=int, choices=[0, 1],
required=False, default=1)
args = parser.parse_args()
args.generate_new_datas = True if args.generate_new_datas == 1 else False
args.save_params = True if args.save_params == 1 else False
args.restore_params = True if args.restore_params == 1 else False
args.use_hashtable = True if args.use_hashtable == 1 else False
if (args.distributed_tool == "onedevice" and args.gpu_num != 1):
raise ValueError(f"When 'onedevice' is used as the distributed_tool, "
f"gpu_num must be 1, which is {args.gpu_num}")
if args.distributed_tool == "onedevice":
available_gpus = ",".join(map(str, range(args.gpu_num)))
rank_size = args.gpu_num
rank_idx = 0
else:
# gpu_num will be ignored.
rank_size = os.getenv("OMPI_COMM_WORLD_SIZE")
if rank_size is None:
raise ValueError(f"When distributed_tool is set to {args.distributed_tool}, "
"mpiexec / mpirun must be used to launch this program.")
rank_size = int(rank_size)
rank_idx = int(os.getenv("OMPI_COMM_WORLD_RANK"))
available_gpus = str(rank_idx)
os.environ["CUDA_VISIBLE_DEVICES"] = available_gpus
args.rank_size = rank_size
args.rank_idx = rank_idx
args.gpu_num = rank_size
compare_sparse_emb_sok_with_tf(args)
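# Example launch commands (illustrative; the flag values are assumptions):
#   single GPU, no MPI:
#     python3 test_sparse_emb_demo.py --distributed_tool=onedevice --gpu_num=1 \
#         --slot_num 10 20 --embedding_vec_size 4 8 --max_nnz=5
#   multi-GPU via Horovod, where mpiexec sets OMPI_COMM_WORLD_SIZE / OMPI_COMM_WORLD_RANK:
#     mpiexec -np 2 python3 test_sparse_emb_demo.py --distributed_tool=horovod \
#         --slot_num 10 20 --embedding_vec_size 4 8 --max_nnz=5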
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"OMPI_COMM_WORLD_SIZE",
"OMPI_COMM_WORLD_RANK",
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "OMPI_COMM_WORLD_SIZE", "OMPI_COMM_WORLD_RANK", "TF_CPP_MIN_LOG_LEVEL"]
|
python
| 4 | 0 | |
internal/receiver/smartagentreceiver/receiver_test.go
|
// Copyright OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package smartagentreceiver
import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"testing"
"time"
saconfig "github.com/signalfx/signalfx-agent/pkg/core/config"
"github.com/signalfx/signalfx-agent/pkg/monitors"
"github.com/signalfx/signalfx-agent/pkg/monitors/cpu"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componenttest"
"go.opentelemetry.io/collector/config"
"go.opentelemetry.io/collector/consumer/consumertest"
"go.opentelemetry.io/collector/model/pdata"
"go.opentelemetry.io/collector/service/servicetest"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest/observer"
internaltest "github.com/signalfx/splunk-otel-collector/internal/components/componenttest"
"github.com/signalfx/splunk-otel-collector/internal/extension/smartagentextension"
)
func cleanUp() {
configureEnvironmentOnce = sync.Once{}
}
func newReceiverCreateSettings() component.ReceiverCreateSettings {
return component.ReceiverCreateSettings{
TelemetrySettings: component.TelemetrySettings{
Logger: zap.NewNop(),
TracerProvider: trace.NewNoopTracerProvider(),
},
}
}
var expectedCPUMetrics = map[string]pdata.MetricDataType{
"cpu.idle": pdata.MetricDataTypeSum,
"cpu.interrupt": pdata.MetricDataTypeSum,
"cpu.nice": pdata.MetricDataTypeSum,
"cpu.num_processors": pdata.MetricDataTypeGauge,
"cpu.softirq": pdata.MetricDataTypeSum,
"cpu.steal": pdata.MetricDataTypeSum,
"cpu.system": pdata.MetricDataTypeSum,
"cpu.user": pdata.MetricDataTypeSum,
"cpu.utilization": pdata.MetricDataTypeGauge,
"cpu.utilization_per_core": pdata.MetricDataTypeGauge,
"cpu.wait": pdata.MetricDataTypeSum,
}
func newConfig(nameVal, monitorType string, intervalSeconds int) Config {
return Config{
ReceiverSettings: config.NewReceiverSettings(config.NewComponentIDWithName(typeStr, nameVal)),
monitorConfig: &cpu.Config{
MonitorConfig: saconfig.MonitorConfig{
Type: monitorType,
IntervalSeconds: intervalSeconds,
ExtraDimensions: map[string]string{
"required_dimension": "required_value",
},
},
ReportPerCPU: true,
},
}
}
func TestSmartAgentReceiver(t *testing.T) {
t.Cleanup(cleanUp)
cfg := newConfig("valid", "cpu", 10)
consumer := new(consumertest.MetricsSink)
receiver := NewReceiver(newReceiverCreateSettings(), cfg)
receiver.registerMetricsConsumer(consumer)
err := receiver.Start(context.Background(), componenttest.NewNopHost())
require.NoError(t, err)
assert.EqualValues(t, "smartagentvalid", cfg.monitorConfig.MonitorConfigCore().MonitorID)
monitor, isMonitor := receiver.monitor.(*cpu.Monitor)
require.True(t, isMonitor)
monitorOutput := monitor.Output
_, isOutput := monitorOutput.(*Output)
assert.True(t, isOutput)
assert.Eventuallyf(t, func() bool {
// confirm single occurrence of total metrics as sanity in lieu of
// out of scope cpu monitor verification.
seenTotalMetric := map[string]bool{}
allMetrics := consumer.AllMetrics()
for _, m := range allMetrics {
resourceMetrics := m.ResourceMetrics()
for i := 0; i < resourceMetrics.Len(); i++ {
resourceMetric := resourceMetrics.At(i)
instrumentationLibraryMetrics := resourceMetric.InstrumentationLibraryMetrics()
for j := 0; j < instrumentationLibraryMetrics.Len(); j++ {
instrumentationLibraryMetric := instrumentationLibraryMetrics.At(j)
metrics := instrumentationLibraryMetric.Metrics()
for k := 0; k < metrics.Len(); k++ {
metric := metrics.At(k)
name := metric.Name()
dataType := metric.DataType()
expectedDataType := expectedCPUMetrics[name]
require.NotEqual(t, pdata.MetricDataTypeNone, expectedDataType, "received unexpected none type for %s", name)
assert.Equal(t, expectedDataType, dataType)
var attributes pdata.AttributeMap
switch dataType {
case pdata.MetricDataTypeGauge:
dg := metric.Gauge()
for l := 0; l < dg.DataPoints().Len(); l++ {
dgdp := dg.DataPoints().At(l)
attributes = dgdp.Attributes()
var val = dgdp.DoubleVal()
assert.NotEqual(t, val, 0, "invalid value of MetricDataTypeGauge metric %s", name)
}
case pdata.MetricDataTypeSum:
ds := metric.Sum()
for l := 0; l < ds.DataPoints().Len(); l++ {
dsdp := ds.DataPoints().At(l)
attributes = dsdp.Attributes()
var val float64 = dsdp.DoubleVal()
assert.NotEqual(t, val, 0, "invalid value of MetricDataTypeSum metric %s", name)
}
default:
t.Errorf("unexpected type %#v for metric %s", metric.DataType(), name)
}
labelVal, ok := attributes.Get("required_dimension")
require.True(t, ok)
assert.Equal(t, "required_value", labelVal.StringVal())
systemType, ok := attributes.Get("system.type")
require.True(t, ok)
assert.Equal(t, "cpu", systemType.StringVal())
// mark metric as having been seen
cpuNum, _ := attributes.Get("cpu")
seenName := fmt.Sprintf("%s%s", name, cpuNum.StringVal())
assert.False(t, seenTotalMetric[seenName], "unexpectedly repeated metric: %v", seenName)
seenTotalMetric[seenName] = true
}
}
}
}
return len(allMetrics) > 0
}, 5*time.Second, 1*time.Millisecond, "failed to receive expected cpu metrics")
metrics := consumer.AllMetrics()
assert.Greater(t, len(metrics), 0)
err = receiver.Shutdown(context.Background())
assert.NoError(t, err)
}
func TestStripMonitorTypePrefix(t *testing.T) {
assert.Equal(t, "nginx", stripMonitorTypePrefix("collectd/nginx"))
assert.Equal(t, "cpu", stripMonitorTypePrefix("cpu"))
}
func TestStartReceiverWithInvalidMonitorConfig(t *testing.T) {
t.Cleanup(cleanUp)
cfg := newConfig("invalid", "cpu", -123)
receiver := NewReceiver(newReceiverCreateSettings(), cfg)
err := receiver.Start(context.Background(), componenttest.NewNopHost())
assert.EqualError(t, err,
"config validation failed for \"smartagent/invalid\": intervalSeconds must be greater than 0s (-123 provided)",
)
}
func TestStartReceiverWithUnknownMonitorType(t *testing.T) {
t.Cleanup(cleanUp)
cfg := newConfig("invalid", "notamonitortype", 1)
receiver := NewReceiver(newReceiverCreateSettings(), cfg)
err := receiver.Start(context.Background(), componenttest.NewNopHost())
assert.EqualError(t, err,
"failed creating monitor \"notamonitortype\": unable to find MonitorFactory for \"notamonitortype\"",
)
}
func TestStartAndShutdown(t *testing.T) {
t.Cleanup(cleanUp)
cfg := newConfig("valid", "cpu", 1)
receiver := NewReceiver(newReceiverCreateSettings(), cfg)
err := receiver.Start(context.Background(), componenttest.NewNopHost())
require.NoError(t, err)
err = receiver.Shutdown(context.Background())
require.NoError(t, err)
}
func TestOutOfOrderShutdownInvocations(t *testing.T) {
t.Cleanup(cleanUp)
cfg := newConfig("valid", "cpu", 1)
receiver := NewReceiver(newReceiverCreateSettings(), cfg)
err := receiver.Shutdown(context.Background())
require.Error(t, err)
assert.EqualError(t, err,
"smartagentreceiver's Shutdown() called before Start() or with invalid monitor state",
)
}
func TestMultipleInstancesOfSameMonitorType(t *testing.T) {
t.Cleanup(cleanUp)
cfg := newConfig("valid", "cpu", 1)
fstRcvr := NewReceiver(newReceiverCreateSettings(), cfg)
ctx := context.Background()
mh := internaltest.NewAssertNoErrorHost(t)
require.NoError(t, fstRcvr.Start(ctx, mh))
require.NoError(t, fstRcvr.Shutdown(ctx))
sndRcvr := NewReceiver(newReceiverCreateSettings(), cfg)
assert.NoError(t, sndRcvr.Start(ctx, mh))
assert.NoError(t, sndRcvr.Shutdown(ctx))
}
func TestInvalidMonitorStateAtShutdown(t *testing.T) {
t.Cleanup(cleanUp)
cfg := newConfig("valid", "cpu", 1)
receiver := NewReceiver(newReceiverCreateSettings(), cfg)
receiver.monitor = new(interface{})
err := receiver.Shutdown(context.Background())
require.Error(t, err)
assert.Contains(t, err.Error(), "invalid monitor state at Shutdown(): (*interface {})")
}
func TestConfirmStartingReceiverWithInvalidMonitorInstancesDoesntPanic(t *testing.T) {
t.Cleanup(cleanUp)
tests := []struct {
name string
monitorFactory func() interface{}
expectedError string
}{
{"anonymous struct", func() interface{} { return struct{}{} }, ""},
{"anonymous struct pointer", func() interface{} { return &struct{}{} }, ""},
{"nil interface pointer", func() interface{} { return new(interface{}) }, ": invalid struct instance: (*interface {})"},
{"nil", func() interface{} { return nil }, ": invalid struct instance: <nil>"},
{"boolean", func() interface{} { return false }, ": invalid struct instance: false"},
{"string", func() interface{} { return "asdf" }, ": invalid struct instance: \"asdf\""},
}
for _, test := range tests {
t.Run(test.name, func(tt *testing.T) {
monitors.MonitorFactories["notarealmonitor"] = test.monitorFactory
monitors.MonitorMetadatas["notarealmonitor"] = &monitors.Metadata{MonitorType: "notarealmonitor"}
cfg := newConfig("invalid", "notarealmonitor", 123)
receiver := NewReceiver(newReceiverCreateSettings(), cfg)
err := receiver.Start(context.Background(), componenttest.NewNopHost())
require.Error(tt, err)
assert.Contains(tt, err.Error(),
fmt.Sprintf("failed creating monitor \"notarealmonitor\": unable to set Output field of monitor%s", test.expectedError),
)
})
}
}
func TestFilteringNoMetadata(t *testing.T) {
t.Cleanup(cleanUp)
monitors.MonitorFactories["fakemonitor"] = func() interface{} { return struct{}{} }
cfg := newConfig("valid", "fakemonitor", 1)
receiver := NewReceiver(newReceiverCreateSettings(), cfg)
err := receiver.Start(context.Background(), componenttest.NewNopHost())
require.EqualError(t, err, "failed creating monitor \"fakemonitor\": could not find monitor metadata of type fakemonitor")
}
func TestSmartAgentConfigProviderOverrides(t *testing.T) {
t.Cleanup(cleanUp)
cfg := newConfig("valid", "cpu", 1)
observedLogger, logs := observer.New(zapcore.WarnLevel)
logger := zap.New(observedLogger)
rcs := newReceiverCreateSettings()
rcs.Logger = logger
r := NewReceiver(rcs, cfg)
configs := getSmartAgentExtensionConfig(t)
host := &mockHost{
smartagentextensionConfig: configs[0],
smartagentextensionConfigExtra: configs[1],
}
require.NoError(t, r.Start(context.Background(), host))
require.NoError(t, r.Shutdown(context.Background()))
require.True(t, func() bool {
for _, message := range logs.All() {
if strings.HasPrefix(message.Message, "multiple smartagent extensions found, using ") {
return true
}
}
return false
}())
require.Equal(t, saconfig.CollectdConfig{
DisableCollectd: false,
Timeout: 10,
ReadThreads: 1,
WriteThreads: 4,
WriteQueueLimitHigh: 5,
WriteQueueLimitLow: 400000,
LogLevel: "notice",
IntervalSeconds: 10,
WriteServerIPAddr: "127.9.8.7",
WriteServerPort: 0,
ConfigDir: filepath.Join("/opt", "run", "collectd"),
BundleDir: "/opt/",
HasGenericJMXMonitor: false,
InstanceName: "",
WriteServerQuery: "",
}, saConfig.Collectd)
// Ensure envs are setup.
require.Equal(t, "/opt/", os.Getenv("SIGNALFX_BUNDLE_DIR"))
if runtime.GOOS == "windows" {
require.NotEqual(t, filepath.Join("/opt", "jre"), os.Getenv("JAVA_HOME"))
} else {
require.Equal(t, filepath.Join("/opt", "jre"), os.Getenv("JAVA_HOME"))
}
require.Equal(t, "/proc", os.Getenv("HOST_PROC"))
require.Equal(t, "/sys", os.Getenv("HOST_SYS"))
require.Equal(t, "/run", os.Getenv("HOST_RUN"))
require.Equal(t, "/var", os.Getenv("HOST_VAR"))
require.Equal(t, "/etc", os.Getenv("HOST_ETC"))
}
func getSmartAgentExtensionConfig(t *testing.T) []*smartagentextension.Config {
factories, err := componenttest.NopFactories()
require.Nil(t, err)
factory := smartagentextension.NewFactory()
factories.Extensions[typeStr] = factory
cfg, err := servicetest.LoadConfig(
path.Join(".", "testdata", "extension_config.yaml"), factories,
)
require.NoError(t, err)
partialSettingsConfig := cfg.Extensions[config.NewComponentIDWithName(typeStr, "partial_settings")]
require.NotNil(t, partialSettingsConfig)
extraSettingsConfig := cfg.Extensions[config.NewComponentIDWithName(typeStr, "extra")]
require.NotNil(t, extraSettingsConfig)
one, ok := partialSettingsConfig.(*smartagentextension.Config)
require.True(t, ok)
two, ok := extraSettingsConfig.(*smartagentextension.Config)
require.True(t, ok)
return []*smartagentextension.Config{one, two}
}
type mockHost struct {
smartagentextensionConfig *smartagentextension.Config
smartagentextensionConfigExtra *smartagentextension.Config
}
func (m *mockHost) ReportFatalError(error) {
}
func (m *mockHost) GetFactory(component.Kind, config.Type) component.Factory {
return nil
}
func (m *mockHost) GetExtensions() map[config.ComponentID]component.Extension {
exampleFactory := componenttest.NewNopExtensionFactory()
randomExtensionConfig := exampleFactory.CreateDefaultConfig()
return map[config.ComponentID]component.Extension{
m.smartagentextensionConfig.ID(): getExtension(smartagentextension.NewFactory(), m.smartagentextensionConfig),
randomExtensionConfig.ID(): getExtension(exampleFactory, randomExtensionConfig),
m.smartagentextensionConfigExtra.ID(): getExtension(smartagentextension.NewFactory(), m.smartagentextensionConfigExtra),
}
}
func getExtension(f component.ExtensionFactory, cfg config.Extension) component.Extension {
e, err := f.CreateExtension(context.Background(), component.ExtensionCreateSettings{}, cfg)
if err != nil {
panic(err)
}
return e
}
func (m *mockHost) GetExporters() map[config.DataType]map[config.ComponentID]component.Exporter {
return nil
}
|
[
"\"SIGNALFX_BUNDLE_DIR\"",
"\"JAVA_HOME\"",
"\"JAVA_HOME\"",
"\"HOST_PROC\"",
"\"HOST_SYS\"",
"\"HOST_RUN\"",
"\"HOST_VAR\"",
"\"HOST_ETC\""
] |
[] |
[
"HOST_SYS",
"JAVA_HOME",
"SIGNALFX_BUNDLE_DIR",
"HOST_PROC",
"HOST_RUN",
"HOST_VAR",
"HOST_ETC"
] |
[]
|
["HOST_SYS", "JAVA_HOME", "SIGNALFX_BUNDLE_DIR", "HOST_PROC", "HOST_RUN", "HOST_VAR", "HOST_ETC"]
|
go
| 7 | 0 | |
playground_test.go
|
/*
* Flow Playground
*
* Copyright 2019-2021 Dapper Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package playground_test
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"time"
"github.com/Masterminds/semver"
"github.com/go-chi/chi"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
playground "github.com/dapperlabs/flow-playground-api"
"github.com/dapperlabs/flow-playground-api/auth"
legacyauth "github.com/dapperlabs/flow-playground-api/auth/legacy"
"github.com/dapperlabs/flow-playground-api/client"
"github.com/dapperlabs/flow-playground-api/compute"
"github.com/dapperlabs/flow-playground-api/middleware/httpcontext"
"github.com/dapperlabs/flow-playground-api/model"
"github.com/dapperlabs/flow-playground-api/storage"
"github.com/dapperlabs/flow-playground-api/storage/datastore"
"github.com/dapperlabs/flow-playground-api/storage/memory"
)
type Project struct {
ID string
Title string
Description string
Readme string
Seed int
Persist bool
Version string
Accounts []struct {
ID string
Address string
DraftCode string
}
TransactionTemplates []TransactionTemplate
Secret string
}
const MutationCreateProject = `
mutation($title: String!, $description: String!, $readme: String!, $seed: Int!, $accounts: [String!], $transactionTemplates: [NewProjectTransactionTemplate!]) {
createProject(input: { title: $title, description: $description, readme: $readme, seed: $seed, accounts: $accounts, transactionTemplates: $transactionTemplates }) {
id
title
description
readme
seed
persist
version
accounts {
id
address
draftCode
}
transactionTemplates {
id
title
script
index
}
}
}
`
type CreateProjectResponse struct {
CreateProject Project
}
const QueryGetProject = `
query($projectId: UUID!) {
project(id: $projectId) {
id
accounts {
id
address
}
}
}
`
type GetProjectResponse struct {
Project Project
}
const MutationUpdateProjectPersist = `
mutation($projectId: UUID!, $title: String!, $description: String!, $readme: String!, $persist: Boolean!) {
updateProject(input: { id: $projectId, title: $title, description: $description, readme: $readme, persist: $persist }) {
id
title
description
readme
persist
}
}
`
type UpdateProjectResponse struct {
UpdateProject struct {
ID string
Title string
Description string
Readme string
Persist bool
}
}
const QueryGetProjectTransactionTemplates = `
query($projectId: UUID!) {
project(id: $projectId) {
id
transactionTemplates {
id
script
index
}
}
}
`
type GetProjectTransactionTemplatesResponse struct {
Project struct {
ID string
TransactionTemplates []struct {
ID string
Script string
Index int
}
}
}
const QueryGetProjectScriptTemplates = `
query($projectId: UUID!) {
project(id: $projectId) {
id
scriptTemplates {
id
script
index
}
}
}
`
type GetProjectScriptTemplatesResponse struct {
Project struct {
ID string
ScriptTemplates []struct {
ID string
Script string
Index int
}
}
}
const QueryGetAccount = `
query($accountId: UUID!, $projectId: UUID!) {
account(id: $accountId, projectId: $projectId) {
id
address
draftCode
deployedCode
state
}
}
`
type GetAccountResponse struct {
Account struct {
ID string
Address string
DraftCode string
DeployedCode string
State string
}
}
const MutationUpdateAccountDraftCode = `
mutation($accountId: UUID!, $projectId: UUID!, $code: String!) {
updateAccount(input: { id: $accountId, projectId: $projectId, draftCode: $code }) {
id
address
draftCode
deployedCode
}
}
`
const MutationUpdateAccountDeployedCode = `
mutation($accountId: UUID!, $projectId: UUID!, $code: String!) {
updateAccount(input: { id: $accountId, projectId: $projectId, deployedCode: $code }) {
id
address
draftCode
deployedCode
}
}
`
type UpdateAccountResponse struct {
UpdateAccount struct {
ID string
Address string
DraftCode string
DeployedCode string
}
}
type TransactionTemplate struct {
ID string
Title string
Script string
Index int
}
const MutationCreateTransactionTemplate = `
mutation($projectId: UUID!, $title: String!, $script: String!) {
createTransactionTemplate(input: { projectId: $projectId, title: $title, script: $script }) {
id
title
script
index
}
}
`
type CreateTransactionTemplateResponse struct {
CreateTransactionTemplate TransactionTemplate
}
const QueryGetTransactionTemplate = `
query($templateId: UUID!, $projectId: UUID!) {
transactionTemplate(id: $templateId, projectId: $projectId) {
id
script
index
}
}
`
type GetTransactionTemplateResponse struct {
TransactionTemplate struct {
ID string
Script string
Index int
}
}
const MutationUpdateTransactionTemplateScript = `
mutation($templateId: UUID!, $projectId: UUID!, $script: String!) {
updateTransactionTemplate(input: { id: $templateId, projectId: $projectId, script: $script }) {
id
script
index
}
}
`
const MutationUpdateTransactionTemplateIndex = `
mutation($templateId: UUID!, $projectId: UUID!, $index: Int!) {
updateTransactionTemplate(input: { id: $templateId, projectId: $projectId, index: $index }) {
id
script
index
}
}
`
type UpdateTransactionTemplateResponse struct {
UpdateTransactionTemplate struct {
ID string
Script string
Index int
}
}
const MutationDeleteTransactionTemplate = `
mutation($templateId: UUID!, $projectId: UUID!) {
deleteTransactionTemplate(id: $templateId, projectId: $projectId)
}
`
type DeleteTransactionTemplateResponse struct {
DeleteTransactionTemplate string
}
const MutationCreateTransactionExecution = `
mutation($projectId: UUID!, $script: String!, $signers: [Address!], $arguments: [String!]) {
createTransactionExecution(input: {
projectId: $projectId,
script: $script,
arguments: $arguments,
signers: $signers
}) {
id
script
errors {
message
startPosition { offset line column }
endPosition { offset line column }
}
logs
events {
type
values
}
}
}
`
type CreateTransactionExecutionResponse struct {
CreateTransactionExecution struct {
ID string
Script string
Errors []model.ProgramError
Logs []string
Events []struct {
Type string
Values []string
}
}
}
const MutationCreateScriptExecution = `
mutation CreateScriptExecution($projectId: UUID!, $script: String!, $arguments: [String!]) {
createScriptExecution(input: {
projectId: $projectId,
script: $script,
arguments: $arguments
}) {
id
script
errors {
message
startPosition { offset line column }
endPosition { offset line column }
}
logs
value
}
}
`
type CreateScriptExecutionResponse struct {
CreateScriptExecution struct {
ID string
Script string
Errors []model.ProgramError
Logs []string
Value string
}
}
const MutationCreateScriptTemplate = `
mutation($projectId: UUID!, $title: String!, $script: String!) {
createScriptTemplate(input: { projectId: $projectId, title: $title, script: $script }) {
id
title
script
index
}
}
`
type ScriptTemplate struct {
ID string
Title string
Script string
Index int
}
type CreateScriptTemplateResponse struct {
CreateScriptTemplate ScriptTemplate
}
const QueryGetScriptTemplate = `
query($templateId: UUID!, $projectId: UUID!) {
scriptTemplate(id: $templateId, projectId: $projectId) {
id
script
}
}
`
type GetScriptTemplateResponse struct {
ScriptTemplate ScriptTemplate
}
const MutationUpdateScriptTemplateScript = `
mutation($templateId: UUID!, $projectId: UUID!, $script: String!) {
updateScriptTemplate(input: { id: $templateId, projectId: $projectId, script: $script }) {
id
script
index
}
}
`
const MutationUpdateScriptTemplateIndex = `
mutation($templateId: UUID!, $projectId: UUID!, $index: Int!) {
updateScriptTemplate(input: { id: $templateId, projectId: $projectId, index: $index }) {
id
script
index
}
}
`
type UpdateScriptTemplateResponse struct {
UpdateScriptTemplate struct {
ID string
Script string
Index int
}
}
const MutationDeleteScriptTemplate = `
mutation($templateId: UUID!, $projectId: UUID!) {
deleteScriptTemplate(id: $templateId, projectId: $projectId)
}
`
type DeleteScriptTemplateResponse struct {
DeleteScriptTemplate string
}
func TestProjects(t *testing.T) {
t.Run("Create empty project", func(t *testing.T) {
c := newClient()
var resp CreateProjectResponse
err := c.Post(
MutationCreateProject,
&resp,
client.Var("title", "foo"),
client.Var("description", "bar"),
client.Var("readme", "bah"),
client.Var("seed", 42),
)
require.NoError(t, err)
assert.NotEmpty(t, resp.CreateProject.ID)
assert.Equal(t, 42, resp.CreateProject.Seed)
assert.Equal(t, version.String(), resp.CreateProject.Version)
// project should be created with 4 default accounts
assert.Len(t, resp.CreateProject.Accounts, playground.MaxAccounts)
// project should not be persisted
assert.False(t, resp.CreateProject.Persist)
})
t.Run("Create project with 2 accounts", func(t *testing.T) {
c := newClient()
var resp CreateProjectResponse
accounts := []string{
"pub contract Foo {}",
"pub contract Bar {}",
}
err := c.Post(
MutationCreateProject,
&resp,
client.Var("title", "foo"),
client.Var("description", "desc"),
client.Var("readme", "rtfm"),
client.Var("seed", 42),
client.Var("accounts", accounts),
)
require.NoError(t, err)
// project should still be created with 4 default accounts
assert.Len(t, resp.CreateProject.Accounts, playground.MaxAccounts)
assert.Equal(t, accounts[0], resp.CreateProject.Accounts[0].DraftCode)
assert.Equal(t, accounts[1], resp.CreateProject.Accounts[1].DraftCode)
assert.Equal(t, "", resp.CreateProject.Accounts[2].DraftCode)
})
t.Run("Create project with 4 accounts", func(t *testing.T) {
c := newClient()
var resp CreateProjectResponse
accounts := []string{
"pub contract Foo {}",
"pub contract Bar {}",
"pub contract Dog {}",
"pub contract Cat {}",
}
err := c.Post(
MutationCreateProject,
&resp,
client.Var("title", "foo"),
client.Var("seed", 42),
client.Var("description", "desc"),
client.Var("readme", "rtfm"),
client.Var("accounts", accounts),
)
require.NoError(t, err)
// project should still be created with 4 default accounts
assert.Len(t, resp.CreateProject.Accounts, playground.MaxAccounts)
assert.Equal(t, accounts[0], resp.CreateProject.Accounts[0].DraftCode)
assert.Equal(t, accounts[1], resp.CreateProject.Accounts[1].DraftCode)
assert.Equal(t, accounts[2], resp.CreateProject.Accounts[2].DraftCode)
})
t.Run("Create project with transaction templates", func(t *testing.T) {
c := newClient()
var resp CreateProjectResponse
templates := []struct {
Title string `json:"title"`
Script string `json:"script"`
}{
{
"foo", "transaction { execute { log(\"foo\") } }",
},
{
"bar", "transaction { execute { log(\"bar\") } }",
},
}
err := c.Post(
MutationCreateProject,
&resp,
client.Var("title", "foo"),
client.Var("seed", 42),
client.Var("description", "desc"),
client.Var("readme", "rtfm"),
client.Var("transactionTemplates", templates),
)
require.NoError(t, err)
assert.Len(t, resp.CreateProject.TransactionTemplates, 2)
assert.Equal(t, templates[0].Title, resp.CreateProject.TransactionTemplates[0].Title)
assert.Equal(t, templates[0].Script, resp.CreateProject.TransactionTemplates[0].Script)
assert.Equal(t, templates[1].Title, resp.CreateProject.TransactionTemplates[1].Title)
assert.Equal(t, templates[1].Script, resp.CreateProject.TransactionTemplates[1].Script)
})
t.Run("Get project", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp GetProjectResponse
err := c.Post(
QueryGetProject,
&resp,
client.Var("projectId", project.ID),
)
require.NoError(t, err)
assert.Equal(t, project.ID, resp.Project.ID)
})
t.Run("Get non-existent project", func(t *testing.T) {
c := newClient()
var resp CreateProjectResponse
badID := uuid.New().String()
err := c.Post(
QueryGetProject,
&resp,
client.Var("projectId", badID),
)
assert.Error(t, err)
})
t.Run("Persist project without permission", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp UpdateProjectResponse
err := c.Post(
MutationUpdateProjectPersist,
&resp,
client.Var("projectId", project.ID),
client.Var("title", project.Title),
client.Var("description", project.Description),
client.Var("readme", project.Readme),
client.Var("persist", true),
)
assert.Error(t, err)
})
t.Run("Persist project", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp UpdateProjectResponse
err := c.Post(
MutationUpdateProjectPersist,
&resp,
client.Var("projectId", project.ID),
client.Var("title", project.Title),
client.Var("description", project.Description),
client.Var("readme", project.Readme),
client.Var("persist", true),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, project.ID, resp.UpdateProject.ID)
assert.Equal(t, project.Title, resp.UpdateProject.Title)
assert.Equal(t, project.Description, resp.UpdateProject.Description)
assert.Equal(t, project.Readme, resp.UpdateProject.Readme)
assert.True(t, resp.UpdateProject.Persist)
})
}
func TestTransactionTemplates(t *testing.T) {
t.Run("Create transaction template without permission", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateTransactionTemplateResponse
err := c.Post(
MutationCreateTransactionTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "bar"),
)
assert.Error(t, err)
assert.Empty(t, resp.CreateTransactionTemplate.ID)
})
t.Run("Create transaction template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateTransactionTemplateResponse
err := c.Post(
MutationCreateTransactionTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "bar"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.NotEmpty(t, resp.CreateTransactionTemplate.ID)
assert.Equal(t, "foo", resp.CreateTransactionTemplate.Title)
assert.Equal(t, "bar", resp.CreateTransactionTemplate.Script)
})
t.Run("Get transaction template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var respA CreateTransactionTemplateResponse
err := c.Post(
MutationCreateTransactionTemplate,
&respA,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "bar"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
var respB GetTransactionTemplateResponse
err = c.Post(
QueryGetTransactionTemplate,
&respB,
client.Var("projectId", project.ID),
client.Var("templateId", respA.CreateTransactionTemplate.ID),
)
require.NoError(t, err)
assert.Equal(t, respA.CreateTransactionTemplate.ID, respB.TransactionTemplate.ID)
assert.Equal(t, respA.CreateTransactionTemplate.Script, respB.TransactionTemplate.Script)
})
t.Run("Get non-existent transaction template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp GetTransactionTemplateResponse
badID := uuid.New().String()
err := c.Post(
QueryGetTransactionTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("templateId", badID),
)
assert.Error(t, err)
})
t.Run("Update transaction template without permission", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var respA CreateTransactionTemplateResponse
err := c.Post(
MutationCreateTransactionTemplate,
&respA,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "apple"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
templateID := respA.CreateTransactionTemplate.ID
var respB UpdateTransactionTemplateResponse
err = c.Post(
MutationUpdateTransactionTemplateScript,
&respB,
client.Var("projectId", project.ID),
client.Var("templateId", templateID),
client.Var("script", "orange"),
)
assert.Error(t, err)
})
t.Run("Update transaction template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var respA CreateTransactionTemplateResponse
err := c.Post(
MutationCreateTransactionTemplate,
&respA,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "apple"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
templateID := respA.CreateTransactionTemplate.ID
var respB UpdateTransactionTemplateResponse
err = c.Post(
MutationUpdateTransactionTemplateScript,
&respB,
client.Var("projectId", project.ID),
client.Var("templateId", templateID),
client.Var("script", "orange"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, respA.CreateTransactionTemplate.ID, respB.UpdateTransactionTemplate.ID)
assert.Equal(t, respA.CreateTransactionTemplate.Index, respB.UpdateTransactionTemplate.Index)
assert.Equal(t, "orange", respB.UpdateTransactionTemplate.Script)
var respC UpdateTransactionTemplateResponse
err = c.Post(
MutationUpdateTransactionTemplateIndex,
&respC,
client.Var("projectId", project.ID),
client.Var("templateId", templateID),
client.Var("index", 1),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, respA.CreateTransactionTemplate.ID, respC.UpdateTransactionTemplate.ID)
assert.Equal(t, 1, respC.UpdateTransactionTemplate.Index)
assert.Equal(t, respB.UpdateTransactionTemplate.Script, respC.UpdateTransactionTemplate.Script)
})
t.Run("Update non-existent transaction template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp UpdateTransactionTemplateResponse
badID := uuid.New().String()
err := c.Post(
MutationUpdateTransactionTemplateScript,
&resp,
client.Var("projectId", project.ID),
client.Var("templateId", badID),
client.Var("script", "bar"),
)
assert.Error(t, err)
})
t.Run("Get transaction templates for project", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
templateA := createTransactionTemplate(t, c, project)
templateB := createTransactionTemplate(t, c, project)
templateC := createTransactionTemplate(t, c, project)
var resp GetProjectTransactionTemplatesResponse
err := c.Post(
QueryGetProjectTransactionTemplates,
&resp,
client.Var("projectId", project.ID),
)
require.NoError(t, err)
assert.Len(t, resp.Project.TransactionTemplates, 3)
assert.Equal(t, templateA.ID, resp.Project.TransactionTemplates[0].ID)
assert.Equal(t, templateB.ID, resp.Project.TransactionTemplates[1].ID)
assert.Equal(t, templateC.ID, resp.Project.TransactionTemplates[2].ID)
assert.Equal(t, 0, resp.Project.TransactionTemplates[0].Index)
assert.Equal(t, 1, resp.Project.TransactionTemplates[1].Index)
assert.Equal(t, 2, resp.Project.TransactionTemplates[2].Index)
})
t.Run("Get transaction templates for non-existent project", func(t *testing.T) {
c := newClient()
var resp GetProjectTransactionTemplatesResponse
badID := uuid.New().String()
err := c.Post(
QueryGetProjectTransactionTemplates,
&resp,
client.Var("projectId", badID),
)
assert.Error(t, err)
})
t.Run("Delete transaction template without permission", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
template := createTransactionTemplate(t, c, project)
var resp DeleteTransactionTemplateResponse
err := c.Post(
MutationDeleteTransactionTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("templateId", template.ID),
)
assert.Error(t, err)
assert.Empty(t, resp.DeleteTransactionTemplate)
})
t.Run("Delete transaction template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
template := createTransactionTemplate(t, c, project)
var resp DeleteTransactionTemplateResponse
err := c.Post(
MutationDeleteTransactionTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("templateId", template.ID),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, template.ID, resp.DeleteTransactionTemplate)
})
}
func TestTransactionExecutions(t *testing.T) {
t.Run("Create execution for non-existent project", func(t *testing.T) {
c := newClient()
badID := uuid.New().String()
var resp CreateTransactionExecutionResponse
err := c.Post(
MutationCreateTransactionExecution,
&resp,
client.Var("projectId", badID),
client.Var("script", "transaction { execute { log(\"Hello, World!\") } }"),
)
assert.Error(t, err)
})
t.Run("Create execution without permission", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateTransactionExecutionResponse
const script = "transaction { execute { log(\"Hello, World!\") } }"
err := c.Post(
MutationCreateTransactionExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
)
assert.Error(t, err)
})
t.Run("Create execution", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateTransactionExecutionResponse
const script = "transaction { execute { log(\"Hello, World!\") } }"
err := c.Post(
MutationCreateTransactionExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Empty(t, resp.CreateTransactionExecution.Errors)
assert.Contains(t, resp.CreateTransactionExecution.Logs, "\"Hello, World!\"")
assert.Equal(t, script, resp.CreateTransactionExecution.Script)
})
t.Run("Multiple executions", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var respA CreateTransactionExecutionResponse
const script = "transaction { prepare(signer: AuthAccount) { AuthAccount(payer: signer) } }"
err := c.Post(
MutationCreateTransactionExecution,
&respA,
client.Var("projectId", project.ID),
client.Var("script", script),
client.Var("signers", []string{project.Accounts[0].Address}),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
require.Empty(t, respA.CreateTransactionExecution.Errors)
require.Len(t, respA.CreateTransactionExecution.Events, 1)
eventA := respA.CreateTransactionExecution.Events[0]
// first account should have address 0x06
assert.Equal(t, "flow.AccountCreated", eventA.Type)
assert.JSONEq(t,
`{"type":"Address","value":"0x0000000000000006"}`,
eventA.Values[0],
)
var respB CreateTransactionExecutionResponse
err = c.Post(
MutationCreateTransactionExecution,
&respB,
client.Var("projectId", project.ID),
client.Var("script", script),
client.Var("signers", []string{project.Accounts[0].Address}),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
require.Empty(t, respB.CreateTransactionExecution.Errors)
require.Len(t, respB.CreateTransactionExecution.Events, 1)
eventB := respB.CreateTransactionExecution.Events[0]
// second account should have address 0x07
assert.Equal(t, "flow.AccountCreated", eventB.Type)
assert.JSONEq(t,
`{"type":"Address","value":"0x0000000000000007"}`,
eventB.Values[0],
)
})
t.Run("Multiple executions with cache reset", func(t *testing.T) {
// manually construct resolver
store := memory.NewStore()
computer, _ := compute.NewComputer(zerolog.Nop(), 128)
authenticator := auth.NewAuthenticator(store, sessionName)
resolver := playground.NewResolver(version, store, computer, authenticator)
c := newClientWithResolver(resolver)
project := createProject(t, c)
var respA CreateTransactionExecutionResponse
const script = "transaction { prepare(signer: AuthAccount) { AuthAccount(payer: signer) } }"
err := c.Post(
MutationCreateTransactionExecution,
&respA,
client.Var("projectId", project.ID),
client.Var("script", script),
client.Var("signers", []string{project.Accounts[0].Address}),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
require.Empty(t, respA.CreateTransactionExecution.Errors)
require.Len(t, respA.CreateTransactionExecution.Events, 1)
eventA := respA.CreateTransactionExecution.Events[0]
// first account should have address 0x06
assert.Equal(t, "flow.AccountCreated", eventA.Type)
assert.JSONEq(t,
`{"type":"Address","value":"0x0000000000000006"}`,
eventA.Values[0],
)
// clear ledger cache
computer.ClearCache()
var respB CreateTransactionExecutionResponse
err = c.Post(
MutationCreateTransactionExecution,
&respB,
client.Var("projectId", project.ID),
client.Var("script", script),
client.Var("signers", []string{project.Accounts[0].Address}),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
require.Len(t, respB.CreateTransactionExecution.Events, 1)
eventB := respB.CreateTransactionExecution.Events[0]
// second account should have address 0x07
assert.Equal(t, "flow.AccountCreated", eventB.Type)
assert.JSONEq(t,
`{"type":"Address","value":"0x0000000000000007"}`,
eventB.Values[0],
)
})
t.Run("invalid (parse error)", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateTransactionExecutionResponse
const script = `
transaction(a: Int) {
`
err := c.Post(
MutationCreateTransactionExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
require.Equal(t,
[]model.ProgramError{
{
Message: "unexpected token: EOF",
StartPosition: &model.ProgramPosition{
Offset: 41,
Line: 3,
Column: 8,
},
EndPosition: &model.ProgramPosition{
Offset: 41,
Line: 3,
Column: 8,
},
},
},
resp.CreateTransactionExecution.Errors,
)
require.Empty(t, resp.CreateTransactionExecution.Logs)
})
t.Run("invalid (semantic error)", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateTransactionExecutionResponse
const script = `
transaction { execute { XYZ } }
`
err := c.Post(
MutationCreateTransactionExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
require.Equal(t,
[]model.ProgramError{
{
Message: "cannot find variable in this scope: `XYZ`",
StartPosition: &model.ProgramPosition{
Offset: 35,
Line: 2,
Column: 34,
},
EndPosition: &model.ProgramPosition{
Offset: 37,
Line: 2,
Column: 36,
},
},
},
resp.CreateTransactionExecution.Errors,
)
require.Empty(t, resp.CreateTransactionExecution.Logs)
})
t.Run("invalid (run-time error)", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateTransactionExecutionResponse
const script = `
transaction { execute { panic("oh no") } }
`
err := c.Post(
MutationCreateTransactionExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
require.Equal(t,
[]model.ProgramError{
{
Message: "panic: oh no",
StartPosition: &model.ProgramPosition{
Offset: 35,
Line: 2,
Column: 34,
},
EndPosition: &model.ProgramPosition{
Offset: 48,
Line: 2,
Column: 47,
},
},
},
resp.CreateTransactionExecution.Errors,
)
require.Empty(t, resp.CreateTransactionExecution.Logs)
})
t.Run("exceeding computation limit", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateTransactionExecutionResponse
const script = `
transaction {
execute {
var i = 0
while i < 1_000_000 {
i = i + 1
}
}
}
`
err := c.Post(
MutationCreateTransactionExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, script, resp.CreateTransactionExecution.Script)
require.Equal(t,
[]model.ProgramError{
{
Message: "computation limited exceeded: 100000",
StartPosition: &model.ProgramPosition{
Offset: 139,
Line: 6,
Column: 22,
},
EndPosition: &model.ProgramPosition{
Offset: 147,
Line: 6,
Column: 30,
},
},
},
resp.CreateTransactionExecution.Errors,
)
})
t.Run("argument", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateTransactionExecutionResponse
const script = `
transaction(a: Int) {
execute {
log(a)
}
}
`
err := c.Post(
MutationCreateTransactionExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.Var("arguments", []string{
`{"type": "Int", "value": "42"}`,
}),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
require.Empty(t, resp.CreateTransactionExecution.Errors)
require.Equal(t, resp.CreateTransactionExecution.Logs, []string{"42"})
})
}
func TestScriptTemplates(t *testing.T) {
t.Run("Create script template without permission", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateScriptTemplateResponse
err := c.Post(
MutationCreateScriptTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "bar"),
)
assert.Error(t, err)
assert.Empty(t, resp.CreateScriptTemplate.ID)
})
t.Run("Create script template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateScriptTemplateResponse
err := c.Post(
MutationCreateScriptTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "bar"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.NotEmpty(t, resp.CreateScriptTemplate.ID)
assert.Equal(t, "foo", resp.CreateScriptTemplate.Title)
assert.Equal(t, "bar", resp.CreateScriptTemplate.Script)
})
t.Run("Get script template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var respA CreateScriptTemplateResponse
err := c.Post(
MutationCreateScriptTemplate,
&respA,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "bar"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
var respB GetScriptTemplateResponse
err = c.Post(
QueryGetScriptTemplate,
&respB,
client.Var("projectId", project.ID),
client.Var("templateId", respA.CreateScriptTemplate.ID),
)
require.NoError(t, err)
assert.Equal(t, respA.CreateScriptTemplate.ID, respB.ScriptTemplate.ID)
assert.Equal(t, respA.CreateScriptTemplate.Script, respB.ScriptTemplate.Script)
})
t.Run("Get non-existent script template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp GetScriptTemplateResponse
badID := uuid.New().String()
err := c.Post(
QueryGetScriptTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("templateId", badID),
)
assert.Error(t, err)
})
t.Run("Update script template without permission", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var respA CreateScriptTemplateResponse
err := c.Post(
MutationCreateScriptTemplate,
&respA,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "apple"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
templateID := respA.CreateScriptTemplate.ID
var respB UpdateScriptTemplateResponse
err = c.Post(
MutationUpdateScriptTemplateScript,
&respB,
client.Var("projectId", project.ID),
client.Var("templateId", templateID),
client.Var("script", "orange"),
)
assert.Error(t, err)
})
t.Run("Update script template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var respA CreateScriptTemplateResponse
err := c.Post(
MutationCreateScriptTemplate,
&respA,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "apple"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
templateID := respA.CreateScriptTemplate.ID
var respB UpdateScriptTemplateResponse
err = c.Post(
MutationUpdateScriptTemplateScript,
&respB,
client.Var("projectId", project.ID),
client.Var("templateId", templateID),
client.Var("script", "orange"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, respA.CreateScriptTemplate.ID, respB.UpdateScriptTemplate.ID)
assert.Equal(t, respA.CreateScriptTemplate.Index, respB.UpdateScriptTemplate.Index)
assert.Equal(t, "orange", respB.UpdateScriptTemplate.Script)
var respC UpdateScriptTemplateResponse
err = c.Post(
MutationUpdateScriptTemplateIndex,
&respC,
client.Var("projectId", project.ID),
client.Var("templateId", templateID),
client.Var("index", 1),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, respA.CreateScriptTemplate.ID, respC.UpdateScriptTemplate.ID)
assert.Equal(t, 1, respC.UpdateScriptTemplate.Index)
assert.Equal(t, respB.UpdateScriptTemplate.Script, respC.UpdateScriptTemplate.Script)
})
t.Run("Update non-existent script template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp UpdateScriptTemplateResponse
badID := uuid.New().String()
err := c.Post(
MutationUpdateScriptTemplateScript,
&resp,
client.Var("projectId", project.ID),
client.Var("templateId", badID),
client.Var("script", "bar"),
)
assert.Error(t, err)
})
t.Run("Get script templates for project", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
templateIDA := createScriptTemplate(t, c, project)
templateIDB := createScriptTemplate(t, c, project)
templateIDC := createScriptTemplate(t, c, project)
var resp GetProjectScriptTemplatesResponse
err := c.Post(
QueryGetProjectScriptTemplates,
&resp,
client.Var("projectId", project.ID),
)
require.NoError(t, err)
assert.Len(t, resp.Project.ScriptTemplates, 3)
assert.Equal(t, templateIDA, resp.Project.ScriptTemplates[0].ID)
assert.Equal(t, templateIDB, resp.Project.ScriptTemplates[1].ID)
assert.Equal(t, templateIDC, resp.Project.ScriptTemplates[2].ID)
assert.Equal(t, 0, resp.Project.ScriptTemplates[0].Index)
assert.Equal(t, 1, resp.Project.ScriptTemplates[1].Index)
assert.Equal(t, 2, resp.Project.ScriptTemplates[2].Index)
})
t.Run("Get script templates for non-existent project", func(t *testing.T) {
c := newClient()
var resp GetProjectScriptTemplatesResponse
badID := uuid.New().String()
err := c.Post(
QueryGetProjectScriptTemplates,
&resp,
client.Var("projectId", badID),
)
assert.Error(t, err)
})
t.Run("Delete script template without permission", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
templateID := createScriptTemplate(t, c, project)
var resp DeleteScriptTemplateResponse
err := c.Post(
MutationDeleteScriptTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("templateId", templateID),
)
assert.Error(t, err)
})
t.Run("Delete script template", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
templateID := createScriptTemplate(t, c, project)
var resp DeleteScriptTemplateResponse
err := c.Post(
MutationDeleteScriptTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("templateId", templateID),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, templateID, resp.DeleteScriptTemplate)
})
}
func TestAccounts(t *testing.T) {
t.Run("Get account", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
account := project.Accounts[0]
var resp GetAccountResponse
err := c.Post(
QueryGetAccount,
&resp,
client.Var("projectId", project.ID),
client.Var("accountId", account.ID),
)
require.NoError(t, err)
assert.Equal(t, account.ID, resp.Account.ID)
})
t.Run("Get non-existent account", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp GetAccountResponse
badID := uuid.New().String()
err := c.Post(
QueryGetAccount,
&resp,
client.Var("projectId", project.ID),
client.Var("accountId", badID),
)
assert.Error(t, err)
})
t.Run("Update account draft code without permission", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
account := project.Accounts[0]
var respA GetAccountResponse
err := c.Post(
QueryGetAccount,
&respA,
client.Var("projectId", project.ID),
client.Var("accountId", account.ID),
)
require.NoError(t, err)
assert.Equal(t, "", respA.Account.DraftCode)
var respB UpdateAccountResponse
err = c.Post(
MutationUpdateAccountDraftCode,
&respB,
client.Var("projectId", project.ID),
client.Var("accountId", account.ID),
client.Var("code", "bar"),
)
assert.Error(t, err)
})
t.Run("Update account draft code", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
account := project.Accounts[0]
var respA GetAccountResponse
err := c.Post(
QueryGetAccount,
&respA,
client.Var("projectId", project.ID),
client.Var("accountId", account.ID),
)
require.NoError(t, err)
assert.Equal(t, "", respA.Account.DraftCode)
var respB UpdateAccountResponse
err = c.Post(
MutationUpdateAccountDraftCode,
&respB,
client.Var("projectId", project.ID),
client.Var("accountId", account.ID),
client.Var("code", "bar"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, "bar", respB.UpdateAccount.DraftCode)
})
t.Run("Update account invalid deployed code", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
account := project.Accounts[0]
var respA GetAccountResponse
err := c.Post(
QueryGetAccount,
&respA,
client.Var("projectId", project.ID),
client.Var("accountId", account.ID),
)
require.NoError(t, err)
assert.Equal(t, "", respA.Account.DeployedCode)
var respB UpdateAccountResponse
err = c.Post(
MutationUpdateAccountDeployedCode,
&respB,
client.Var("projectId", project.ID),
client.Var("accountId", account.ID),
client.Var("code", "INVALID CADENCE"),
)
assert.Error(t, err)
assert.Equal(t, "", respB.UpdateAccount.DeployedCode)
})
t.Run("Update account deployed code without permission", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
account := project.Accounts[0]
var resp UpdateAccountResponse
const contract = "pub contract Foo {}"
err := c.Post(
MutationUpdateAccountDeployedCode,
&resp,
client.Var("projectId", project.ID),
client.Var("accountId", account.ID),
client.Var("code", contract),
)
assert.Error(t, err)
})
t.Run("Update account deployed code", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
account := project.Accounts[0]
var respA GetAccountResponse
err := c.Post(
QueryGetAccount,
&respA,
client.Var("projectId", project.ID),
client.Var("accountId", account.ID),
)
require.NoError(t, err)
assert.Equal(t, "", respA.Account.DeployedCode)
var respB UpdateAccountResponse
const contract = "pub contract Foo {}"
err = c.Post(
MutationUpdateAccountDeployedCode,
&respB,
client.Var("projectId", project.ID),
client.Var("accountId", account.ID),
client.Var("code", contract),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, contract, respB.UpdateAccount.DeployedCode)
})
t.Run("Update non-existent account", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp UpdateAccountResponse
badID := uuid.New().String()
err := c.Post(
MutationUpdateAccountDraftCode,
&resp,
client.Var("projectId", project.ID),
client.Var("accountId", badID),
client.Var("script", "bar"),
)
assert.Error(t, err)
})
}
const counterContract = `
pub contract Counting {
pub event CountIncremented(count: Int)
pub resource Counter {
pub var count: Int
init() {
self.count = 0
}
pub fun add(_ count: Int) {
self.count = self.count + count
emit CountIncremented(count: self.count)
}
}
pub fun createCounter(): @Counter {
return <-create Counter()
}
}
`
// generateAddTwoToCounterScript generates a script that increments a counter.
// If no counter exists, it is created.
func generateAddTwoToCounterScript(counterAddress string) string {
return fmt.Sprintf(
`
import 0x%s
transaction {
prepare(signer: AuthAccount) {
if signer.borrow<&Counting.Counter>(from: /storage/counter) == nil {
signer.save(<-Counting.createCounter(), to: /storage/counter)
}
signer.borrow<&Counting.Counter>(from: /storage/counter)!.add(2)
}
}
`,
counterAddress,
)
}
func TestContractInteraction(t *testing.T) {
c := newClient()
project := createProject(t, c)
accountA := project.Accounts[0]
accountB := project.Accounts[1]
var respA UpdateAccountResponse
err := c.Post(
MutationUpdateAccountDeployedCode,
&respA,
client.Var("projectId", project.ID),
client.Var("accountId", accountA.ID),
client.Var("code", counterContract),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, counterContract, respA.UpdateAccount.DeployedCode)
addScript := generateAddTwoToCounterScript(accountA.Address)
var respB CreateTransactionExecutionResponse
err = c.Post(
MutationCreateTransactionExecution,
&respB,
client.Var("projectId", project.ID),
client.Var("script", addScript),
client.Var("signers", []string{accountB.Address}),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Empty(t, respB.CreateTransactionExecution.Errors)
}
func TestAuthentication(t *testing.T) {
t.Run("Migrate legacy auth", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var respA UpdateProjectResponse
oldSessionCookie := c.SessionCookie()
// clear session cookie before making request
c.ClearSessionCookie()
err := c.Post(
MutationUpdateProjectPersist,
&respA,
client.Var("projectId", project.ID),
client.Var("title", project.Title),
client.Var("description", project.Description),
client.Var("readme", project.Readme),
client.Var("persist", true),
client.AddCookie(legacyauth.MockProjectSessionCookie(project.ID, project.Secret)),
)
require.NoError(t, err)
assert.Equal(t, project.ID, respA.UpdateProject.ID)
assert.Equal(t, project.Title, respA.UpdateProject.Title)
assert.Equal(t, project.Description, respA.UpdateProject.Description)
assert.Equal(t, project.Readme, respA.UpdateProject.Readme)
assert.True(t, respA.UpdateProject.Persist)
// a new session cookie should be set
require.NotNil(t, c.SessionCookie())
assert.NotEqual(t, oldSessionCookie.Value, c.SessionCookie().Value)
var respB UpdateProjectResponse
err = c.Post(
MutationUpdateProjectPersist,
&respB,
client.Var("projectId", project.ID),
client.Var("title", project.Title),
client.Var("description", project.Description),
client.Var("readme", project.Readme),
client.Var("persist", false),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
// should be able to perform update using new session cookie
assert.Equal(t, project.ID, respB.UpdateProject.ID)
assert.Equal(t, project.Title, respB.UpdateProject.Title)
assert.Equal(t, project.Description, respB.UpdateProject.Description)
assert.Equal(t, project.Readme, respB.UpdateProject.Readme)
assert.False(t, respB.UpdateProject.Persist)
})
t.Run("Create project with malformed session cookie", func(t *testing.T) {
c := newClient()
var respA CreateProjectResponse
malformedCookie := http.Cookie{
Name: sessionName,
Value: "foo",
}
err := c.Post(
MutationCreateProject,
&respA,
client.Var("title", "foo"),
client.Var("description", "desc"),
client.Var("readme", "rtfm"),
client.Var("seed", 42),
client.AddCookie(&malformedCookie),
)
require.NoError(t, err)
projectID := respA.CreateProject.ID
projectTitle := respA.CreateProject.Title
projectDescription := respA.CreateProject.Description
projectReadme := respA.CreateProject.Readme
assert.NotEmpty(t, projectID)
assert.Equal(t, 42, respA.CreateProject.Seed)
// session cookie should be overwritten with new value
assert.NotNil(t, c.SessionCookie())
var respB UpdateProjectResponse
err = c.Post(
MutationUpdateProjectPersist,
&respB,
client.Var("projectId", projectID),
client.Var("title", projectTitle),
client.Var("description", projectDescription),
client.Var("readme", projectReadme),
client.Var("persist", true),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
// should be able to perform update using new session cookie
assert.Equal(t, projectID, respB.UpdateProject.ID)
assert.Equal(t, projectTitle, respB.UpdateProject.Title)
assert.Equal(t, projectDescription, respB.UpdateProject.Description)
assert.Equal(t, projectReadme, respB.UpdateProject.Readme)
assert.True(t, respB.UpdateProject.Persist)
})
t.Run("Update project with malformed session cookie", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp UpdateProjectResponse
malformedCookie := http.Cookie{
Name: sessionName,
Value: "foo",
}
c.ClearSessionCookie()
err := c.Post(
MutationUpdateProjectPersist,
&resp,
client.Var("projectId", project.ID),
client.Var("persist", true),
client.AddCookie(&malformedCookie),
)
assert.Error(t, err)
// session cookie should not be set
assert.Nil(t, c.SessionCookie())
})
t.Run("Update project with invalid session cookie", func(t *testing.T) {
c := newClient()
projectA := createProject(t, c)
_ = createProject(t, c)
cookieB := c.SessionCookie()
var resp UpdateProjectResponse
err := c.Post(
MutationUpdateProjectPersist,
&resp,
client.Var("projectId", projectA.ID),
client.Var("persist", true),
client.AddCookie(cookieB),
)
// should not be able to update project A with cookie B
assert.Error(t, err)
})
}
func TestScriptExecutions(t *testing.T) {
t.Run("valid, no return value", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateScriptExecutionResponse
const script = "pub fun main() { }"
err := c.Post(
MutationCreateScriptExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
require.Empty(t, resp.CreateScriptExecution.Errors)
})
t.Run("invalid (parse error)", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateScriptExecutionResponse
const script = "pub fun main() {"
err := c.Post(
MutationCreateScriptExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, script, resp.CreateScriptExecution.Script)
require.Equal(t,
[]model.ProgramError{
{
Message: "expected token '}'",
StartPosition: &model.ProgramPosition{
Offset: 16,
Line: 1,
Column: 16,
},
EndPosition: &model.ProgramPosition{
Offset: 16,
Line: 1,
Column: 16,
},
},
},
resp.CreateScriptExecution.Errors,
)
})
t.Run("invalid (semantic error)", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateScriptExecutionResponse
const script = "pub fun main() { XYZ }"
err := c.Post(
MutationCreateScriptExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, script, resp.CreateScriptExecution.Script)
require.Equal(t,
[]model.ProgramError{
{
Message: "cannot find variable in this scope: `XYZ`",
StartPosition: &model.ProgramPosition{
Offset: 17,
Line: 1,
Column: 17,
},
EndPosition: &model.ProgramPosition{
Offset: 19,
Line: 1,
Column: 19,
},
},
},
resp.CreateScriptExecution.Errors,
)
})
t.Run("invalid (run-time error)", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateScriptExecutionResponse
const script = "pub fun main() { panic(\"oh no\") }"
err := c.Post(
MutationCreateScriptExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, script, resp.CreateScriptExecution.Script)
require.Equal(t,
[]model.ProgramError{
{
Message: "panic: oh no",
StartPosition: &model.ProgramPosition{
Offset: 17,
Line: 1,
Column: 17,
},
EndPosition: &model.ProgramPosition{
Offset: 30,
Line: 1,
Column: 30,
},
},
},
resp.CreateScriptExecution.Errors,
)
})
t.Run("exceeding computation limit", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateScriptExecutionResponse
const script = `
pub fun main() {
var i = 0
while i < 1_000_000 {
i = i + 1
}
}
`
err := c.Post(
MutationCreateScriptExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, script, resp.CreateScriptExecution.Script)
require.Equal(t,
[]model.ProgramError{
{
Message: "computation limited exceeded: 100000",
StartPosition: &model.ProgramPosition{
Offset: 106,
Line: 5,
Column: 18,
},
EndPosition: &model.ProgramPosition{
Offset: 114,
Line: 5,
Column: 26,
},
},
},
resp.CreateScriptExecution.Errors,
)
})
t.Run("return address", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateScriptExecutionResponse
const script = "pub fun main(): Address { return 0x1 as Address }"
err := c.Post(
MutationCreateScriptExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, script, resp.CreateScriptExecution.Script)
require.Empty(t, resp.CreateScriptExecution.Errors)
assert.JSONEq(t,
`{"type":"Address","value":"0x0000000000000001"}`,
resp.CreateScriptExecution.Value,
)
})
t.Run("argument", func(t *testing.T) {
c := newClient()
project := createProject(t, c)
var resp CreateScriptExecutionResponse
const script = "pub fun main(a: Int): Int { return a + 1 }"
err := c.Post(
MutationCreateScriptExecution,
&resp,
client.Var("projectId", project.ID),
client.Var("script", script),
client.Var("arguments", []string{
`{"type":"Int","value":"2"}`,
}),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
assert.Equal(t, script, resp.CreateScriptExecution.Script)
require.Empty(t, resp.CreateScriptExecution.Errors)
assert.JSONEq(t,
`{"type":"Int","value":"3"}`,
resp.CreateScriptExecution.Value,
)
})
}
type Client struct {
client *client.Client
resolver *playground.Resolver
sessionCookie *http.Cookie
}
func (c *Client) Post(query string, response interface{}, options ...client.Option) error {
w := httptest.NewRecorder()
err := c.client.Post(w, query, response, options...)
for _, cookie := range w.Result().Cookies() {
if cookie.Name == sessionName {
c.sessionCookie = cookie
}
}
return err
}
func (c *Client) MustPost(query string, response interface{}, options ...client.Option) {
err := c.Post(query, response, options...)
if err != nil {
panic(err)
}
}
func (c *Client) SessionCookie() *http.Cookie {
return c.sessionCookie
}
func (c *Client) ClearSessionCookie() {
c.sessionCookie = nil
}
const sessionName = "flow-playground-test"
var version, _ = semver.NewVersion("0.1.0")
func newClient() *Client {
var store storage.Store
// TODO: Should eventually start up the emulator and run all tests with datastore backend
if strings.EqualFold(os.Getenv("FLOW_STORAGEBACKEND"), "datastore") {
var err error
store, err = datastore.NewDatastore(context.Background(), &datastore.Config{
DatastoreProjectID: "dl-flow",
DatastoreTimeout: time.Second * 5,
})
if err != nil {
// If datastore is expected, panic when we can't init
panic(err)
}
} else {
store = memory.NewStore()
}
computer, _ := compute.NewComputer(zerolog.Nop(), 128)
authenticator := auth.NewAuthenticator(store, sessionName)
resolver := playground.NewResolver(version, store, computer, authenticator)
return newClientWithResolver(resolver)
}
func newClientWithResolver(resolver *playground.Resolver) *Client {
router := chi.NewRouter()
router.Use(httpcontext.Middleware())
router.Use(legacyauth.MockProjectSessions())
router.Handle("/", playground.GraphQLHandler(resolver))
return &Client{
client: client.New(router),
resolver: resolver,
}
}
func createProject(t *testing.T, c *Client) Project {
var resp CreateProjectResponse
err := c.Post(
MutationCreateProject,
&resp,
client.Var("title", "foo"),
client.Var("seed", 42),
client.Var("description", "desc"),
client.Var("readme", "rtfm"),
client.Var("accounts", []string{}),
client.Var("transactionTemplates", []string{}),
)
require.NoError(t, err)
proj := resp.CreateProject
internalProj := c.resolver.LastCreatedProject()
proj.Secret = internalProj.Secret.String()
return proj
}
func createTransactionTemplate(t *testing.T, c *Client, project Project) TransactionTemplate {
var resp CreateTransactionTemplateResponse
err := c.Post(
MutationCreateTransactionTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "bar"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
return resp.CreateTransactionTemplate
}
func createScriptTemplate(t *testing.T, c *Client, project Project) string {
var resp CreateScriptTemplateResponse
err := c.Post(
MutationCreateScriptTemplate,
&resp,
client.Var("projectId", project.ID),
client.Var("title", "foo"),
client.Var("script", "bar"),
client.AddCookie(c.SessionCookie()),
)
require.NoError(t, err)
return resp.CreateScriptTemplate.ID
}
|
[
"\"FLOW_STORAGEBACKEND\""
] |
[] |
[
"FLOW_STORAGEBACKEND"
] |
[]
|
["FLOW_STORAGEBACKEND"]
|
go
| 1 | 0 | |
cmd/cosmosdb.go
|
package cmd
import (
"os"
"gopkg.in/mgo.v2"
"net/url"
"fmt"
"strings"
"time"
"net"
"crypto/tls"
"log"
)
var mongoURL = os.Getenv("MONGOURL")
// MongoDB database and collection names
var mongoDatabaseName = ""
var mongoDBSessionCopy *mgo.Session
var mongoDBSession *mgo.Session
var mongoDBCollection *mgo.Collection
var mongoDBSessionError error
func WriteActivitiesToCosmosDB (activities Activities) {
ConnectToMongo()
log.Println("WriteActivitiesToCosmosDB")
mongoDBSessionCopy = mongoDBSession.Copy()
defer mongoDBSessionCopy.Close()
// Get collection
mongoDBCollection = mongoDBSessionCopy.DB(mongoDatabaseName).C("me") // change your collection name
err := mongoDBCollection.Insert(activities)
if err != nil {
log.Fatal("Problem inserting activities for collection: ", err)
}
log.Println("Inserted", activities)
}
func WriteSleepToCosmosDB (sleep SleepSummary) {
ConnectToMongo()
log.Println("WriteSleepToCosmosDB")
mongoDBSessionCopy = mongoDBSession.Copy()
defer mongoDBSessionCopy.Close()
// Get collection
mongoDBCollection = mongoDBSessionCopy.DB(mongoDatabaseName).C("me")
err := mongoDBCollection.Insert(sleep)
if err != nil {
log.Fatal("Problem inserting sleep for collection: ", err)
}
log.Println("Inserted", sleep)
}
func WriteHeartrateToCosmosDB (heartrate HeartRate) {
ConnectToMongo()
log.Println("WriteHeartrateToCosmosDB")
mongoDBSessionCopy = mongoDBSession.Copy()
defer mongoDBSessionCopy.Close()
// Get collection
mongoDBCollection = mongoDBSessionCopy.DB(mongoDatabaseName).C("me")
err := mongoDBCollection.Insert(heartrate)
if err != nil {
log.Fatal("Problem inserting heartrate for collection: ", err)
}
log.Println("Inserted", heartrate)
}
func ConnectToMongo() {
if len(os.Getenv("MONGOURL")) == 0 {
log.Print("The environment variable MONGOURL has not been set")
} else {
log.Print("The environment variable MONGOURL is " + os.Getenv("MONGOURL"))
}
url, err := url.Parse(mongoURL)
if err != nil {
log.Fatal(fmt.Sprintf("Problem parsing Mongo URL %s: ", url), err)
}
// Parse the connection string to extract components because the MongoDB driver is peculiar
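// Illustrative example only (an assumption, not taken from the original code): a Cosmos DB
// Mongo API connection string typically looks like
//   mongodb://myaccount:<base64-key>@myaccount.documents.azure.com:10255/?ssl=true
// which this parsing step splits into username "myaccount" (also used as the database name
// below), host "myaccount.documents.azure.com:10255", and ssl=true.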
var dialInfo *mgo.DialInfo
mongoUsername := ""
mongoPassword := ""
if url.User != nil {
mongoUsername = url.User.Username()
mongoPassword, _ = url.User.Password()
st := fmt.Sprintf("%s", url.User)
co := strings.Index(st, ":")
mongoDatabaseName = st[:co]
}
mongoHost := url.Host
mongoDatabase := mongoDatabaseName
mongoSSL := strings.Contains(url.RawQuery, "ssl=true")
log.Printf("\tUsername: %s", mongoUsername)
log.Printf("\tPassword: %s", mongoPassword)
log.Printf("\tHost: %s", mongoHost)
log.Printf("\tDatabase: %s", mongoDatabase)
log.Printf("\tSSL: %t", mongoSSL)
if mongoSSL {
dialInfo = &mgo.DialInfo{
Addrs: []string{mongoHost},
Timeout: 60 * time.Second,
Database: mongoDatabase, // It can be anything
Username: mongoUsername, // Username
Password: mongoPassword, // Password
DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) {
return tls.Dial("tcp", addr.String(), &tls.Config{})
},
}
} else {
dialInfo = &mgo.DialInfo{
Addrs: []string{mongoHost},
Timeout: 60 * time.Second,
Database: mongoDatabase, // It can be anything
Username: mongoUsername, // Username
Password: mongoPassword, // Password
}
}
mongoDBSession, mongoDBSessionError = mgo.DialWithInfo(dialInfo)
if mongoDBSessionError != nil {
// log.Fatal exits with a non-zero status, so no separate success flag or os.Exit call is needed
log.Fatal(fmt.Sprintf("Can't connect to mongo at [%s], go error: ", mongoURL), mongoDBSessionError)
}
// SetSafe changes the session safety mode.
// If the safe parameter is nil, the session is put in unsafe mode, and writes become fire-and-forget,
// without error checking. The unsafe mode is faster since operations won't hold on waiting for a confirmation.
// http://godoc.org/labix.org/v2/mgo#Session.SetMode.
mongoDBSession.SetSafe(nil)
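// A minimal alternative sketch (not what this code does): to keep acknowledged writes instead of
// fire-and-forget, one could call mongoDBSession.SetSafe(&mgo.Safe{}), which enables the default
// safety options and makes insert errors observable on each call.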
}
|
[
"\"MONGOURL\"",
"\"MONGOURL\"",
"\"MONGOURL\""
] |
[] |
[
"MONGOURL"
] |
[]
|
["MONGOURL"]
|
go
| 1 | 0 | |
office/v1/examples/message_store/read_all/main.go
|
package main
import (
"context"
"fmt"
"log"
"os"
"time"
"github.com/antihax/optional"
"github.com/grokify/goauth"
"github.com/grokify/goauth/credentials"
"github.com/grokify/simplego/config"
"github.com/grokify/simplego/fmt/fmtutil"
rc "github.com/grokify/go-ringcentral-client/office/v1/client"
ru "github.com/grokify/go-ringcentral-client/office/v1/util"
)
func getClient() (*rc.APIClient, error) {
err := config.LoadDotEnvSkipEmpty(os.Getenv("ENV_PATH"), "./.env")
if err != nil {
return nil, err
}
return ru.NewApiClientPassword(
credentials.OAuth2Credentials{
ServerURL: os.Getenv("RINGCENTRAL_SERVER_URL"),
ClientID: os.Getenv("RINGCENTRAL_CLIENT_ID"),
ClientSecret: os.Getenv("RINGCENTRAL_CLIENT_SECRET"),
GrantType: goauth.GrantTypePassword,
Username: os.Getenv("RINGCENTRAL_USERNAME"),
Password: os.Getenv("RINGCENTRAL_PASSWORD")})
}
func main() {
apiClient, err := getClient()
if err != nil {
panic(err)
}
dt, err := time.Parse(time.RFC3339, "2016-01-01T00:00:00Z")
if err != nil {
log.Fatal(err)
}
opts := &rc.ListMessagesOpts{
DateFrom: optional.NewTime(dt)}
info, resp, err := apiClient.MessagesApi.ListMessages(
context.Background(), "~", "~", opts)
if err != nil {
panic(err)
} else if resp.StatusCode >= 300 {
panic(fmt.Errorf("API Status %v", resp.StatusCode))
}
fmtutil.PrintJSON(info)
fmt.Println("DONE")
}
|
[
"\"ENV_PATH\"",
"\"RINGCENTRAL_SERVER_URL\"",
"\"RINGCENTRAL_CLIENT_ID\"",
"\"RINGCENTRAL_CLIENT_SECRET\"",
"\"RINGCENTRAL_USERNAME\"",
"\"RINGCENTRAL_PASSWORD\""
] |
[] |
[
"RINGCENTRAL_CLIENT_SECRET",
"RINGCENTRAL_USERNAME",
"RINGCENTRAL_PASSWORD",
"ENV_PATH",
"RINGCENTRAL_CLIENT_ID",
"RINGCENTRAL_SERVER_URL"
] |
[]
|
["RINGCENTRAL_CLIENT_SECRET", "RINGCENTRAL_USERNAME", "RINGCENTRAL_PASSWORD", "ENV_PATH", "RINGCENTRAL_CLIENT_ID", "RINGCENTRAL_SERVER_URL"]
|
go
| 6 | 0 | |
leads/migrations/0007_auto_20210928_1800.py
|
# Generated by Django 3.2.7 on 2021-09-28 18:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('leads', '0006_category'),
]
operations = [
migrations.AddField(
model_name='category',
name='organisation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='leads.userprofile'),
),
migrations.AddField(
model_name='leadmodel',
name='Category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='leads.category'),
),
]
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
just_do_it/settings.py
|
"""
Django settings for just_do_it project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
import django_heroku
import environ
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
environ.Env.read_env()
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'todos.apps.TodosConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'just_do_it.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'just_do_it.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    # reads os.environ['DATABASE_URL'] and raises an ImproperlyConfigured exception if it is not set
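    # For illustration only, django-environ expects a URL-style value here, e.g. a hypothetical
    # DATABASE_URL=postgres://dbuser:dbpassword@localhost:5432/just_do_it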
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Activate Django-Heroku.
django_heroku.settings(locals())
# To deploy your branch on Heroku you need to run the following command:
# git push heroku branch-name:master
# Don't forget to run `heroku pg:promote HEROKU_POSTGRESQL_SILVER_URL`
# so HEROKU_POSTGRESQL_SILVER_URL aliases DATABASE_URL
|
[] |
[] |
[
"DATABASE_URL"
] |
[]
|
["DATABASE_URL"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"golang.org/x/sys/windows/registry"
"gvm/web"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
)
const (
GvmVersion = "1.0.0"
)
// callback is the function type used when looping over files; returning true breaks the loop.
type callback func(file os.FileInfo, reg *regexp.Regexp, proot string) bool
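// For example, loopFiles below invokes the callback once per directory entry found next to
// GOROOT and stops scanning as soon as the callback returns true.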
func main() {
args := os.Args
osArch := strings.ToLower(os.Getenv("PROCESSOR_ARCHITECTURE"))
detail := ""
if len(args) < 2 {
help()
return
}
if len(args) > 2 {
detail = args[2]
}
if len(args) > 3 {
fmt.Println("Too many args: gvm expects 2 maximum.")
}
// Run the appropriate method
switch args[1] {
case "arch":
fmt.Println("System Architecture: " + osArch)
case "install":
success := install(detail, osArch)
if success {
fmt.Println("Successfully installed Go version " + detail + ".")
fmt.Println("To use this version, run gvm use " + detail + ". This will also set your GOROOT.")
}
case "goroot":
goroot(detail)
case "list":
listGos()
case "ls":
listGos()
case "uninstall":
uninstall(detail)
case "use":
useGo(detail)
case "version":
fmt.Println(GvmVersion)
case "v":
fmt.Println(GvmVersion)
default:
help()
}
}
func install(version string, arch string) bool {
fmt.Println("")
if os.Getenv("GOROOT") == "" {
fmt.Println("No GOROOT set. Set a GOROOT for Go installations with gvm goroot <path>.")
return false
}
if version == "" {
fmt.Println("Version not specified.")
return false
}
gorootroot := filepath.Clean(os.Getenv("GOROOT") + "\\..")
return web.Download(version, "windows-"+arch, gorootroot)
}
func goroot(path string) {
fmt.Println("")
if path == "" {
if os.Getenv("GOROOT") == "" {
fmt.Println("No GOROOT set.")
} else {
fmt.Println("GOROOT: ", os.Getenv("GOROOT"))
fmt.Println("Other Go versions installed at: ", filepath.Clean(os.Getenv("GOROOT")+"\\.."))
}
return
}
newpath := filepath.FromSlash(path)
//permanently set env var for user and local machine
//The path should be the same for all windows OSes.
machineEnvPath := "SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment"
userEnvPath := "Environment"
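// These paths correspond to the machine-wide environment under HKEY_LOCAL_MACHINE and the
// per-user environment under HKEY_CURRENT_USER; the machine flag passed to setEnvVar and
// updatePathVar below selects which hive is opened.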
setEnvVar("GOROOT", newpath, machineEnvPath, true)
setEnvVar("GOROOT", newpath, userEnvPath, false)
//Also update path for user and local machine
updatePathVar("PATH", filepath.FromSlash(os.Getenv("GOROOT")), newpath, machineEnvPath, true)
updatePathVar("PATH", filepath.FromSlash(os.Getenv("GOROOT")), newpath, userEnvPath, false)
fmt.Println("Set the GOROOT to " + newpath + ". Also updated PATH.")
fmt.Println("Note: You'll have to start another prompt to see the changes.")
}
func listGos() {
if os.Getenv("GOROOT") == "" {
fmt.Println("No GOROOT set. Set a GOROOT for go installations with gvm goroot <path>.")
return
}
//store all Go versions so we don't list duplicates
goVers := make(map[string]bool)
callb := func(f os.FileInfo, validDir *regexp.Regexp, gorootroot string) bool {
if f.IsDir() && validDir.MatchString(f.Name()) {
goDir := filepath.Join(gorootroot, f.Name())
version := getDirVersion(goDir)
//check if the version already exists (different named dirs with the same go version can exist)
_, exists := goVers[version]
if exists {
return false
}
str := ""
if goDir == os.Getenv("GOROOT") {
str = str + " * " + version[2:] + " (Using with GOROOT " + goDir + ")"
} else {
str = str + " " + version[2:]
}
goVers[version] = true
fmt.Println(str)
}
return false
}
loopFiles(callb)
}
func uninstall(unVer string) {
if unVer == "" {
fmt.Println("A version to uninstall must be specified.")
return
}
callb := func(f os.FileInfo, validDir *regexp.Regexp, gorootroot string) bool {
if f.IsDir() && validDir.MatchString(f.Name()) {
goDir := filepath.Join(gorootroot, f.Name())
version := getDirVersion(goDir)
if version == "go"+unVer {
os.RemoveAll(goDir)
fmt.Println("Uninstalled Go version " + version[2:] + ".")
fmt.Println("Note: If this was your GOROOT, make sure to set a new GOROOT with gvm goroot <path>")
return true
}
}
return false
}
found := loopFiles(callb)
if !found {
fmt.Println("Couldn't uninstall Go version " + unVer + ". Check Go versions with gvm list.")
}
}
func useGo(newVer string) {
if os.Getenv("GOROOT") == "" {
fmt.Println("No GOROOT set. Set a GOROOT for go installations with gvm goroot <path>.")
return
}
if newVer == "" {
fmt.Println("A new version must be specified.")
return
}
callb := func(f os.FileInfo, validDir *regexp.Regexp, gorootroot string) bool {
if f.IsDir() && validDir.MatchString(f.Name()) {
goDir := filepath.Join(gorootroot, f.Name())
version := getDirVersion(goDir)
if version == "go"+newVer {
//permanently set env var for user and local machine
//The path should be the same for all windows OSes.
machineEnvPath := "SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment"
userEnvPath := "Environment"
setEnvVar("GOROOT", filepath.FromSlash(goDir), machineEnvPath, true)
setEnvVar("GOROOT", filepath.FromSlash(goDir), userEnvPath, false)
//Also update path for user and local machine
updatePathVar("PATH", filepath.FromSlash(os.Getenv("GOROOT")), goDir, machineEnvPath, true)
updatePathVar("PATH", filepath.FromSlash(os.Getenv("GOROOT")), goDir, userEnvPath, false)
fmt.Println("Now using Go version " + version[2:] + ". Set GOROOT to " + goDir + ". Also updated PATH.")
fmt.Println("Note: You'll have to start another prompt to see the changes.")
return true
}
}
return false
}
found := loopFiles(callb)
if !found {
fmt.Println("Couldn't use Go version " + newVer + ". Check Go versions with gvm list.")
}
}
func loopFiles(fn callback) bool {
validDir := regexp.MustCompile(`go(\d\.\d\.\d){0,1}`)
gorootroot := filepath.Clean(os.Getenv("GOROOT") + "\\..")
files, _ := ioutil.ReadDir(gorootroot)
fmt.Println("")
for _, f := range files {
shouldBreak := fn(f, validDir, gorootroot)
if shouldBreak {
return true
}
}
return false
}
func setEnvVar(envVar string, newVal string, envPath string, machine bool) {
//this sets the environment variable (GOROOT in this case) for either LOCAL_MACHINE or CURRENT_USER.
//They are set in the registry. both must be set since the GOROOT could be used from either location.
regplace := registry.CURRENT_USER
if machine {
regplace = registry.LOCAL_MACHINE
}
key, err := registry.OpenKey(regplace, envPath, registry.ALL_ACCESS)
if err != nil {
fmt.Println("error", err)
return
}
defer key.Close()
err = key.SetStringValue(envVar, newVal)
if err != nil {
fmt.Println("error", err)
}
}
func updatePathVar(envVar string, oldVal string, newVal string, envPath string, machine bool) {
//this sets the environment variable for either LOCAL_MACHINE or CURRENT_USER.
//They are set in the registry. both must be set since the GOROOT could be used from either location.
regplace := registry.CURRENT_USER
if machine {
regplace = registry.LOCAL_MACHINE
}
key, err := registry.OpenKey(regplace, envPath, registry.ALL_ACCESS)
if err != nil {
fmt.Println("error", err)
return
}
defer key.Close()
val, _, kerr := key.GetStringValue(envVar)
if kerr != nil {
fmt.Println("error", err)
return
}
pvars := strings.Split(val, ";")
for i, pvar := range pvars {
if pvar == newVal+"\\bin" {
//the requested new value already exists in PATH, do nothing
return
}
if pvar == oldVal+"\\bin" {
pvars = append(pvars[:i], pvars[i+1:]...)
}
}
val = strings.Join(pvars, ";")
val = val + ";" + newVal + "\\bin"
err = key.SetStringValue("PATH", val)
if err != nil {
fmt.Println("error", err)
}
}
func getDirVersion(dir string) string {
files, _ := ioutil.ReadDir(dir)
for _, f := range files {
if f.Name() == "VERSION" {
dat, err := ioutil.ReadFile(filepath.Join(dir, f.Name()))
if err != nil {
return "Error reading file."
}
return string(dat)
}
}
return ""
}
func help() {
fmt.Println("\nRunning version " + GvmVersion + ".")
fmt.Println("\nUsage:")
fmt.Println(" ")
fmt.Println(" gvm arch : Show architecture of OS.")
fmt.Println(" gvm install <version> : The version must be a version of Go.")
fmt.Println(" gvm goroot [path] : Sets/appends GOROOT/PATH. Without the extra arg just shows current GOROOT.")
fmt.Println(" gvm list : List the Go installations at or adjacent to GOROOT. Aliased as ls.")
fmt.Println(" gvm uninstall <version> : Uninstall specified version of Go. If it was your GOROOT/PATH, make sure to set a new one after.")
fmt.Println(" gvm use <version> : Switch to use the specified version. This will set your GOROOT and PATH.")
fmt.Println(" gvm version : Displays the current running version of gvm for Windows. Aliased as v.")
}
|
[
"\"PROCESSOR_ARCHITECTURE\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\""
] |
[] |
[
"GOROOT",
"PROCESSOR_ARCHITECTURE"
] |
[]
|
["GOROOT", "PROCESSOR_ARCHITECTURE"]
|
go
| 2 | 0 | |
api4/apitestlib.go
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package api4
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
s3 "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/stretchr/testify/require"
"github.com/mattermost/mattermost-server/v5/app"
"github.com/mattermost/mattermost-server/v5/config"
"github.com/mattermost/mattermost-server/v5/model"
"github.com/mattermost/mattermost-server/v5/plugin/plugintest/mock"
"github.com/mattermost/mattermost-server/v5/services/searchengine"
"github.com/mattermost/mattermost-server/v5/shared/mlog"
"github.com/mattermost/mattermost-server/v5/store"
"github.com/mattermost/mattermost-server/v5/store/localcachelayer"
"github.com/mattermost/mattermost-server/v5/store/storetest/mocks"
"github.com/mattermost/mattermost-server/v5/testlib"
"github.com/mattermost/mattermost-server/v5/utils"
"github.com/mattermost/mattermost-server/v5/web"
"github.com/mattermost/mattermost-server/v5/wsapi"
)
type TestHelper struct {
App *app.App
Server *app.Server
ConfigStore *config.Store
Client *model.Client4
BasicUser *model.User
BasicUser2 *model.User
TeamAdminUser *model.User
BasicTeam *model.Team
BasicChannel *model.Channel
BasicPrivateChannel *model.Channel
BasicPrivateChannel2 *model.Channel
BasicDeletedChannel *model.Channel
BasicChannel2 *model.Channel
BasicPost *model.Post
Group *model.Group
SystemAdminClient *model.Client4
SystemAdminUser *model.User
tempWorkspace string
SystemManagerClient *model.Client4
SystemManagerUser *model.User
LocalClient *model.Client4
IncludeCacheLayer bool
}
var mainHelper *testlib.MainHelper
func SetMainHelper(mh *testlib.MainHelper) {
mainHelper = mh
}
func setupTestHelper(dbStore store.Store, searchEngine *searchengine.Broker, enterprise bool, includeCache bool, updateConfig func(*model.Config)) *TestHelper {
tempWorkspace, err := ioutil.TempDir("", "apptest")
if err != nil {
panic(err)
}
memoryStore, err := config.NewMemoryStoreWithOptions(&config.MemoryStoreOptions{IgnoreEnvironmentOverrides: true})
if err != nil {
panic("failed to initialize memory store: " + err.Error())
}
memoryConfig := &model.Config{}
memoryConfig.SetDefaults()
*memoryConfig.PluginSettings.Directory = filepath.Join(tempWorkspace, "plugins")
*memoryConfig.PluginSettings.ClientDirectory = filepath.Join(tempWorkspace, "webapp")
memoryConfig.ServiceSettings.EnableLocalMode = model.NewBool(true)
*memoryConfig.ServiceSettings.LocalModeSocketLocation = filepath.Join(tempWorkspace, "mattermost_local.sock")
*memoryConfig.AnnouncementSettings.AdminNoticesEnabled = false
*memoryConfig.AnnouncementSettings.UserNoticesEnabled = false
*memoryConfig.PluginSettings.AutomaticPrepackagedPlugins = false
if updateConfig != nil {
updateConfig(memoryConfig)
}
memoryStore.Set(memoryConfig)
configStore, err := config.NewStoreFromBacking(memoryStore, nil, false)
if err != nil {
panic(err)
}
var options []app.Option
options = append(options, app.ConfigStore(configStore))
if includeCache {
// Adds the cache layer to the test store
options = append(options, app.StoreOverride(func(s *app.Server) store.Store {
lcl, err2 := localcachelayer.NewLocalCacheLayer(dbStore, s.Metrics, s.Cluster, s.CacheProvider)
if err2 != nil {
panic(err2)
}
return lcl
}))
} else {
options = append(options, app.StoreOverride(dbStore))
}
s, err := app.NewServer(options...)
if err != nil {
panic(err)
}
th := &TestHelper{
App: app.New(app.ServerConnector(s)),
Server: s,
ConfigStore: configStore,
IncludeCacheLayer: includeCache,
}
if searchEngine != nil {
th.App.SetSearchEngine(searchEngine)
}
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.TeamSettings.MaxUsersPerTeam = 50
*cfg.RateLimitSettings.Enable = false
*cfg.EmailSettings.SendEmailNotifications = true
*cfg.ServiceSettings.SiteURL = ""
// Disable sniffing, otherwise elastic client fails to connect to docker node
// More details: https://github.com/olivere/elastic/wiki/Sniffing
*cfg.ElasticsearchSettings.Sniff = false
*cfg.TeamSettings.EnableOpenServer = true
// Disable strict password requirements for test
*cfg.PasswordSettings.MinimumLength = 5
*cfg.PasswordSettings.Lowercase = false
*cfg.PasswordSettings.Uppercase = false
*cfg.PasswordSettings.Symbol = false
*cfg.PasswordSettings.Number = false
*cfg.ServiceSettings.ListenAddress = ":0"
})
if err := th.Server.Start(); err != nil {
panic(err)
}
Init(th.Server, th.Server.AppOptions, th.App.Srv().Router)
InitLocal(th.Server, th.Server.AppOptions, th.App.Srv().LocalRouter)
web.New(th.Server, th.Server.AppOptions, th.App.Srv().Router)
wsapi.Init(th.App.Srv())
if enterprise {
th.App.Srv().SetLicense(model.NewTestLicense())
} else {
th.App.Srv().SetLicense(nil)
}
th.Client = th.CreateClient()
th.SystemAdminClient = th.CreateClient()
th.SystemManagerClient = th.CreateClient()
// Verify handling of the supported true/false values by randomizing on each run.
rand.Seed(time.Now().UTC().UnixNano())
trueValues := []string{"1", "t", "T", "TRUE", "true", "True"}
falseValues := []string{"0", "f", "F", "FALSE", "false", "False"}
trueString := trueValues[rand.Intn(len(trueValues))]
falseString := falseValues[rand.Intn(len(falseValues))]
mlog.Debug("Configured Client4 bool string values", mlog.String("true", trueString), mlog.String("false", falseString))
th.Client.SetBoolString(true, trueString)
th.Client.SetBoolString(false, falseString)
th.LocalClient = th.CreateLocalClient(*memoryConfig.ServiceSettings.LocalModeSocketLocation)
if th.tempWorkspace == "" {
th.tempWorkspace = tempWorkspace
}
th.App.InitServer()
return th
}
func SetupEnterprise(tb testing.TB) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
mainHelper.PreloadMigrations()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, true, true, nil)
th.InitLogin()
return th
}
func Setup(tb testing.TB) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
mainHelper.PreloadMigrations()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, false, true, nil)
th.InitLogin()
return th
}
func SetupConfig(tb testing.TB, updateConfig func(cfg *model.Config)) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, false, true, updateConfig)
th.InitLogin()
return th
}
func SetupConfigWithStoreMock(tb testing.TB, updateConfig func(cfg *model.Config)) *TestHelper {
th := setupTestHelper(testlib.GetMockStoreForSetupFunctions(), nil, false, false, updateConfig)
statusMock := mocks.StatusStore{}
statusMock.On("UpdateExpiredDNDStatuses").Return([]*model.Status{}, nil)
statusMock.On("Get", "user1").Return(&model.Status{UserId: "user1", Status: model.STATUS_ONLINE}, nil)
statusMock.On("UpdateLastActivityAt", "user1", mock.Anything).Return(nil)
statusMock.On("SaveOrUpdate", mock.AnythingOfType("*model.Status")).Return(nil)
emptyMockStore := mocks.Store{}
emptyMockStore.On("Close").Return(nil)
emptyMockStore.On("Status").Return(&statusMock)
th.App.Srv().Store = &emptyMockStore
return th
}
func SetupWithStoreMock(tb testing.TB) *TestHelper {
th := setupTestHelper(testlib.GetMockStoreForSetupFunctions(), nil, false, false, nil)
statusMock := mocks.StatusStore{}
statusMock.On("UpdateExpiredDNDStatuses").Return([]*model.Status{}, nil)
statusMock.On("Get", "user1").Return(&model.Status{UserId: "user1", Status: model.STATUS_ONLINE}, nil)
statusMock.On("UpdateLastActivityAt", "user1", mock.Anything).Return(nil)
statusMock.On("SaveOrUpdate", mock.AnythingOfType("*model.Status")).Return(nil)
emptyMockStore := mocks.Store{}
emptyMockStore.On("Close").Return(nil)
emptyMockStore.On("Status").Return(&statusMock)
th.App.Srv().Store = &emptyMockStore
return th
}
func SetupEnterpriseWithStoreMock(tb testing.TB) *TestHelper {
th := setupTestHelper(testlib.GetMockStoreForSetupFunctions(), nil, true, false, nil)
statusMock := mocks.StatusStore{}
statusMock.On("UpdateExpiredDNDStatuses").Return([]*model.Status{}, nil)
statusMock.On("Get", "user1").Return(&model.Status{UserId: "user1", Status: model.STATUS_ONLINE}, nil)
statusMock.On("UpdateLastActivityAt", "user1", mock.Anything).Return(nil)
statusMock.On("SaveOrUpdate", mock.AnythingOfType("*model.Status")).Return(nil)
emptyMockStore := mocks.Store{}
emptyMockStore.On("Close").Return(nil)
emptyMockStore.On("Status").Return(&statusMock)
th.App.Srv().Store = &emptyMockStore
return th
}
func (th *TestHelper) ShutdownApp() {
done := make(chan bool)
go func() {
th.Server.Shutdown()
close(done)
}()
select {
case <-done:
case <-time.After(30 * time.Second):
// panic instead of fatal to terminate all tests in this package, otherwise the
// still running App could spuriously fail subsequent tests.
panic("failed to shutdown App within 30 seconds")
}
}
func (th *TestHelper) TearDown() {
utils.DisableDebugLogForTest()
if th.IncludeCacheLayer {
// Clean all the caches
th.App.Srv().InvalidateAllCaches()
}
th.ShutdownApp()
utils.EnableDebugLogForTest()
}
var initBasicOnce sync.Once
var userCache struct {
SystemAdminUser *model.User
SystemManagerUser *model.User
TeamAdminUser *model.User
BasicUser *model.User
BasicUser2 *model.User
}
func (th *TestHelper) InitLogin() *TestHelper {
th.waitForConnectivity()
// create users once and cache them because password hashing is slow
initBasicOnce.Do(func() {
th.SystemAdminUser = th.CreateUser()
th.App.UpdateUserRoles(th.SystemAdminUser.Id, model.SYSTEM_USER_ROLE_ID+" "+model.SYSTEM_ADMIN_ROLE_ID, false)
th.SystemAdminUser, _ = th.App.GetUser(th.SystemAdminUser.Id)
userCache.SystemAdminUser = th.SystemAdminUser.DeepCopy()
th.SystemManagerUser = th.CreateUser()
th.App.UpdateUserRoles(th.SystemManagerUser.Id, model.SYSTEM_USER_ROLE_ID+" "+model.SYSTEM_MANAGER_ROLE_ID, false)
th.SystemManagerUser, _ = th.App.GetUser(th.SystemManagerUser.Id)
userCache.SystemManagerUser = th.SystemManagerUser.DeepCopy()
th.TeamAdminUser = th.CreateUser()
th.App.UpdateUserRoles(th.TeamAdminUser.Id, model.SYSTEM_USER_ROLE_ID, false)
th.TeamAdminUser, _ = th.App.GetUser(th.TeamAdminUser.Id)
userCache.TeamAdminUser = th.TeamAdminUser.DeepCopy()
th.BasicUser = th.CreateUser()
th.BasicUser, _ = th.App.GetUser(th.BasicUser.Id)
userCache.BasicUser = th.BasicUser.DeepCopy()
th.BasicUser2 = th.CreateUser()
th.BasicUser2, _ = th.App.GetUser(th.BasicUser2.Id)
userCache.BasicUser2 = th.BasicUser2.DeepCopy()
})
// restore cached users
th.SystemAdminUser = userCache.SystemAdminUser.DeepCopy()
th.SystemManagerUser = userCache.SystemManagerUser.DeepCopy()
th.TeamAdminUser = userCache.TeamAdminUser.DeepCopy()
th.BasicUser = userCache.BasicUser.DeepCopy()
th.BasicUser2 = userCache.BasicUser2.DeepCopy()
mainHelper.GetSQLStore().GetMaster().Insert(th.SystemAdminUser, th.TeamAdminUser, th.BasicUser, th.BasicUser2, th.SystemManagerUser)
// restore non hashed password for login
th.SystemAdminUser.Password = "Pa$$word11"
th.TeamAdminUser.Password = "Pa$$word11"
th.BasicUser.Password = "Pa$$word11"
th.BasicUser2.Password = "Pa$$word11"
th.SystemManagerUser.Password = "Pa$$word11"
var wg sync.WaitGroup
wg.Add(3)
go func() {
th.LoginSystemAdmin()
wg.Done()
}()
go func() {
th.LoginSystemManager()
wg.Done()
}()
go func() {
th.LoginTeamAdmin()
wg.Done()
}()
wg.Wait()
return th
}
func (th *TestHelper) InitBasic() *TestHelper {
th.BasicTeam = th.CreateTeam()
th.BasicChannel = th.CreatePublicChannel()
th.BasicPrivateChannel = th.CreatePrivateChannel()
th.BasicPrivateChannel2 = th.CreatePrivateChannel()
th.BasicDeletedChannel = th.CreatePublicChannel()
th.BasicChannel2 = th.CreatePublicChannel()
th.BasicPost = th.CreatePost()
th.LinkUserToTeam(th.BasicUser, th.BasicTeam)
th.LinkUserToTeam(th.BasicUser2, th.BasicTeam)
th.App.AddUserToChannel(th.BasicUser, th.BasicChannel, false)
th.App.AddUserToChannel(th.BasicUser2, th.BasicChannel, false)
th.App.AddUserToChannel(th.BasicUser, th.BasicChannel2, false)
th.App.AddUserToChannel(th.BasicUser2, th.BasicChannel2, false)
th.App.AddUserToChannel(th.BasicUser, th.BasicPrivateChannel, false)
th.App.AddUserToChannel(th.BasicUser2, th.BasicPrivateChannel, false)
th.App.AddUserToChannel(th.BasicUser, th.BasicDeletedChannel, false)
th.App.AddUserToChannel(th.BasicUser2, th.BasicDeletedChannel, false)
th.App.UpdateUserRoles(th.BasicUser.Id, model.SYSTEM_USER_ROLE_ID, false)
th.Client.DeleteChannel(th.BasicDeletedChannel.Id)
th.LoginBasic()
th.Group = th.CreateGroup()
return th
}
func (th *TestHelper) waitForConnectivity() {
for i := 0; i < 1000; i++ {
conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", th.App.Srv().ListenAddr.Port))
if err == nil {
conn.Close()
return
}
time.Sleep(time.Millisecond * 20)
}
panic("unable to connect")
}
func (th *TestHelper) CreateClient() *model.Client4 {
return model.NewAPIv4Client(fmt.Sprintf("http://localhost:%v", th.App.Srv().ListenAddr.Port))
}
// ToDo: maybe move this to NewAPIv4SocketClient and reuse it in mmctl
func (th *TestHelper) CreateLocalClient(socketPath string) *model.Client4 {
httpClient := &http.Client{
Transport: &http.Transport{
Dial: func(network, addr string) (net.Conn, error) {
return net.Dial("unix", socketPath)
},
},
}
return &model.Client4{
ApiUrl: "http://_" + model.API_URL_SUFFIX,
HttpClient: httpClient,
}
}
func (th *TestHelper) CreateWebSocketClient() (*model.WebSocketClient, *model.AppError) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", th.App.Srv().ListenAddr.Port), th.Client.AuthToken)
}
func (th *TestHelper) CreateWebSocketSystemAdminClient() (*model.WebSocketClient, *model.AppError) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", th.App.Srv().ListenAddr.Port), th.SystemAdminClient.AuthToken)
}
func (th *TestHelper) CreateWebSocketSystemManagerClient() (*model.WebSocketClient, *model.AppError) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", th.App.Srv().ListenAddr.Port), th.SystemManagerClient.AuthToken)
}
func (th *TestHelper) CreateWebSocketClientWithClient(client *model.Client4) (*model.WebSocketClient, *model.AppError) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", th.App.Srv().ListenAddr.Port), client.AuthToken)
}
func (th *TestHelper) CreateBotWithSystemAdminClient() *model.Bot {
return th.CreateBotWithClient((th.SystemAdminClient))
}
func (th *TestHelper) CreateBotWithClient(client *model.Client4) *model.Bot {
bot := &model.Bot{
Username: GenerateTestUsername(),
DisplayName: "a bot",
Description: "bot",
}
utils.DisableDebugLogForTest()
rbot, resp := client.CreateBot(bot)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rbot
}
func (th *TestHelper) CreateUser() *model.User {
return th.CreateUserWithClient(th.Client)
}
func (th *TestHelper) CreateTeam() *model.Team {
return th.CreateTeamWithClient(th.Client)
}
func (th *TestHelper) CreateTeamWithClient(client *model.Client4) *model.Team {
id := model.NewId()
team := &model.Team{
DisplayName: "dn_" + id,
Name: GenerateTestTeamName(),
Email: th.GenerateTestEmail(),
Type: model.TEAM_OPEN,
}
utils.DisableDebugLogForTest()
rteam, resp := client.CreateTeam(team)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rteam
}
func (th *TestHelper) CreateUserWithClient(client *model.Client4) *model.User {
id := model.NewId()
user := &model.User{
Email: th.GenerateTestEmail(),
Username: GenerateTestUsername(),
Nickname: "nn_" + id,
FirstName: "f_" + id,
LastName: "l_" + id,
Password: "Pa$$word11",
}
utils.DisableDebugLogForTest()
ruser, response := client.CreateUser(user)
if response.Error != nil {
panic(response.Error)
}
ruser.Password = "Pa$$word11"
_, err := th.App.Srv().Store.User().VerifyEmail(ruser.Id, ruser.Email)
if err != nil {
return nil
}
utils.EnableDebugLogForTest()
return ruser
}
func (th *TestHelper) CreateUserWithAuth(authService string) *model.User {
id := model.NewId()
user := &model.User{
Email: "success+" + id + "@simulator.amazonses.com",
Username: "un_" + id,
Nickname: "nn_" + id,
EmailVerified: true,
AuthService: authService,
}
user, err := th.App.CreateUser(user)
if err != nil {
panic(err)
}
return user
}
func (th *TestHelper) SetupLdapConfig() {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.ServiceSettings.EnableMultifactorAuthentication = true
*cfg.LdapSettings.Enable = true
*cfg.LdapSettings.EnableSync = true
*cfg.LdapSettings.LdapServer = "dockerhost"
*cfg.LdapSettings.BaseDN = "dc=mm,dc=test,dc=com"
*cfg.LdapSettings.BindUsername = "cn=admin,dc=mm,dc=test,dc=com"
*cfg.LdapSettings.BindPassword = "mostest"
*cfg.LdapSettings.FirstNameAttribute = "cn"
*cfg.LdapSettings.LastNameAttribute = "sn"
*cfg.LdapSettings.NicknameAttribute = "cn"
*cfg.LdapSettings.EmailAttribute = "mail"
*cfg.LdapSettings.UsernameAttribute = "uid"
*cfg.LdapSettings.IdAttribute = "cn"
*cfg.LdapSettings.LoginIdAttribute = "uid"
*cfg.LdapSettings.SkipCertificateVerification = true
*cfg.LdapSettings.GroupFilter = ""
*cfg.LdapSettings.GroupDisplayNameAttribute = "cN"
*cfg.LdapSettings.GroupIdAttribute = "entRyUuId"
*cfg.LdapSettings.MaxPageSize = 0
})
th.App.Srv().SetLicense(model.NewTestLicense("ldap"))
}
func (th *TestHelper) SetupSamlConfig() {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.SamlSettings.Enable = true
*cfg.SamlSettings.Verify = false
*cfg.SamlSettings.Encrypt = false
*cfg.SamlSettings.IdpUrl = "https://does.notmatter.com"
*cfg.SamlSettings.IdpDescriptorUrl = "https://localhost/adfs/services/trust"
*cfg.SamlSettings.AssertionConsumerServiceURL = "https://localhost/login/sso/saml"
*cfg.SamlSettings.ServiceProviderIdentifier = "https://localhost/login/sso/saml"
*cfg.SamlSettings.IdpCertificateFile = app.SamlIdpCertificateName
*cfg.SamlSettings.PrivateKeyFile = app.SamlPrivateKeyName
*cfg.SamlSettings.PublicCertificateFile = app.SamlPublicCertificateName
*cfg.SamlSettings.EmailAttribute = "Email"
*cfg.SamlSettings.UsernameAttribute = "Username"
*cfg.SamlSettings.FirstNameAttribute = "FirstName"
*cfg.SamlSettings.LastNameAttribute = "LastName"
*cfg.SamlSettings.NicknameAttribute = ""
*cfg.SamlSettings.PositionAttribute = ""
*cfg.SamlSettings.LocaleAttribute = ""
*cfg.SamlSettings.SignatureAlgorithm = model.SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA256
*cfg.SamlSettings.CanonicalAlgorithm = model.SAML_SETTINGS_CANONICAL_ALGORITHM_C14N11
})
th.App.Srv().SetLicense(model.NewTestLicense("saml"))
}
func (th *TestHelper) CreatePublicChannel() *model.Channel {
return th.CreateChannelWithClient(th.Client, model.CHANNEL_OPEN)
}
func (th *TestHelper) CreatePrivateChannel() *model.Channel {
return th.CreateChannelWithClient(th.Client, model.CHANNEL_PRIVATE)
}
func (th *TestHelper) CreateChannelWithClient(client *model.Client4, channelType string) *model.Channel {
return th.CreateChannelWithClientAndTeam(client, channelType, th.BasicTeam.Id)
}
func (th *TestHelper) CreateChannelWithClientAndTeam(client *model.Client4, channelType string, teamId string) *model.Channel {
id := model.NewId()
channel := &model.Channel{
DisplayName: "dn_" + id,
Name: GenerateTestChannelName(),
Type: channelType,
TeamId: teamId,
}
utils.DisableDebugLogForTest()
rchannel, resp := client.CreateChannel(channel)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rchannel
}
func (th *TestHelper) CreatePost() *model.Post {
return th.CreatePostWithClient(th.Client, th.BasicChannel)
}
func (th *TestHelper) CreatePinnedPost() *model.Post {
return th.CreatePinnedPostWithClient(th.Client, th.BasicChannel)
}
func (th *TestHelper) CreateMessagePost(message string) *model.Post {
return th.CreateMessagePostWithClient(th.Client, th.BasicChannel, message)
}
func (th *TestHelper) CreatePostWithClient(client *model.Client4, channel *model.Channel) *model.Post {
id := model.NewId()
post := &model.Post{
ChannelId: channel.Id,
Message: "message_" + id,
}
utils.DisableDebugLogForTest()
rpost, resp := client.CreatePost(post)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rpost
}
func (th *TestHelper) CreatePinnedPostWithClient(client *model.Client4, channel *model.Channel) *model.Post {
id := model.NewId()
post := &model.Post{
ChannelId: channel.Id,
Message: "message_" + id,
IsPinned: true,
}
utils.DisableDebugLogForTest()
rpost, resp := client.CreatePost(post)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rpost
}
func (th *TestHelper) CreateMessagePostWithClient(client *model.Client4, channel *model.Channel, message string) *model.Post {
post := &model.Post{
ChannelId: channel.Id,
Message: message,
}
utils.DisableDebugLogForTest()
rpost, resp := client.CreatePost(post)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rpost
}
func (th *TestHelper) CreateMessagePostNoClient(channel *model.Channel, message string, createAtTime int64) *model.Post {
post, err := th.App.Srv().Store.Post().Save(&model.Post{
UserId: th.BasicUser.Id,
ChannelId: channel.Id,
Message: message,
CreateAt: createAtTime,
})
if err != nil {
panic(err)
}
return post
}
func (th *TestHelper) CreateDmChannel(user *model.User) *model.Channel {
utils.DisableDebugLogForTest()
var err *model.AppError
var channel *model.Channel
if channel, err = th.App.GetOrCreateDirectChannel(th.BasicUser.Id, user.Id); err != nil {
panic(err)
}
utils.EnableDebugLogForTest()
return channel
}
func (th *TestHelper) LoginBasic() {
th.LoginBasicWithClient(th.Client)
}
func (th *TestHelper) LoginBasic2() {
th.LoginBasic2WithClient(th.Client)
}
func (th *TestHelper) LoginTeamAdmin() {
th.LoginTeamAdminWithClient(th.Client)
}
func (th *TestHelper) LoginSystemAdmin() {
th.LoginSystemAdminWithClient(th.SystemAdminClient)
}
func (th *TestHelper) LoginSystemManager() {
th.LoginSystemManagerWithClient(th.SystemManagerClient)
}
func (th *TestHelper) LoginBasicWithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
_, resp := client.Login(th.BasicUser.Email, th.BasicUser.Password)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) LoginBasic2WithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
_, resp := client.Login(th.BasicUser2.Email, th.BasicUser2.Password)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) LoginTeamAdminWithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
_, resp := client.Login(th.TeamAdminUser.Email, th.TeamAdminUser.Password)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) LoginSystemManagerWithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
_, resp := client.Login(th.SystemManagerUser.Email, th.SystemManagerUser.Password)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) LoginSystemAdminWithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
_, resp := client.Login(th.SystemAdminUser.Email, th.SystemAdminUser.Password)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) UpdateActiveUser(user *model.User, active bool) {
utils.DisableDebugLogForTest()
_, err := th.App.UpdateActive(user, active)
if err != nil {
panic(err)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) LinkUserToTeam(user *model.User, team *model.Team) {
utils.DisableDebugLogForTest()
_, err := th.App.JoinUserToTeam(team, user, "")
if err != nil {
panic(err)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) AddUserToChannel(user *model.User, channel *model.Channel) *model.ChannelMember {
utils.DisableDebugLogForTest()
member, err := th.App.AddUserToChannel(user, channel, false)
if err != nil {
panic(err)
}
utils.EnableDebugLogForTest()
return member
}
func (th *TestHelper) GenerateTestEmail() string {
if *th.App.Config().EmailSettings.SMTPServer != "localhost" && os.Getenv("CI_INBUCKET_PORT") == "" {
return strings.ToLower("success+" + model.NewId() + "@simulator.amazonses.com")
}
return strings.ToLower(model.NewId() + "@localhost")
}
func (th *TestHelper) CreateGroup() *model.Group {
id := model.NewId()
group := &model.Group{
Name: model.NewString("n-" + id),
DisplayName: "dn_" + id,
Source: model.GroupSourceLdap,
RemoteId: "ri_" + id,
}
utils.DisableDebugLogForTest()
group, err := th.App.CreateGroup(group)
if err != nil {
panic(err)
}
utils.EnableDebugLogForTest()
return group
}
// TestForSystemAdminAndLocal runs a test function for both
// SystemAdmin and Local clients. Several endpoints work in the same
// way when used by a fully privileged user and through the local
// mode, so this helper facilitates checking both
func (th *TestHelper) TestForSystemAdminAndLocal(t *testing.T, f func(*testing.T, *model.Client4), name ...string) {
var testName string
if len(name) > 0 {
testName = name[0] + "/"
}
t.Run(testName+"SystemAdminClient", func(t *testing.T) {
f(t, th.SystemAdminClient)
})
t.Run(testName+"LocalClient", func(t *testing.T) {
f(t, th.LocalClient)
})
}
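// Hypothetical usage sketch (test body and name chosen only for illustration):
// th.TestForSystemAdminAndLocal(t, func(t *testing.T, c *model.Client4) {
//     _, resp := c.GetPing()
//     CheckNoError(t, resp)
// }, "Ping")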
// TestForAllClients runs a test function for all the clients
// registered in the TestHelper
func (th *TestHelper) TestForAllClients(t *testing.T, f func(*testing.T, *model.Client4), name ...string) {
var testName string
if len(name) > 0 {
testName = name[0] + "/"
}
t.Run(testName+"Client", func(t *testing.T) {
f(t, th.Client)
})
t.Run(testName+"SystemAdminClient", func(t *testing.T) {
f(t, th.SystemAdminClient)
})
t.Run(testName+"LocalClient", func(t *testing.T) {
f(t, th.LocalClient)
})
}
func GenerateTestUsername() string {
return "fakeuser" + model.NewRandomString(10)
}
func GenerateTestTeamName() string {
return "faketeam" + model.NewRandomString(6)
}
func GenerateTestChannelName() string {
return "fakechannel" + model.NewRandomString(10)
}
func GenerateTestAppName() string {
return "fakeoauthapp" + model.NewRandomString(10)
}
func GenerateTestId() string {
return model.NewId()
}
func CheckUserSanitization(t *testing.T, user *model.User) {
t.Helper()
require.Equal(t, "", user.Password, "password wasn't blank")
require.Empty(t, user.AuthData, "auth data wasn't blank")
require.Equal(t, "", user.MfaSecret, "mfa secret wasn't blank")
}
func CheckEtag(t *testing.T, data interface{}, resp *model.Response) {
t.Helper()
require.Empty(t, data)
require.Equal(t, resp.StatusCode, http.StatusNotModified, "wrong status code for etag")
}
func CheckNoError(t *testing.T, resp *model.Response) {
t.Helper()
require.Nil(t, resp.Error, "expected no error")
}
func checkHTTPStatus(t *testing.T, resp *model.Response, expectedStatus int, expectError bool) {
t.Helper()
require.NotNilf(t, resp, "Unexpected nil response, expected http:%v, expectError:%v", expectedStatus, expectError)
if expectError {
require.NotNil(t, resp.Error, "Expected a non-nil error and http status:%v, got nil, %v", expectedStatus, resp.StatusCode)
} else {
require.Nil(t, resp.Error, "Expected no error and http status:%v, got %q, http:%v", expectedStatus, resp.Error, resp.StatusCode)
}
require.Equalf(t, expectedStatus, resp.StatusCode, "Expected http status:%v, got %v (err: %q)", expectedStatus, resp.StatusCode, resp.Error)
}
func CheckOKStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusOK, false)
}
func CheckCreatedStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusCreated, false)
}
func CheckForbiddenStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusForbidden, true)
}
func CheckUnauthorizedStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusUnauthorized, true)
}
func CheckNotFoundStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusNotFound, true)
}
func CheckBadRequestStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusBadRequest, true)
}
func CheckNotImplementedStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusNotImplemented, true)
}
func CheckRequestEntityTooLargeStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusRequestEntityTooLarge, true)
}
func CheckInternalErrorStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusInternalServerError, true)
}
func CheckServiceUnavailableStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusServiceUnavailable, true)
}
func CheckErrorMessage(t *testing.T, resp *model.Response, errorId string) {
t.Helper()
require.NotNilf(t, resp.Error, "should have errored with message: %s", errorId)
require.Equalf(t, errorId, resp.Error.Id, "incorrect error message, actual: %s, expected: %s", resp.Error.Id, errorId)
}
func CheckStartsWith(t *testing.T, value, prefix, message string) {
require.True(t, strings.HasPrefix(value, prefix), message, value)
}
// Similar to s3.New() but allows initialization of signature v2 or signature v4 client.
// If signV2 input is false, function always returns signature v4.
//
// Additionally this function also takes a user defined region, if set
// disables automatic region lookup.
func s3New(endpoint, accessKey, secretKey string, secure bool, signV2 bool, region string) (*s3.Client, error) {
var creds *credentials.Credentials
if signV2 {
creds = credentials.NewStatic(accessKey, secretKey, "", credentials.SignatureV2)
} else {
creds = credentials.NewStatic(accessKey, secretKey, "", credentials.SignatureV4)
}
opts := s3.Options{
Creds: creds,
Secure: secure,
Region: region,
}
return s3.New(endpoint, &opts)
}
func (th *TestHelper) cleanupTestFile(info *model.FileInfo) error {
cfg := th.App.Config()
if *cfg.FileSettings.DriverName == model.IMAGE_DRIVER_S3 {
endpoint := *cfg.FileSettings.AmazonS3Endpoint
accessKey := *cfg.FileSettings.AmazonS3AccessKeyId
secretKey := *cfg.FileSettings.AmazonS3SecretAccessKey
secure := *cfg.FileSettings.AmazonS3SSL
signV2 := *cfg.FileSettings.AmazonS3SignV2
region := *cfg.FileSettings.AmazonS3Region
s3Clnt, err := s3New(endpoint, accessKey, secretKey, secure, signV2, region)
if err != nil {
return err
}
bucket := *cfg.FileSettings.AmazonS3Bucket
if err := s3Clnt.RemoveObject(context.Background(), bucket, info.Path, s3.RemoveObjectOptions{}); err != nil {
return err
}
if info.ThumbnailPath != "" {
if err := s3Clnt.RemoveObject(context.Background(), bucket, info.ThumbnailPath, s3.RemoveObjectOptions{}); err != nil {
return err
}
}
if info.PreviewPath != "" {
if err := s3Clnt.RemoveObject(context.Background(), bucket, info.PreviewPath, s3.RemoveObjectOptions{}); err != nil {
return err
}
}
} else if *cfg.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
if err := os.Remove(*cfg.FileSettings.Directory + info.Path); err != nil {
return err
}
if info.ThumbnailPath != "" {
if err := os.Remove(*cfg.FileSettings.Directory + info.ThumbnailPath); err != nil {
return err
}
}
if info.PreviewPath != "" {
if err := os.Remove(*cfg.FileSettings.Directory + info.PreviewPath); err != nil {
return err
}
}
}
return nil
}
func (th *TestHelper) MakeUserChannelAdmin(user *model.User, channel *model.Channel) {
utils.DisableDebugLogForTest()
if cm, err := th.App.Srv().Store.Channel().GetMember(context.Background(), channel.Id, user.Id); err == nil {
cm.SchemeAdmin = true
if _, err = th.App.Srv().Store.Channel().UpdateMember(cm); err != nil {
utils.EnableDebugLogForTest()
panic(err)
}
} else {
utils.EnableDebugLogForTest()
panic(err)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) UpdateUserToTeamAdmin(user *model.User, team *model.Team) {
utils.DisableDebugLogForTest()
if tm, err := th.App.Srv().Store.Team().GetMember(context.Background(), team.Id, user.Id); err == nil {
tm.SchemeAdmin = true
if _, err = th.App.Srv().Store.Team().UpdateMember(tm); err != nil {
utils.EnableDebugLogForTest()
panic(err)
}
} else {
utils.EnableDebugLogForTest()
panic(err)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) UpdateUserToNonTeamAdmin(user *model.User, team *model.Team) {
utils.DisableDebugLogForTest()
if tm, err := th.App.Srv().Store.Team().GetMember(context.Background(), team.Id, user.Id); err == nil {
tm.SchemeAdmin = false
if _, err = th.App.Srv().Store.Team().UpdateMember(tm); err != nil {
utils.EnableDebugLogForTest()
panic(err)
}
} else {
utils.EnableDebugLogForTest()
panic(err)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) SaveDefaultRolePermissions() map[string][]string {
utils.DisableDebugLogForTest()
results := make(map[string][]string)
for _, roleName := range []string{
"system_user",
"system_admin",
"team_user",
"team_admin",
"channel_user",
"channel_admin",
} {
role, err1 := th.App.GetRoleByName(context.Background(), roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
results[roleName] = role.Permissions
}
utils.EnableDebugLogForTest()
return results
}
func (th *TestHelper) RestoreDefaultRolePermissions(data map[string][]string) {
utils.DisableDebugLogForTest()
for roleName, permissions := range data {
role, err1 := th.App.GetRoleByName(context.Background(), roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
if strings.Join(role.Permissions, " ") == strings.Join(permissions, " ") {
continue
}
role.Permissions = permissions
_, err2 := th.App.UpdateRole(role)
if err2 != nil {
utils.EnableDebugLogForTest()
panic(err2)
}
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) RemovePermissionFromRole(permission string, roleName string) {
utils.DisableDebugLogForTest()
role, err1 := th.App.GetRoleByName(context.Background(), roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
var newPermissions []string
for _, p := range role.Permissions {
if p != permission {
newPermissions = append(newPermissions, p)
}
}
if strings.Join(role.Permissions, " ") == strings.Join(newPermissions, " ") {
utils.EnableDebugLogForTest()
return
}
role.Permissions = newPermissions
_, err2 := th.App.UpdateRole(role)
if err2 != nil {
utils.EnableDebugLogForTest()
panic(err2)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) AddPermissionToRole(permission string, roleName string) {
utils.DisableDebugLogForTest()
role, err1 := th.App.GetRoleByName(context.Background(), roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
for _, existingPermission := range role.Permissions {
if existingPermission == permission {
utils.EnableDebugLogForTest()
return
}
}
role.Permissions = append(role.Permissions, permission)
_, err2 := th.App.UpdateRole(role)
if err2 != nil {
utils.EnableDebugLogForTest()
panic(err2)
}
utils.EnableDebugLogForTest()
}
func (th *TestHelper) SetupTeamScheme() *model.Scheme {
return th.SetupScheme(model.SCHEME_SCOPE_TEAM)
}
func (th *TestHelper) SetupChannelScheme() *model.Scheme {
return th.SetupScheme(model.SCHEME_SCOPE_CHANNEL)
}
func (th *TestHelper) SetupScheme(scope string) *model.Scheme {
scheme, err := th.App.CreateScheme(&model.Scheme{
Name: model.NewId(),
DisplayName: model.NewId(),
Scope: scope,
})
if err != nil {
panic(err)
}
return scheme
}
|
[
"\"CI_INBUCKET_PORT\""
] |
[] |
[
"CI_INBUCKET_PORT"
] |
[]
|
["CI_INBUCKET_PORT"]
|
go
| 1 | 0 | |
day001-014/day003/day-3-1-exercise.py
|
# ## Odd or Even
# # Instructions
# Write a program that works out whether a given number is odd or even.
# Even numbers can be divided by 2 with no remainder.
# e.g. 86 is **even** because 86 ÷ 2 = 43
# 43 does not have any decimal places. Therefore the division is clean.
# e.g. 59 is **odd** because 59 ÷ 2 = 29.5
# 29.5 is not a whole number; it has decimal places, so the division is not clean (59 % 2 leaves a remainder of 1).
# The **modulo** is written as a percentage sign (%) in Python. It gives you the remainder after a division.
# e.g.
# 6 ÷ 2 = 3 with no remainder.
# ```
# 6 % 2 = 0
# ```
# 5 = 2 x **2** + 1, so 5 ÷ 2 leaves a remainder of 1.
# ```
# 5 % 2 = 1
# ```
# 14 = 3 x **4** + 2, so 14 ÷ 4 leaves a remainder of 2.
# ```
# 14 % 4 = 2
# ```
# **Warning** your output should match the Example Output format exactly, even the positions of the commas and full stops.
# # Example Input 1
# ```
# 43
# ```
# # Example Output 1
# ```
# This is an odd number.
# ```
# # Example Input 2
# ```
# 94
# ```
# # Example Output 2
# ```
# This is an even number.
# ```
# e.g. When you hit **run**, this is what should happen:
# 
# # Hint
# 1. All even numbers can be divided by 2 with 0 remainder.
# 2. Try some using the modulo with some odd numbers e.g.
# ```
# 3 % 2
# ```
# ```
# 5 % 2
# ```
# ```
# 7 % 2
# ```
# Then try using the modulo with some even numbers e.g.
# ```
# 4 % 2
# ```
# ```
# 6 % 2
# ```
# ```
# 8 % 2
# ```
# See what's in common each time.
# 🚨 Don't change the code below 👇
number = int(input("Which number do you want to check? "))
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
if number % 2 > 0:
print("This is an odd number.")
else:
print("This is an even number.")
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
utils/bug_reducer/tests/test_funcbugreducer.py
|
# ==--- opt_bug_reducer_test.py ------------------------------------------===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ==----------------------------------------------------------------------===#
import os
import platform
import re
import shutil
import subprocess
import unittest
import bug_reducer.swift_tools as swift_tools
@unittest.skipUnless(platform.system() == 'Darwin',
'func_bug_reducer is only available on Darwin for now')
class FuncBugReducerTestCase(unittest.TestCase):
def setUp(self):
self.file_dir = os.path.dirname(os.path.abspath(__file__))
self.reducer = os.path.join(os.path.dirname(self.file_dir),
'bug_reducer', 'bug_reducer.py')
self.build_dir = os.path.abspath(
os.environ['BUGREDUCE_TEST_SWIFT_OBJ_ROOT'])
(root, _) = os.path.splitext(os.path.abspath(__file__))
self.root_basename = ''.join(os.path.basename(root).split('_'))
self.tmp_dir = os.path.join(
os.path.abspath(os.environ['BUGREDUCE_TEST_TMP_DIR']),
self.root_basename)
subprocess.call(['mkdir', '-p', self.tmp_dir])
self.module_cache = os.path.join(self.tmp_dir, 'module_cache')
self.sdk = subprocess.check_output(['xcrun', '--sdk', 'macosx',
'--toolchain', 'Default',
'--show-sdk-path']).strip("\n")
self.tools = swift_tools.SwiftTools(self.build_dir)
self.passes = ['--pass=-bug-reducer-tester']
if os.access(self.tmp_dir, os.F_OK):
shutil.rmtree(self.tmp_dir)
os.makedirs(self.tmp_dir)
os.makedirs(self.module_cache)
def _get_test_file_path(self, module_name):
return os.path.join(self.file_dir,
'{}_{}.swift'.format(
self.root_basename, module_name))
def _get_sib_file_path(self, filename):
(root, ext) = os.path.splitext(filename)
return os.path.join(self.tmp_dir, os.path.basename(root) + '.sib')
def run_swiftc_command(self, name):
input_file_path = self._get_test_file_path(name)
sib_path = self._get_sib_file_path(input_file_path)
args = [self.tools.swiftc,
'-module-cache-path', self.module_cache,
'-sdk', self.sdk,
'-Onone', '-parse-as-library',
'-module-name', name,
'-emit-sib',
'-resource-dir', os.path.join(self.build_dir, 'lib', 'swift'),
'-o', sib_path,
input_file_path]
return subprocess.check_call(args)
def test_basic(self):
name = 'testbasic'
result_code = self.run_swiftc_command(name)
assert result_code == 0, "Failed initial compilation"
args = [
self.reducer,
'func',
self.build_dir,
self._get_sib_file_path(self._get_test_file_path(name)),
'--sdk=%s' % self.sdk,
'--module-cache=%s' % self.module_cache,
'--module-name=%s' % name,
'--work-dir=%s' % self.tmp_dir,
('--extra-silopt-arg='
'-bug-reducer-tester-target-func=__TF_test_target'),
'--extra-silopt-arg=-bug-reducer-tester-failure-kind=opt-crasher'
]
args.extend(self.passes)
output = subprocess.check_output(args).split("\n")
self.assertTrue("*** Successfully Reduced file!" in output)
self.assertTrue("*** Final Functions: _TF9testbasic6foo413FT_T_")
re_end = 'testfuncbugreducer_testbasic_'
re_end += 'c36efe1eb0993b53c570bfed38933af8.sib'
output_file_re = re.compile(r'\*\*\* Final File: .*' + re_end)
output_matches = [
1 for o in output if output_file_re.match(o) is not None]
self.assertEquals(sum(output_matches), 1)
# Make sure our final output command does not have -emit-sib in
# the output. We want users to get sil output when they type in
# the relevant command.
self.assertEquals([], [o for o in output if '-emit-sib' in o])
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"BUGREDUCE_TEST_TMP_DIR",
"BUGREDUCE_TEST_SWIFT_OBJ_ROOT"
] |
[]
|
["BUGREDUCE_TEST_TMP_DIR", "BUGREDUCE_TEST_SWIFT_OBJ_ROOT"]
|
python
| 2 | 0 | |
vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go
|
package lintersdb
import (
"os"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/golinters"
"github.com/golangci/golangci-lint/pkg/lint/linter"
)
type Manager struct {
nameToLC map[string]*linter.Config
cfg *config.Config
}
func NewManager(cfg *config.Config) *Manager {
m := &Manager{cfg: cfg}
nameToLC := make(map[string]*linter.Config)
for _, lc := range m.GetAllSupportedLinterConfigs() {
for _, name := range lc.AllNames() {
nameToLC[name] = lc
}
}
m.nameToLC = nameToLC
return m
}
func (Manager) AllPresets() []string {
return []string{linter.PresetBugs, linter.PresetComplexity, linter.PresetFormatting,
linter.PresetPerformance, linter.PresetStyle, linter.PresetUnused}
}
func (m Manager) allPresetsSet() map[string]bool {
ret := map[string]bool{}
for _, p := range m.AllPresets() {
ret[p] = true
}
return ret
}
func (m Manager) GetMetaLinter(name string) linter.MetaLinter {
return m.GetMetaLinters()[name]
}
func (m Manager) GetLinterConfig(name string) *linter.Config {
lc, ok := m.nameToLC[name]
if !ok {
return nil
}
return lc
}
func enableLinterConfigs(lcs []*linter.Config, isEnabled func(lc *linter.Config) bool) []*linter.Config {
var ret []*linter.Config
for _, lc := range lcs {
lc := lc
lc.EnabledByDefault = isEnabled(lc)
ret = append(ret, lc)
}
return ret
}
func (Manager) GetMetaLinters() map[string]linter.MetaLinter {
metaLinters := []linter.MetaLinter{
golinters.MegacheckMetalinter{},
}
ret := map[string]linter.MetaLinter{}
for _, metaLinter := range metaLinters {
ret[metaLinter.Name()] = metaLinter
}
return ret
}
func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
var govetCfg *config.GovetSettings
if m.cfg != nil {
govetCfg = &m.cfg.LintersSettings.Govet
}
lcs := []*linter.Config{
linter.NewConfig(golinters.NewGovet(govetCfg)).
WithSSA(). // TODO: extract from the linter config and don't build SSA, just use LoadAllSyntax mode
WithPresets(linter.PresetBugs).
WithSpeed(4).
WithAlternativeNames("vet", "vetshadow").
WithURL("https://golang.org/cmd/vet/"),
linter.NewConfig(golinters.Errcheck{}).
WithTypeInfo().
WithPresets(linter.PresetBugs).
WithSpeed(10).
WithURL("https://github.com/kisielk/errcheck"),
linter.NewConfig(golinters.Golint{}).
WithPresets(linter.PresetStyle).
WithSpeed(3).
WithURL("https://github.com/golang/lint"),
linter.NewConfig(golinters.NewStaticcheck()).
WithSSA().
WithPresets(linter.PresetBugs).
WithSpeed(2).
WithURL("https://staticcheck.io/"),
linter.NewConfig(golinters.NewUnused()).
WithSSA().
WithPresets(linter.PresetUnused).
WithSpeed(5).
WithURL("https://github.com/dominikh/go-tools/tree/master/cmd/unused"),
linter.NewConfig(golinters.NewGosimple()).
WithSSA().
WithPresets(linter.PresetStyle).
WithSpeed(5).
WithURL("https://github.com/dominikh/go-tools/tree/master/cmd/gosimple"),
linter.NewConfig(golinters.NewStylecheck()).
WithSSA().
WithPresets(linter.PresetStyle).
WithSpeed(5).
WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"),
linter.NewConfig(golinters.Gosec{}).
WithTypeInfo().
WithPresets(linter.PresetBugs).
WithSpeed(8).
WithURL("https://github.com/securego/gosec").
WithAlternativeNames("gas"),
linter.NewConfig(golinters.Structcheck{}).
WithTypeInfo().
WithPresets(linter.PresetUnused).
WithSpeed(10).
WithURL("https://github.com/opennota/check"),
linter.NewConfig(golinters.Varcheck{}).
WithTypeInfo().
WithPresets(linter.PresetUnused).
WithSpeed(10).
WithURL("https://github.com/opennota/check"),
linter.NewConfig(golinters.Interfacer{}).
WithSSA().
WithPresets(linter.PresetStyle).
WithSpeed(6).
WithURL("https://github.com/mvdan/interfacer"),
linter.NewConfig(golinters.Unconvert{}).
WithTypeInfo().
WithPresets(linter.PresetStyle).
WithSpeed(10).
WithURL("https://github.com/mdempsky/unconvert"),
linter.NewConfig(golinters.Ineffassign{}).
WithPresets(linter.PresetUnused).
WithSpeed(9).
WithURL("https://github.com/gordonklaus/ineffassign"),
linter.NewConfig(golinters.Dupl{}).
WithPresets(linter.PresetStyle).
WithSpeed(7).
WithURL("https://github.com/mibk/dupl"),
linter.NewConfig(golinters.Goconst{}).
WithPresets(linter.PresetStyle).
WithSpeed(9).
WithURL("https://github.com/jgautheron/goconst"),
linter.NewConfig(golinters.Deadcode{}).
WithTypeInfo().
WithPresets(linter.PresetUnused).
WithSpeed(10).
WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"),
linter.NewConfig(golinters.Gocyclo{}).
WithPresets(linter.PresetComplexity).
WithSpeed(8).
WithURL("https://github.com/alecthomas/gocyclo"),
linter.NewConfig(golinters.TypeCheck{}).
WithTypeInfo().
WithPresets(linter.PresetBugs).
WithSpeed(10).
WithURL(""),
linter.NewConfig(golinters.Gofmt{}).
WithPresets(linter.PresetFormatting).
WithSpeed(7).
WithAutoFix().
WithURL("https://golang.org/cmd/gofmt/"),
linter.NewConfig(golinters.Gofmt{UseGoimports: true}).
WithPresets(linter.PresetFormatting).
WithSpeed(5).
WithAutoFix().
WithURL("https://godoc.org/golang.org/x/tools/cmd/goimports"),
linter.NewConfig(golinters.Maligned{}).
WithTypeInfo().
WithPresets(linter.PresetPerformance).
WithSpeed(10).
WithURL("https://github.com/mdempsky/maligned"),
linter.NewConfig(golinters.Depguard{}).
WithTypeInfo().
WithPresets(linter.PresetStyle).
WithSpeed(6).
WithURL("https://github.com/OpenPeeDeeP/depguard"),
linter.NewConfig(golinters.Misspell{}).
WithPresets(linter.PresetStyle).
WithSpeed(7).
WithAutoFix().
WithURL("https://github.com/client9/misspell"),
linter.NewConfig(golinters.Lll{}).
WithPresets(linter.PresetStyle).
WithSpeed(10).
WithURL("https://github.com/walle/lll"),
linter.NewConfig(golinters.Unparam{}).
WithPresets(linter.PresetUnused).
WithSpeed(3).
WithSSA().
WithURL("https://github.com/mvdan/unparam"),
linter.NewConfig(golinters.Nakedret{}).
WithPresets(linter.PresetComplexity).
WithSpeed(10).
WithURL("https://github.com/alexkohler/nakedret"),
linter.NewConfig(golinters.Prealloc{}).
WithPresets(linter.PresetPerformance).
WithSpeed(8).
WithURL("https://github.com/alexkohler/prealloc"),
linter.NewConfig(golinters.Scopelint{}).
WithPresets(linter.PresetBugs).
WithSpeed(8).
WithURL("https://github.com/kyoh86/scopelint"),
linter.NewConfig(golinters.Gocritic{}).
WithPresets(linter.PresetStyle).
WithSpeed(5).
WithTypeInfo().
WithURL("https://github.com/go-critic/go-critic"),
linter.NewConfig(golinters.Gochecknoinits{}).
WithPresets(linter.PresetStyle).
WithSpeed(10).
WithURL("https://github.com/leighmcculloch/gochecknoinits"),
linter.NewConfig(golinters.Gochecknoglobals{}).
WithPresets(linter.PresetStyle).
WithSpeed(10).
WithURL("https://github.com/leighmcculloch/gochecknoglobals"),
}
isLocalRun := os.Getenv("GOLANGCI_COM_RUN") == ""
enabledByDefault := map[string]bool{
golinters.NewGovet(nil).Name(): true,
golinters.Errcheck{}.Name(): true,
golinters.Staticcheck{}.Name(): true,
golinters.Unused{}.Name(): true,
golinters.Gosimple{}.Name(): true,
golinters.Structcheck{}.Name(): true,
golinters.Varcheck{}.Name(): true,
golinters.Ineffassign{}.Name(): true,
golinters.Deadcode{}.Name(): true,
// don't typecheck for golangci.com: too many troubles
golinters.TypeCheck{}.Name(): isLocalRun,
}
return enableLinterConfigs(lcs, func(lc *linter.Config) bool {
return enabledByDefault[lc.Name()]
})
}
func (m Manager) GetAllEnabledByDefaultLinters() []*linter.Config {
var ret []*linter.Config
for _, lc := range m.GetAllSupportedLinterConfigs() {
if lc.EnabledByDefault {
ret = append(ret, lc)
}
}
return ret
}
func linterConfigsToMap(lcs []*linter.Config) map[string]*linter.Config {
ret := map[string]*linter.Config{}
for _, lc := range lcs {
lc := lc // local copy
ret[lc.Name()] = lc
}
return ret
}
func (m Manager) GetAllLinterConfigsForPreset(p string) []*linter.Config {
var ret []*linter.Config
for _, lc := range m.GetAllSupportedLinterConfigs() {
for _, ip := range lc.InPresets {
if p == ip {
ret = append(ret, lc)
break
}
}
}
return ret
}
|
[
"\"GOLANGCI_COM_RUN\""
] |
[] |
[
"GOLANGCI_COM_RUN"
] |
[]
|
["GOLANGCI_COM_RUN"]
|
go
| 1 | 0 | |
remove.go
|
package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"os"
"github.com/tidwall/gjson"
)
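// Illustrative example (hypothetical values, not part of this service's contract):
// with OLD_USER_FIELD=user.name and OLD_ORG_FIELD=org.id, an input of
// {"user":{"name":"jdoe"},"org":{"id":"42"}} yields Usernames=["jdoe"], Org="42"
// and a DELETE request against /rest/servicedeskapi/organization/42/user.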
func (m *Message) parseRemoveReq(input string) error {
ouf := gjson.Get(input, os.Getenv("OLD_USER_FIELD"))
oof := gjson.Get(input, os.Getenv("OLD_ORG_FIELD"))
if !ouf.Exists() || !oof.Exists() {
return errors.New("missing environment variable")
}
var n []string
m.Usernames = append(n, ouf.Str)
m.Org = oof.Str
log.Printf("removing %v from org %v ...", m.Usernames[0], m.Org)
m.Method = "DELETE"
m.URI = fmt.Sprintf("/rest/servicedeskapi/organization/%s/user", m.Org)
j := Message{
Usernames: m.Usernames,
}
p, err := json.Marshal(j)
if err != nil {
return err
}
m.Payload = p
return nil
}
func (m *Message) removeHandler(w http.ResponseWriter, r *http.Request) {
buf := new(bytes.Buffer)
_, err := buf.ReadFrom(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
input := buf.String()
err = m.parseRemoveReq(input)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
w.WriteHeader(http.StatusOK)
}
|
[
"\"OLD_USER_FIELD\"",
"\"OLD_ORG_FIELD\""
] |
[] |
[
"OLD_ORG_FIELD",
"OLD_USER_FIELD"
] |
[]
|
["OLD_ORG_FIELD", "OLD_USER_FIELD"]
|
go
| 2 | 0 | |
thesis/train_HeLa_unet.py
|
import os
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse
import segmentation_models_v1 as sm
from segmentation_models_v1 import Unet, Linknet, PSPNet, FPN, DUNet, BiFPN, Nestnet, ResUnet, AtUnet
from unet_model import unet_std, unet_std2
sm.set_framework('tf.keras')
from helper_function import plot_deeply_history, plot_history, save_history
from helper_function import precision, recall, f1_score
from sklearn.metrics import confusion_matrix
from helper_function import plot_history_for_callback, save_history_for_callback
def str2bool(value):
return value.lower() == 'true'
def generate_folder(folder_name):
if not os.path.exists(folder_name):
os.system('mkdir -p {}'.format(folder_name))
parser = argparse.ArgumentParser()
parser.add_argument("--docker", type=str2bool, default = True)
parser.add_argument("--gpu", type=str, default = '0')
parser.add_argument("--net_type", type=str, default = 'UNet') #Unet, Linknet, PSPNet, FPN
parser.add_argument("--backbone", type=str, default = 'efficientnetb3')
parser.add_argument("--feat_version", type=int, default = None)
parser.add_argument("--epoch", type=int, default = 2)
parser.add_argument("--dim", type=int, default = 512)
parser.add_argument("--batch_size", type=int, default = 2)
parser.add_argument("--dataset", type=str, default = 'live_dead')
parser.add_argument("--ext", type=str2bool, default = False)
parser.add_argument("--upsample", type=str, default = 'upsampling')
parser.add_argument("--pyramid_agg", type=str, default = 'sum')
parser.add_argument("--filters", type=int, default = 256)
parser.add_argument("--rot", type=float, default = 0)
parser.add_argument("--lr", type=float, default = 1e-3)
parser.add_argument("--bk", type=float, default = 0.5)
parser.add_argument("--focal_weight", type=float, default = 1)
parser.add_argument("--pre_train", type=str2bool, default = True)
parser.add_argument("--train", type=int, default = None)
parser.add_argument("--loss", type=str, default = 'focal+dice')
parser.add_argument("--reduce_factor", type=float, default = 0.1)
args = parser.parse_args()
print(args)
model_name = 'single-net-{}-bone-{}-pre-{}-epoch-{}-batch-{}-lr-{}-dim-{}-train-{}-rot-{}-set-{}-ext-{}-loss-{}-up-{}-filters-{}-red_factor-{}-pyr_agg-{}-bk-{}-fl_weight-{}-fv-{}'.format(args.net_type,\
args.backbone, args.pre_train, args.epoch, args.batch_size, args.lr, args.dim,\
args.train, args.rot, args.dataset, args.ext, args.loss, args.upsample, args.filters, args.reduce_factor, args.pyramid_agg, args.bk, args.focal_weight, args.feat_version)
print(model_name)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.dataset == 'live_dead':
val_dim = 832 if not args.net_type == 'PSPNet' else 864
test_dim = val_dim; img_dim = 832
train_image_set = 'train_images2'
val_image_set = 'val_images2'
test_image_set = 'test_images2'
elif args.dataset == 'cell_cycle_1984_v2' or args.dataset == 'cell_cycle_1984':
val_dim = 1984 if not args.net_type == 'PSPNet' else 2016
test_dim = val_dim; img_dim = 1984
train_image_set = 'train_images'
val_image_set = 'val_images'
test_image_set = 'test_images'
DATA_DIR = '/data/datasets/{}'.format(args.dataset) if args.docker else './data/{}'.format(args.dataset)
x_train_dir = os.path.join(DATA_DIR, train_image_set) if not args.ext else os.path.join(DATA_DIR, 'ext_train_images')
y_train_dir = os.path.join(DATA_DIR, 'train_masks') if not args.ext else os.path.join(DATA_DIR, 'ext_train_masks')
x_valid_dir = os.path.join(DATA_DIR, val_image_set)
y_valid_dir = os.path.join(DATA_DIR, 'val_masks')
x_test_dir = os.path.join(DATA_DIR, test_image_set)
y_test_dir = os.path.join(DATA_DIR, 'test_masks')
print(x_train_dir); print(x_valid_dir); print(x_test_dir)
# classes for data loading and preprocessing
class Dataset:
"""CamVid Dataset. Read images, apply augmentation and preprocessing transformations.
Args:
images_dir (str): path to images folder
masks_dir (str): path to segmentation masks folder
class_values (list): values of classes to extract from segmentation mask
augmentation (albumentations.Compose): data transformation pipeline
(e.g. flip, scale, etc.)
preprocessing (albumentations.Compose): data preprocessing
(e.g. normalization, shape manipulation, etc.)
"""
CLASSES = ['bk', 'live', 'inter', 'dead']
def __init__(
self,
images_dir,
masks_dir,
classes=None,
nb_data=None,
augmentation=None,
preprocessing=None,
):
id_list = os.listdir(images_dir)
if nb_data is None:
self.ids = id_list
else:
self.ids = id_list[:int(min(nb_data,len(id_list)))]
#self.ids = os.listdir(images_dir)
self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]
self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids]
#print(self.images_fps[:4]); print(self.masks_fps[:4])
print(len(self.images_fps)); print(len(self.masks_fps))
# convert str names to class values on masks
self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]
self.augmentation = augmentation
self.preprocessing = preprocessing
def __getitem__(self, i):
# read data
image = cv2.imread(self.images_fps[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(self.masks_fps[i], 0)
# print(np.unique(mask))
# extract certain classes from mask (e.g. cars)
masks = [(mask == v) for v in self.class_values]
# print(self.class_values)
mask = np.stack(masks, axis=-1).astype('float')
# add background if mask is not binary
if mask.shape[-1] != 1:
background = 1 - mask.sum(axis=-1, keepdims=True)
mask = np.concatenate((mask, background), axis=-1)
# apply augmentations
if self.augmentation:
sample = self.augmentation(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
return image, mask
def __len__(self):
return len(self.ids)
class Dataloder(tf.keras.utils.Sequence):
"""Load data from dataset and form batches
Args:
dataset: instance of Dataset class for image loading and preprocessing.
batch_size: Integer number of images in batch.
shuffle: Boolean, if `True` shuffle image indexes each epoch.
"""
def __init__(self, dataset, batch_size=1, shuffle=False):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.indexes = np.arange(len(dataset))
self.on_epoch_end()
def __getitem__(self, i):
# collect batch data
start = i * self.batch_size
stop = (i + 1) * self.batch_size
data = []
for j in range(start, stop):
data.append(self.dataset[j])
# transpose list of lists
batch = [np.stack(samples, axis=0) for samples in zip(*data)]
# map_batch = batch[1]
# map_batch_list = [map_batch]
# for i in range(4):
# map_batch_list.append(map_batch[:,::2,::2,:])
# map_batch = map_batch[:,::2,::2,:]
# map_batch_list.reverse()
# map_tuple = ()
# for i in range(5):
# map_tuple = map_tuple+(map_batch_list[i],)
return (batch[0], batch[1])
def __len__(self):
"""Denotes the number of batches per epoch"""
return len(self.indexes) // self.batch_size
def on_epoch_end(self):
"""Callback function to shuffle indexes each epoch"""
if self.shuffle:
self.indexes = np.random.permutation(self.indexes)
import albumentations as A
def round_clip_0_1(x, **kwargs):
return x.round().clip(0, 1)
# define heavy augmentations
def get_training_augmentation(dim = 512, rot_limit = 45):
train_transform = [
A.HorizontalFlip(p=0.5),
A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=rot_limit, shift_limit=0.1, p=1, border_mode=0),
A.PadIfNeeded(min_height=dim, min_width=dim, always_apply=True, border_mode=0),
A.RandomCrop(height=dim, width=dim, always_apply=True),
A.IAAAdditiveGaussianNoise(p=0.2),
A.IAAPerspective(p=0.5),
A.OneOf(
[
A.CLAHE(p=1),
A.RandomBrightness(p=1),
A.RandomGamma(p=1),
],
p=0.9,
),
A.OneOf(
[
A.IAASharpen(p=1),
A.Blur(blur_limit=3, p=1),
A.MotionBlur(blur_limit=3, p=1),
],
p=0.9,
),
A.OneOf(
[
A.RandomContrast(p=1),
A.HueSaturationValue(p=1),
],
p=0.9,
),
A.Lambda(mask=round_clip_0_1)
]
return A.Compose(train_transform)
def get_validation_augmentation(dim = 832):
"""Add paddings to make image shape divisible by 32"""
test_transform = [
A.PadIfNeeded(dim, dim),
A.RandomCrop(height=dim, width=dim, always_apply=True)
# A.PadIfNeeded(384, 480)
]
return A.Compose(test_transform)
def get_preprocessing(preprocessing_fn):
"""Construct preprocessing transform
Args:
preprocessing_fn (callable): data normalization function
(can be specific for each pretrained neural network)
Return:
transform: albumentations.Compose
"""
_transform = [
A.Lambda(image=preprocessing_fn),
]
return A.Compose(_transform)
# BACKBONE = 'efficientnetb3'
BACKBONE = args.backbone
BATCH_SIZE = args.batch_size
CLASSES = ['live', 'inter', 'dead']
LR = args.lr
EPOCHS = args.epoch
preprocess_input = sm.get_preprocessing(BACKBONE)
# define network parameters
n_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1) # case for binary and multiclass segmentation
activation = 'sigmoid' if n_classes == 1 else 'softmax'
#create model
net_func = globals()[args.net_type]
encoder_weights='imagenet' if args.pre_train else None
if args.net_type == 'PSPNet':
model = net_func(BACKBONE, encoder_weights=encoder_weights, input_shape = (args.dim, args.dim, 3), classes=n_classes, activation=activation)
elif args.net_type == 'FPN':
model = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation, pyramid_aggregation = args.pyramid_agg)
elif args.net_type == 'unet_std' or args.net_type == 'unet_std2':
model = net_func(classes=n_classes, activation=activation)
else:
model = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation,\
decoder_block_type = args.upsample, feature_version = args.feat_version,\
decoder_filters=(int(args.filters),int(args.filters/2), int(args.filters/4), int(args.filters/8), int(args.filters/16)))
print('{}'.format((int(args.filters),int(args.filters/2), int(args.filters/4), int(args.filters/8), int(args.filters/16))))
# else:
# model = net_func(BACKBONE, encoder_weights=encoder_weights, input_shape = (args.dim, args.dim, 3), classes=n_classes, activation=activation)
# model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
# define optimizer
optim = tf.keras.optimizers.Adam(LR)
class_weights = [1,1,1,args.bk]
# Segmentation models losses can be combined together by '+' and scaled by integer or float factor
# set class weights for dice_loss (car: 1.; pedestrian: 2.; background: 0.5;)
if args.loss =='focal+dice':
dice_loss = sm.losses.DiceLoss(class_weights=np.array(class_weights))
focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
total_loss = dice_loss + (args.focal_weight * focal_loss)
elif args.loss =='dice':
total_loss = sm.losses.DiceLoss(class_weights=np.array(class_weights))
elif args.loss =='jaccard':
total_loss = sm.losses.JaccardLoss(class_weights=np.array(class_weights))
elif args.loss =='focal+jaccard':
dice_loss = sm.losses.JaccardLoss(class_weights=np.array(class_weights))
focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
total_loss = dice_loss + (args.focal_weight * focal_loss)
elif args.loss =='focal+jaccard+dice':
dice_loss = sm.losses.JaccardLoss(class_weights=np.array(class_weights))
jaccard_loss = sm.losses.JaccardLoss(class_weights=np.array(class_weights))
focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
total_loss = dice_loss + jaccard_loss+ (args.focal_weight * focal_loss)
elif args.loss == 'focal':
total_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
elif args.loss == 'ce':
total_loss = sm.losses.BinaryCELoss() if n_classes == 1 else sm.losses.CategoricalCELoss()
elif args.loss == 'wce':
# weighted wce (bk, live, injured, dead)
#ratios: 0.929, 0.01 , 0.056, 0.004
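# the weights below are approximately the inverse class frequencies (1/ratio): 1/0.929~1.08, 1/0.01=100, 1/0.056~17.86, 1/0.004=250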
class_weights = [1.08, 100., 17.86, 250.]
total_loss = sm.losses.BinaryCELoss() if n_classes == 1 else sm.losses.CategoricalCELoss(class_weights=np.array(class_weights))
# focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
# total_loss = dice_loss + (args.focal_weight * focal_loss)
# actually, total_loss can be imported directly from the library; the example above just shows how to combine and manipulate losses
# total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss
metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
# compile keras model with defined optimizer, loss and metrics
model.compile(optimizer=optim, loss=total_loss, metrics = metrics)
# Dataset for train images
train_dataset = Dataset(
x_train_dir,
y_train_dir,
classes=CLASSES,
nb_data=args.train,
augmentation=get_training_augmentation(args.dim, args.rot),
preprocessing= None,
)
if args.net_type == 'PSPNet':
val_dim = args.dim
# Dataset for validation images
valid_dataset = Dataset(
x_valid_dir,
y_valid_dir,
classes=CLASSES,
augmentation=get_validation_augmentation(val_dim),
preprocessing= None,
)
train_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False)
print(train_dataloader[0][0].shape)
# check shapes for errors
assert train_dataloader[0][0].shape == (BATCH_SIZE, args.dim, args.dim, 3)
assert train_dataloader[0][1].shape == (BATCH_SIZE, args.dim, args.dim, n_classes)
model_folder = '/data/thesis_models/{}'.format(model_name) if args.docker else './models/thesis_models/{}'.format(model_name)
generate_folder(model_folder)
def concat_tile(im_list_2d):
return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
def save_images(file_name, vols):
# vols = vols[:,:,:,1] if vols.shape[-1] >= 2 else vols[:,:,:,0]
shp = vols.shape
ls, lx, ly, lc = shp
sx, sy = int(lx/256), int(ly/256)
vols = vols[:,::sx,::sy,:]
slice_list, rows = [], []
for si in range(vols.shape[0]):
slice = vols[si,:,:,:]
rows.append(slice)
if si%4 == 3 and not si == vols.shape[0]-1:
slice_list.append(rows)
rows = []
save_img = concat_tile(slice_list)
cv2.imwrite(file_name, save_img)
def map2rgb(maps):
shp = maps.shape
rgb_maps = np.zeros((shp[0], shp[1], shp[2], 3), dtype=np.uint8)
rgb_maps[:,:,:,0] = np.uint8((maps==0)*255)
rgb_maps[:,:,:,1] = np.uint8((maps==1)*255)
rgb_maps[:,:,:,2] = np.uint8((maps==2)*255)
return rgb_maps
class HistoryPrintCallback(tf.keras.callbacks.Callback):
def __init__(self):
super(HistoryPrintCallback, self).__init__()
self.history = {}
def on_epoch_end(self, epoch, logs=None):
if logs:
for key in logs.keys():
if epoch == 0:
self.history[key] = []
self.history[key].append(logs[key])
if epoch%5 == 0:
plot_history_for_callback(model_folder+'/train_history.png', self.history)
save_history_for_callback(model_folder, self.history)
gt_vols, pr_vols = [],[]
for i in range(0, len(valid_dataset),int(len(valid_dataset)/36)):
gt_vols.append(valid_dataloader[i][1])
pr_vols.append(self.model.predict(valid_dataloader[i]))
gt_vols = np.concatenate(gt_vols, axis = 0); gt_map = map2rgb(np.argmax(gt_vols,axis =-1))
pr_vols = np.concatenate(pr_vols, axis = 0); pr_map = map2rgb(np.argmax(pr_vols,axis =-1))
if epoch == 0:
save_images(model_folder+'/ground_truth.png', gt_map)
save_images(model_folder+'/pr-{}.png'.format(epoch), pr_map)
# define callbacks for learning rate scheduling and best checkpoints saving
if args.reduce_factor < 1.0:
callbacks = [
tf.keras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', save_weights_only=True, save_best_only=True, mode='min'),
tf.keras.callbacks.ReduceLROnPlateau(factor=args.reduce_factor),
HistoryPrintCallback(),
]
else:
callbacks = [
tf.keras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', save_weights_only=True, save_best_only=True, mode='min'),
HistoryPrintCallback(),
]
# train model
history = model.fit_generator(
train_dataloader,
steps_per_epoch=len(train_dataloader),
epochs=EPOCHS,
callbacks=callbacks,
validation_data=valid_dataloader,
validation_steps=len(valid_dataloader),
)
# save the training information
plot_history(model_folder+'/train_history.png',history)
record_dir = model_folder+'/train_dir'
generate_folder(record_dir)
save_history(record_dir, history)
# evaluate model
# test_dataset = Dataset(
# x_test_dir,
# y_test_dir,
# classes=CLASSES,
# augmentation=get_validation_augmentation(test_dim),
# preprocessing=get_preprocessing(preprocess_input),
# )
# evaluate model
test_dataset = Dataset(
x_test_dir,
y_test_dir,
classes=CLASSES,
augmentation=get_validation_augmentation(test_dim),
preprocessing= None,
)
test_dataloader = Dataloder(test_dataset, batch_size=1, shuffle=False)
if args.net_type == 'FPN':
model = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation, pyramid_aggregation = args.pyramid_agg)
else:
model = net_func(BACKBONE, encoder_weights=encoder_weights, input_shape = (test_dim, test_dim, 3), classes=n_classes, activation=activation, feature_version = args.feat_version,)
model.compile(optimizer=optim, loss=total_loss, metrics = metrics)
# load best weights
model.load_weights(model_folder+'/best_model.h5')
scores = model.evaluate_generator(test_dataloader)
print("Loss: {:.5}".format(scores[0]))
for metric, value in zip(metrics, scores[1:]):
print("mean {}: {:.5}".format(metric.__name__, value))
# calculate the pixel-level classification performance
pr_masks = model.predict(test_dataloader); pr_maps = np.argmax(pr_masks,axis=-1)
gt_masks = []
for i in range(len(test_dataset)):
_, gt_mask = test_dataset[i];gt_masks.append(gt_mask)
gt_masks = np.stack(gt_masks);gt_maps = np.argmax(gt_masks,axis=-1)
# crop
if args.net_type == 'PSPNet':
offset1, offset2 = int((test_dim-img_dim)/2), val_dim-int((test_dim-img_dim)/2)
gt_maps=gt_maps[:,offset1:offset2,offset1:offset2]
pr_maps=pr_maps[:,offset1:offset2,offset1:offset2]
print('PSP output: {}'.format(pr_maps.shape))
y_true=gt_maps.flatten(); y_pred = pr_maps.flatten()
cf_mat = confusion_matrix(y_true, y_pred)
cf_mat_reord = np.zeros(cf_mat.shape)
cf_mat_reord[1:,1:]=cf_mat[:3,:3];cf_mat_reord[0,1:]=cf_mat[3,0:3]; cf_mat_reord[1:,0]=cf_mat[0:3,3]
cf_mat_reord[0,0] = cf_mat[3,3]
print('Confusion matrix:')
print(cf_mat_reord)
prec_scores = []; recall_scores = []; f1_scores = []; iou_scores=[]
for i in range(cf_mat.shape[0]):
prec_scores.append(precision(i,cf_mat_reord))
recall_scores.append(recall(i,cf_mat_reord))
f1_scores.append(f1_score(i,cf_mat_reord))
print('Precision:{:.4f},{:,.4f},{:.4f},{:.4f}'.format(prec_scores[0], prec_scores[1], prec_scores[2], prec_scores[3]))
print('Recall:{:.4f},{:,.4f},{:.4f},{:.4f}'.format(recall_scores[0], recall_scores[1], recall_scores[2], recall_scores[3]))
# f1 score
print('f1-score (pixel):{:.4f},{:,.4f},{:.4f},{:.4f}'.format(f1_scores[0],f1_scores[1],f1_scores[2],f1_scores[3]))
print('mean f1-score (pixel):{:.4f}'.format(np.mean(f1_scores)))
with open(model_folder+'/metric_summary.txt','w+') as f:
# save iou and dice
for metric, value in zip(metrics, scores[1:]):
f.write("mean {}: {:.5}\n".format(metric.__name__, value))
# save confusion matrix
f.write('confusion matrix:\n')
np.savetxt(f, cf_mat_reord, fmt='%-7d')
# save precision
f.write('precision:{:.4f},{:,.4f},{:.4f},{:.4f}\n'.format(prec_scores[0], prec_scores[1], prec_scores[2], prec_scores[3]))
f.write('mean precision: {:.4f}\n'.format(np.mean(prec_scores)))
# save recall
f.write('recall:{:.4f},{:,.4f},{:.4f},{:.4f}\n'.format(recall_scores[0], recall_scores[1], recall_scores[2], recall_scores[3]))
f.write('mean recall:{:.4f}\n'.format(np.mean(recall_scores)))
# save f1-score
f.write('f1-score (pixel):{:.4f},{:,.4f},{:.4f},{:.4f}\n'.format(f1_scores[0],f1_scores[1],f1_scores[2],f1_scores[3]))
f.write('mean f1-score (pixel):{:.4f}\n'.format(np.mean(f1_scores)))
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
images/marathon/haproxy/resources/pod/pod.py
|
#
# Copyright (c) 2012-2015 by Autodesk, Inc.
# All rights reserved.
#
# The information contained herein is confidential and proprietary to
# Autodesk, Inc., and considered a trade secret as defined under civil
# and criminal statutes. Autodesk shall pursue its civil and criminal
# remedies in the event of unauthorized use or misappropriation of its
# trade secrets. Use of this information by anyone other than authorized
# employees of Autodesk, Inc. is granted only under a written non-
# disclosure agreement, expressly prescribing the scope and manner of
# such use.
#
import json
import logging
import os
import time
from jinja2 import Environment, FileSystemLoader
from ochopod.bindings.generic.marathon import Pod
from ochopod.core.tools import Shell
from ochopod.models.piped import Actor as Piped
from ochopod.models.reactive import Actor as Reactive
from os.path import join, dirname
logger = logging.getLogger('ochopod')
if __name__ == '__main__':
#
# - load our pod configuration settings
# - this little json payload is packaged by the marathon toolset upon a push
# - it is passed down to the container as the $pod environment variable
#
cfg = json.loads(os.environ['pod'])
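#
# - for quick local experiments one could export a stand-in payload first, e.g.
#   export pod='{"cluster": "local"}' (hypothetical value; the real settings are
#   injected by the marathon toolset at push time)
#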
class Model(Reactive):
depends_on = ['hook']
class Strategy(Piped):
cwd = '/opt/haproxy'
check_every = 60.0
pid = None
since = 0.0
def sanity_check(self, pid):
#
# - simply use the provided process ID to start counting time
# - this is a cheap way to measure the sub-process up-time
#
now = time.time()
if pid != self.pid:
self.pid = pid
self.since = now
lapse = (now - self.since) / 3600.0
return \
{
'uptime': '%.2f hours (pid %s)' % (lapse, pid)
}
def can_configure(self, cluster):
#
# - we need at least one downstream url to redirect traffic to
#
assert cluster.grep('hook', 5000), 'need 1+ downstream listener'
def tear_down(self, running):
#
# - force a SIGKILL to shut the proxy down
#
running.kill()
def configure(self, cluster):
#
# - grep our listeners (the CI hooks)
# - render into our 'local' backend directive (which is a standalone file)
#
urls = cluster.grep('hook', 5000).split(',')
env = Environment(loader=FileSystemLoader(join(dirname(__file__), 'templates')))
logger.info('%d downstream urls ->\n - %s' % (len(urls), '\n - '.join(urls)))
mappings = \
{
'port': 9000,
'listeners': {'listener-%d' % index: endpoint for index, endpoint in enumerate(urls)}
}
template = env.get_template('haproxy.cfg')
with open('%s/haproxy.cfg' % self.cwd, 'w') as f:
f.write(template.render(mappings))
#
# - at this point we have both the global/frontend and our default backend
# - start haproxy using both configuration files
#
return '/usr/sbin/haproxy -f haproxy.cfg', {}
Pod().boot(Strategy, model=Model, tools=[Shell])
|
[] |
[] |
[
"pod"
] |
[]
|
["pod"]
|
python
| 1 | 0 | |
internal/gitaly/hook/testhelper_test.go
|
package hook
import (
"context"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"testing"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitaly/v14/internal/command"
"gitlab.com/gitlab-org/gitaly/v14/internal/git"
"gitlab.com/gitlab-org/gitaly/v14/internal/gitaly/storage"
"gitlab.com/gitlab-org/gitaly/v14/internal/testhelper"
"gitlab.com/gitlab-org/gitaly/v14/proto/go/gitalypb"
)
func TestMain(m *testing.M) {
testhelper.Run(m)
}
func getExpectedEnv(ctx context.Context, t testing.TB, locator storage.Locator, gitCmdFactory git.CommandFactory, repo *gitalypb.Repository) []string {
repoPath, err := locator.GetPath(repo)
require.NoError(t, err)
expectedEnv := map[string]string{
"GIT_DIR": repoPath,
"GIT_TERMINAL_PROMPT": "0",
"GL_ID": "1234",
"GL_PROJECT_PATH": repo.GetGlProjectPath(),
"GL_PROTOCOL": "web",
"GL_REPOSITORY": repo.GetGlRepository(),
"GL_USERNAME": "user",
"PWD": repoPath,
}
execEnv := gitCmdFactory.GetExecutionEnvironment(ctx)
// This is really quite roundabout given that we'll convert it back to an array next, but
// we need to deduplicate environment variables here.
for _, allowedEnvVar := range append(command.AllowedEnvironment(os.Environ()), execEnv.EnvironmentVariables...) {
kv := strings.SplitN(allowedEnvVar, "=", 2)
require.Len(t, kv, 2)
expectedEnv[kv[0]] = kv[1]
}
expectedEnv["PATH"] = fmt.Sprintf("%s:%s", filepath.Dir(execEnv.BinaryPath), os.Getenv("PATH"))
result := make([]string, 0, len(expectedEnv))
for key, value := range expectedEnv {
result = append(result, fmt.Sprintf("%s=%s", key, value))
}
sort.Strings(result)
return result
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
vendor/github.com/containers/storage/types/options.go
|
package types
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/idtools"
"github.com/sirupsen/logrus"
)
// TOML-friendly explicit tables used for conversions.
type TomlConfig struct {
Storage struct {
Driver string `toml:"driver,omitempty"`
RunRoot string `toml:"runroot,omitempty"`
GraphRoot string `toml:"graphroot,omitempty"`
RootlessStoragePath string `toml:"rootless_storage_path,omitempty"`
Options cfg.OptionsConfig `toml:"options,omitempty"`
} `toml:"storage"`
}
const (
overlayDriver = "overlay"
overlay2 = "overlay2"
)
func init() {
defaultStoreOptions.RunRoot = defaultRunRoot
defaultStoreOptions.GraphRoot = defaultGraphRoot
defaultStoreOptions.GraphDriverName = ""
if _, err := os.Stat(defaultOverrideConfigFile); err == nil {
// The DefaultConfigFile(rootless) function returns the path
// of the used storage.conf file, by returning defaultConfigFile
// If override exists containers/storage uses it by default.
defaultConfigFile = defaultOverrideConfigFile
ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions)
} else {
if !os.IsNotExist(err) {
logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err)
}
ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
}
// reload could set values to empty for run and graph root if config does not contain anything
if defaultStoreOptions.RunRoot == "" {
defaultStoreOptions.RunRoot = defaultRunRoot
}
if defaultStoreOptions.GraphRoot == "" {
defaultStoreOptions.GraphRoot = defaultGraphRoot
}
}
// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing.
// Everyone but the tests this is intended for should only call DefaultStoreOptions, never this function.
func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) {
var (
defaultRootlessRunRoot string
defaultRootlessGraphRoot string
err error
)
storageOpts := defaultStoreOptions
if rootless && rootlessUID != 0 {
storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)
if err != nil {
return storageOpts, err
}
}
_, err = os.Stat(storageConf)
if err != nil && !os.IsNotExist(err) {
return storageOpts, err
}
if err == nil && !defaultConfigFileSet {
defaultRootlessRunRoot = storageOpts.RunRoot
defaultRootlessGraphRoot = storageOpts.GraphRoot
storageOpts = StoreOptions{}
reloadConfigurationFileIfNeeded(storageConf, &storageOpts)
if rootless && rootlessUID != 0 {
// If the file did not specify a graphroot or runroot,
// set sane defaults so we don't try and use root-owned
// directories
if storageOpts.RunRoot == "" {
storageOpts.RunRoot = defaultRootlessRunRoot
}
if storageOpts.GraphRoot == "" {
if storageOpts.RootlessStoragePath != "" {
storageOpts.GraphRoot = storageOpts.RootlessStoragePath
} else {
storageOpts.GraphRoot = defaultRootlessGraphRoot
}
}
}
}
if storageOpts.RunRoot != "" {
runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID)
if err != nil {
return storageOpts, err
}
storageOpts.RunRoot = runRoot
}
if storageOpts.GraphRoot != "" {
graphRoot, err := expandEnvPath(storageOpts.GraphRoot, rootlessUID)
if err != nil {
return storageOpts, err
}
storageOpts.GraphRoot = graphRoot
}
if storageOpts.RootlessStoragePath != "" {
storagePath, err := expandEnvPath(storageOpts.RootlessStoragePath, rootlessUID)
if err != nil {
return storageOpts, err
}
storageOpts.RootlessStoragePath = storagePath
}
return storageOpts, nil
}
// DefaultStoreOptions returns the default storage ops for containers
func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0)
if err != nil {
return defaultStoreOptions, err
}
return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf)
}
// StoreOptions is used for passing initialization options to GetStore(), for
// initializing a Store object and the underlying storage that it controls.
type StoreOptions struct {
// RunRoot is the filesystem path under which we can store run-time
// information, such as the locations of active mount points, that we
// want to lose if the host is rebooted.
RunRoot string `json:"runroot,omitempty"`
// GraphRoot is the filesystem path under which we will store the
// contents of layers, images, and containers.
GraphRoot string `json:"root,omitempty"`
// RootlessStoragePath is the storage path for rootless users
// default $HOME/.local/share/containers/storage
RootlessStoragePath string `toml:"rootless_storage_path"`
// GraphDriverName is the underlying storage driver that we'll be
// using. It only needs to be specified the first time a Store is
// initialized for a given RunRoot and GraphRoot.
GraphDriverName string `json:"driver,omitempty"`
// GraphDriverOptions are driver-specific options.
GraphDriverOptions []string `json:"driver-options,omitempty"`
// UIDMap and GIDMap are used for setting up a container's root filesystem
// for use inside of a user namespace where UID mapping is being used.
UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
// RootAutoNsUser is the user used to pick a subrange when automatically setting
// a user namespace for the root user.
RootAutoNsUser string `json:"root_auto_ns_user,omitempty"`
// AutoNsMinSize is the minimum size for an automatic user namespace.
AutoNsMinSize uint32 `json:"auto_userns_min_size,omitempty"`
// AutoNsMaxSize is the maximum size for an automatic user namespace.
AutoNsMaxSize uint32 `json:"auto_userns_max_size,omitempty"`
// PullOptions specifies options to be handed to pull managers
// This API is experimental and can be changed without bumping the major version number.
PullOptions map[string]string `toml:"pull_options"`
// DisableVolatile doesn't allow volatile mounts when it is set.
DisableVolatile bool `json:"disable-volatile,omitempty"`
}
// isRootlessDriver returns true if the given storage driver is valid for containers running as non root
func isRootlessDriver(driver string) bool {
validDrivers := map[string]bool{
"btrfs": true,
"overlay": true,
"overlay2": true,
"vfs": true,
}
return validDrivers[driver]
}
// getRootlessStorageOpts returns the storage opts for containers running as non root
func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) {
var opts StoreOptions
dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID)
if err != nil {
return opts, err
}
opts.RunRoot = rootlessRuntime
opts.PullOptions = systemOpts.PullOptions
if systemOpts.RootlessStoragePath != "" {
opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID)
if err != nil {
return opts, err
}
} else {
opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
}
if driver := systemOpts.GraphDriverName; isRootlessDriver(driver) {
opts.GraphDriverName = driver
}
if driver := os.Getenv("STORAGE_DRIVER"); driver != "" {
opts.GraphDriverName = driver
}
if opts.GraphDriverName == overlay2 {
logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver")
opts.GraphDriverName = overlayDriver
}
if opts.GraphDriverName == overlayDriver {
for _, o := range systemOpts.GraphDriverOptions {
if strings.Contains(o, "ignore_chown_errors") {
opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
break
}
}
}
if opts.GraphDriverName == "" {
opts.GraphDriverName = "vfs"
}
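// Illustrative only: STORAGE_OPTS is a comma-separated list of extra graph driver
// options, e.g. STORAGE_OPTS="overlay.mountopt=nodev,overlay.size=10G" would append
// both entries below (example values, not defaults).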
if os.Getenv("STORAGE_OPTS") != "" {
opts.GraphDriverOptions = append(opts.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...)
}
return opts, nil
}
// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers
func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {
uid := getRootlessUID()
return DefaultStoreOptions(uid != 0, uid)
}
var prevReloadConfig = struct {
storeOptions *StoreOptions
mod time.Time
mutex sync.Mutex
configFile string
}{}
// SetDefaultConfigFilePath sets the default configuration to the specified path
func SetDefaultConfigFilePath(path string) {
defaultConfigFile = path
defaultConfigFileSet = true
ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
}
func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) {
prevReloadConfig.mutex.Lock()
defer prevReloadConfig.mutex.Unlock()
fi, err := os.Stat(configFile)
if err != nil {
if !os.IsNotExist(err) {
fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
}
return
}
mtime := fi.ModTime()
if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
*storeOptions = *prevReloadConfig.storeOptions
return
}
ReloadConfigurationFile(configFile, storeOptions)
prevReloadConfig.storeOptions = storeOptions
prevReloadConfig.mod = mtime
prevReloadConfig.configFile = configFile
}
// ReloadConfigurationFile parses the specified configuration file and overrides
// the configuration in storeOptions.
func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
config := new(TomlConfig)
meta, err := toml.DecodeFile(configFile, &config)
if err == nil {
keys := meta.Undecoded()
if len(keys) > 0 {
logrus.Warningf("Failed to decode the keys %q from %q", keys, configFile)
}
} else {
if !os.IsNotExist(err) {
fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
return
}
}
// Clear storeOptions of previous settings
*storeOptions = StoreOptions{}
if config.Storage.Driver != "" {
storeOptions.GraphDriverName = config.Storage.Driver
}
if os.Getenv("STORAGE_DRIVER") != "" {
config.Storage.Driver = os.Getenv("STORAGE_DRIVER")
storeOptions.GraphDriverName = config.Storage.Driver
}
if storeOptions.GraphDriverName == overlay2 {
logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver")
storeOptions.GraphDriverName = overlayDriver
}
if storeOptions.GraphDriverName == "" {
logrus.Errorf("The storage 'driver' option must be set in %s to guarantee proper operation", configFile)
}
if config.Storage.RunRoot != "" {
storeOptions.RunRoot = config.Storage.RunRoot
}
if config.Storage.GraphRoot != "" {
storeOptions.GraphRoot = config.Storage.GraphRoot
}
if config.Storage.RootlessStoragePath != "" {
storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath
}
for _, s := range config.Storage.Options.AdditionalImageStores {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
}
for _, s := range config.Storage.Options.AdditionalLayerStores {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.additionallayerstore=%s", config.Storage.Driver, s))
}
if config.Storage.Options.Size != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size))
}
if config.Storage.Options.MountProgram != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram))
}
if config.Storage.Options.SkipMountHome != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome))
}
if config.Storage.Options.IgnoreChownErrors != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ignore_chown_errors=%s", config.Storage.Driver, config.Storage.Options.IgnoreChownErrors))
}
if config.Storage.Options.ForceMask != 0 {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.force_mask=%o", config.Storage.Driver, config.Storage.Options.ForceMask))
}
if config.Storage.Options.MountOpt != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt))
}
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" {
config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser
}
if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" {
config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup
}
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" {
mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup)
if err != nil {
fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
return
}
storeOptions.UIDMap = mappings.UIDs()
storeOptions.GIDMap = mappings.GIDs()
}
uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
if err != nil {
fmt.Print(err)
} else {
storeOptions.UIDMap = uidmap
}
gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
if err != nil {
fmt.Print(err)
} else {
storeOptions.GIDMap = gidmap
}
storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser
if config.Storage.Options.AutoUsernsMinSize > 0 {
storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize
}
if config.Storage.Options.AutoUsernsMaxSize > 0 {
storeOptions.AutoNsMaxSize = config.Storage.Options.AutoUsernsMaxSize
}
if config.Storage.Options.PullOptions != nil {
storeOptions.PullOptions = config.Storage.Options.PullOptions
}
storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...)
if opts, ok := os.LookupEnv("STORAGE_OPTS"); ok {
storeOptions.GraphDriverOptions = strings.Split(opts, ",")
}
if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" {
storeOptions.GraphDriverOptions = nil
}
}
func Options() StoreOptions {
return defaultStoreOptions
}
// Save overwrites the tomlConfig in storage.conf with the given conf
func Save(conf TomlConfig, rootless bool) error {
configFile, err := DefaultConfigFile(rootless)
if err != nil {
return err
}
if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil {
return err
}
f, err := os.Create(configFile)
if err != nil {
return err
}
return toml.NewEncoder(f).Encode(conf)
}
// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it
func StorageConfig(rootless bool) (*TomlConfig, error) {
config := new(TomlConfig)
configFile, err := DefaultConfigFile(rootless)
if err != nil {
return nil, err
}
_, err = toml.DecodeFile(configFile, &config)
if err != nil {
return nil, err
}
return config, nil
}
|
[
"\"STORAGE_DRIVER\"",
"\"STORAGE_OPTS\"",
"\"STORAGE_OPTS\"",
"\"STORAGE_DRIVER\"",
"\"STORAGE_DRIVER\""
] |
[] |
[
"STORAGE_OPTS",
"STORAGE_DRIVER"
] |
[]
|
["STORAGE_OPTS", "STORAGE_DRIVER"]
|
go
| 2 | 0 | |
hack/prowimagebuilder/main.go
|
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bufio"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"strconv"
"strings"
"sync"
"time"
"context"
"github.com/sirupsen/logrus"
"k8s.io/test-infra/prow/flagutil"
"sigs.k8s.io/yaml"
)
const (
defaultArch = "linux/amd64"
allArch = "all"
gatherStaicScriptName = "gather-static.sh"
// Relative to root of the repo
defaultProwImageListFile = "prow/.prow-images.yaml"
defaultWorkersCount = 10
defaultRetry = 3
// noOpKoDocerRepo is used when images are not pushed
noOpKoDocerRepo = "ko.local"
)
var (
rootDir string
otherArches = []string{
"linux/arm64",
"linux/s390x",
"linux/ppc64le",
}
defaultTags = []string{
"latest",
"latest-root",
}
)
func init() {
out, err := runCmd(nil, "git", "rev-parse", "--show-toplevel")
if err != nil {
logrus.WithError(err).Error("Failed getting git root dir")
os.Exit(1)
}
rootDir = out
if _, err := runCmdInDirFunc(path.Join(rootDir, "hack/tools"), nil, "go", "build", "-o", path.Join(rootDir, "_bin/ko"), "github.com/google/ko"); err != nil {
logrus.WithError(err).Error("Failed ensure ko")
os.Exit(1)
}
}
type options struct {
dockerRepo string
prowImageListFile string
images flagutil.Strings
workers int
push bool
maxRetry int
}
// Mock for unit testing purpose
var runCmdInDirFunc = runCmdInDir
func runCmdInDir(dir string, additionalEnv []string, cmd string, args ...string) (string, error) {
command := exec.Command(cmd, args...)
if dir != "" {
command.Dir = dir
}
command.Env = append(os.Environ(), additionalEnv...)
stdOut, err := command.StdoutPipe()
if err != nil {
return "", err
}
stdErr, err := command.StderrPipe()
if err != nil {
return "", err
}
if err := command.Start(); err != nil {
return "", err
}
scanner := bufio.NewScanner(stdOut)
var allOut string
for scanner.Scan() {
out := scanner.Text()
allOut = allOut + out
logrus.WithField("cmd", command.Args).Info(out)
}
allErr, _ := io.ReadAll(stdErr)
err = command.Wait()
// Print error only when command failed
if err != nil && len(allErr) > 0 {
logrus.WithField("cmd", command.Args).Error(string(allErr))
}
return strings.TrimSpace(allOut), err
}
func runCmd(additionalEnv []string, cmd string, args ...string) (string, error) {
return runCmdInDirFunc(rootDir, additionalEnv, cmd, args...)
}
type imageDef struct {
Dir string `json:"dir"`
Arch string `json:"arch"`
remainingRetry int
}
type imageDefs struct {
Defs []imageDef `json:"images"`
}
func loadImageDefs(p string) ([]imageDef, error) {
b, err := ioutil.ReadFile(p)
if err != nil {
return nil, err
}
var res imageDefs
if err := yaml.Unmarshal(b, &res); err != nil {
return nil, err
}
return res.Defs, nil
}
func allBaseTags() ([]string, error) {
gitTag, err := gitTag()
if err != nil {
return nil, err
}
// Add a `ko-<GIT_TAG>` tag so that it's easy to identify images built from
// ko vs. images built from bazel, in case there is a revert needed.
// TODO(chaodaiG): remove `ko-` tag once the images produced by ko proved to
// be working
return append(defaultTags, gitTag, "ko-"+gitTag), nil
}
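// allTags example (illustrative): with a base git tag of v20220101-abc1234 and arch
// "all", the result contains latest, latest-root, v20220101-abc1234,
// ko-v20220101-abc1234, plus arm64-, s390x- and ppc64le-suffixed copies of each base tag.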
func allTags(arch string) ([]string, error) {
baseTags, err := allBaseTags()
if err != nil {
return nil, err
}
var allTags = baseTags
for _, otherArch := range otherArches {
if arch != allArch && arch != otherArch {
continue
}
for _, base := range baseTags {
// So far only platform supported is linux, trimming off the linux/
// prefix so that there is no slash in tag. Also for consistency reasons.
platform := strings.Replace(otherArch, "linux/", "", 1)
allTags = append(allTags, fmt.Sprintf("%s-%s", base, platform))
}
}
return allTags, nil
}
// gitTag returns vYYYYMMDD-<GIT_DESCRIBE>, combining today's date with the output of git describe.
func gitTag() (string, error) {
prefix, err := runCmd(nil, "date", "+v%Y%m%d")
if err != nil {
return "", err
}
postfix, err := runCmd(nil, "git", "describe", "--always", "--dirty")
if err != nil {
return "", err
}
return fmt.Sprintf("%s-%s", prefix, postfix), nil
}
func runGatherStaticScript(id *imageDef, args ...string) error {
script := path.Join(rootDir, id.Dir, gatherStaicScriptName)
if _, err := os.Lstat(script); err != nil {
if !os.IsNotExist(err) {
return err
}
return nil
}
if _, err := runCmd(nil, script, args...); err != nil {
return err
}
return nil
}
func setup(id *imageDef) error {
return runGatherStaticScript(id)
}
func teardown(id *imageDef) error {
return runGatherStaticScript(id, "--cleanup")
}
func buildAndPush(id *imageDef, dockerRepos []string, push bool) error {
logger := logrus.WithField("image", id.Dir)
logger.Info("Build and push")
start := time.Now()
defer func(logger *logrus.Entry, start time.Time) {
logger.WithField("duration", time.Since(start).String()).Info("Duration of image building.")
}(logger, start)
// So far only supports certain arch
isSupportedArch := (id.Arch == defaultArch || id.Arch == allArch)
for _, otherArch := range otherArches {
if id.Arch == otherArch {
isSupportedArch = true
}
}
if !isSupportedArch {
return fmt.Errorf("Arch '%s' not supported, only support %v", id.Arch, append([]string{defaultArch, allArch}, otherArches...))
}
publishArgs := []string{"publish", fmt.Sprintf("--tarball=_bin/%s.tar", path.Base(id.Dir)), "--push=false"}
if push {
publishArgs = []string{"publish", "--push=true"}
}
tags, err := allTags(id.Arch)
if err != nil {
return fmt.Errorf("collecting tags: %w", err)
}
for _, tag := range tags {
publishArgs = append(publishArgs, fmt.Sprintf("--tags=%s", tag))
}
publishArgs = append(publishArgs, "--base-import-paths", "--platform="+id.Arch, "./"+id.Dir)
defer teardown(id)
if err := setup(id); err != nil {
return fmt.Errorf("setup: %w", err)
}
// ko only supports a single docker repo at a time, running this repeatedly
// on different docker repos so that multiple docker repos can be supported.
// This process utilizes the built-in cache of ko, so that pushing to
// subsequent docker repo(s) is relatively cheap.
for _, dockerRepo := range dockerRepos {
logger.WithField("args", publishArgs).Info("Running ko.")
if _, err = runCmd([]string{"KO_DOCKER_REPO=" + dockerRepo}, "_bin/ko", publishArgs...); err != nil {
return fmt.Errorf("running ko: %w", err)
}
}
return nil
}
func (o *options) imageAllowed(image string) bool {
return len(o.images.Strings()) == 0 || o.images.StringSet().Has(image)
}
func main() {
var o options
flag.StringVar(&o.prowImageListFile, "prow-images-file", path.Join(rootDir, defaultProwImageListFile), "Yaml file contains list of prow images")
flag.Var(&o.images, "image", "Images to be built, must be part of --prow-images-file, can be passed in repeatedly")
flag.StringVar(&o.dockerRepo, "ko-docker-repo", os.Getenv("KO_DOCKER_REPO"), "Dockers repos, separated by comma")
flag.IntVar(&o.workers, "workers", defaultWorkersCount, "Number of workers in parallel")
flag.BoolVar(&o.push, "push", false, "whether push or not")
flag.IntVar(&o.maxRetry, "retry", defaultRetry, "Number of times retrying for each image")
flag.Parse()
if !o.push && o.dockerRepo == "" {
o.dockerRepo = noOpKoDocerRepo
}
// By default, ensure images get a real creation timestamp (instead of 1970), ref:
// https://github.com/google/ko#why-are-my-images-all-created-in-1970
if err := os.Setenv("SOURCE_DATE_EPOCH", strconv.Itoa(int(time.Now().Unix()))); err != nil {
logrus.WithError(err).Error("Failed setting SOURCE_DATE_EPOCH")
os.Exit(1)
}
// Set VERSION for embedding versions with go build
gitTag, err := gitTag()
if err != nil {
logrus.WithError(err).Error("Failed get git tag")
os.Exit(1)
}
if err := os.Setenv("VERSION", gitTag); err != nil {
logrus.WithError(err).Error("Failed setting VERSION")
os.Exit(1)
}
ids, err := loadImageDefs(o.prowImageListFile)
if err != nil {
logrus.WithError(err).WithField("prow-image-file", o.prowImageListFile).Error("Failed loading")
os.Exit(1)
}
var wg sync.WaitGroup
imageChan := make(chan imageDef, 10)
errChan := make(chan error, len(ids))
doneChan := make(chan imageDef, len(ids))
// Start workers
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for i := 0; i < o.workers; i++ {
go func(ctx context.Context, imageChan chan imageDef, errChan chan error, doneChan chan imageDef) {
for {
select {
case id := <-imageChan:
err := buildAndPush(&id, strings.Split(o.dockerRepo, ","), o.push)
if err != nil {
if id.remainingRetry > 0 {
// Let another routine handle this, better luck maybe?
id.remainingRetry--
imageChan <- id
// Don't call wg.Done() as we are not done yet
continue
}
errChan <- fmt.Errorf("building image for %s failed: %w", id.Dir, err)
}
doneChan <- id
case <-ctx.Done():
return
}
}
}(ctx, imageChan, errChan, doneChan)
}
var targetImagesCount int
for _, id := range ids {
id := id
if !o.imageAllowed(id.Dir) {
logrus.WithFields(logrus.Fields{"allowed-images": o.images, "image": id.Dir}).Info("Skipped.")
continue
}
id.remainingRetry = o.maxRetry
if id.Arch == "" {
id.Arch = defaultArch
}
// Feed the image into the worker channel
wg.Add(1)
imageChan <- id
targetImagesCount++
}
// This tool is also used to test image building, so make sure it actually has something to build.
if targetImagesCount == 0 {
logrus.Error("There is no image to build.")
os.Exit(1)
}
go func(ctx context.Context, wg *sync.WaitGroup, doneChan chan imageDef) {
var done int
for {
select {
case id := <-doneChan:
done++
logrus.WithFields(logrus.Fields{"image": id.Dir, "done": done, "total": targetImagesCount}).Info("Done with image.")
wg.Done()
case <-ctx.Done():
return
}
}
}(ctx, &wg, doneChan)
wg.Wait()
for {
select {
case err := <-errChan:
logrus.WithError(err).Error("Failed.")
os.Exit(1)
default:
return
}
}
}
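// Illustrative invocation sketch (assumption, all flag values are placeholders):
//
//	go run . --prow-images-file=prow-images.yaml --ko-docker-repo=gcr.io/example-repo \
//		--image=cmd/some-image --workers=4 --push=false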
|
[
"\"KO_DOCKER_REPO\""
] |
[] |
[
"KO_DOCKER_REPO"
] |
[]
|
["KO_DOCKER_REPO"]
|
go
| 1 | 0 | |
cmd/virtual-kubelet/root/opts.go
|
// Copyright © 2017 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package root
import (
"fmt"
"os"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/liqotech/liqo/pkg/consts"
argsutils "github.com/liqotech/liqo/pkg/utils/args"
)
// Defaults for root command options.
const (
DefaultNodeName = "virtual-kubelet"
DefaultInformerResyncPeriod = 1 * time.Minute
DefaultLiqoInformerResyncPeriod time.Duration = 0
DefaultMetricsAddr = ":10255"
DefaultListenPort = 10250
DefaultPodWorkers = 10
DefaultServiceWorkers = 3
DefaultEndpointSliceWorkers = 10
DefaultConfigMapWorkers = 3
DefaultSecretWorkers = 3
DefaultPersistenVolumeClaimWorkers = 3
DefaultKubeletNamespace = "default"
DefaultLiqoIpamServer = consts.NetworkManagerServiceName
)
// Opts stores all the options for configuring the root virtual-kubelet command.
// It is used for setting flag values.
//
// You can set the default options by creating a new `Opts` struct and passing
// it into `SetDefaultOpts`.
type Opts struct {
// Sets the port to listen for requests from the Kubernetes API server
ListenPort int32
// Node name to use when creating a node in Kubernetes
NodeName string
HomeKubeconfig string
MetricsAddr string
// Number of workers to use to handle pod notifications and resource reflection
PodWorkers uint
ServiceWorkers uint
EndpointSliceWorkers uint
ConfigMapWorkers uint
SecretWorkers uint
PersistenVolumeClaimWorkers uint
InformerResyncPeriod time.Duration
LiqoInformerResyncPeriod time.Duration
// Startup Timeout is how long to wait for the kubelet to start
StartupTimeout time.Duration
ForeignClusterID string
HomeClusterID string
KubeletNamespace string
LiqoIpamServer string
Profiling bool
NodeExtraAnnotations argsutils.StringMap
NodeExtraLabels argsutils.StringMap
EnableStorage bool
VirtualStorageClassName string
RemoteRealStorageClassName string
}
// SetDefaultOpts sets default options for unset values on the passed in option struct.
// Fields that are already set will not be modified.
func SetDefaultOpts(c *Opts) error {
if c.InformerResyncPeriod == 0 {
c.InformerResyncPeriod = DefaultInformerResyncPeriod
}
if c.LiqoInformerResyncPeriod == 0 {
c.LiqoInformerResyncPeriod = DefaultLiqoInformerResyncPeriod
}
if c.MetricsAddr == "" {
c.MetricsAddr = DefaultMetricsAddr
}
if c.PodWorkers == 0 {
c.PodWorkers = DefaultPodWorkers
}
if c.ServiceWorkers == 0 {
c.ServiceWorkers = DefaultServiceWorkers
}
if c.EndpointSliceWorkers == 0 {
c.EndpointSliceWorkers = DefaultEndpointSliceWorkers
}
if c.ConfigMapWorkers == 0 {
c.ConfigMapWorkers = DefaultConfigMapWorkers
}
if c.SecretWorkers == 0 {
c.SecretWorkers = DefaultSecretWorkers
}
if c.PersistenVolumeClaimWorkers == 0 {
c.PersistenVolumeClaimWorkers = DefaultPersistenVolumeClaimWorkers
}
if c.ListenPort == 0 {
if kp := os.Getenv("KUBELET_PORT"); kp != "" {
p, err := strconv.ParseInt(kp, 10, 32)
if err != nil {
return errors.Wrap(err, "error parsing KUBELET_PORT environment variable")
}
c.ListenPort = int32(p)
} else {
c.ListenPort = DefaultListenPort
}
}
if c.KubeletNamespace == "" {
c.KubeletNamespace = DefaultKubeletNamespace
}
if c.HomeKubeconfig == "" {
c.HomeKubeconfig = os.Getenv("KUBECONFIG")
}
if c.LiqoIpamServer == "" {
c.LiqoIpamServer = fmt.Sprintf("%v:%v", consts.NetworkManagerServiceName, consts.NetworkManagerIpamPort)
}
return nil
}
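// Illustrative usage sketch (assumption, not part of the original file): callers build an
// Opts value and let SetDefaultOpts fill in unset fields, implicitly honoring the
// KUBELET_PORT and KUBECONFIG environment variables when they are set:
//
//	opts := &Opts{NodeName: "my-virtual-node"}
//	if err := SetDefaultOpts(opts); err != nil {
//		// handle the error
//	}
//	// opts.ListenPort is now KUBELET_PORT (if set) or DefaultListenPort.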
|
[
"\"KUBELET_PORT\"",
"\"KUBECONFIG\""
] |
[] |
[
"KUBELET_PORT",
"KUBECONFIG"
] |
[]
|
["KUBELET_PORT", "KUBECONFIG"]
|
go
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Website.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
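# Typical invocations (illustrative only, not part of the original file):
#   python manage.py migrate
#   python manage.py runserver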
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/example-codes/kick-bot-example-01.py
|
import os
import random
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
bot = commands.Bot(command_prefix='.')
@bot.command(name='99', help='Responds with a random quote from Brooklyn 99')
async def nine_nine(ctx):
brooklyn_99_quotes = [
'I\'m the human form of the 💯 emoji.',
'Bingpot!',
(
'Cool. Cool cool cool cool cool cool cool, '
'no doubt no doubt no doubt no doubt.'
),
]
response = random.choice(brooklyn_99_quotes)
await ctx.send(response)
bot.run(TOKEN)
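# Illustrative note (assumption): load_dotenv() above reads a .env file (typically next to
# this script) that provides the token, e.g.:
#   DISCORD_TOKEN=your-bot-token-here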
|
[] |
[] |
[
"DISCORD_TOKEN"
] |
[]
|
["DISCORD_TOKEN"]
|
python
| 1 | 0 | |
python/paddle/fluid/__init__.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import atexit
# The legacy core needs to be removed before "import core",
# in case users install paddlepaddle without the -U option
core_suffix = 'so'
if os.name == 'nt':
core_suffix = 'pyd'
legacy_core = os.path.abspath(os.path.dirname(
__file__)) + os.sep + 'core.' + core_suffix
if os.path.exists(legacy_core):
sys.stderr.write('Deleting legacy file ' + legacy_core + '\n')
try:
os.remove(legacy_core)
except Exception as e:
raise e
# Patch LoDTensor
from . import core
core.LoDTensor = core.Tensor
# import all class inside framework into fluid module
from . import framework
from .framework import *
# import all class inside executor into fluid module
from . import executor
from .executor import *
from . import data_feed_desc
from .data_feed_desc import *
from . import dataset
from .dataset import *
from .data import *
from . import trainer_desc
from . import io
from . import evaluator
from . import initializer
from .initializer import set_global_initializer
from . import layers
from . import dygraph
from . import contrib
from . import nets
from . import optimizer
from . import backward
from .backward import gradients
from . import regularizer
from . import average
from . import metrics
from . import transpiler
from . import incubate
from .input import embedding, one_hot
from . import distribute_lookup_table
from .param_attr import ParamAttr, WeightNormParamAttr
from .data_feeder import DataFeeder
from .core import LoDTensor, LoDTensorArray, Scope, _Scope
from .core import CPUPlace, XPUPlace, CUDAPlace, CUDAPinnedPlace, NPUPlace, IPUPlace, MLUPlace
from .incubate import fleet
from .transpiler import DistributeTranspiler, \
memory_optimize, release_memory, DistributeTranspilerConfig
from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
from . import clip
from . import profiler
from . import unique_name
from . import parallel_executor
from .parallel_executor import *
from . import compiler
from .compiler import *
from paddle.fluid.layers.math_op_patch import monkey_patch_variable
from . import install_check
from .dygraph.nn import *
from .dygraph.layers import *
from .dygraph.base import enable_dygraph, disable_dygraph
from .io import save, load, load_program_state, set_program_state
from .dygraph.checkpoint import save_dygraph, load_dygraph
from .dygraph.varbase_patch_methods import monkey_patch_varbase
from . import generator
from .core import _cuda_synchronize
from .generator import Generator
from .trainer_desc import TrainerDesc, DistMultiTrainer, PipelineTrainer, HeterPipelineTrainer, MultiTrainer, HeterXpuTrainer
from .transpiler import HashName, RoundRobin
from .backward import append_backward
Tensor = LoDTensor
enable_imperative = enable_dygraph
disable_imperative = disable_dygraph
__all__ = framework.__all__ + executor.__all__ + \
trainer_desc.__all__ + transpiler.__all__ + \
parallel_executor.__all__ + lod_tensor.__all__ + \
data_feed_desc.__all__ + compiler.__all__ + backward.__all__ + generator.__all__ + [
'io',
'initializer',
'embedding',
'one_hot',
'layers',
'contrib',
'data',
'dygraph',
'enable_dygraph',
'disable_dygraph',
'enable_imperative',
'disable_imperative',
'transpiler',
'nets',
'optimizer',
'backward',
'regularizer',
'LoDTensor',
'LoDTensorArray',
'CPUPlace',
'XPUPlace',
'CUDAPlace',
'CUDAPinnedPlace',
'NPUPlace',
'IPUPlace',
'MLUPlace',
'Tensor',
'ParamAttr',
'WeightNormParamAttr',
'DataFeeder',
'clip',
'profiler',
'unique_name',
'Scope',
'install_check',
'save',
'load',
'_cuda_synchronize'
]
def __bootstrap__():
"""
Enable reading gflags from environment variables.
Returns:
None
"""
import sys
import os
import platform
from . import core
# NOTE(zhiqiu): When (1) numpy < 1.19 and (2) python < 3.7,
# unittest is always imported by numpy (though maybe not in all versions),
# so in_test is True and p2p is not initialized.
in_test = 'unittest' in sys.modules
try:
num_threads = int(os.getenv('OMP_NUM_THREADS', '1'))
except ValueError:
num_threads = 1
if num_threads > 1:
print(
'WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation '
'speed will not be optimized if you use data parallelism. It will '
'fail if this PaddlePaddle binary is compiled with OpenBlas since'
' OpenBlas does not support multi-threading.'.format(num_threads),
file=sys.stderr)
print('PLEASE USE OMP_NUM_THREADS WISELY.', file=sys.stderr)
os.environ['OMP_NUM_THREADS'] = str(num_threads)
flag_prefix = "FLAGS_"
read_env_flags = [
key[len(flag_prefix):] for key in core.globals().keys()
if key.startswith(flag_prefix)
]
def remove_flag_if_exists(name):
if name in read_env_flags:
read_env_flags.remove(name)
sysstr = platform.system()
if 'Darwin' in sysstr:
remove_flag_if_exists('use_pinned_memory')
if os.name == 'nt':
remove_flag_if_exists('cpu_deterministic')
if core.is_compiled_with_ipu():
# Currently we request all available IPUs for training and testing;
# finer control of the pod of IPUs will be added later
read_env_flags += []
core.init_gflags(["--tryfromenv=" + ",".join(read_env_flags)])
# Note(zhouwei25): sys may not have argv in some cases,
# e.g. when the Python/C API is used to call Python from C++
try:
core.init_glog(sys.argv[0])
except Exception:
sys.argv = [""]
core.init_glog(sys.argv[0])
# don't init_p2p when in unittest to save time.
core.init_devices()
# TODO(panyx0718): Avoid doing complex initialization logic in __init__.py.
# Consider paddle.init(args) or paddle.main(args)
monkey_patch_variable()
__bootstrap__()
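# Illustrative note (assumption): because __bootstrap__ builds --tryfromenv from the
# FLAGS_-prefixed keys in core.globals(), exporting e.g. FLAGS_<flag_name>=<value> in the
# environment before importing paddle.fluid overrides the corresponding gflag.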
monkey_patch_varbase()
# NOTE(zhiqiu): register npu_finalize on the exit of Python,
# do some clean up manually.
if core.is_compiled_with_npu():
atexit.register(core.npu_finalize)
# NOTE(Aurelius84): clean up ExecutorCacheInfo in advance manually.
atexit.register(core.clear_executor_cache)
|
[] |
[] |
[
"OMP_NUM_THREADS"
] |
[]
|
["OMP_NUM_THREADS"]
|
python
| 1 | 0 | |
youtube_dl/YoutubeDL.py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import tokenize
import traceback
import random
from string import ascii_letters
from .compat import (
compat_basestring,
compat_cookiejar,
compat_get_terminal_size,
compat_http_client,
compat_kwargs,
compat_numeric_types,
compat_os_name,
compat_str,
compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
compat_urllib_request_DataHandler,
)
from .utils import (
age_restricted,
args_to_str,
ContentTooShortError,
date_from_str,
DateRange,
DEFAULT_OUTTMPL,
determine_ext,
determine_protocol,
DownloadError,
encode_compat_str,
encodeFilename,
error_to_compat_str,
expand_path,
ExtractorError,
format_bytes,
formatSeconds,
GeoRestrictedError,
int_or_none,
ISO3166Utils,
locked_file,
make_HTTPS_handler,
MaxDownloadsReached,
orderedSet,
PagedList,
parse_filesize,
PerRequestProxyHandler,
platform_name,
PostProcessingError,
preferredencoding,
prepend_extension,
register_socks_protocols,
render_table,
replace_extension,
SameFileError,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
std_headers,
str_or_none,
subtitles_filename,
UnavailableVideoError,
url_basename,
version_tuple,
write_json_file,
write_string,
YoutubeDLCookieJar,
YoutubeDLCookieProcessor,
YoutubeDLHandler,
YoutubeDLRedirectHandler,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
from .extractor.openload import PhantomJSwrapper
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
FFmpegFixupM3u8PP,
FFmpegFixupM4aPP,
FFmpegFixupStretchedPP,
FFmpegMergerPP,
FFmpegPostProcessor,
get_postprocessor,
)
from .version import __version__
if compat_os_name == 'nt':
import ctypes
class YoutubeDL(object):
"""YoutubeDL class.
YoutubeDL objects are the ones responsible of downloading the
actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per
program. Given a video URL, the downloader doesn't know how to
extract all the needed information by itself (that is the task of the
InfoExtractors), so it has to pass the URL to one of them.
For this, YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
a URL, the YoutubeDL object hands it to the first InfoExtractor it
finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and
YoutubeDL processes the extracted information, possibly using a File
Downloader to download the video.
YoutubeDL objects accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead. These options are available through the params
attribute for the InfoExtractors to use. The YoutubeDL also
registers itself as the downloader in charge of the InfoExtractors
that are added to it, so this is a "mutual registration".
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
videopassword: Password for accessing a video.
ap_mso: Adobe Pass multiple-system operator identifier.
ap_username: Multiple-system operator account username.
ap_password: Multiple-system operator account password.
usenetrc: Use netrc for authentication instead.
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
no_warnings: Do not print out anything for warnings.
forceurl: Force printing final URL.
forcetitle: Force printing title.
forceid: Force printing ID.
forcethumbnail: Force printing thumbnail URL.
forcedescription: Force printing description.
forcefilename: Force printing final filename.
forceduration: Force printing duration.
forcejson: Force printing info_dict as JSON.
dump_single_json: Force printing the info_dict of the whole playlist
(or video) as a single JSON line.
simulate: Do not download the video files.
format: Video format code. See options.py for more information.
outtmpl: Template for output names.
restrictfilenames: Do not allow "&" and spaces in file names
ignoreerrors: Do not stop on download errors.
force_generic_extractor: Force downloader to use the generic extractor
nooverwrites: Prevent overwriting files.
playliststart: Playlist item to start at.
playlistend: Playlist item to end at.
playlist_items: Specific indices of playlist to download.
playlistreverse: Download playlist items in reverse order.
playlistrandom: Download playlist items in random order.
matchtitle: Download only matching titles.
rejecttitle: Reject downloads for matching titles.
logger: Log messages to a logging.Logger instance.
logtostderr: Log messages to stderr instead of stdout.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
writeannotations: Write the video annotations to a .annotations.xml file
writethumbnail: Write the thumbnail image to a file
write_all_thumbnails: Write all thumbnail formats to files
writesubtitles: Write the video subtitles to a file
writeautomaticsub: Write the automatically generated subtitles to a file
allsubtitles: Downloads all the subtitles of the video
(requires writesubtitles or writeautomaticsub)
listsubtitles: Lists all available subtitles for the video
subtitlesformat: The format code for subtitles
subtitleslangs: List of languages of the subtitles to download
keepvideo: Keep the video file after post-processing
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
False to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
min_views: An integer representing the minimum view count the video
must have in order to not be skipped.
Videos without view count information are always
downloaded. None for no limit.
max_views: An integer representing the maximum view count.
Videos that are more popular than that are not
downloaded.
Videos without view count information are always
downloaded. None for no limit.
download_archive: File name of a file where all downloads are recorded.
Videos already present in the file are not downloaded
again.
cookiefile: File name where cookies should be read from and dumped to.
nocheckcertificate:Do not verify SSL certificates
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
At the moment, this is only supported by YouTube.
proxy: URL of the proxy server to use
geo_verification_proxy: URL of the proxy to use for IP address verification
on geo-restricted sites.
socket_timeout: Time to wait for unresponsive hosts, in seconds
bidi_workaround: Work around buggy terminals without bidirectional text
support, using fribidi
debug_printtraffic:Print out sent and received HTTP traffic
include_ads: Download ads as well
default_search: Prepend this string if an input url is not valid.
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
extract_flat: Do not resolve URLs, return the immediate result.
Pass in 'in_playlist' to only show this behavior for
playlist items.
postprocessors: A list of dictionaries, each with an entry
* key: The name of the postprocessor. See
youtube_dl/postprocessor/__init__.py for a list.
as well as any further keyword arguments for the
postprocessor.
progress_hooks: A list of functions that get called on download
progress, with a dictionary with the entries
* status: One of "downloading", "error", or "finished".
Check this first and ignore unknown values.
If status is one of "downloading", or "finished", the
following properties may also be present:
* filename: The final filename (always present)
* tmpfilename: The filename we're currently writing to
* downloaded_bytes: Bytes on disk
* total_bytes: Size of the whole file, None if unknown
* total_bytes_estimate: Guess of the eventual file size,
None if unavailable.
* elapsed: The number of seconds since download started.
* eta: The estimated time in seconds, None if unknown
* speed: The download speed in bytes/second, None if
unknown
* fragment_index: The counter of the currently
downloaded video fragment.
* fragment_count: The number of fragments (= individual
files that will be merged)
Progress hooks are guaranteed to be called at least once
(with status "finished") if the download is successful.
merge_output_format: Extension to use when merging formats.
fixup: Automatically correct known faults of the file.
One of:
- "never": do nothing
- "warn": only emit a warning
- "detect_or_warn": check whether we can do anything
about it, warn otherwise (default)
source_address: Client-side IP address to bind to.
call_home: Boolean, true iff we are allowed to contact the
youtube-dl servers for debugging.
sleep_interval: Number of seconds to sleep before each download when
used alone or a lower bound of a range for randomized
sleep before each download (minimum possible number
of seconds to sleep) when used along with
max_sleep_interval.
max_sleep_interval:Upper bound of a range for randomized sleep before each
download (maximum possible number of seconds to sleep).
Must only be used along with sleep_interval.
Actual sleep time will be a random float from range
[sleep_interval; max_sleep_interval].
listformats: Print an overview of available video formats and exit.
list_thumbnails: Print a table of all thumbnails and exit.
match_filter: A function that gets called with the info_dict of
every video.
If it returns a message, the video is ignored.
If it returns None, the video is downloaded.
match_filter_func in utils.py is one example for this.
no_color: Do not emit color codes in output.
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
HTTP header
geo_bypass_country:
Two-letter ISO 3166-2 country code that will be used for
explicit geographic restriction bypassing via faking
X-Forwarded-For HTTP header
geo_bypass_ip_block:
IP range in CIDR notation that will be used similarly to
geo_bypass_country
The following options determine which downloader is picked:
external_downloader: Executable of the external downloader to call.
None or unset for standard (built-in) downloader.
hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
if True; use ffmpeg/avconv if False; use the downloader
suggested by the extractor if None.
The following parameters are not used by YoutubeDL itself, they are used by
the downloader (see youtube_dl/downloader/common.py):
nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
noresizebuffer, retries, continuedl, noprogress, consoletitle,
xattr_set_filesize, external_downloader_args, hls_use_mpegts,
http_chunk_size.
The following options are used by the post processors:
prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
otherwise prefer ffmpeg.
ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
to the binary or its containing directory.
postprocessor_args: A list of additional command-line arguments for the
postprocessor.
The following options are used by the Youtube extractor:
youtube_include_dash_manifest: If True (default), DASH manifests and related
data will be downloaded and processed by extractor.
You can reduce network I/O by disabling it if you don't
care about DASH.
"""
_NUMERIC_FIELDS = set((
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
'timestamp', 'upload_year', 'upload_month', 'upload_day',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
'average_rating', 'comment_count', 'age_limit',
'start_time', 'end_time',
'chapter_number', 'season_number', 'episode_number',
'track_number', 'disc_number', 'release_year',
'playlist_index',
))
params = None
_ies = []
_pps = []
_download_retcode = None
_num_downloads = None
_screen_file = None
def __init__(self, params=None, auto_init=True):
"""Create a FileDownloader object with the given options."""
if params is None:
params = {}
self._ies = []
self._ies_instances = {}
self._pps = []
self._progress_hooks = []
self._download_retcode = 0
self._num_downloads = 0
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr
self.params = {
# Default parameters
'nocheckcertificate': False,
}
self.params.update(params)
self.cache = Cache(self)
def check_deprecated(param, option, suggestion):
if self.params.get(param) is not None:
self.report_warning(
'%s is deprecated. Use %s instead.' % (option, suggestion))
return True
return False
if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
if self.params.get('geo_verification_proxy') is None:
self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits')
check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
if params.get('bidi_workaround', False):
try:
import pty
master, slave = pty.openpty()
width = compat_get_terminal_size().columns
if width is None:
width_args = []
else:
width_args = ['-w', str(width)]
sp_kwargs = dict(
stdin=subprocess.PIPE,
stdout=slave,
stderr=self._err_file)
try:
self._output_process = subprocess.Popen(
['bidiv'] + width_args, **sp_kwargs
)
except OSError:
self._output_process = subprocess.Popen(
['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
self._output_channel = os.fdopen(master, 'rb')
except OSError as ose:
if ose.errno == errno.ENOENT:
self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
else:
raise
if (sys.platform != 'win32'
and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
and not params.get('restrictfilenames', False)):
# Unicode filesystem API will throw errors (#1474, #13027)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True
if isinstance(params.get('outtmpl'), bytes):
self.report_warning(
'Parameter outtmpl is bytes, but should be a unicode string. '
'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
self._setup_opener()
if auto_init:
self.print_debug_header()
self.add_default_info_extractors()
for pp_def_raw in self.params.get('postprocessors', []):
pp_class = get_postprocessor(pp_def_raw['key'])
pp_def = dict(pp_def_raw)
del pp_def['key']
pp = pp_class(self, **compat_kwargs(pp_def))
self.add_post_processor(pp)
for ph in self.params.get('progress_hooks', []):
self.add_progress_hook(ph)
register_socks_protocols()
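# Illustrative note (assumption): a 'postprocessors' entry is a dict whose 'key' names a
# class from youtube_dl/postprocessor/__init__.py plus its keyword arguments, e.g.
#   {'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'}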
def warn_if_short_id(self, argv):
# short YouTube ID starting with dash?
idxs = [
i for i, a in enumerate(argv)
if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
if idxs:
correct_argv = (
['youtube-dl']
+ [a for i, a in enumerate(argv) if i not in idxs]
+ ['--'] + [argv[i] for i in idxs]
)
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s\n' %
args_to_str(correct_argv))
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
if not isinstance(ie, type):
self._ies_instances[ie.ie_key()] = ie
ie.set_downloader(self)
def get_info_extractor(self, ie_key):
"""
Get an instance of an IE with name ie_key. It will try to get one from
the _ies list; if there's no instance, it will create a new one and add
it to the extractor list.
"""
ie = self._ies_instances.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)()
self.add_info_extractor(ie)
return ie
def add_default_info_extractors(self):
"""
Add the InfoExtractors returned by gen_extractors to the end of the list
"""
for ie in gen_extractor_classes():
self.add_info_extractor(ie)
def add_post_processor(self, pp):
"""Add a PostProcessor object to the end of the chain."""
self._pps.append(pp)
pp.set_downloader(self)
def add_progress_hook(self, ph):
"""Add the progress hook (currently only for the file downloader)"""
self._progress_hooks.append(ph)
def _bidi_workaround(self, message):
if not hasattr(self, '_output_channel'):
return message
assert hasattr(self, '_output_process')
assert isinstance(message, compat_str)
line_count = message.count('\n') + 1
self._output_process.stdin.write((message + '\n').encode('utf-8'))
self._output_process.stdin.flush()
res = ''.join(self._output_channel.readline().decode('utf-8')
for _ in range(line_count))
return res[:-len('\n')]
def to_screen(self, message, skip_eol=False):
"""Print message to stdout if not in quiet mode."""
return self.to_stdout(message, skip_eol, check_quiet=True)
def _write_string(self, s, out=None):
write_string(s, out=out, encoding=self.params.get('encoding'))
def to_stdout(self, message, skip_eol=False, check_quiet=False):
"""Print message to stdout if not in quiet mode."""
if self.params.get('logger'):
self.params['logger'].debug(message)
elif not check_quiet or not self.params.get('quiet', False):
message = self._bidi_workaround(message)
terminator = ['\n', ''][skip_eol]
output = message + terminator
self._write_string(output, self._screen_file)
def to_stderr(self, message):
"""Print message to stderr."""
assert isinstance(message, compat_str)
if self.params.get('logger'):
self.params['logger'].error(message)
else:
message = self._bidi_workaround(message)
output = message + '\n'
self._write_string(output, self._err_file)
def to_console_title(self, message):
if not self.params.get('consoletitle', False):
return
if compat_os_name == 'nt':
if ctypes.windll.kernel32.GetConsoleWindow():
# c_wchar_p() might not be necessary if `message` is
# already of type unicode()
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
elif 'TERM' in os.environ:
self._write_string('\033]0;%s\007' % message, self._screen_file)
def save_console_title(self):
if not self.params.get('consoletitle', False):
return
if self.params.get('simulate', False):
return
if compat_os_name != 'nt' and 'TERM' in os.environ:
# Save the title on stack
self._write_string('\033[22;0t', self._screen_file)
def restore_console_title(self):
if not self.params.get('consoletitle', False):
return
if self.params.get('simulate', False):
return
if compat_os_name != 'nt' and 'TERM' in os.environ:
# Restore the title from stack
self._write_string('\033[23;0t', self._screen_file)
def __enter__(self):
self.save_console_title()
return self
def __exit__(self, *args):
self.restore_console_title()
if self.params.get('cookiefile') is not None:
self.cookiejar.save(ignore_discard=True, ignore_expires=True)
def trouble(self, message=None, tb=None):
"""Determine action to take when a download problem appears.
Depending on whether the downloader has been configured to ignore
download errors or not, this method may raise an exception or
not when errors are found, after printing the message.
tb, if given, is additional traceback information.
"""
if message is not None:
self.to_stderr(message)
if self.params.get('verbose'):
if tb is None:
if sys.exc_info()[0]: # if .trouble has been called from an except block
tb = ''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
tb += encode_compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = ''.join(tb_data)
self.to_stderr(tb)
if not self.params.get('ignoreerrors', False):
if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
exc_info = sys.exc_info()[1].exc_info
else:
exc_info = sys.exc_info()
raise DownloadError(message, exc_info)
self._download_retcode = 1
def report_warning(self, message):
'''
Print the message to stderr; it will be prefixed with 'WARNING:'.
If stderr is a tty, the 'WARNING:' prefix will be colored.
'''
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
if self.params.get('no_warnings'):
return
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
warning_message = '%s %s' % (_msg_header, message)
self.to_stderr(warning_message)
def report_error(self, message, tb=None):
'''
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;31mERROR:\033[0m'
else:
_msg_header = 'ERROR:'
error_message = '%s %s' % (_msg_header, message)
self.trouble(error_message, tb)
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def prepare_filename(self, info_dict):
"""Generate the output filename."""
try:
template_dict = dict(info_dict)
template_dict['epoch'] = int(time.time())
autonumber_size = self.params.get('autonumber_size')
if autonumber_size is None:
autonumber_size = 5
template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
if template_dict.get('resolution') is None:
if template_dict.get('width') and template_dict.get('height'):
template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
elif template_dict.get('height'):
template_dict['resolution'] = '%sp' % template_dict['height']
elif template_dict.get('width'):
template_dict['resolution'] = '%dx?' % template_dict['width']
sanitize = lambda k, v: sanitize_filename(
compat_str(v),
restricted=self.params.get('restrictfilenames'),
is_id=(k == 'id' or k.endswith('_id')))
template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
for k, v in template_dict.items()
if v is not None and not isinstance(v, (list, tuple, dict)))
template_dict = collections.defaultdict(lambda: 'NA', template_dict)
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
# For fields playlist_index and autonumber convert all occurrences
# of %(field)s to %(field)0Nd for backward compatibility
field_size_compat_map = {
'playlist_index': len(str(template_dict['n_entries'])),
'autonumber': autonumber_size,
}
FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
if mobj:
outtmpl = re.sub(
FIELD_SIZE_COMPAT_RE,
r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
outtmpl)
# Missing numeric fields used together with integer presentation types
# in format specification will break the argument substitution since
# string 'NA' is returned for missing fields. We will patch output
# template for missing fields to meet string presentation type.
for numeric_field in self._NUMERIC_FIELDS:
if numeric_field not in template_dict:
# As of [1] format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
FORMAT_RE = r'''(?x)
(?<!%)
%
\({0}\) # mapping key
(?:[#0\-+ ]+)? # conversion flags (optional)
(?:\d+)? # minimum field width (optional)
(?:\.\d+)? # precision (optional)
[hlL]? # length modifier (optional)
[diouxXeEfFgGcrs%] # conversion type
'''
outtmpl = re.sub(
FORMAT_RE.format(numeric_field),
r'%({0})s'.format(numeric_field), outtmpl)
# expand_path translates '%%' into '%' and '$$' into '$'
# correspondingly that is not what we want since we need to keep
# '%%' intact for template dict substitution step. Working around
# with boundary-alike separator hack.
sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
# outtmpl should be expand_path'ed before template dict substitution
# because meta fields may contain env variables we don't want to
# be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
# title "Hello $PATH", we don't want `$PATH` to be expanded.
filename = expand_path(outtmpl).replace(sep, '') % template_dict
# Temporary fix for #4787
# 'Treat' all problem characters by passing filename through preferredencoding
# to workaround encoding issues with subprocess on python2 @ Windows
if sys.version_info < (3, 0) and sys.platform == 'win32':
filename = encodeFilename(filename, True).decode(preferredencoding())
return sanitize_path(filename)
except ValueError as err:
self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
return None
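# Illustrative note (assumption): an outtmpl such as '%(title)s-%(id)s.%(ext)s' is expanded
# against template_dict above, with missing fields rendered as 'NA' and autonumber /
# playlist_index widened to %(field)0Nd for backward compatibility.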
def _match_entry(self, info_dict, incomplete):
""" Returns None iff the file should be downloaded """
video_title = info_dict.get('title', info_dict.get('id', 'video'))
if 'title' in info_dict:
# This can happen when we're just evaluating the playlist
title = info_dict['title']
matchtitle = self.params.get('matchtitle', False)
if matchtitle:
if not re.search(matchtitle, title, re.IGNORECASE):
return '"' + title + '" title did not match pattern "' + matchtitle + '"'
rejecttitle = self.params.get('rejecttitle', False)
if rejecttitle:
if re.search(rejecttitle, title, re.IGNORECASE):
return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
date = info_dict.get('upload_date')
if date is not None:
dateRange = self.params.get('daterange', DateRange())
if date not in dateRange:
return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
view_count = info_dict.get('view_count')
if view_count is not None:
min_views = self.params.get('min_views')
if min_views is not None and view_count < min_views:
return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
max_views = self.params.get('max_views')
if max_views is not None and view_count > max_views:
return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
return 'Skipping "%s" because it is age restricted' % video_title
if self.in_download_archive(info_dict):
return '%s has already been recorded in archive' % video_title
if not incomplete:
match_filter = self.params.get('match_filter')
if match_filter is not None:
ret = match_filter(info_dict)
if ret is not None:
return ret
return None
@staticmethod
def add_extra_info(info_dict, extra_info):
'''Set the keys from extra_info in info dict if they are missing'''
for key, value in extra_info.items():
info_dict.setdefault(key, value)
def extract_info(self, url, download=True, ie_key=None, extra_info={},
process=True, force_generic_extractor=False):
'''
Returns a list with a dictionary for each video we find.
If 'download', also downloads the videos.
extra_info is a dict containing the extra values to add to each result
'''
if not ie_key and force_generic_extractor:
ie_key = 'Generic'
if ie_key:
ies = [self.get_info_extractor(ie_key)]
else:
ies = self._ies
for ie in ies:
if not ie.suitable(url):
continue
ie = self.get_info_extractor(ie.ie_key())
if not ie.working():
self.report_warning('The program functionality for this site has been marked as broken, '
'and will probably not work.')
return self.__extract_info(url, ie, download, extra_info, process)
else:
self.report_error('no suitable InfoExtractor for URL %s' % url)
def __handle_extraction_exceptions(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except GeoRestrictedError as e:
msg = e.msg
if e.countries:
msg += '\nThis video is available in %s.' % ', '.join(
map(ISO3166Utils.short2full, e.countries))
msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
self.report_error(msg)
except ExtractorError as e: # An error we somewhat expected
self.report_error(compat_str(e), e.format_traceback())
except MaxDownloadsReached:
raise
except Exception as e:
if self.params.get('ignoreerrors', False):
self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
else:
raise
return wrapper
@__handle_extraction_exceptions
def __extract_info(self, url, ie, download, extra_info, process):
ie_result = ie.extract(url)
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
return
if isinstance(ie_result, list):
# Backwards compatibility: old IE result format
ie_result = {
'_type': 'compat_list',
'entries': ie_result,
}
self.add_default_extra_info(ie_result, ie, url)
if process:
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
def add_default_extra_info(self, ie_result, ie, url):
self.add_extra_info(ie_result, {
'extractor': ie.IE_NAME,
'webpage_url': url,
'webpage_url_basename': url_basename(url),
'extractor_key': ie.ie_key(),
})
def process_ie_result(self, ie_result, download=True, extra_info={}):
"""
Take the result of the ie (it may be modified) and resolve all unresolved
references (URLs, playlist items).
It will also download the videos if 'download'.
Returns the resolved ie_result.
"""
result_type = ie_result.get('_type', 'video')
if result_type in ('url', 'url_transparent'):
ie_result['url'] = sanitize_url(ie_result['url'])
extract_flat = self.params.get('extract_flat', False)
if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
or extract_flat is True):
self.__forced_printings(
ie_result, self.prepare_filename(ie_result),
incomplete=True)
return ie_result
if result_type == 'video':
self.add_extra_info(ie_result, extra_info)
return self.process_video_result(ie_result, download=download)
elif result_type == 'url':
# We have to add extra_info to the results because it may be
# contained in a playlist
return self.extract_info(ie_result['url'],
download,
ie_key=ie_result.get('ie_key'),
extra_info=extra_info)
elif result_type == 'url_transparent':
# Use the information from the embedding page
info = self.extract_info(
ie_result['url'], ie_key=ie_result.get('ie_key'),
extra_info=extra_info, download=False, process=False)
# extract_info may return None when ignoreerrors is enabled and
# extraction failed with an error, don't crash and return early
# in this case
if not info:
return info
force_properties = dict(
(k, v) for k, v in ie_result.items() if v is not None)
for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
if f in force_properties:
del force_properties[f]
new_result = info.copy()
new_result.update(force_properties)
# Extracted info may not be a video result (i.e.
# info.get('_type', 'video') != video) but rather an url or
# url_transparent. In such cases outer metadata (from ie_result)
# should be propagated to inner one (info). For this to happen
# _type of info should be overridden with url_transparent. This
# fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
if new_result.get('_type') == 'url':
new_result['_type'] = 'url_transparent'
return self.process_ie_result(
new_result, download=download, extra_info=extra_info)
elif result_type in ('playlist', 'multi_video'):
# We process each entry in the playlist
playlist = ie_result.get('title') or ie_result.get('id')
self.to_screen('[download] Downloading playlist: %s' % playlist)
playlist_results = []
playliststart = self.params.get('playliststart', 1) - 1
playlistend = self.params.get('playlistend')
# For backwards compatibility, interpret -1 as whole list
if playlistend == -1:
playlistend = None
playlistitems_str = self.params.get('playlist_items')
playlistitems = None
if playlistitems_str is not None:
def iter_playlistitems(format):
for string_segment in format.split(','):
if '-' in string_segment:
start, end = string_segment.split('-')
for item in range(int(start), int(end) + 1):
yield int(item)
else:
yield int(string_segment)
playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
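# Illustrative example (assumption): a playlist_items string such as '1-3,7' yields the
# ordered set [1, 2, 3, 7].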
ie_entries = ie_result['entries']
def make_playlistitems_entries(list_ie_entries):
num_entries = len(list_ie_entries)
return [
list_ie_entries[i - 1] for i in playlistitems
if -num_entries <= i - 1 < num_entries]
def report_download(num_entries):
self.to_screen(
'[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, num_entries))
if isinstance(ie_entries, list):
n_all_entries = len(ie_entries)
if playlistitems:
entries = make_playlistitems_entries(ie_entries)
else:
entries = ie_entries[playliststart:playlistend]
n_entries = len(entries)
self.to_screen(
'[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
(ie_result['extractor'], playlist, n_all_entries, n_entries))
elif isinstance(ie_entries, PagedList):
if playlistitems:
entries = []
for item in playlistitems:
entries.extend(ie_entries.getslice(
item - 1, item
))
else:
entries = ie_entries.getslice(
playliststart, playlistend)
n_entries = len(entries)
report_download(n_entries)
else: # iterable
if playlistitems:
entries = make_playlistitems_entries(list(itertools.islice(
ie_entries, 0, max(playlistitems))))
else:
entries = list(itertools.islice(
ie_entries, playliststart, playlistend))
n_entries = len(entries)
report_download(n_entries)
if self.params.get('playlistreverse', False):
entries = entries[::-1]
if self.params.get('playlistrandom', False):
random.shuffle(entries)
x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
for i, entry in enumerate(entries, 1):
self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
# This __x_forwarded_for_ip thing is a bit ugly but requires
# minimal changes
if x_forwarded_for:
entry['__x_forwarded_for_ip'] = x_forwarded_for
extra = {
'n_entries': n_entries,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_uploader': ie_result.get('uploader'),
'playlist_uploader_id': ie_result.get('uploader_id'),
'playlist_index': playlistitems[i - 1] if playlistitems else i + playliststart,
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
reason = self._match_entry(entry, incomplete=True)
if reason is not None:
self.to_screen('[download] ' + reason)
continue
entry_result = self.__process_iterable_entry(entry, download, extra)
# TODO: skip failed (empty) entries?
playlist_results.append(entry_result)
ie_result['entries'] = playlist_results
self.to_screen('[download] Finished downloading playlist: %s' % playlist)
return ie_result
elif result_type == 'compat_list':
self.report_warning(
'Extractor %s returned a compat_list result. '
'It needs to be updated.' % ie_result.get('extractor'))
def _fixup(r):
self.add_extra_info(
r,
{
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
)
return r
ie_result['entries'] = [
self.process_ie_result(_fixup(r), download, extra_info)
for r in ie_result['entries']
]
return ie_result
else:
raise Exception('Invalid result type: %s' % result_type)
@__handle_extraction_exceptions
def __process_iterable_entry(self, entry, download, extra_info):
return self.process_ie_result(
entry, download=download, extra_info=extra_info)
def _build_format_filter(self, filter_spec):
" Returns a function to filter the formats according to the filter_spec "
OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
$
''' % '|'.join(map(re.escape, OPERATORS.keys())))
m = operator_rex.search(filter_spec)
if m:
try:
comparison_value = int(m.group('value'))
except ValueError:
comparison_value = parse_filesize(m.group('value'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('value') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid value %r in format specification %r' % (
m.group('value'), filter_spec))
op = OPERATORS[m.group('op')]
if not m:
STR_OPERATORS = {
'=': operator.eq,
'^=': lambda attr, value: attr.startswith(value),
'$=': lambda attr, value: attr.endswith(value),
'*=': lambda attr, value: value in attr,
}
str_operator_rex = re.compile(r'''(?x)
\s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
\s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
\s*(?P<value>[a-zA-Z0-9._-]+)
\s*$
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
m = str_operator_rex.search(filter_spec)
if m:
comparison_value = m.group('value')
str_op = STR_OPERATORS[m.group('op')]
if m.group('negation'):
op = lambda attr, value: not str_op(attr, value)
else:
op = str_op
if not m:
raise ValueError('Invalid filter specification %r' % filter_spec)
def _filter(f):
actual_value = f.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
return _filter
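# Illustrative examples (assumption): filter specs accepted above include 'height<=720'
# (numeric comparison), 'filesize>10M' (parsed via parse_filesize) and 'ext=mp4' (string
# comparison); the surrounding '[' ']' brackets are stripped by the format-selector parser
# before the spec reaches _build_format_filter.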
def _default_format_spec(self, info_dict, download=True):
def can_merge():
merger = FFmpegMergerPP(self)
return merger.available and merger.can_merge()
def prefer_best():
if self.params.get('simulate', False):
return False
if not download:
return False
if self.params.get('outtmpl', DEFAULT_OUTTMPL) == '-':
return True
if info_dict.get('is_live'):
return True
if not can_merge():
return True
return False
req_format_list = ['bestvideo+bestaudio', 'best']
if prefer_best():
req_format_list.reverse()
return '/'.join(req_format_list)
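# Illustrative note (assumption): this normally yields 'bestvideo+bestaudio/best'; the order
# is reversed to 'best/bestvideo+bestaudio' when output goes to stdout ('-'), the video is
# live, or ffmpeg/avconv is unavailable for merging.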
def build_format_selector(self, format_spec):
def syntax_error(note, start):
message = (
'Invalid format specification: '
'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
return SyntaxError(message)
PICKFIRST = 'PICKFIRST'
MERGE = 'MERGE'
SINGLE = 'SINGLE'
GROUP = 'GROUP'
FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
def _parse_filter(tokens):
filter_parts = []
for type, string, start, _, _ in tokens:
if type == tokenize.OP and string == ']':
return ''.join(filter_parts)
else:
filter_parts.append(string)
def _remove_unused_ops(tokens):
# Remove operators that we don't use and join them with the surrounding strings
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ('/', '+', ',', '(', ')')
last_string, last_start, last_end, last_line = None, None, None, None
for type, string, start, end, line in tokens:
if type == tokenize.OP and string == '[':
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
# everything inside brackets will be handled by _parse_filter
for type, string, start, end, line in tokens:
yield type, string, start, end, line
if type == tokenize.OP and string == ']':
break
elif type == tokenize.OP and string in ALLOWED_OPS:
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
if not last_string:
last_string = string
last_start = start
last_end = end
else:
last_string += string
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
selectors = []
current_selector = None
for type, string, start, _, _ in tokens:
# ENCODING is only defined in python 3.x
if type == getattr(tokenize, 'ENCODING', None):
continue
elif type in [tokenize.NAME, tokenize.NUMBER]:
current_selector = FormatSelector(SINGLE, string, [])
elif type == tokenize.OP:
if string == ')':
if not inside_group:
# ')' will be handled by the parentheses group
tokens.restore_last_token()
break
elif inside_merge and string in ['/', ',']:
tokens.restore_last_token()
break
elif inside_choice and string == ',':
tokens.restore_last_token()
break
elif string == ',':
if not current_selector:
raise syntax_error('"," must follow a format selector', start)
selectors.append(current_selector)
current_selector = None
elif string == '/':
if not current_selector:
raise syntax_error('"/" must follow a format selector', start)
first_choice = current_selector
second_choice = _parse_format_selection(tokens, inside_choice=True)
current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
elif string == '[':
if not current_selector:
current_selector = FormatSelector(SINGLE, 'best', [])
format_filter = _parse_filter(tokens)
current_selector.filters.append(format_filter)
elif string == '(':
if current_selector:
raise syntax_error('Unexpected "("', start)
group = _parse_format_selection(tokens, inside_group=True)
current_selector = FormatSelector(GROUP, group, [])
elif string == '+':
video_selector = current_selector
audio_selector = _parse_format_selection(tokens, inside_merge=True)
if not video_selector or not audio_selector:
raise syntax_error('"+" must be between two format selectors', start)
current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
else:
raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
elif type == tokenize.ENDMARKER:
break
if current_selector:
selectors.append(current_selector)
return selectors
def _build_selector_function(selector):
if isinstance(selector, list):
fs = [_build_selector_function(s) for s in selector]
def selector_function(ctx):
for f in fs:
for format in f(ctx):
yield format
return selector_function
elif selector.type == GROUP:
selector_function = _build_selector_function(selector.selector)
elif selector.type == PICKFIRST:
fs = [_build_selector_function(s) for s in selector.selector]
def selector_function(ctx):
for f in fs:
picked_formats = list(f(ctx))
if picked_formats:
return picked_formats
return []
elif selector.type == SINGLE:
format_spec = selector.selector
def selector_function(ctx):
formats = list(ctx['formats'])
if not formats:
return
if format_spec == 'all':
for f in formats:
yield f
elif format_spec in ['best', 'worst', None]:
format_idx = 0 if format_spec == 'worst' else -1
audiovideo_formats = [
f for f in formats
if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
if audiovideo_formats:
yield audiovideo_formats[format_idx]
# for extractors with incomplete formats (audio only (soundcloud)
# or video only (imgur)) we will fallback to best/worst
# {video,audio}-only format
elif ctx['incomplete_formats']:
yield formats[format_idx]
elif format_spec == 'bestaudio':
audio_formats = [
f for f in formats
if f.get('vcodec') == 'none']
if audio_formats:
yield audio_formats[-1]
elif format_spec == 'worstaudio':
audio_formats = [
f for f in formats
if f.get('vcodec') == 'none']
if audio_formats:
yield audio_formats[0]
elif format_spec == 'bestvideo':
video_formats = [
f for f in formats
if f.get('acodec') == 'none']
if video_formats:
yield video_formats[-1]
elif format_spec == 'worstvideo':
video_formats = [
f for f in formats
if f.get('acodec') == 'none']
if video_formats:
yield video_formats[0]
else:
extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
if format_spec in extensions:
filter_f = lambda f: f['ext'] == format_spec
else:
filter_f = lambda f: f['format_id'] == format_spec
matches = list(filter(filter_f, formats))
if matches:
yield matches[-1]
elif selector.type == MERGE:
def _merge(formats_info):
format_1, format_2 = [f['format_id'] for f in formats_info]
# The first format must contain the video and the
# second the audio
if formats_info[0].get('vcodec') == 'none':
self.report_error('The first format must '
'contain the video, try using '
'"-f %s+%s"' % (format_2, format_1))
return
# Formats must be opposite (video+audio)
if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
self.report_error(
'Both formats %s and %s are video-only, you must specify "-f video+audio"'
% (format_1, format_2))
return
output_ext = (
formats_info[0]['ext']
if self.params.get('merge_output_format') is None
else self.params['merge_output_format'])
return {
'requested_formats': formats_info,
'format': '%s+%s' % (formats_info[0].get('format'),
formats_info[1].get('format')),
'format_id': '%s+%s' % (formats_info[0].get('format_id'),
formats_info[1].get('format_id')),
'width': formats_info[0].get('width'),
'height': formats_info[0].get('height'),
'resolution': formats_info[0].get('resolution'),
'fps': formats_info[0].get('fps'),
'vcodec': formats_info[0].get('vcodec'),
'vbr': formats_info[0].get('vbr'),
'stretched_ratio': formats_info[0].get('stretched_ratio'),
'acodec': formats_info[1].get('acodec'),
'abr': formats_info[1].get('abr'),
'ext': output_ext,
}
video_selector, audio_selector = map(_build_selector_function, selector.selector)
def selector_function(ctx):
for pair in itertools.product(
video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
yield _merge(pair)
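# Any '[...]' filters attached to this selector are applied to a deep copy of
# the context, so sibling selectors still see the original 'formats' list.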
filters = [self._build_format_filter(f) for f in selector.filters]
def final_selector(ctx):
ctx_copy = copy.deepcopy(ctx)
for _filter in filters:
ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
return selector_function(ctx_copy)
return final_selector
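# Tokenize the format spec with Python's tokenize module; the resulting token
# stream is then parsed and compiled into a selector function by the helpers
# defined above.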
stream = io.BytesIO(format_spec.encode('utf-8'))
try:
tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
except tokenize.TokenError:
raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
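# TokenIterator wraps the token list with one-token pushback
# (restore_last_token), which the recursive parser uses as lookahead when it
# encounters a token belonging to the enclosing selector.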
class TokenIterator(object):
def __init__(self, tokens):
self.tokens = tokens
self.counter = 0
def __iter__(self):
return self
def __next__(self):
if self.counter >= len(self.tokens):
raise StopIteration()
value = self.tokens[self.counter]
self.counter += 1
return value
next = __next__
def restore_last_token(self):
self.counter -= 1
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
return _build_selector_function(parsed_selector)
def _calc_headers(self, info_dict):
res = std_headers.copy()
add_headers = info_dict.get('http_headers')
if add_headers:
res.update(add_headers)
cookies = self._calc_cookies(info_dict)
if cookies:
res['Cookie'] = cookies
if 'X-Forwarded-For' not in res:
x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
if x_forwarded_for_ip:
res['X-Forwarded-For'] = x_forwarded_for_ip
return res
def _calc_cookies(self, info_dict):
pr = sanitized_Request(info_dict['url'])
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
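# process_video_result validates and normalizes a single video info_dict
# (ids, thumbnails, subtitles, formats), runs format selection and, when
# download is True, hands each selected format to process_info().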
def process_video_result(self, info_dict, download=True):
assert info_dict.get('_type', 'video') == 'video'
if 'id' not in info_dict:
raise ExtractorError('Missing "id" field in extractor result')
if 'title' not in info_dict:
raise ExtractorError('Missing "title" field in extractor result')
def report_force_conversion(field, field_not, conversion):
self.report_warning(
'"%s" field is not %s - forcing %s conversion, there is an error in extractor'
% (field, field_not, conversion))
def sanitize_string_field(info, string_field):
field = info.get(string_field)
if field is None or isinstance(field, compat_str):
return
report_force_conversion(string_field, 'a string', 'string')
info[string_field] = compat_str(field)
def sanitize_numeric_fields(info):
for numeric_field in self._NUMERIC_FIELDS:
field = info.get(numeric_field)
if field is None or isinstance(field, compat_numeric_types):
continue
report_force_conversion(numeric_field, 'numeric', 'int')
info[numeric_field] = int_or_none(field)
sanitize_string_field(info_dict, 'id')
sanitize_numeric_fields(info_dict)
if 'playlist' not in info_dict:
# It isn't part of a playlist
info_dict['playlist'] = None
info_dict['playlist_index'] = None
thumbnails = info_dict.get('thumbnails')
if thumbnails is None:
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
if thumbnails:
thumbnails.sort(key=lambda t: (
t.get('preference') if t.get('preference') is not None else -1,
t.get('width') if t.get('width') is not None else -1,
t.get('height') if t.get('height') is not None else -1,
t.get('id') if t.get('id') is not None else '', t.get('url')))
for i, t in enumerate(thumbnails):
t['url'] = sanitize_url(t['url'])
if t.get('width') and t.get('height'):
t['resolution'] = '%dx%d' % (t['width'], t['height'])
if t.get('id') is None:
t['id'] = '%d' % i
if self.params.get('list_thumbnails'):
self.list_thumbnails(info_dict)
return
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnail'] = sanitize_url(thumbnail)
elif thumbnails:
info_dict['thumbnail'] = thumbnails[-1]['url']
if 'display_id' not in info_dict and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
try:
upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
except (ValueError, OverflowError, OSError):
pass
# Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
for cc_kind in ('subtitles', 'automatic_captions'):
cc = info_dict.get(cc_kind)
if cc:
for _, subtitle in cc.items():
for subtitle_format in subtitle:
if subtitle_format.get('url'):
subtitle_format['url'] = sanitize_url(subtitle_format['url'])
if subtitle_format.get('ext') is None:
subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
automatic_captions = info_dict.get('automatic_captions')
subtitles = info_dict.get('subtitles')
if self.params.get('listsubtitles', False):
if 'automatic_captions' in info_dict:
self.list_subtitles(
info_dict['id'], automatic_captions, 'automatic captions')
self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
return
info_dict['requested_subtitles'] = self.process_subtitles(
info_dict['id'], subtitles, automatic_captions)
# We now pick which formats have to be downloaded
if info_dict.get('formats') is None:
# There's only one format available
formats = [info_dict]
else:
formats = info_dict['formats']
if not formats:
raise ExtractorError('No video formats found!')
def is_wellformed(f):
url = f.get('url')
if not url:
self.report_warning(
'"url" field is missing or empty - skipping format, '
'there is an error in extractor')
return False
if isinstance(url, bytes):
sanitize_string_field(f, 'url')
return True
# Filter out malformed formats for better extraction robustness
formats = list(filter(is_wellformed, formats))
formats_dict = {}
# We check that all the formats have the format and format_id fields
for i, format in enumerate(formats):
sanitize_string_field(format, 'format_id')
sanitize_numeric_fields(format)
format['url'] = sanitize_url(format['url'])
if not format.get('format_id'):
format['format_id'] = compat_str(i)
else:
# Sanitize format_id from characters used in format selector expression
format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
format_id = format['format_id']
if format_id not in formats_dict:
formats_dict[format_id] = []
formats_dict[format_id].append(format)
# Make sure all formats have unique format_id
for format_id, ambiguous_formats in formats_dict.items():
if len(ambiguous_formats) > 1:
for i, format in enumerate(ambiguous_formats):
format['format_id'] = '%s-%d' % (format_id, i)
for i, format in enumerate(formats):
if format.get('format') is None:
format['format'] = '{id} - {res}{note}'.format(
id=format['format_id'],
res=self.format_resolution(format),
note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
)
# Automatically determine file extension if missing
if format.get('ext') is None:
format['ext'] = determine_ext(format['url']).lower()
# Automatically determine protocol if missing (useful for format
# selection purposes)
if format.get('protocol') is None:
format['protocol'] = determine_protocol(format)
# Add HTTP headers, so that external programs can use them from the
# json output
full_format_info = info_dict.copy()
full_format_info.update(format)
format['http_headers'] = self._calc_headers(full_format_info)
# Remove private housekeeping stuff
if '__x_forwarded_for_ip' in info_dict:
del info_dict['__x_forwarded_for_ip']
# TODO Central sorting goes here
if formats[0] is not info_dict:
# only set the 'formats' field if the original info_dict lists them;
# otherwise we end up with a circular reference: the first (and only)
# element in the 'formats' field of info_dict would be info_dict itself,
# which can't be exported to json
info_dict['formats'] = formats
if self.params.get('listformats'):
self.list_formats(info_dict)
return
req_format = self.params.get('format')
if req_format is None:
req_format = self._default_format_spec(info_dict, download=download)
if self.params.get('verbose'):
self._write_string('[debug] Default format spec: %s\n' % req_format)
format_selector = self.build_format_selector(req_format)
# While in format selection we may need to have an access to the original
# format set in order to calculate some metrics or do some processing.
# For now we need to be able to guess whether original formats provided
# by extractor are incomplete or not (i.e. whether extractor provides only
# video-only or audio-only formats) for proper formats selection for
# extractors with such incomplete formats (see
# https://github.com/ytdl-org/youtube-dl/pull/5556).
# Since formats may be filtered during format selection and may not match
# the original formats the results may be incorrect. Thus original formats
# or pre-calculated metrics should be passed to format selection routines
# as well.
# We will pass a context object containing all necessary additional data
# instead of just formats.
# This fixes incorrect format selection issue (see
# https://github.com/ytdl-org/youtube-dl/issues/10083).
incomplete_formats = (
# All formats are video-only or
all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
# all formats are audio-only
or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
ctx = {
'formats': formats,
'incomplete_formats': incomplete_formats,
}
formats_to_download = list(format_selector(ctx))
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
if download:
if len(formats_to_download) > 1:
self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
for format in formats_to_download:
new_info = dict(info_dict)
new_info.update(format)
self.process_info(new_info)
# We update the info dict with the best quality format (backwards compatibility)
info_dict.update(formats_to_download[-1])
return info_dict
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
"""Select the requested subtitles and their format"""
available_subs = {}
if normal_subtitles and self.params.get('writesubtitles'):
available_subs.update(normal_subtitles)
if automatic_captions and self.params.get('writeautomaticsub'):
for lang, cap_info in automatic_captions.items():
if lang not in available_subs:
available_subs[lang] = cap_info
if (not self.params.get('writesubtitles') and not
self.params.get('writeautomaticsub') or not
available_subs):
return None
if self.params.get('allsubtitles', False):
requested_langs = available_subs.keys()
else:
if self.params.get('subtitleslangs', False):
requested_langs = self.params.get('subtitleslangs')
elif 'en' in available_subs:
requested_langs = ['en']
else:
requested_langs = [list(available_subs.keys())[0]]
formats_query = self.params.get('subtitlesformat', 'best')
formats_preference = formats_query.split('/') if formats_query else []
subs = {}
for lang in requested_langs:
formats = available_subs.get(lang)
if formats is None:
self.report_warning('%s subtitles not available for %s' % (lang, video_id))
continue
for ext in formats_preference:
if ext == 'best':
f = formats[-1]
break
matches = list(filter(lambda f: f['ext'] == ext, formats))
if matches:
f = matches[-1]
break
else:
f = formats[-1]
self.report_warning(
'No subtitle format found matching "%s" for language %s, '
'using %s' % (formats_query, lang, f['ext']))
subs[lang] = f
return subs
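# __forced_printings prints selected info_dict fields to stdout when the
# corresponding 'force*' params are set (forcetitle, forceurl, forcejson, ...),
# which is what the --get-* / --dump-json style options rely on.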
def __forced_printings(self, info_dict, filename, incomplete):
def print_mandatory(field):
if (self.params.get('force%s' % field, False)
and (not incomplete or info_dict.get(field) is not None)):
self.to_stdout(info_dict[field])
def print_optional(field):
if (self.params.get('force%s' % field, False)
and info_dict.get(field) is not None):
self.to_stdout(info_dict[field])
print_mandatory('title')
print_mandatory('id')
if self.params.get('forceurl', False) and not incomplete:
if info_dict.get('requested_formats') is not None:
for f in info_dict['requested_formats']:
self.to_stdout(f['url'] + f.get('play_path', ''))
else:
# For RTMP URLs, also include the playpath
self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
print_optional('thumbnail')
print_optional('description')
if self.params.get('forcefilename', False) and filename is not None:
self.to_stdout(filename)
if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
self.to_stdout(formatSeconds(info_dict['duration']))
print_mandatory('format')
if self.params.get('forcejson', False):
self.to_stdout(json.dumps(info_dict))
def process_info(self, info_dict):
"""Process a single resolved IE result."""
assert info_dict.get('_type', 'video') == 'video'
max_downloads = self.params.get('max_downloads')
if max_downloads is not None:
if self._num_downloads >= int(max_downloads):
raise MaxDownloadsReached()
# TODO: backward compatibility, to be removed
info_dict['fulltitle'] = info_dict['title']
if 'format' not in info_dict:
info_dict['format'] = info_dict['ext']
reason = self._match_entry(info_dict, incomplete=False)
if reason is not None:
self.to_screen('[download] ' + reason)
return
self._num_downloads += 1
info_dict['_filename'] = filename = self.prepare_filename(info_dict)
# Forced printings
self.__forced_printings(info_dict, filename, incomplete=False)
# Do nothing else if in simulate mode
if self.params.get('simulate', False):
return
if filename is None:
return
def ensure_dir_exists(path):
try:
dn = os.path.dirname(path)
if dn and not os.path.exists(dn):
os.makedirs(dn)
return True
except (OSError, IOError) as err:
self.report_error('unable to create directory ' + error_to_compat_str(err))
return False
if not ensure_dir_exists(sanitize_path(encodeFilename(filename))):
return
if self.params.get('writedescription', False):
descfn = replace_extension(filename, 'description', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
self.to_screen('[info] Video description is already present')
elif info_dict.get('description') is None:
self.report_warning('There\'s no description to write.')
else:
try:
self.to_screen('[info] Writing video description to: ' + descfn)
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(info_dict['description'])
except (OSError, IOError):
self.report_error('Cannot write description file ' + descfn)
return
if self.params.get('writeannotations', False):
annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
self.to_screen('[info] Video annotations are already present')
elif not info_dict.get('annotations'):
self.report_warning('There are no annotations to write.')
else:
try:
self.to_screen('[info] Writing video annotations to: ' + annofn)
with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
annofile.write(info_dict['annotations'])
except (KeyError, TypeError):
self.report_warning('There are no annotations to write.')
except (OSError, IOError):
self.report_error('Cannot write annotations file: ' + annofn)
return
subtitles_are_requested = any([self.params.get('writesubtitles', False),
self.params.get('writeautomaticsub')])
if subtitles_are_requested and info_dict.get('requested_subtitles'):
# subtitles download errors are already managed as troubles in relevant IE
# that way it will silently go on when used with unsupporting IE
subtitles = info_dict['requested_subtitles']
ie = self.get_info_extractor(info_dict['extractor_key'])
for sub_lang, sub_info in subtitles.items():
sub_format = sub_info['ext']
sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
else:
self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
if sub_info.get('data') is not None:
try:
# Use newline='' to prevent conversion of newline characters
# See https://github.com/ytdl-org/youtube-dl/issues/10268
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_info['data'])
except (OSError, IOError):
self.report_error('Cannot write subtitles file ' + sub_filename)
return
else:
try:
sub_data = ie._request_webpage(
sub_info['url'], info_dict['id'], note=False).read()
with io.open(encodeFilename(sub_filename), 'wb') as subfile:
subfile.write(sub_data)
except (ExtractorError, IOError, OSError, ValueError) as err:
self.report_warning('Unable to download subtitle for "%s": %s' %
(sub_lang, error_to_compat_str(err)))
continue
if self.params.get('writeinfojson', False):
infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
self.to_screen('[info] Video description metadata is already present')
else:
self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
try:
write_json_file(self.filter_requested_info(info_dict), infofn)
except (OSError, IOError):
self.report_error('Cannot write metadata to JSON file ' + infofn)
return
self._write_thumbnails(info_dict, filename)
if not self.params.get('skip_download', False):
try:
def dl(name, info):
fd = get_suitable_downloader(info, self.params)(self, self.params)
for ph in self._progress_hooks:
fd.add_progress_hook(ph)
if self.params.get('verbose'):
self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
return fd.download(name, info)
if info_dict.get('requested_formats') is not None:
downloaded = []
success = True
merger = FFmpegMergerPP(self)
if not merger.available:
postprocessors = []
self.report_warning('You have requested multiple '
'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged.')
else:
postprocessors = [merger]
def compatible_formats(formats):
video, audio = formats
# Check extension
video_ext, audio_ext = video.get('ext'), audio.get('ext')
if video_ext and audio_ext:
COMPATIBLE_EXTS = (
('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
('webm',),  # one-element tuple (the trailing comma matters), not a bare string
)
for exts in COMPATIBLE_EXTS:
if video_ext in exts and audio_ext in exts:
return True
# TODO: Check acodec/vcodec
return False
filename_real_ext = os.path.splitext(filename)[1][1:]
filename_wo_ext = (
os.path.splitext(filename)[0]
if filename_real_ext == info_dict['ext']
else filename)
requested_formats = info_dict['requested_formats']
if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
info_dict['ext'] = 'mkv'
self.report_warning(
'Requested formats are incompatible for merge and will be merged into mkv.')
# Ensure filename always has a correct extension for successful merge
filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
if os.path.exists(encodeFilename(filename)):
self.to_screen(
'[download] %s has already been downloaded and '
'merged' % filename)
else:
for f in requested_formats:
new_info = dict(info_dict)
new_info.update(f)
fname = prepend_extension(
self.prepare_filename(new_info),
'f%s' % f['format_id'], new_info['ext'])
if not ensure_dir_exists(fname):
return
downloaded.append(fname)
partial_success = dl(self.params["output_dir"] + fname, new_info)
success = success and partial_success
info_dict['__postprocessors'] = postprocessors
info_dict['__files_to_merge'] = downloaded
else:
# Just a single file
success = dl(self.params["output_dir"] + filename, info_dict)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
return
except (OSError, IOError) as err:
raise UnavailableVideoError(err)
except (ContentTooShortError, ) as err:
self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success and filename != '-':
# Fixup content
fixup_policy = self.params.get('fixup')
if fixup_policy is None:
fixup_policy = 'detect_or_warn'
INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'
stretched_ratio = info_dict.get('stretched_ratio')
if stretched_ratio is not None and stretched_ratio != 1:
if fixup_policy == 'warn':
self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
info_dict['id'], stretched_ratio))
elif fixup_policy == 'detect_or_warn':
stretched_pp = FFmpegFixupStretchedPP(self)
if stretched_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(stretched_pp)
else:
self.report_warning(
'%s: Non-uniform pixel ratio (%s). %s'
% (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
if (info_dict.get('requested_formats') is None
and info_dict.get('container') == 'm4a_dash'):
if fixup_policy == 'warn':
self.report_warning(
'%s: writing DASH m4a. '
'Only some players support this container.'
% info_dict['id'])
elif fixup_policy == 'detect_or_warn':
fixup_pp = FFmpegFixupM4aPP(self)
if fixup_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(fixup_pp)
else:
self.report_warning(
'%s: writing DASH m4a. '
'Only some players support this container. %s'
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
if (info_dict.get('protocol') == 'm3u8_native'
or info_dict.get('protocol') == 'm3u8'
and self.params.get('hls_prefer_native')):
if fixup_policy == 'warn':
self.report_warning('%s: malformed AAC bitstream detected.' % (
info_dict['id']))
elif fixup_policy == 'detect_or_warn':
fixup_pp = FFmpegFixupM3u8PP(self)
if fixup_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(fixup_pp)
else:
self.report_warning(
'%s: malformed AAC bitstream detected. %s'
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
try:
self.post_process(filename, info_dict)
except (PostProcessingError) as err:
self.report_error('postprocessing: %s' % str(err))
return
self.record_download_archive(info_dict)
def download(self, url_list):
"""Download a given list of URLs."""
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
if (len(url_list) > 1
and outtmpl != '-'
and '%' not in outtmpl
and self.params.get('max_downloads') != 1):
raise SameFileError(outtmpl)
for url in url_list:
try:
# It also downloads the videos
res = self.extract_info(
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
except UnavailableVideoError:
self.report_error('unable to download video')
except MaxDownloadsReached:
self.to_screen('[info] Maximum number of downloaded files reached.')
raise
else:
if self.params.get('dump_single_json', False):
self.to_stdout(json.dumps(res))
return self._download_retcode
def download_with_info_file(self, info_filename):
with contextlib.closing(fileinput.FileInput(
[info_filename], mode='r',
openhook=fileinput.hook_encoded('utf-8'))) as f:
# FileInput doesn't have a read method, we can't call json.load
info = self.filter_requested_info(json.loads('\n'.join(f)))
try:
self.process_ie_result(info, download=True)
except DownloadError:
webpage_url = info.get('webpage_url')
if webpage_url is not None:
self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
return self.download([webpage_url])
else:
raise
return self._download_retcode
@staticmethod
def filter_requested_info(info_dict):
return dict(
(k, v) for k, v in info_dict.items()
if k not in ['requested_formats', 'requested_subtitles'])
def post_process(self, filename, ie_info):
"""Run all the postprocessors on the given file."""
info = dict(ie_info)
info['filepath'] = filename
pps_chain = []
if ie_info.get('__postprocessors') is not None:
pps_chain.extend(ie_info['__postprocessors'])
pps_chain.extend(self._pps)
for pp in pps_chain:
files_to_delete = []
try:
files_to_delete, info = pp.run(info)
except PostProcessingError as e:
self.report_error(e.msg)
if files_to_delete and not self.params.get('keepvideo', False):
for old_filename in files_to_delete:
self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
try:
os.remove(encodeFilename(old_filename))
except (IOError, OSError):
self.report_warning('Unable to remove downloaded original file')
def _make_archive_id(self, info_dict):
video_id = info_dict.get('id')
if not video_id:
return
# Future-proof against any change in case
# and backwards compatibility with prior versions
extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
if extractor is None:
url = str_or_none(info_dict.get('url'))
if not url:
return
# Try to find matching extractor for the URL and take its ie_key
for ie in self._ies:
if ie.suitable(url):
extractor = ie.ie_key()
break
else:
return
return extractor.lower() + ' ' + video_id
def in_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return False
vid_id = self._make_archive_id(info_dict)
if not vid_id:
return False # Incomplete video information
try:
with locked_file(fn, 'r', encoding='utf-8') as archive_file:
for line in archive_file:
if line.strip() == vid_id:
return True
except IOError as ioe:
if ioe.errno != errno.ENOENT:
raise
return False
def record_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return
vid_id = self._make_archive_id(info_dict)
assert vid_id
with locked_file(fn, 'a', encoding='utf-8') as archive_file:
archive_file.write(vid_id + '\n')
@staticmethod
def format_resolution(format, default='unknown'):
if format.get('vcodec') == 'none':
return 'audio only'
if format.get('resolution') is not None:
return format['resolution']
if format.get('height') is not None:
if format.get('width') is not None:
res = '%sx%s' % (format['width'], format['height'])
else:
res = '%sp' % format['height']
elif format.get('width') is not None:
res = '%dx?' % format['width']
else:
res = default
return res
def _format_note(self, fdict):
res = ''
if fdict.get('ext') in ['f4f', 'f4m']:
res += '(unsupported) '
if fdict.get('language'):
if res:
res += ' '
res += '[%s] ' % fdict['language']
if fdict.get('format_note') is not None:
res += fdict['format_note'] + ' '
if fdict.get('tbr') is not None:
res += '%4dk ' % fdict['tbr']
if fdict.get('container') is not None:
if res:
res += ', '
res += '%s container' % fdict['container']
if (fdict.get('vcodec') is not None
and fdict.get('vcodec') != 'none'):
if res:
res += ', '
res += fdict['vcodec']
if fdict.get('vbr') is not None:
res += '@'
elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
if fdict.get('fps') is not None:
if res:
res += ', '
res += '%sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
if fdict['acodec'] == 'none':
res += 'video only'
else:
res += '%-5s' % fdict['acodec']
elif fdict.get('abr') is not None:
if res:
res += ', '
res += 'audio'
if fdict.get('abr') is not None:
res += '@%3dk' % fdict['abr']
if fdict.get('asr') is not None:
res += ' (%5dHz)' % fdict['asr']
if fdict.get('filesize') is not None:
if res:
res += ', '
res += format_bytes(fdict['filesize'])
elif fdict.get('filesize_approx') is not None:
if res:
res += ', '
res += '~' + format_bytes(fdict['filesize_approx'])
return res
def list_formats(self, info_dict):
formats = info_dict.get('formats', [info_dict])
table = [
[f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)]
for f in formats
if f.get('preference') is None or f['preference'] >= -1000]
if len(formats) > 1:
table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'
header_line = ['format code', 'extension', 'resolution', 'note']
self.to_screen(
'[info] Available formats for %s:\n%s' %
(info_dict['id'], render_table(header_line, table)))
def list_thumbnails(self, info_dict):
thumbnails = info_dict.get('thumbnails')
if not thumbnails:
self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
return
self.to_screen(
'[info] Thumbnails for %s:' % info_dict['id'])
self.to_screen(render_table(
['ID', 'width', 'height', 'URL'],
[[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
def list_subtitles(self, video_id, subtitles, name='subtitles'):
if not subtitles:
self.to_screen('%s has no %s' % (video_id, name))
return
self.to_screen(
'Available %s for %s:' % (name, video_id))
self.to_screen(render_table(
['Language', 'formats'],
[[lang, ', '.join(f['ext'] for f in reversed(formats))]
for lang, formats in subtitles.items()]))
def urlopen(self, req):
""" Start an HTTP download """
if isinstance(req, compat_basestring):
req = sanitized_Request(req)
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
if not self.params.get('verbose'):
return
if type('') is not compat_str:
# Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
self.report_warning(
'Your Python is broken! Update to a newer and supported version')
stdout_encoding = getattr(
sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
encoding_str = (
'[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
locale.getpreferredencoding(),
sys.getfilesystemencoding(),
stdout_encoding,
self.get_encoding()))
write_string(encoding_str, encoding=None)
self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
if _LAZY_LOADER:
self._write_string('[debug] Lazy loading extractors enabled' + '\n')
try:
sp = subprocess.Popen(
['git', 'rev-parse', '--short', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.path.dirname(os.path.abspath(__file__)))
out, err = sp.communicate()
out = out.decode().strip()
if re.match('[0-9a-f]+', out):
self._write_string('[debug] Git HEAD: ' + out + '\n')
except Exception:
try:
sys.exc_clear()
except Exception:
pass
def python_implementation():
impl_name = platform.python_implementation()
if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
return impl_name
self._write_string('[debug] Python version %s (%s) - %s\n' % (
platform.python_version(), python_implementation(),
platform_name()))
exe_versions = FFmpegPostProcessor.get_versions(self)
exe_versions['rtmpdump'] = rtmpdump_version()
exe_versions['phantomjs'] = PhantomJSwrapper._version()
exe_str = ', '.join(
'%s %s' % (exe, v)
for exe, v in sorted(exe_versions.items())
if v
)
if not exe_str:
exe_str = 'none'
self._write_string('[debug] exe versions: %s\n' % exe_str)
proxy_map = {}
for handler in self._opener.handlers:
if hasattr(handler, 'proxies'):
proxy_map.update(handler.proxies)
self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
if self.params.get('call_home', False):
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
self._write_string('[debug] Public IP address: %s\n' % ipaddr)
latest_version = self.urlopen(
'https://yt-dl.org/latest/version').read().decode('utf-8')
if version_tuple(latest_version) > version_tuple(__version__):
self.report_warning(
'You are using an outdated version (newest version: %s)! '
'See https://yt-dl.org/update if you need help updating.' %
latest_version)
def _setup_opener(self):
timeout_val = self.params.get('socket_timeout')
self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
opts_cookiefile = self.params.get('cookiefile')
opts_proxy = self.params.get('proxy')
if opts_cookiefile is None:
self.cookiejar = compat_cookiejar.CookieJar()
else:
opts_cookiefile = expand_path(opts_cookiefile)
self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
if os.access(opts_cookiefile, os.R_OK):
self.cookiejar.load(ignore_discard=True, ignore_expires=True)
cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
if opts_proxy is not None:
if opts_proxy == '':
proxies = {}
else:
proxies = {'http': opts_proxy, 'https': opts_proxy}
else:
proxies = compat_urllib_request.getproxies()
# Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
if 'http' in proxies and 'https' not in proxies:
proxies['https'] = proxies['http']
proxy_handler = PerRequestProxyHandler(proxies)
debuglevel = 1 if self.params.get('debug_printtraffic') else 0
https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
redirect_handler = YoutubeDLRedirectHandler()
data_handler = compat_urllib_request_DataHandler()
# When passing our own FileHandler instance, build_opener won't add the
# default FileHandler and allows us to disable the file protocol, which
# can be used for malicious purposes (see
# https://github.com/ytdl-org/youtube-dl/issues/8227)
file_handler = compat_urllib_request.FileHandler()
def file_open(*args, **kwargs):
raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
file_handler.file_open = file_open
opener = compat_urllib_request.build_opener(
proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
# Delete the default user-agent header, which would otherwise apply in
# cases where our custom HTTP handler doesn't come into play
# (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
opener.addheaders = []
self._opener = opener
def encode(self, s):
if isinstance(s, bytes):
return s # Already encoded
try:
return s.encode(self.get_encoding())
except UnicodeEncodeError as err:
err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
raise
def get_encoding(self):
encoding = self.params.get('encoding')
if encoding is None:
encoding = preferredencoding()
return encoding
def _write_thumbnails(self, info_dict, filename):
if self.params.get('writethumbnail', False):
thumbnails = info_dict.get('thumbnails')
if thumbnails:
thumbnails = [thumbnails[-1]]
elif self.params.get('write_all_thumbnails', False):
thumbnails = info_dict.get('thumbnails')
else:
return
if not thumbnails:
# No thumbnails present, so return immediately
return
for t in thumbnails:
thumb_ext = determine_ext(t['url'], 'jpg')
suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
self.to_screen('[%s] %s: Thumbnail %sis already present' %
(info_dict['extractor'], info_dict['id'], thumb_display_id))
else:
self.to_screen('[%s] %s: Downloading thumbnail %s...' %
(info_dict['extractor'], info_dict['id'], thumb_display_id))
try:
uf = self.urlopen(t['url'])
with open(encodeFilename(thumb_filename), 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
(info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_warning('Unable to download thumbnail "%s": %s' %
(t['url'], error_to_compat_str(err)))
| [] | [] | [] | [] | [] | python | 0 | 0 |
src/runtime/runtime-gdb_test.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"bytes"
"fmt"
"go/build"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
)
func checkGdbEnvironment(t *testing.T) {
testenv.MustHaveGoBuild(t)
if runtime.GOOS == "darwin" {
t.Skip("gdb does not work on darwin")
}
if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64" {
t.Skip("skipping gdb tests on linux/ppc64; see golang.org/issue/17366")
}
if final := os.Getenv("GOROOT_FINAL"); final != "" && runtime.GOROOT() != final {
t.Skip("gdb test can fail with GOROOT_FINAL pending")
}
}
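// checkGdbVersion skips the test unless a sufficiently recent gdb (7.7 or
// newer) is installed.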
func checkGdbVersion(t *testing.T) {
// Issue 11214 reports various failures with older versions of gdb.
out, err := exec.Command("gdb", "--version").CombinedOutput()
if err != nil {
t.Skipf("skipping: error executing gdb: %v", err)
}
re := regexp.MustCompile(`([0-9]+)\.([0-9]+)`)
matches := re.FindSubmatch(out)
if len(matches) < 3 {
t.Skipf("skipping: can't determine gdb version from\n%s\n", out)
}
major, err1 := strconv.Atoi(string(matches[1]))
minor, err2 := strconv.Atoi(string(matches[2]))
if err1 != nil || err2 != nil {
t.Skipf("skipping: can't determine gdb version: %v, %v", err1, err2)
}
if major < 7 || (major == 7 && minor < 7) {
t.Skipf("skipping: gdb version %d.%d too old", major, minor)
}
t.Logf("gdb version %d.%d", major, minor)
}
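// checkGdbPython skips the test unless the installed gdb can run Python
// scripts, which the Go runtime gdb support script requires.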
func checkGdbPython(t *testing.T) {
if runtime.GOOS == "solaris" && testenv.Builder() != "solaris-amd64-smartosbuildlet" {
t.Skip("skipping gdb python tests on solaris; see golang.org/issue/20821")
}
cmd := exec.Command("gdb", "-nx", "-q", "--batch", "-iex", "python import sys; print('go gdb python support')")
out, err := cmd.CombinedOutput()
if err != nil {
t.Skipf("skipping due to issue running gdb: %v", err)
}
if string(out) != "go gdb python support\n" {
t.Skipf("skipping due to lack of python gdb support: %s", out)
}
}
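// Note that helloSource omits the package clause on purpose: testGdbPython
// writes "package main" (plus `import "C"` for the cgo variant) before
// appending this source to the generated main.go.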
const helloSource = `
import "fmt"
import "runtime"
var gslice []string
func main() {
mapvar := make(map[string]string,5)
mapvar["abc"] = "def"
mapvar["ghi"] = "jkl"
strvar := "abc"
ptrvar := &strvar
slicevar := make([]string, 0, 16)
slicevar = append(slicevar, mapvar["abc"])
fmt.Println("hi") // line 13
runtime.KeepAlive(ptrvar)
gslice = slicevar
runtime.KeepAlive(mapvar)
}
`
func TestGdbPython(t *testing.T) {
testGdbPython(t, false)
}
func TestGdbPythonCgo(t *testing.T) {
if runtime.GOARCH == "mips" || runtime.GOARCH == "mipsle" || runtime.GOARCH == "mips64" {
testenv.SkipFlaky(t, 18784)
}
testGdbPython(t, true)
}
func testGdbPython(t *testing.T, cgo bool) {
if cgo && !build.Default.CgoEnabled {
t.Skip("skipping because cgo is not enabled")
}
t.Parallel()
checkGdbEnvironment(t)
checkGdbVersion(t)
checkGdbPython(t)
dir, err := ioutil.TempDir("", "go-build")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
}
defer os.RemoveAll(dir)
var buf bytes.Buffer
buf.WriteString("package main\n")
if cgo {
buf.WriteString(`import "C"` + "\n")
}
buf.WriteString(helloSource)
src := filepath.Join(dir, "main.go")
err = ioutil.WriteFile(src, buf.Bytes(), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe")
cmd.Dir = dir
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
args := []string{"-nx", "-q", "--batch", "-iex",
fmt.Sprintf("add-auto-load-safe-path %s/src/runtime", runtime.GOROOT()),
"-ex", "set startup-with-shell off",
"-ex", "info auto-load python-scripts",
"-ex", "set python print-stack full",
"-ex", "br fmt.Println",
"-ex", "run",
"-ex", "echo BEGIN info goroutines\n",
"-ex", "info goroutines",
"-ex", "echo END\n",
"-ex", "up", // up from fmt.Println to main
"-ex", "echo BEGIN print mapvar\n",
"-ex", "print mapvar",
"-ex", "echo END\n",
"-ex", "echo BEGIN print strvar\n",
"-ex", "print strvar",
"-ex", "echo END\n",
"-ex", "echo BEGIN info locals\n",
"-ex", "info locals",
"-ex", "echo END\n",
"-ex", "down", // back to fmt.Println (goroutine 2 below only works at bottom of stack. TODO: fix that)
"-ex", "echo BEGIN goroutine 1 bt\n",
"-ex", "goroutine 1 bt",
"-ex", "echo END\n",
"-ex", "echo BEGIN goroutine 2 bt\n",
"-ex", "goroutine 2 bt",
"-ex", "echo END\n",
filepath.Join(dir, "a.exe"),
}
got, _ := exec.Command("gdb", args...).CombinedOutput()
firstLine := bytes.SplitN(got, []byte("\n"), 2)[0]
if string(firstLine) != "Loading Go Runtime support." {
// This can happen when using all.bash with
// GOROOT_FINAL set, because the tests are run before
// the final installation of the files.
cmd := exec.Command(testenv.GoToolPath(t), "env", "GOROOT")
cmd.Env = []string{}
out, err := cmd.CombinedOutput()
if err != nil && bytes.Contains(out, []byte("cannot find GOROOT")) {
t.Skipf("skipping because GOROOT=%s does not exist", runtime.GOROOT())
}
_, file, _, _ := runtime.Caller(1)
t.Logf("package testing source file: %s", file)
t.Fatalf("failed to load Go runtime support: %s\n%s", firstLine, got)
}
// Extract named BEGIN...END blocks from output
partRe := regexp.MustCompile(`(?ms)^BEGIN ([^\n]*)\n(.*?)\nEND`)
blocks := map[string]string{}
for _, subs := range partRe.FindAllSubmatch(got, -1) {
blocks[string(subs[1])] = string(subs[2])
}
infoGoroutinesRe := regexp.MustCompile(`\*\s+\d+\s+running\s+`)
if bl := blocks["info goroutines"]; !infoGoroutinesRe.MatchString(bl) {
t.Fatalf("info goroutines failed: %s", bl)
}
printMapvarRe := regexp.MustCompile(`\Q = map[string]string = {["abc"] = "def", ["ghi"] = "jkl"}\E$`)
if bl := blocks["print mapvar"]; !printMapvarRe.MatchString(bl) {
t.Fatalf("print mapvar failed: %s", bl)
}
strVarRe := regexp.MustCompile(`\Q = "abc"\E$`)
if bl := blocks["print strvar"]; !strVarRe.MatchString(bl) {
t.Fatalf("print strvar failed: %s", bl)
}
// Issue 16338: ssa decompose phase can split a structure into
// a collection of scalar vars holding the fields. In such cases
// the DWARF variable location expression should be of the
// form "var.field" and not just "field".
infoLocalsRe := regexp.MustCompile(`^slicevar.len = `)
if bl := blocks["info locals"]; !infoLocalsRe.MatchString(bl) {
t.Fatalf("info locals failed: %s", bl)
}
btGoroutine1Re := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?fmt\.Println.+at`)
if bl := blocks["goroutine 1 bt"]; !btGoroutine1Re.MatchString(bl) {
t.Fatalf("goroutine 1 bt failed: %s", bl)
}
btGoroutine2Re := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?runtime.+at`)
if bl := blocks["goroutine 2 bt"]; !btGoroutine2Re.MatchString(bl) {
t.Fatalf("goroutine 2 bt failed: %s", bl)
}
}
const backtraceSource = `
package main
//go:noinline
func aaa() bool { return bbb() }
//go:noinline
func bbb() bool { return ccc() }
//go:noinline
func ccc() bool { return ddd() }
//go:noinline
func ddd() bool { return f() }
//go:noinline
func eee() bool { return true }
var f = eee
func main() {
_ = aaa()
}
`
// TestGdbBacktrace tests that gdb can unwind the stack correctly
// using only the DWARF debug info.
func TestGdbBacktrace(t *testing.T) {
if runtime.GOOS == "netbsd" {
testenv.SkipFlaky(t, 15603)
}
t.Parallel()
checkGdbEnvironment(t)
checkGdbVersion(t)
dir, err := ioutil.TempDir("", "go-build")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
}
defer os.RemoveAll(dir)
// Build the source code.
src := filepath.Join(dir, "main.go")
err = ioutil.WriteFile(src, []byte(backtraceSource), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe")
cmd.Dir = dir
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
// Execute gdb commands.
args := []string{"-nx", "-batch",
"-ex", "set startup-with-shell off",
"-ex", "break main.eee",
"-ex", "run",
"-ex", "backtrace",
"-ex", "continue",
filepath.Join(dir, "a.exe"),
}
got, _ := exec.Command("gdb", args...).CombinedOutput()
// Check that the backtrace matches the source code.
bt := []string{
"eee",
"ddd",
"ccc",
"bbb",
"aaa",
"main",
}
for i, name := range bt {
s := fmt.Sprintf("#%v.*main\\.%v", i, name)
re := regexp.MustCompile(s)
if found := re.Find(got) != nil; !found {
t.Errorf("could not find '%v' in backtrace", s)
t.Fatalf("gdb output:\n%v", string(got))
}
}
}
const autotmpTypeSource = `
package main
type astruct struct {
a, b int
}
func main() {
var iface interface{} = map[string]astruct{}
var iface2 interface{} = []astruct{}
println(iface, iface2)
}
`
// TestGdbAutotmpTypes ensures that types of autotmp variables appear in .debug_info
// See bug #17830.
func TestGdbAutotmpTypes(t *testing.T) {
t.Parallel()
checkGdbEnvironment(t)
checkGdbVersion(t)
dir, err := ioutil.TempDir("", "go-build")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
}
defer os.RemoveAll(dir)
// Build the source code.
src := filepath.Join(dir, "main.go")
err = ioutil.WriteFile(src, []byte(autotmpTypeSource), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags=-N -l", "-o", "a.exe")
cmd.Dir = dir
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
// Execute gdb commands.
args := []string{"-nx", "-batch",
"-ex", "set startup-with-shell off",
"-ex", "break main.main",
"-ex", "run",
"-ex", "step",
"-ex", "info types astruct",
filepath.Join(dir, "a.exe"),
}
got, _ := exec.Command("gdb", args...).CombinedOutput()
sgot := string(got)
// Check that the expected autotmp-related types show up in the gdb output.
types := []string{
"struct []main.astruct;",
"struct bucket<string,main.astruct>;",
"struct hash<string,main.astruct>;",
"struct main.astruct;",
"typedef struct hash<string,main.astruct> * map[string]main.astruct;",
}
for _, name := range types {
if !strings.Contains(sgot, name) {
t.Errorf("could not find %s in 'info types astruct' output", name)
t.Fatalf("gdb output:\n%v", sgot)
}
}
}
const constsSource = `
package main
const aConstant int = 42
const largeConstant uint64 = ^uint64(0)
const minusOne int64 = -1
func main() {
println("hello world")
}
`
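// TestGdbConst checks that package-level constants, as well as runtime
// constants such as _MSpanInUse and _PageSize, can be printed from gdb.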
func TestGdbConst(t *testing.T) {
t.Parallel()
checkGdbEnvironment(t)
checkGdbVersion(t)
dir, err := ioutil.TempDir("", "go-build")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
}
defer os.RemoveAll(dir)
// Build the source code.
src := filepath.Join(dir, "main.go")
err = ioutil.WriteFile(src, []byte(constsSource), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags=-N -l", "-o", "a.exe")
cmd.Dir = dir
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
// Execute gdb commands.
args := []string{"-nx", "-batch",
"-ex", "set startup-with-shell off",
"-ex", "break main.main",
"-ex", "run",
"-ex", "print main.aConstant",
"-ex", "print main.largeConstant",
"-ex", "print main.minusOne",
"-ex", "print 'runtime._MSpanInUse'",
"-ex", "print 'runtime._PageSize'",
filepath.Join(dir, "a.exe"),
}
got, _ := exec.Command("gdb", args...).CombinedOutput()
sgot := strings.Replace(string(got), "\r\n", "\n", -1)
t.Logf("output %q", sgot)
if !strings.Contains(sgot, "\n$1 = 42\n$2 = 18446744073709551615\n$3 = -1\n$4 = 1 '\\001'\n$5 = 8192") {
t.Fatalf("output mismatch")
}
}
| ["\"GOROOT_FINAL\""] | [] | ["GOROOT_FINAL"] | [] | ["GOROOT_FINAL"] | go | 1 | 0 |
sch/log_source.go
|
// Copyright (c) 2016, 2018, 2022, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
// Service Connector Hub API
//
// Use the Service Connector Hub API to transfer data between services in Oracle Cloud Infrastructure.
// For more information about Service Connector Hub, see
// Service Connector Hub Overview (https://docs.cloud.oracle.com/iaas/Content/service-connector-hub/overview.htm).
//
package sch
import (
"fmt"
"github.com/oracle/oci-go-sdk/v58/common"
"strings"
)
// LogSource The logs for this Logging source.
type LogSource struct {
// The OCID (https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the log source.
CompartmentId *string `mandatory:"true" json:"compartmentId"`
// The OCID (https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the log group.
LogGroupId *string `mandatory:"false" json:"logGroupId"`
// The OCID (https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the log.
LogId *string `mandatory:"false" json:"logId"`
}
func (m LogSource) String() string {
return common.PointerString(m)
}
// ValidateEnumValue returns an error when providing an unsupported enum value
// This function is being called during constructing API request process
// Not recommended for calling this function directly
func (m LogSource) ValidateEnumValue() (bool, error) {
errMessage := []string{}
if len(errMessage) > 0 {
return true, fmt.Errorf(strings.Join(errMessage, "\n"))
}
return false, nil
}
| [] | [] | [] | [] | [] | go | null | null | null
core/tests/test_utils.py
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import collections
import contextlib
import copy
import inspect
import itertools
import json
import logging
import os
import re
import unittest
from constants import constants
from core.controllers import base
from core.domain import auth_domain
from core.domain import caching_domain
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import fs_services
from core.domain import interaction_registry
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import taskqueue_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.search import elastic_search_services
from core.platform.taskqueue import cloud_tasks_emulator
import feconf
import main
import main_mail
import main_taskqueue
from proto import text_classifier_pb2
import python_utils
import schema_utils
import utils
import contextlib2
import elasticsearch
from google.appengine.api import mail
from google.appengine.ext import deferred
from google.appengine.ext import testbed
import requests_mock
import webtest
(
auth_models, exp_models, feedback_models, question_models, skill_models,
story_models, suggestion_models, topic_models,) = (
models.Registry.import_models([
models.NAMES.auth, models.NAMES.exploration, models.NAMES.feedback,
models.NAMES.question, models.NAMES.skill, models.NAMES.story,
models.NAMES.suggestion, models.NAMES.topic]))
current_user_services = models.Registry.import_current_user_services()
datastore_services = models.Registry.import_datastore_services()
email_services = models.Registry.import_email_services()
memory_cache_services = models.Registry.import_cache_services()
platform_auth_services = models.Registry.import_auth_services()
platform_taskqueue_services = models.Registry.import_taskqueue_services()
# Prefix to append to all lines printed by tests to the console.
# We are using the b' prefix as all the stdouts are in bytes.
LOG_LINE_PREFIX = b'LOG_INFO_TEST: '
# List of model classes that don't have Wipeout- or Takeout-related class
# methods defined, because they're not used directly but only as
# base classes for the other models.
BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES = (
'BaseCommitLogEntryModel',
'BaseHumanMaintainedModel',
'BaseMapReduceBatchResultsModel',
'BaseModel',
'BaseSnapshotContentModel',
'BaseSnapshotMetadataModel',
'VersionedModel',
)
def get_filepath_from_filename(filename, rootdir):
"""Returns filepath using the filename. Different files are present in
different subdirectories in the rootdir. So, we walk through the rootdir and
match all the filenames with the given filename. When a match is found
the function returns the complete path of the filename by using
os.path.join(root, filename).
For example signup-page.mainpage.html is present in
core/templates/pages/signup-page and error-page.mainpage.html is present in
core/templates/pages/error-pages. So we walk through core/templates/pages
and a match for signup-page.component.html is found in signup-page
subdirectory and a match for error-page.directive.html is found in
error-pages subdirectory.
Args:
filename: str. The name of the file.
rootdir: str. The directory to search the file in.
Returns:
str | None. The path of the file if file is found otherwise
None.
"""
# This is required since error files are served according to error status
# code. The file served is error-page.mainpage.html but it is compiled and
# stored as error-page-{status_code}.mainpage.html. So, we need to swap the
# name here to obtain the correct filepath.
if filename.startswith('error-page'):
filename = 'error-page.mainpage.html'
matches = list(itertools.chain.from_iterable(
(os.path.join(subdir, f) for f in filenames if f == filename)
for subdir, _, filenames in os.walk(rootdir)))
if len(matches) > 1:
raise Exception('Multiple files found with name: %s' % filename)
return matches[0] if matches else None
def mock_load_template(filename):
"""Mock for load_template function. This mock is required for backend tests
since we do not have webpack compilation before backend tests. The folder to
search templates is webpack_bundles which is generated after webpack
compilation. Since this folder will be missing, load_template function will
return an error. So, we use a mock for load_template which returns the html
file from the source directory instead.
Args:
filename: str. The name of the file for which template is to be
returned.
Returns:
str. The contents of the given file.
"""
filepath = get_filepath_from_filename(
filename, os.path.join('core', 'templates', 'pages'))
with python_utils.open_file(filepath, 'r') as f:
return f.read()
def check_image_png_or_webp(image_string):
"""Checks if the image is in png or webp format only.
Args:
image_string: str. Image url in base64 format.
Returns:
bool. Returns true if image is in WebP format.
"""
return image_string.startswith(('data:image/png', 'data:image/webp'))
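# Illustrative usage (the data URLs below are abbreviated examples):
#
#     check_image_png_or_webp('data:image/png;base64,iVBOR...')   # -> True
#     check_image_png_or_webp('data:image/webp;base64,UklGR...')  # -> True
#     check_image_png_or_webp('data:image/jpeg;base64,/9j/4...')  # -> False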
def get_storage_model_module_names():
"""Get all module names in storage."""
# As models.NAMES is an enum, it cannot be iterated over. So we use the
# __dict__ property which can be iterated over.
for name in models.NAMES.__dict__:
if '__' not in name:
yield name
def get_storage_model_classes():
"""Get all model classes in storage."""
for module_name in get_storage_model_module_names():
(module,) = models.Registry.import_models([module_name])
for member_name, member_obj in inspect.getmembers(module):
if inspect.isclass(member_obj):
clazz = getattr(module, member_name)
all_base_classes = [
base_class.__name__ for base_class in inspect.getmro(
clazz)]
if 'Model' in all_base_classes:
yield clazz
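# Illustrative usage of the storage-model helpers above (a sketch):
#
#     for clazz in get_storage_model_classes():
#         if clazz.__name__ not in BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES:
#             ...  # e.g. assert that the model defines Wipeout/Takeout
#                  # policies.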
class ElasticSearchStub(python_utils.OBJECT):
"""This stub class mocks the functionality of ES in
elastic_search_services.py.
IMPORTANT NOTE TO DEVELOPERS: These mock functions are NOT guaranteed to
be exact implementations of elasticsearch functionality. If the results of
this mock and the local dev elasticsearch instance differ, the mock
functions should be updated so that their behaviour matches what a local
dev instance would return. (For example, this mock always has a 'version'
of 1 in the return dict and an arbitrary '_seq_no', although the version
number increments with every PUT in the elasticsearch Python client
library and the '_seq_no' increments with every operation.)
"""
_DB = {}
def reset(self):
"""Helper method that clears the mock database."""
self._DB.clear()
def _generate_index_not_found_error(self, index_name):
"""Helper method that generates an elasticsearch 'index not found' 404
error.
Args:
index_name: str. The index that was not found.
Returns:
elasticsearch.NotFoundError. A manually-constructed error
indicating that the index was not found.
"""
        return elasticsearch.NotFoundError(
404, 'index_not_found_exception', {
'status': 404,
'error': {
'reason': 'no such index [%s]' % index_name,
'root_cause': [{
'reason': 'no such index [%s]' % index_name,
'index': index_name,
'index_uuid': '_na_',
'type': 'index_not_found_exception',
'resource.type': 'index_or_alias',
'resource.id': index_name
}],
'index': index_name,
'index_uuid': '_na_',
'type': 'index_not_found_exception',
'resource.type': 'index_or_alias',
'resource.id': index_name
}
}
)
def mock_create_index(self, index_name):
"""Creates an index with the given name.
Args:
index_name: str. The name of the index to create.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
elasticsearch.RequestError. An index with the given name already
exists.
"""
if index_name in self._DB:
raise elasticsearch.RequestError(
400, 'resource_already_exists_exception',
'index [%s/RaNdOmStRiNgOfAlPhAs] already exists' % index_name)
self._DB[index_name] = []
return {
'index': index_name,
'acknowledged': True,
'shards_acknowledged': True
}
def mock_index(self, index_name, document, id=None): # pylint: disable=redefined-builtin
"""Adds a document with the given ID to the index.
Note that, unfortunately, we have to keep the name of "id" for the
last kwarg, although it conflicts with a Python builtin. This is
because the name is an existing part of the API defined at
https://elasticsearch-py.readthedocs.io/en/v7.10.1/api.html
Args:
index_name: str. The name of the index to create.
document: dict. The document to store.
id: str. The unique identifier of the document.
Returns:
dict. A dict representing the ElasticSearch API response.
        Raises:
            elasticsearch.NotFoundError. The given index name was not found.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
self._DB[index_name] = [
d for d in self._DB[index_name] if d['id'] != id]
self._DB[index_name].append(document)
return {
'_index': index_name,
'_shards': {
'total': 2,
'successful': 1,
'failed': 0,
},
'_seq_no': 96,
'_primary_term': 1,
'result': 'created',
'_id': id,
'_version': 1,
'_type': '_doc',
}
def mock_exists(self, index_name, doc_id):
"""Checks whether a document with the given ID exists in the mock
database.
Args:
index_name: str. The name of the index to check.
doc_id: str. The document id to check.
Returns:
bool. Whether the document exists in the index.
Raises:
            elasticsearch.NotFoundError. The given index name was not found.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
return any([d['id'] == doc_id for d in self._DB[index_name]])
def mock_delete(self, index_name, doc_id):
"""Deletes a document from an index in the mock database. Does nothing
if the document is not in the index.
Args:
index_name: str. The name of the index to delete the document from.
doc_id: str. The document id to be deleted from the index.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
Exception. The document does not exist in the index.
elasticsearch.NotFoundError. The given index name was not found, or
the given doc_id was not found in the given index.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
docs = [d for d in self._DB[index_name] if d['id'] != doc_id]
if len(self._DB[index_name]) != len(docs):
self._DB[index_name] = docs
return {
'_type': '_doc',
'_seq_no': 99,
'_shards': {
'total': 2,
'successful': 1,
'failed': 0
},
'result': 'deleted',
'_primary_term': 1,
'_index': index_name,
'_version': 4,
'_id': '0'
}
raise elasticsearch.NotFoundError(
404, {
'_index': index_name,
'_type': '_doc',
'_id': doc_id,
'_version': 1,
'result': 'not_found',
'_shards': {
'total': 2,
'successful': 1,
'failed': 0
},
'_seq_no': 103,
'_primary_term': 1
})
def mock_delete_by_query(self, index_name, query):
"""Deletes documents from an index based on the given query.
        Note that this mock only supports a specific format for the query,
        i.e. the one which clears the entire index. It asserts that all calls
        to this function use that query format.
Args:
index_name: str. The name of the index to delete the documents from.
query: dict. The query that defines which documents to delete.
Returns:
dict. A dict representing the ElasticSearch response.
Raises:
AssertionError. The query is not in the correct form.
elasticsearch.NotFoundError. The given index name was not found.
"""
assert query.keys() == ['query']
assert query['query'] == {
'match_all': {}
}
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
index_size = len(self._DB[index_name])
del self._DB[index_name][:]
return {
'took': 72,
'version_conflicts': 0,
'noops': 0,
'throttled_until_millis': 0,
'failures': [],
'throttled_millis': 0,
'total': index_size,
'batches': 1,
'requests_per_second': -1.0,
'retries': {u'search': 0, u'bulk': 0},
'timed_out': False,
'deleted': index_size
}
def mock_search(self, body=None, index=None, params=None):
"""Searches and returns documents that match the given query.
Args:
body: dict. A dictionary search definition that uses Query DSL.
index: str. The name of the index to search.
params: dict. A dict with two keys: `size` and `from`. The
corresponding values are ints which represent the number of
results to fetch, and the offset from which to fetch them,
respectively.
Returns:
dict. A dict representing the ElasticSearch response.
Raises:
AssertionError. The given arguments are not supported by this mock.
elasticsearch.NotFoundError. The given index name was not found.
"""
assert body is not None
# "_all" and "" are special index names that are used to search across
# all indexes. We do not allow their use.
assert index not in ['_all', '', None]
assert sorted(params.keys()) == ['from', 'size']
if index not in self._DB:
raise self._generate_index_not_found_error(index)
result_docs = []
result_doc_ids = set([])
for doc in self._DB[index]:
if not doc['id'] in result_doc_ids:
result_docs.append(doc)
result_doc_ids.add(doc['id'])
filters = body['query']['bool']['filter']
terms = body['query']['bool']['must']
for f in filters:
for k, v in f['match'].items():
result_docs = [doc for doc in result_docs if doc[k] in v]
if terms:
filtered_docs = []
for term in terms:
for _, v in term.items():
values = v['query'].split(' ')
for doc in result_docs:
strs = [val for val in doc.values() if isinstance(
val, python_utils.BASESTRING)]
words = []
for s in strs:
words += s.split(' ')
if all([value in words for value in values]):
filtered_docs.append(doc)
result_docs = filtered_docs
formatted_result_docs = [{
'_id': doc['id'],
'_score': 0.0,
'_type': '_doc',
'_index': index,
'_source': doc
} for doc in result_docs[
params['from']: params['from'] + params['size']
]]
return {
'timed_out': False,
'_shards': {
'failed': 0,
'total': 1,
'successful': 1,
'skipped': 0
},
'took': 4,
'hits': {
'hits': formatted_result_docs
},
'total': {
'value': len(formatted_result_docs),
'relation': 'eq'
},
'max_score': max(
[0.0] + [d['_score'] for d in formatted_result_docs]),
}
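# Illustrative usage of ElasticSearchStub from within a test (a sketch of
# what GenericTestBase.run() does further below when it installs the stub):
#
#     es_stub = ElasticSearchStub()
#     es_stub.reset()
#     with self.swap(
#             elastic_search_services.ES.indices, 'create',
#             es_stub.mock_create_index):
#         with self.swap(
#                 elastic_search_services.ES, 'index', es_stub.mock_index):
#             ...  # Code under test that writes to the search index.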
class AuthServicesStub(python_utils.OBJECT):
"""Test-only implementation of the public API in core.platform.auth."""
def __init__(self):
"""Initializes a new instance that emulates an empty auth server."""
self._user_id_by_auth_id = {}
self._external_user_id_associations = set()
@classmethod
def install_stub(cls, test):
"""Installs a new instance of the stub onto the given test instance.
Args:
test: GenericTestBase. The test instance to install the stub on.
Returns:
callable. A function that will uninstall the stub when called.
"""
with contextlib2.ExitStack() as stack:
stub = cls()
stack.enter_context(test.swap(
platform_auth_services, 'establish_auth_session',
stub.establish_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'destroy_auth_session',
stub.destroy_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_claims_from_request',
stub.get_auth_claims_from_request))
stack.enter_context(test.swap(
platform_auth_services, 'mark_user_for_deletion',
stub.mark_user_for_deletion))
stack.enter_context(test.swap(
platform_auth_services, 'delete_external_auth_associations',
stub.delete_external_auth_associations))
stack.enter_context(test.swap(
platform_auth_services,
'verify_external_auth_associations_are_deleted',
stub.verify_external_auth_associations_are_deleted))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_id_from_user_id',
stub.get_auth_id_from_user_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_user_id_from_auth_id',
stub.get_user_id_from_auth_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_user_ids_from_auth_ids',
stub.get_multi_user_ids_from_auth_ids))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_auth_ids_from_user_ids',
stub.get_multi_auth_ids_from_user_ids))
stack.enter_context(test.swap(
platform_auth_services, 'associate_auth_id_with_user_id',
stub.associate_auth_id_with_user_id))
stack.enter_context(test.swap(
platform_auth_services,
'associate_multi_auth_ids_with_user_ids',
stub.associate_multi_auth_ids_with_user_ids))
# Standard usage of ExitStack: enter a bunch of context managers
# from the safety of an ExitStack's context. Once they've all been
# opened, pop_all() of them off of the original context so they can
# *stay* open. Calling the function returned will exit all of them
# in reverse order.
# https://docs.python.org/3/library/contextlib.html#cleaning-up-in-an-enter-implementation
return stack.pop_all().close
@classmethod
def establish_auth_session(cls, unused_request, unused_response):
"""Sets login cookies to maintain a user's sign-in session.
Args:
unused_request: webapp2.Request. Unused because os.environ handles
sessions.
unused_response: webapp2.Response. Unused because os.environ handles
sessions.
"""
pass
@classmethod
def destroy_auth_session(cls, unused_response):
"""Clears login cookies from the given response headers.
Args:
unused_response: webapp2.Response. Unused because os.environ handles
sessions.
"""
pass
@classmethod
def get_auth_claims_from_request(cls, unused_request):
"""Authenticates the request and returns claims about its authorizer.
        This stub obtains authorization information from os.environ rather
        than from the request itself, since the test environment stores
        sign-in details in environment variables.
Args:
unused_request: webapp2.Request. The HTTP request to authenticate.
Unused because auth-details are extracted from environment
variables.
Returns:
AuthClaims|None. Claims about the currently signed in user. If no
user is signed in, then returns None.
"""
auth_id = os.environ.get('USER_ID', '')
email = os.environ.get('USER_EMAIL', '')
role_is_super_admin = os.environ.get('USER_IS_ADMIN', '0') == '1'
if auth_id:
return auth_domain.AuthClaims(auth_id, email, role_is_super_admin)
return None
def mark_user_for_deletion(self, user_id):
"""Marks the user, and all of their auth associations, as deleted.
Since the stub does not use models, this operation actually deletes the
user's association. The "external" associations, however, are not
deleted yet.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._user_id_by_auth_id = {
a: u for a, u in self._user_id_by_auth_id.items() if u != user_id
}
def delete_external_auth_associations(self, user_id):
"""Deletes all associations that refer to the user outside of Oppia.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._external_user_id_associations.discard(user_id)
def verify_external_auth_associations_are_deleted(self, user_id):
"""Returns true if and only if we have successfully verified that all
external associations have been deleted.
Args:
user_id: str. The unique ID of the user whose associations should be
checked.
Returns:
bool. True if and only if we have successfully verified that all
external associations have been deleted.
"""
return user_id not in self._external_user_id_associations
def get_auth_id_from_user_id(self, user_id):
"""Returns the auth ID associated with the given user ID.
Args:
user_id: str. The user ID.
Returns:
str|None. The auth ID associated with the given user ID, or None if
no association exists.
"""
return python_utils.NEXT(
(a for a, u in self._user_id_by_auth_id.items() if u == user_id),
None)
def get_user_id_from_auth_id(self, auth_id):
"""Returns the user ID associated with the given auth ID.
Args:
auth_id: str. The auth ID.
Returns:
str|None. The user ID associated with the given auth ID, or None if
no association exists.
"""
return self._user_id_by_auth_id.get(auth_id, None)
def get_multi_user_ids_from_auth_ids(self, auth_ids):
"""Returns the user IDs associated with the given auth IDs.
Args:
auth_ids: list(str). The auth IDs.
Returns:
list(str|None). The user IDs associated with each of the given auth
IDs, or None for associations which don't exist.
"""
return [self._user_id_by_auth_id.get(a, None) for a in auth_ids]
def get_multi_auth_ids_from_user_ids(self, user_ids):
"""Returns the auth IDs associated with the given user IDs.
Args:
user_ids: list(str). The user IDs.
Returns:
list(str|None). The auth IDs associated with each of the given user
IDs, or None for associations which don't exist.
"""
auth_id_by_user_id = {u: a for a, u in self._user_id_by_auth_id.items()}
return [auth_id_by_user_id.get(u, None) for u in user_ids]
def associate_auth_id_with_user_id(self, auth_id_user_id_pair):
"""Commits the association between auth ID and user ID.
This method also adds the user to the "external" set of associations.
Args:
auth_id_user_id_pair: auth_domain.AuthIdUserIdPair. The association
to commit.
Raises:
Exception. The IDs are already associated with a value.
"""
auth_id, user_id = auth_id_user_id_pair
if auth_id in self._user_id_by_auth_id:
raise Exception(
'auth_id=%r is already associated with user_id=%r' % (
auth_id, self._user_id_by_auth_id[auth_id]))
auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id).put()
self._external_user_id_associations.add(user_id)
self._user_id_by_auth_id[auth_id] = user_id
def associate_multi_auth_ids_with_user_ids(self, auth_id_user_id_pairs):
"""Commits the associations between auth IDs and user IDs.
This method also adds the users to the "external" set of associations.
Args:
auth_id_user_id_pairs: list(auth_domain.AuthIdUserIdPair). The
associations to commit.
Raises:
Exception. One or more auth associations already exist.
"""
collisions = ', '.join(
'{auth_id=%r: user_id=%r}' % (a, self._user_id_by_auth_id[a])
for a, _ in auth_id_user_id_pairs if a in self._user_id_by_auth_id)
if collisions:
raise Exception('already associated: %s' % collisions)
datastore_services.put_multi(
[auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id)
for auth_id, user_id in auth_id_user_id_pairs])
        self._external_user_id_associations.update(
            u for _, u in auth_id_user_id_pairs)
self._user_id_by_auth_id.update(auth_id_user_id_pairs)
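# Illustrative usage of AuthServicesStub from within a test (a sketch):
# install_stub() swaps the platform auth API with this stub and returns a
# callback that undoes all of the swaps.
#
#     uninstall_stub = AuthServicesStub.install_stub(self)
#     try:
#         ...  # Code under test that calls platform_auth_services.
#     finally:
#         uninstall_stub()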
class TaskqueueServicesStub(python_utils.OBJECT):
"""The stub class that mocks the API functionality offered by the platform
layer, namely the platform.taskqueue taskqueue services API.
"""
def __init__(self, test_base):
"""Initializes a taskqueue services stub that replaces the API
functionality of core.platform.taskqueue.
Args:
test_base: GenericTestBase. The current test base.
"""
self._test_base = test_base
self._client = cloud_tasks_emulator.Emulator(
task_handler=self._task_handler, automatic_task_handling=False)
def _task_handler(self, url, payload, queue_name, task_name=None):
"""Makes a POST request to the task URL in the test app.
Args:
url: str. URL of the handler function.
            payload: dict(str : *). Payload to pass to the request. May be
                None if no payload is required.
queue_name: str. The name of the queue to add the task to.
task_name: str|None. Optional. The name of the task.
"""
headers = {
'X-Appengine-QueueName': python_utils.convert_to_bytes(queue_name),
'X-Appengine-TaskName': (
# Maps empty strings to None so the output can become 'None'.
python_utils.convert_to_bytes(task_name or None)),
'X-AppEngine-Fake-Is-Admin': python_utils.convert_to_bytes(1),
}
csrf_token = self._test_base.get_new_csrf_token()
self._test_base.post_task(url, payload, headers, csrf_token=csrf_token)
def create_http_task(
self, queue_name, url, payload=None, scheduled_for=None,
task_name=None):
"""Creates a Task in the corresponding queue that will be executed when
the 'scheduled_for' countdown expires using the cloud tasks emulator.
Args:
queue_name: str. The name of the queue to add the task to.
url: str. URL of the handler function.
payload: dict(str : *). Payload to pass to the request. Defaults to
None if no payload is required.
scheduled_for: datetime|None. The naive datetime object for the time
to execute the task. Ignored by this stub.
task_name: str|None. Optional. The name of the task.
"""
# Causes the task to execute immediately by setting the scheduled_for
# time to 0. If we allow scheduled_for to be non-zero, then tests that
# rely on the actions made by the task will become unreliable.
scheduled_for = 0
self._client.create_task(
queue_name, url, payload, scheduled_for=scheduled_for,
task_name=task_name)
def count_jobs_in_taskqueue(self, queue_name=None):
"""Returns the total number of tasks in a single queue if a queue name
is specified or the entire taskqueue if no queue name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
int. The total number of tasks in a single queue or in the entire
taskqueue.
"""
return self._client.get_number_of_tasks(queue_name=queue_name)
def process_and_flush_tasks(self, queue_name=None):
"""Executes all of the tasks in a single queue if a queue name is
specified or all of the tasks in the taskqueue if no queue name is
specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
"""
self._client.process_and_flush_tasks(queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
"""Returns a list of the tasks in a single queue if a queue name is
specified or a list of all of the tasks in the taskqueue if no queue
name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
list(Task). List of tasks in a single queue or in the entire
taskqueue.
"""
return self._client.get_tasks(queue_name=queue_name)
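# Illustrative usage of TaskqueueServicesStub from within an
# AppEngineTestBase test (a sketch; '/task/example_handler' and the 'default'
# queue name are hypothetical):
#
#     stub = self._platform_taskqueue_services_stub
#     stub.create_http_task('default', '/task/example_handler', payload={})
#     self.assertEqual(
#         stub.count_jobs_in_taskqueue(queue_name='default'), 1)
#     stub.process_and_flush_tasks(queue_name='default')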
class MemoryCacheServicesStub(python_utils.OBJECT):
"""The stub class that mocks the API functionality offered by the platform
layer, namely the platform.cache cache services API.
"""
_CACHE_DICT = {}
def get_memory_cache_stats(self):
"""Returns a mock profile of the cache dictionary. This mock does not
have the functionality to test for peak memory usage and total memory
usage so the values for those attributes will be 0.
Returns:
MemoryCacheStats. MemoryCacheStats object containing the total
number of keys in the cache dictionary.
"""
return caching_domain.MemoryCacheStats(0, 0, len(self._CACHE_DICT))
def flush_cache(self):
"""Wipes the cache dictionary clean."""
self._CACHE_DICT.clear()
def get_multi(self, keys):
"""Looks up a list of keys in cache dictionary.
Args:
keys: list(str). A list of keys (strings) to look up.
Returns:
            list(str|None). A list of values in the cache dictionary
                corresponding to the keys that are passed in, with None for
                keys that are not present.
"""
assert isinstance(keys, list)
return [self._CACHE_DICT.get(key, None) for key in keys]
def set_multi(self, key_value_mapping):
"""Sets multiple keys' values at once in the cache dictionary.
Args:
key_value_mapping: dict(str, str). Both the key and value are
strings. The value can either be a primitive binary-safe string
or the JSON-encoded string version of the object.
Returns:
bool. Whether the set action succeeded.
"""
assert isinstance(key_value_mapping, dict)
self._CACHE_DICT.update(key_value_mapping)
return True
def delete_multi(self, keys):
"""Deletes multiple keys in the cache dictionary.
Args:
keys: list(str). The keys to delete.
Returns:
int. Number of successfully deleted keys.
"""
assert all(isinstance(key, python_utils.BASESTRING) for key in keys)
keys_to_delete = [key for key in keys if key in self._CACHE_DICT]
for key in keys_to_delete:
del self._CACHE_DICT[key]
return len(keys_to_delete)
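# Illustrative usage of MemoryCacheServicesStub (a sketch):
#
#     cache_stub = MemoryCacheServicesStub()
#     cache_stub.set_multi({'key_a': 'value_a', 'key_b': 'value_b'})
#     cache_stub.get_multi(['key_a', 'missing_key'])  # -> ['value_a', None]
#     cache_stub.delete_multi(['key_a'])              # -> 1
#     cache_stub.flush_cache()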
class TestBase(unittest.TestCase):
"""Base class for all tests."""
maxDiff = 2500
# A test unicode string.
UNICODE_TEST_STRING = 'unicode ¡马!'
def _get_unicode_test_string(self, suffix):
"""Returns a string that contains unicode characters and ends with the
given suffix. This is used to test that functions behave correctly when
handling strings with unicode characters.
Args:
suffix: str. The suffix to append to the UNICODE_TEST_STRING.
Returns:
str. A string that contains unicode characters and ends with the
given suffix.
"""
return '%s%s' % (self.UNICODE_TEST_STRING, suffix)
def _assert_validation_error(self, item, error_substring):
"""Checks that the given item passes default validation."""
with self.assertRaisesRegexp(utils.ValidationError, error_substring):
item.validate()
def log_line(self, line):
"""Print the line with a prefix that can be identified by the script
that calls the test.
"""
        # We use the b'' prefix because all the stdouts are in bytes.
python_utils.PRINT(
b'%s%s' % (LOG_LINE_PREFIX, python_utils.convert_to_bytes(line)))
def shortDescription(self):
"""Additional information logged during unit test invocation."""
# Suppress default logging of docstrings.
return None
def get_updated_param_dict(
self, param_dict, param_changes, exp_param_specs):
"""Updates a param dict using the given list of param_changes.
Note that the list of parameter changes is ordered. Parameter changes
later in the list may depend on parameter changes that have been set
earlier in the same list.
"""
new_param_dict = copy.deepcopy(param_dict)
for param_change in param_changes:
try:
obj_type = exp_param_specs[param_change.name].obj_type
            except Exception:
raise Exception('Parameter %s not found' % param_change.name)
new_param_dict[param_change.name] = (
param_change.get_normalized_value(obj_type, new_param_dict))
return new_param_dict
def get_static_asset_filepath(self):
"""Returns filepath to the static files on disk ('' or 'build/')."""
return '' if constants.DEV_MODE else os.path.join('build')
def get_static_asset_url(self, asset_suffix):
"""Returns the relative path for the asset, appending it to the
corresponding cache slug. asset_suffix should have a leading slash.
"""
return '/assets%s%s' % (utils.get_asset_dir_prefix(), asset_suffix)
@contextlib.contextmanager
def capture_logging(self, min_level=logging.NOTSET):
"""Context manager that captures logs into a list.
Strips whitespace from messages for convenience.
https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging
Args:
min_level: int. The minimum logging level captured by the context
manager. By default, all logging levels are captured. Values
should be one of the following values from the logging module:
NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL.
Yields:
list(str). A live-feed of the logging messages captured so-far.
"""
captured_logs = []
class ListStream(python_utils.OBJECT):
"""Stream-like object that appends writes to the captured logs."""
def write(self, msg):
"""Appends stripped messages to captured logs."""
captured_logs.append(msg.strip())
def flush(self):
"""Does nothing."""
pass
list_stream_handler = logging.StreamHandler(stream=ListStream())
logger = logging.getLogger()
old_level = logger.level
logger.addHandler(list_stream_handler)
logger.setLevel(min_level)
try:
yield captured_logs
finally:
logger.setLevel(old_level)
logger.removeHandler(list_stream_handler)
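    # Illustrative usage of capture_logging from within a test (a sketch):
    #
    #     with self.capture_logging(min_level=logging.INFO) as logs:
    #         logging.info('Task started')
    #     # logs is expected to contain the stripped message, e.g.
    #     # ['Task started'].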
@contextlib.contextmanager
def swap(self, obj, attr, newvalue):
"""Swap an object's attribute value within the context of a 'with'
statement. The object can be anything that supports getattr and setattr,
such as class instances, modules, etc.
Example usage:
import math
with self.swap(math, 'sqrt', lambda x: 42):
print math.sqrt(16.0) # prints 42
print math.sqrt(16.0) # prints 4 as expected.
To mock class methods, pass the function to the classmethod decorator
first, for example:
import types
with self.swap(
SomePythonClass, 'some_classmethod',
classmethod(new_classmethod)):
NOTE: self.swap and other context managers that are created using
contextlib.contextmanager use generators that yield exactly once. This
means that you can only use them once after construction, otherwise,
the generator will immediately raise StopIteration, and contextlib will
raise a RuntimeError.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
@contextlib.contextmanager
def swap_to_always_return(self, obj, attr, value=None):
"""Swap obj.attr with a function that always returns the given value."""
def function_that_always_returns(*unused_args, **unused_kwargs):
"""Returns the input value."""
return value
with self.swap(obj, attr, function_that_always_returns):
yield
@contextlib.contextmanager
def swap_to_always_raise(self, obj, attr, error=Exception):
"""Swap obj.attr with a function that always raises the given error."""
def function_that_always_raises(*unused_args, **unused_kwargs):
"""Raises the input exception."""
raise error
with self.swap(obj, attr, function_that_always_raises):
yield
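    # Illustrative usage of the swap helpers above (a sketch; `some_module`
    # and `fetch_data` are hypothetical names):
    #
    #     with self.swap_to_always_return(some_module, 'fetch_data', value=[]):
    #         ...  # some_module.fetch_data(...) now returns [].
    #     with self.swap_to_always_raise(
    #             some_module, 'fetch_data', error=IOError('disk error')):
    #         ...  # some_module.fetch_data(...) now raises IOError.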
@contextlib.contextmanager
def swap_with_checks(
self, obj, attr, new_value, expected_args=None,
expected_kwargs=None, called=True):
"""Swap an object's function value within the context of a 'with'
statement. The object can be anything that supports getattr and setattr,
such as class instances, modules, etc.
Examples:
If you want to check subprocess.Popen is invoked twice like
`subprocess.Popen(['python'], shell=True)` and
            `subprocess.Popen(['python2'], shell=False)`, you can first
            define the mock function, then the swap, and just run the target
            function in context, as follows:
def mock_popen(command, shell):
return
popen_swap = self.swap_with_checks(
subprocess, 'Popen', mock_popen,
expected_args=[(['python'],), (['python2'],)],
expected_kwargs=[{'shell': True}, {'shell': False}])
with popen_swap:
function_that_invokes_popen()
Args:
obj: *. The Python object whose attribute you want to swap.
attr: str. The name of the function to be swapped.
new_value: function. The new function you want to use.
expected_args: None|list(tuple). The expected args that you want
this function to be invoked with. When its value is None, args
will not be checked. If the value type is list, the function
will check whether the called args is the first element in the
list. If matched, this tuple will be removed from the list.
expected_kwargs: None|list(dict). The expected keyword args you want
this function to be invoked with. Similar to expected_args.
called: bool. Whether the function is expected to be invoked. This
will always be checked.
Yields:
context. The context with function replaced.
"""
original = getattr(obj, attr)
        # The actual error message will also include the detailed assertion
        # error message via the `self.longMessage` setting below.
msg = 'Expected checks failed when swapping out in %s.%s tests.' % (
obj.__name__, attr)
def wrapper(*args, **kwargs):
"""Wrapper function for the new value. This function will do the
check before the wrapped function is invoked. After the function
finished, the wrapper will update how many times this function is
invoked.
Args:
*args: list(*). The args passed into `attr` function.
**kwargs: dict. The key word args passed into `attr` function.
Returns:
*. Result of `new_value`.
"""
wrapper.called = True
if expected_args is not None:
self.assertEqual(args, expected_args[0], msg=msg)
expected_args.pop(0)
if expected_kwargs is not None:
self.assertEqual(kwargs, expected_kwargs[0], msg=msg)
expected_kwargs.pop(0)
result = new_value(*args, **kwargs)
return result
wrapper.called = False
setattr(obj, attr, wrapper)
error_occurred = False
try:
# This will show the detailed assert message.
self.longMessage = True
yield
except Exception:
error_occurred = True
# Raise issues thrown by the called function or assert error.
raise
finally:
setattr(obj, attr, original)
if not error_occurred:
self.assertEqual(wrapper.called, called, msg=msg)
self.assertFalse(expected_args, msg=msg)
self.assertFalse(expected_kwargs, msg=msg)
self.longMessage = False
def assertRaises(self, *args, **kwargs):
raise NotImplementedError(
'self.assertRaises should not be used in these tests. Please use '
'self.assertRaisesRegexp instead.')
def assertRaisesRegexp( # pylint: disable=keyword-arg-before-vararg
self, expected_exception, expected_regexp, callable_obj=None,
*args, **kwargs):
if not expected_regexp:
raise Exception(
'Please provide a sufficiently strong regexp string to '
'validate that the correct error is being raised.')
return super(TestBase, self).assertRaisesRegexp(
expected_exception, expected_regexp,
callable_obj=callable_obj, *args, **kwargs)
def assert_matches_regexps(self, items, regexps, full_match=False):
"""Asserts that each item matches the corresponding regexp.
If there are any missing or extra items that do not correspond to a
regexp element, then the assertion fails.
Args:
items: list(str). The string elements being matched.
regexps: list(str|RegexObject). The patterns that each item is
expected to match.
full_match: bool. Whether to require items to match exactly with the
corresponding pattern.
Raises:
AssertionError. At least one item does not match its corresponding
pattern, or the number of items does not match the number of
regexp patterns.
"""
get_match = re.match if full_match else re.search
differences = [
'~ [i=%d]:\t%r does not match: %r' % (i, item, regexp)
for i, (regexp, item) in enumerate(python_utils.ZIP(regexps, items))
if get_match(regexp, item, re.DOTALL) is None
]
if len(items) < len(regexps):
extra_regexps = regexps[len(items):]
differences.extend(
'- [i=%d]:\tmissing item expected to match: %r' % (i, regexp)
for i, regexp in enumerate(extra_regexps, start=len(items)))
if len(regexps) < len(items):
extra_items = items[len(regexps):]
differences.extend(
'+ [i=%d]:\textra item %r' % (i, item)
for i, item in enumerate(extra_items, start=len(regexps)))
if differences:
error_message = 'Lists differ:\n\t%s' % '\n\t'.join(differences)
raise AssertionError(error_message)
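    # Illustrative usage of assert_matches_regexps (a sketch):
    #
    #     self.assert_matches_regexps(
    #         ['Job job_1 started', 'Job job_1 finished'],
    #         [r'Job \w+ started', r'Job \w+ finished'])
    #     # Passes, because each item matches its corresponding pattern and
    #     # the list lengths agree.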
class AppEngineTestBase(TestBase):
"""Minimal base class for tests that need Google App Engine functionality.
This class is primarily designed for unit tests in core.platform, where we
write adapters around Oppia's third-party dependencies. Generally, our unit
tests depend on stub implementations of these adapters to protect them from
platform-specific behavior. Such stubs are installed in the
GenericTestBase.run() method.
Most of the unit tests in our code base do, and should, inherit from
`GenericTestBase` to stay platform-agnostic. The platform layer itself,
however, can _not_ mock out platform-specific behavior. Those unit tests
need to interact with a real implementation. This base class provides the
bare-minimum functionality and stubs necessary to do so.
"""
# Environment values that our tests depend on.
AUTH_DOMAIN = 'example.com'
HTTP_HOST = 'localhost'
SERVER_NAME = 'localhost'
SERVER_PORT = '8080'
DEFAULT_VERSION_HOSTNAME = '%s:%s' % (HTTP_HOST, SERVER_PORT)
def __init__(self, *args, **kwargs):
super(AppEngineTestBase, self).__init__(*args, **kwargs)
# Defined outside of setUp() because we access it from methods, but can
# only install it during the run() method. Defining it in __init__
# satisfies pylint's attribute-defined-outside-init warning.
self._platform_taskqueue_services_stub = TaskqueueServicesStub(self)
def setUp(self):
super(AppEngineTestBase, self).setUp()
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(
overwrite=True,
auth_domain=self.AUTH_DOMAIN, http_host=self.HTTP_HOST,
server_name=self.SERVER_NAME, server_port=self.SERVER_PORT,
default_version_hostname=self.DEFAULT_VERSION_HOSTNAME)
# Google App Engine service stubs.
self.testbed.init_app_identity_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_files_stub()
self.testbed.init_memcache_stub()
self.testbed.init_search_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_user_stub()
policy = (
datastore_services.make_instantaneous_global_consistency_policy())
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
# The root path tells the testbed where to find the queue.yaml file.
self.testbed.init_taskqueue_stub(root_path=os.getcwd())
self._testbed_taskqueue_stub = (
self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME))
# Set up apps for testing.
self.testapp = webtest.TestApp(main.app)
self.taskqueue_testapp = webtest.TestApp(main_taskqueue.app)
self.mail_testapp = webtest.TestApp(main_mail.app)
def tearDown(self):
self.testbed.deactivate()
super(AppEngineTestBase, self).tearDown()
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
AppEngineTestBase's override of run() wraps super().run() in "swap"
contexts which stub out the platform taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
platform_taskqueue_services_swap = self.swap(
platform_taskqueue_services, 'create_http_task',
self._platform_taskqueue_services_stub.create_http_task)
with platform_taskqueue_services_swap:
super(AppEngineTestBase, self).run(result=result)
def _get_all_queue_names(self):
"""Returns a list of all queue names."""
return [q['name'] for q in self._testbed_taskqueue_stub.GetQueues()]
def count_jobs_in_taskqueue(self, queue_name):
"""Returns the total number of tasks in a single queue if a queue name
is specified or the entire taskqueue if no queue name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
int. The total number of tasks in a single queue or in the entire
taskqueue.
"""
return self._platform_taskqueue_services_stub.count_jobs_in_taskqueue(
queue_name=queue_name)
def process_and_flush_pending_tasks(self, queue_name=None):
"""Executes all of the tasks in a single queue if a queue name is
specified or all of the tasks in the taskqueue if no queue name is
specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
"""
self._platform_taskqueue_services_stub.process_and_flush_tasks(
queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
"""Returns a list of the tasks in a single queue if a queue name is
specified or a list of all of the tasks in the taskqueue if no queue
name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
list(Task). List of tasks in a single queue or in the entire
taskqueue.
"""
return self._platform_taskqueue_services_stub.get_pending_tasks(
queue_name=queue_name)
def count_jobs_in_mapreduce_taskqueue(self, queue_name):
"""Counts the jobs in the given MapReduce taskqueue."""
return len(self.get_pending_mapreduce_tasks(queue_name=queue_name))
def get_pending_mapreduce_tasks(self, queue_name=None):
"""Returns the jobs in the given MapReduce taskqueue. If queue_name is
None, defaults to returning the jobs in all available queues.
"""
queue_names = None if queue_name is None else [queue_name]
return self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
def _execute_mapreduce_tasks(self, tasks):
"""Execute MapReduce queued tasks.
Args:
tasks: list(google.appengine.api.taskqueue.taskqueue.Task). The
queued tasks.
"""
for task in tasks:
if task.url == '/_ah/queue/deferred':
deferred.run(task.payload)
else:
# All other tasks will be for MapReduce or taskqueue.
params = task.payload or ''
headers = {
'Content-Length': python_utils.convert_to_bytes(len(params))
}
headers.update(
(key, python_utils.convert_to_bytes(val))
for key, val in task.headers.items())
app = (
self.taskqueue_testapp if task.url.startswith('/task') else
self.testapp)
response = app.post(
task.url, params=params, headers=headers,
expect_errors=True)
if response.status_code != 200:
raise RuntimeError('MapReduce task failed: %r' % task)
def process_and_flush_pending_mapreduce_tasks(self, queue_name=None):
"""Runs and flushes pending MapReduce tasks. If queue_name is None, does
so for all queues; otherwise, this only runs and flushes tasks for the
specified queue.
For more information on taskqueue_stub, see:
https://code.google.com/p/googleappengine/source/browse/trunk/python/google/appengine/api/taskqueue/taskqueue_stub.py
"""
queue_names = (
self._get_all_queue_names() if queue_name is None else [queue_name])
get_enqueued_tasks = lambda: list(
self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names))
# Loops until get_enqueued_tasks() returns an empty list.
for tasks in iter(get_enqueued_tasks, []):
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
def run_but_do_not_flush_pending_mapreduce_tasks(self):
""""Runs, but does not flush, the pending MapReduce tasks."""
queue_names = self._get_all_queue_names()
tasks = self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
class GenericTestBase(AppEngineTestBase):
"""Base test class with common/generic helper methods.
Unless a class is testing for "platform"-specific behavior (e.g., testing
third-party library code or database model implementations), always inherit
from this base class. Otherwise, inherit from unittest.TestCase (preferred)
or AppEngineTestBase if Google App Engine services/behavior is needed.
TODO(#12135): Split this enormous test base into smaller, focused pieces.
"""
# NOTE: For tests that do not/can not use the default super-admin, authors
# can override the following class-level constant.
AUTO_CREATE_DEFAULT_SUPERADMIN_USER = True
# This is the value that gets returned by default when
# app_identity.get_application_id() is called during tests.
EXPECTED_TEST_APP_ID = 'dummy-cloudsdk-project-id'
SUPER_ADMIN_EMAIL = '[email protected]'
SUPER_ADMIN_USERNAME = 'tmpsuperadm1n'
# Dummy strings representing user attributes. Note that it is up to the
# individual test to actually register these users as editors, admins, etc.
ADMIN_EMAIL = '[email protected]'
# Usernames containing the string 'admin' are reserved, so we use 'adm'
# instead.
ADMIN_USERNAME = 'adm'
MODERATOR_EMAIL = '[email protected]'
MODERATOR_USERNAME = 'moderator'
OWNER_EMAIL = '[email protected]'
OWNER_USERNAME = 'owner'
EDITOR_EMAIL = '[email protected]'
EDITOR_USERNAME = 'editor'
TOPIC_MANAGER_EMAIL = '[email protected]'
TOPIC_MANAGER_USERNAME = 'topicmanager'
VOICE_ARTIST_EMAIL = '[email protected]'
VOICE_ARTIST_USERNAME = 'voiceartist'
VIEWER_EMAIL = '[email protected]'
VIEWER_USERNAME = 'viewer'
NEW_USER_EMAIL = '[email protected]'
NEW_USER_USERNAME = 'newuser'
DEFAULT_END_STATE_NAME = 'End'
PSEUDONYMOUS_ID = 'pid_%s' % ('a' * 32)
VERSION_0_STATES_DICT = {
feconf.DEFAULT_INIT_STATE_NAME: {
'content': [{'type': 'text', 'value': ''}],
'param_changes': [],
'interaction': {
'customization_args': {},
'id': 'Continue',
'handlers': [{
'name': 'submit',
'rule_specs': [{
'dest': 'END',
'feedback': [],
'param_changes': [],
'definition': {'rule_type': 'default'},
}],
}],
},
},
}
VERSION_27_STATE_DICT = {
'content': {'content_id': 'content', 'html': ''},
'param_changes': [],
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
},
'interaction': {
'solution': {
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>Solution explanation</p>',
},
'answer_is_exclusive': False,
},
'answer_groups': [],
'default_outcome': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': '',
},
'dest': None,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': True,
},
'customization_args': {
'rows': {'value': 1},
'placeholder': {'value': 'Enter text here'},
},
'confirmed_unclassified_answers': [],
'id': 'TextInput',
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hint 1</p>',
},
}],
},
'classifier_model_id': None,
}
VERSION_21_STATE_DICT = {
'END': {
'classifier_model_id': None,
'content': {
'content_id': 'content',
'html': 'Congratulations, you have finished!',
},
'content_ids_to_audio_translations': {'content': {}},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {
'recommendedExplorationIds': {'value': []},
},
'default_outcome': None,
'hints': [],
'id': 'EndExploration',
'solution': None,
},
'param_changes': [],
},
'Introduction': {
'classifier_model_id': None,
'content': {'content_id': 'content', 'html': ''},
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'feedback_1': {},
},
'interaction': {
'answer_groups': [{
'outcome': {
'dest': 'END',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Correct!</p>',
},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'rule_specs': [{
'inputs': {'x': 'InputString'},
'rule_type': 'Equals',
}],
'tagged_misconception_id': None,
'training_data': ['answer1', 'answer2', 'answer3'],
}],
'confirmed_unclassified_answers': [],
'customization_args': {
'placeholder': {'value': ''},
'rows': {'value': 1},
},
'default_outcome': {
'dest': 'Introduction',
'feedback': {'content_id': 'default_outcome', 'html': ''},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'hints': [],
'id': 'TextInput',
'solution': None,
},
'param_changes': [],
},
}
VERSION_1_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_2_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_3_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_4_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math math_content-with-value="{'
'&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, '
'&quot;svg_filename&quot;: &quot;&quot;'
'}">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_1_SUBTOPIC_DICT = {
'skill_ids': ['skill_1'],
'id': 1,
'title': 'A subtitle',
}
# Dictionary-like data structures within sample YAML must be formatted
    # alphabetically to match string equivalence with YAML generation tests.
    # The indentation is also important, since it is used to define nesting
    # (just like in Python).
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
GenericTestBase's override of run() wraps super().run() in swap
contexts to mock out the cache and taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
memory_cache_services_stub = MemoryCacheServicesStub()
memory_cache_services_stub.flush_cache()
es_stub = ElasticSearchStub()
es_stub.reset()
with contextlib2.ExitStack() as stack:
stack.callback(AuthServicesStub.install_stub(self))
stack.enter_context(self.swap(
elastic_search_services.ES.indices, 'create',
es_stub.mock_create_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'index',
es_stub.mock_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'exists',
es_stub.mock_exists))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete',
es_stub.mock_delete))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete_by_query',
es_stub.mock_delete_by_query))
stack.enter_context(self.swap(
elastic_search_services.ES, 'search',
es_stub.mock_search))
stack.enter_context(self.swap(
memory_cache_services, 'flush_cache',
memory_cache_services_stub.flush_cache))
stack.enter_context(self.swap(
memory_cache_services, 'get_multi',
memory_cache_services_stub.get_multi))
stack.enter_context(self.swap(
memory_cache_services, 'set_multi',
memory_cache_services_stub.set_multi))
stack.enter_context(self.swap(
memory_cache_services, 'get_memory_cache_stats',
memory_cache_services_stub.get_memory_cache_stats))
stack.enter_context(self.swap(
memory_cache_services, 'delete_multi',
memory_cache_services_stub.delete_multi))
super(GenericTestBase, self).run(result=result)
def setUp(self):
super(GenericTestBase, self).setUp()
if self.AUTO_CREATE_DEFAULT_SUPERADMIN_USER:
self.signup_superadmin_user()
def tearDown(self):
datastore_services.delete_multi(
datastore_services.query_everything().iter(keys_only=True))
super(GenericTestBase, self).tearDown()
def login(self, email, is_super_admin=False):
"""Sets the environment variables to simulate a login.
Args:
email: str. The email of the user who is to be logged in.
is_super_admin: bool. Whether the user is a super admin.
"""
self.testbed.setup_env(
overwrite=True,
user_email=email, user_id=self.get_auth_id_from_email(email),
user_is_admin=('1' if is_super_admin else '0'))
def logout(self):
"""Simulates a logout by resetting the environment variables."""
self.testbed.setup_env(
overwrite=True, user_email='', user_id='', user_is_admin='0')
@contextlib.contextmanager
def mock_datetime_utcnow(self, mocked_datetime):
"""Mocks response from datetime.datetime.utcnow method.
Example usage:
import datetime
mocked_datetime_utcnow = (
datetime.datetime.utcnow() - datetime.timedelta(days=1))
with self.mock_datetime_utcnow(mocked_datetime_utcnow):
print datetime.datetime.utcnow() # prints time reduced by 1 day
print datetime.datetime.utcnow() # prints current time.
Args:
mocked_datetime: datetime.datetime. The datetime which will be used
instead of the current UTC datetime.
Yields:
None. Empty yield statement.
"""
with datastore_services.mock_datetime_for_datastore(mocked_datetime):
yield
@contextlib.contextmanager
def login_context(self, email, is_super_admin=False):
"""Log in with the given email under the context of a 'with' statement.
Args:
email: str. An email associated with a user account.
is_super_admin: bool. Whether the user is a super admin.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
"""
self.login(email, is_super_admin=is_super_admin)
try:
yield self.get_user_id_from_email(email)
finally:
self.logout()
@contextlib.contextmanager
def super_admin_context(self):
"""Log in as a global admin under the context of a 'with' statement.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
"""
email = self.SUPER_ADMIN_EMAIL
with self.login_context(email, is_super_admin=True) as user_id:
yield user_id
def signup(self, email, username):
"""Complete the signup process for the user with the given username.
Args:
email: str. Email of the given user.
username: str. Username of the given user.
"""
user_services.create_new_user(self.get_auth_id_from_email(email), email)
with self.login_context(email), requests_mock.Mocker() as m:
# We mock out all HTTP requests while trying to signup to avoid
# calling out to real backend services.
m.request(requests_mock.ANY, requests_mock.ANY)
response = self.get_html_response(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response = self.testapp.post(feconf.SIGNUP_DATA_URL, params={
'csrf_token': self.get_new_csrf_token(),
'payload': json.dumps(
{'username': username, 'agreed_to_terms': True}),
})
self.assertEqual(response.status_int, 200)
def signup_superadmin_user(self):
"""Signs up a superadmin user. Must be called at the end of setUp()."""
self.signup(self.SUPER_ADMIN_EMAIL, self.SUPER_ADMIN_USERNAME)
def set_config_property(self, config_obj, new_config_value):
"""Sets a given configuration object's value to the new value specified
using a POST request.
"""
with self.super_admin_context():
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_obj.name: new_config_value,
},
}, csrf_token=self.get_new_csrf_token())
def set_user_role(self, username, user_role):
"""Sets the given role for this user.
Args:
username: str. Username of the given user.
user_role: str. Role of the given user.
"""
with self.super_admin_context():
self.post_json('/adminrolehandler', {
'username': username,
'role': user_role,
}, csrf_token=self.get_new_csrf_token())
def set_admins(self, admin_usernames):
"""Sets role of given users as ADMIN.
Args:
admin_usernames: list(str). List of usernames.
"""
for name in admin_usernames:
self.set_user_role(name, feconf.ROLE_ID_ADMIN)
def set_topic_managers(self, topic_manager_usernames):
"""Sets role of given users as TOPIC_MANAGER.
Args:
topic_manager_usernames: list(str). List of usernames.
"""
for name in topic_manager_usernames:
self.set_user_role(name, feconf.ROLE_ID_TOPIC_MANAGER)
def set_moderators(self, moderator_usernames):
"""Sets role of given users as MODERATOR.
Args:
moderator_usernames: list(str). List of usernames.
"""
for name in moderator_usernames:
self.set_user_role(name, feconf.ROLE_ID_MODERATOR)
def set_banned_users(self, banned_usernames):
"""Sets role of given users as BANNED_USER.
Args:
banned_usernames: list(str). List of usernames.
"""
for name in banned_usernames:
self.set_user_role(name, feconf.ROLE_ID_BANNED_USER)
def set_collection_editors(self, collection_editor_usernames):
"""Sets role of given users as COLLECTION_EDITOR.
Args:
collection_editor_usernames: list(str). List of usernames.
"""
for name in collection_editor_usernames:
self.set_user_role(name, feconf.ROLE_ID_COLLECTION_EDITOR)
def get_user_id_from_email(self, email):
"""Gets the user ID corresponding to the given email.
Args:
email: str. A valid email stored in the App Engine database.
Returns:
str|None. ID of the user possessing the given email, or None if
the user does not exist.
"""
user_settings = user_services.get_user_settings_by_auth_id(
self.get_auth_id_from_email(email))
return user_settings and user_settings.user_id
@classmethod
def get_auth_id_from_email(cls, email):
"""Returns a mock auth ID corresponding to the given email.
This method can use any algorithm to produce results as long as, during
the runtime of each test case/method, it is:
1. Pure (same input always returns the same output).
2. One-to-one (no two distinct inputs return the same output).
3. A byte-string representation of an integer (integers are always valid auth IDs).
Args:
email: str. The email address of the user.
Returns:
bytes. The mock auth ID of a user possessing the given email.
"""
# Although the hash function doesn't guarantee a one-to-one mapping, in
# practice it is sufficient for our tests. We make it a positive integer
# because those are always valid auth IDs.
return python_utils.convert_to_bytes(abs(hash(email)))
def _get_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
# This swap is required to ensure that the templates are fetched from
# source directory instead of webpack_bundles since webpack_bundles is
# only produced after webpack compilation which is not performed during
# backend tests.
with self.swap(base, 'load_template', mock_load_template):
response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
if expect_errors:
self.assertTrue(response.status_int >= 400)
else:
self.assertTrue(200 <= response.status_int < 400)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(response.status_int, expected_status_int)
self.assertEqual(response.content_type, expected_content_type)
return response
def get_html_response(self, url, params=None, expected_status_int=200):
"""Get a HTML response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will
be 200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
return self._get_response(
url, 'text/html', params=params,
expected_status_int=expected_status_int)
def get_custom_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response other than HTML or JSON as a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
self.assertNotIn(
expected_content_type, ['text/html', 'application/json'])
return self._get_response(
url, expected_content_type, params=params,
expected_status_int=expected_status_int)
def get_response_without_checking_for_errors(
self, url, expected_status_int_list, params=None):
"""Get a response, transformed to a Python object and checks for a list
of status codes.
Args:
url: str. The URL to fetch the response.
expected_status_int_list: list(int). A list of integer status code
to expect.
params: dict. A dictionary that will be encoded into a query string.
Returns:
webtest.TestResponse. The test response.
"""
if params is not None:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
# This swap is required to ensure that the templates are fetched from
# source directory instead of webpack_bundles since webpack_bundles is
# only produced after webpack compilation which is not performed during
# backend tests.
with self.swap(base, 'load_template', mock_load_template):
response = self.testapp.get(url, params=params, expect_errors=True)
self.assertIn(response.status_int, expected_status_int_list)
return response
def _parse_json_response(self, json_response, expect_errors):
"""Convert a JSON server response to an object (such as a dict)."""
if expect_errors:
self.assertTrue(json_response.status_int >= 400)
else:
self.assertTrue(200 <= json_response.status_int < 400)
self.assertEqual(json_response.content_type, 'application/json')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url, params=None, expected_status_int=200):
"""Get a JSON response, transformed to a Python object."""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
json_response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def post_json(
self, url, payload, csrf_token=None, expected_status_int=200,
upload_files=None):
"""Post an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self._send_post_request(
self.testapp, url, data, expect_errors,
expected_status_int=expected_status_int, upload_files=upload_files)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
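# Illustrative sketch (not part of the framework): posting JSON to a handler
# while logged in. The handler URL and payload keys are hypothetical; CSRF
# tokens are fetched via get_new_csrf_token().
#
#     with self.login_context('[email protected]'):
#         csrf_token = self.get_new_csrf_token()
#         response = self.post_json(
#             '/some/handler/url', {'some_key': 'some_value'},
#             csrf_token=csrf_token)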
def delete_json(self, url, params='', expected_status_int=200):
"""Delete object on the server using a JSON call."""
if params:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
expect_errors = expected_status_int >= 400
json_response = self.testapp.delete(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def _send_post_request(
self, app, url, data, expect_errors, expected_status_int=200,
upload_files=None, headers=None):
"""Sends a post request with the data provided to the url specified.
Args:
app: TestApp. The WSGI application which receives the request and
produces response.
url: str. The URL to send the POST request to.
data: *. To be put in the body of the request. If it is an
iterator, it will be urlencoded. If it is a string, it will not
be encoded, but placed in the body directly. Can be a
collections.OrderedDict with webtest.forms.Upload fields
included.
expect_errors: bool. Whether errors are expected.
expected_status_int: int. The expected status code.
upload_files: list(tuple). List of
(fieldname, filename, file_content) tuples. Can also provide
just (fieldname, filename), in which case the file contents
will be read from disk.
headers: dict(str, *). Extra headers to send.
Returns:
webtest.TestResponse. The response of the POST request.
"""
# Convert the files to bytes.
if upload_files is not None:
upload_files = tuple(
tuple(python_utils.convert_to_bytes(f) for f in upload_file)
for upload_file in upload_files)
return app.post(
url, params=data, headers=headers, status=expected_status_int,
upload_files=upload_files, expect_errors=expect_errors)
def post_email(
self, recipient_email, sender_email, subject, body, html_body=None,
expect_errors=False, expected_status_int=200):
"""Post an email from the sender to the recipient.
Args:
recipient_email: str. The email of the recipient.
sender_email: str. The email of the sender.
subject: str. The subject of the email.
body: str. The body of the email.
html_body: str. The HTML body of the email.
expect_errors: bool. Whether errors are expected.
expected_status_int: int. The expected status code of the JSON
response.
Returns:
webtest.TestResponse. The response of the POST request, as
returned by _send_post_request().
"""
email = mail.EmailMessage(
sender=sender_email, to=recipient_email, subject=subject, body=body)
if html_body is not None:
email.html = html_body
mime_email = email.to_mime_message()
headers = {
'Content-Type': mime_email.get_content_type(),
}
data = mime_email.as_string()
incoming_email_url = '/_ah/mail/%s' % recipient_email
return self._send_post_request(
self.mail_testapp, incoming_email_url, data, expect_errors,
headers=headers, expected_status_int=expected_status_int)
def post_task(
self, url, payload, headers, csrf_token=None, expect_errors=False,
expected_status_int=200):
"""Posts an object to the server by JSON with the specific headers
specified; return the received object.
"""
if csrf_token:
payload['csrf_token'] = csrf_token
return self.taskqueue_testapp.post(
url, params=json.dumps(payload), headers=headers,
status=expected_status_int, expect_errors=expect_errors,
content_type='application/json')
def put_json(self, url, payload, csrf_token=None, expected_status_int=200):
"""PUT an object to the server with JSON and return the response."""
params = {'payload': json.dumps(payload)}
if csrf_token:
params['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self.testapp.put(
url, params=params, expect_errors=expect_errors)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def get_new_csrf_token(self):
"""Generates CSRF token for test."""
response = self.get_json('/csrfhandler')
return response['token']
def save_new_default_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
Returns:
Exploration. The exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category='Algebra')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def set_interaction_for_state(self, state, interaction_id):
"""Sets the interaction_id, sets the fully populated default interaction
customization arguments, and increments next_content_id_index as needed.
Args:
state: State. The state domain object to set the interaction for.
interaction_id: str. The interaction id to set. Also sets the
default customization args for the given interaction id.
"""
# We wrap next_content_id_index in a dict so that modifying it in the
# inner function modifies the value.
next_content_id_index_dict = {'value': state.next_content_id_index}
def traverse_schema_and_assign_content_ids(value, schema, contentId):
"""Generates content_id from recursively traversing the schema, and
assigning to the current value.
Args:
value: *. The current traversed value in customization
arguments.
schema: dict. The current traversed schema.
contentId: str. The content_id generated so far.
"""
is_subtitled_html_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML)
is_subtitled_unicode_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE)
if is_subtitled_html_spec or is_subtitled_unicode_spec:
value['content_id'] = '%s_%i' % (
contentId, next_content_id_index_dict['value'])
next_content_id_index_dict['value'] += 1
elif schema['type'] == schema_utils.SCHEMA_TYPE_LIST:
for x in value:
traverse_schema_and_assign_content_ids(
x, schema['items'], contentId)
elif schema['type'] == schema_utils.SCHEMA_TYPE_DICT:
for schema_property in schema['properties']:
traverse_schema_and_assign_content_ids(
value[schema_property['name']],
schema_property['schema'],
'%s_%s' % (contentId, schema_property['name']))
interaction = (
interaction_registry.Registry.get_interaction_by_id(interaction_id))
ca_specs = interaction.customization_arg_specs
customization_args = {}
for ca_spec in ca_specs:
ca_name = ca_spec.name
ca_value = ca_spec.default_value
traverse_schema_and_assign_content_ids(
ca_value, ca_spec.schema, 'ca_%s' % ca_name)
customization_args[ca_name] = {'value': ca_value}
state.update_interaction_id(interaction_id)
state.update_interaction_customization_args(customization_args)
state.update_next_content_id_index(next_content_id_index_dict['value'])
def save_new_valid_exploration(
self, exploration_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE, end_state_name=None,
interaction_id='TextInput', correctness_feedback_enabled=False):
"""Saves a new strictly-validated exploration.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
end_state_name: str. The name of the end state for the exploration.
interaction_id: str. The id of the interaction.
correctness_feedback_enabled: bool. Whether correctness feedback is
enabled for the exploration.
Returns:
Exploration. The exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category=category,
language_code=language_code)
self.set_interaction_for_state(
exploration.states[exploration.init_state_name], interaction_id)
exploration.objective = objective
exploration.correctness_feedback_enabled = correctness_feedback_enabled
# If an end state name is provided, add terminal node with that name.
if end_state_name is not None:
exploration.add_states([end_state_name])
end_state = exploration.states[end_state_name]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
# Link first state to ending state (to maintain validity).
init_state = exploration.states[exploration.init_state_name]
init_interaction = init_state.interaction
init_interaction.default_outcome.dest = end_state_name
if correctness_feedback_enabled:
init_interaction.default_outcome.labelled_as_correct = True
exp_services.save_new_exploration(owner_id, exploration)
return exploration
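# Illustrative sketch (not part of the framework): creating and publishing a
# small valid exploration in a test. The exploration id and email are
# hypothetical placeholders.
#
#     owner_id = self.get_user_id_from_email('[email protected]')
#     exploration = self.save_new_valid_exploration(
#         'exp_id_0', owner_id, end_state_name='End')
#     self.publish_exploration(owner_id, 'exp_id_0')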
def save_new_linear_exp_with_state_names_and_interactions(
self, exploration_id, owner_id, state_names, interaction_ids,
title='A title', category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new strictly-validated exploration with a sequence of states.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
state_names: list(str). The names of states to be linked
sequentially in the exploration. Must be a non-empty list and
contain no duplicates.
interaction_ids: list(str). The names of the interaction ids to be
assigned to each state. Values will be cycled, so it doesn't
need to be the same size as state_names, but it must be
non-empty.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
Returns:
Exploration. The exploration domain object.
"""
if not state_names:
raise ValueError('must provide at least one state name')
if not interaction_ids:
raise ValueError('must provide at least one interaction type')
interaction_ids = itertools.cycle(interaction_ids)
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, init_state_name=state_names[0],
category=category, objective=objective, language_code=language_code)
exploration.add_states(state_names[1:])
for from_state_name, dest_state_name in (
python_utils.ZIP(state_names[:-1], state_names[1:])):
from_state = exploration.states[from_state_name]
self.set_interaction_for_state(
from_state, python_utils.NEXT(interaction_ids))
from_state.interaction.default_outcome.dest = dest_state_name
end_state = exploration.states[state_names[-1]]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(owner_id, exploration)
return exploration
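# Illustrative sketch (not part of the framework): building a small linear
# exploration whose states are linked in order. The state names and
# interaction ids below are hypothetical examples; the interaction ids are
# cycled across the states, and the final state becomes an EndExploration.
#
#     exploration = self.save_new_linear_exp_with_state_names_and_interactions(
#         'exp_id_1', owner_id, ['Intro', 'Middle', 'End'],
#         ['TextInput', 'MultipleChoiceInput'])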
def save_new_exp_with_states_schema_v0(self, exp_id, user_id, title):
"""Saves a new default exploration with a default version 0 states dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
title: str. The title of the exploration.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title=title,
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=0,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
states=self.VERSION_0_STATES_DICT, param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'%s\'.' % title
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title=title, category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
# Create an ExplorationIssues model to match the behavior of creating
# new explorations.
stats_services.create_exp_issues_for_new_exploration(exp_id, 1)
def save_new_exp_with_custom_states_schema_version(
self, exp_id, user_id, states_dict, version):
"""Saves a new default exploration with the given version of state dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
states_dict: dict. The dict representation of all the states.
version: int. Custom states schema version.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title='title',
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=version,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME, states=states_dict,
param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'title\'.'
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title='title', category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
def save_new_exp_with_states_schema_v21(self, exp_id, user_id, title):
"""Saves a new default exploration with a default version 21 states
dictionary. Version 21 is the version in which an exploration's
training data is stored with the states dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
title: str. The title of the exploration.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title=title,
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=21,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
states=self.VERSION_21_STATE_DICT, param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'%s\'.' % title
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title=title, category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
def publish_exploration(self, owner_id, exploration_id):
"""Publish the exploration with the given exploration_id.
Args:
owner_id: str. The user_id of the owner of the exploration.
exploration_id: str. The ID of the new exploration.
"""
committer = user_services.UserActionsInfo(owner_id)
rights_manager.publish_exploration(committer, exploration_id)
def save_new_default_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default collection written by owner_id.
Args:
collection_id: str. The id of the new default collection.
owner_id: str. The user_id of the creator of the collection.
title: str. The title of the collection.
category: str. The category this collection belongs to.
objective: str. The objective of this collection.
language_code: str. The language_code of this collection.
Returns:
Collection. The collection domain object.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
collection_services.save_new_collection(owner_id, collection)
return collection
def save_new_valid_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE,
exploration_id='an_exploration_id',
end_state_name=DEFAULT_END_STATE_NAME):
"""Creates an Oppia collection and adds a node saving the exploration
details.
Args:
collection_id: str. ID for the collection to be created.
owner_id: str. The user_id of the creator of the collection.
title: str. Title for the collection.
category: str. The category of the exploration.
objective: str. Objective for the exploration.
language_code: str. The language code for the exploration.
exploration_id: str. The exploration_id for the Oppia exploration.
end_state_name: str. The name of the end state for the exploration.
Returns:
Collection. A newly-created collection containing the corresponding
exploration details.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
# Check whether exploration with given exploration_id exists or not.
exploration = (
exp_fetchers.get_exploration_by_id(exploration_id, strict=False))
if exploration is None:
exploration = self.save_new_valid_exploration(
exploration_id, owner_id, title=title, category=category,
objective=objective, end_state_name=end_state_name)
collection.add_node(exploration.id)
collection_services.save_new_collection(owner_id, collection)
return collection
def publish_collection(self, owner_id, collection_id):
"""Publish the collection with the given collection_id.
Args:
owner_id: str. The user_id of the owner of the collection.
collection_id: str. ID of the collection to be published.
"""
committer = user_services.UserActionsInfo(owner_id)
rights_manager.publish_collection(committer, collection_id)
def save_new_story(
self, story_id, owner_id, corresponding_topic_id,
title='Title', description='Description', notes='Notes',
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='title', meta_tag_content='story meta tag content'):
"""Creates an Oppia Story and saves it.
NOTE: Callers are responsible for ensuring that the
'corresponding_topic_id' provided is valid, unless a test explicitly
requires it to be invalid.
Args:
story_id: str. ID for the story to be created.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
notes: str. A set of notes that describe the characters,
main storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The url fragment of the story.
meta_tag_content: str. The meta tag content of the story.
Returns:
Story. A newly-created story.
"""
story = story_domain.Story.create_default_story(
story_id, title, description, corresponding_topic_id, url_fragment)
story.title = title
story.description = description
story.notes = notes
story.language_code = language_code
story.url_fragment = url_fragment
story.meta_tag_content = meta_tag_content
story_services.save_new_story(owner_id, story)
return story
def save_new_story_with_story_contents_schema_v1(
self, story_id, thumbnail_filename, thumbnail_bg_color,
owner_id, title, description, notes, corresponding_topic_id,
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='story-frag',
meta_tag_content='story meta tag content'):
"""Saves a new story with a default version 1 story contents data dict.
This function should only be used for creating stories in tests
involving migration of datastore stories that use an old story contents
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating stories. This is because
the latter approach would result in a story with the *current* story
contents schema version.
Args:
story_id: str. ID for the story to be created.
thumbnail_filename: str|None. Thumbnail filename for the story.
thumbnail_bg_color: str|None. Thumbnail background color for the
story.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
notes: str. A set of notes that describe the characters, main
storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The URL fragment for the story.
meta_tag_content: str. The meta tag content of the story.
"""
story_model = story_models.StoryModel(
id=story_id, thumbnail_filename=thumbnail_filename,
thumbnail_bg_color=thumbnail_bg_color, description=description,
title=title, language_code=language_code,
story_contents_schema_version=1, notes=notes,
corresponding_topic_id=corresponding_topic_id,
story_contents=self.VERSION_1_STORY_CONTENTS_DICT,
url_fragment=url_fragment, meta_tag_content=meta_tag_content)
commit_message = 'New story created with title \'%s\'.' % title
story_model.commit(
owner_id, commit_message,
[{'cmd': story_domain.CMD_CREATE_NEW, 'title': title}])
def save_new_subtopic(self, subtopic_id, owner_id, topic_id):
"""Creates an Oppia subtopic and saves it.
Args:
subtopic_id: str. ID for the subtopic to be created.
owner_id: str. The user_id of the creator of the topic.
topic_id: str. ID for the topic that the subtopic belongs to.
Returns:
SubtopicPage. A newly-created subtopic.
"""
subtopic_page = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
subtopic_id, topic_id))
subtopic_changes = [
subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_CREATE_NEW,
'topic_id': topic_id,
'subtopic_id': subtopic_id,
})
]
subtopic_page_services.save_subtopic_page(
owner_id, subtopic_page, 'Create new subtopic', subtopic_changes)
return subtopic_page
def save_new_topic(
self, topic_id, owner_id, name='topic', abbreviated_name='topic',
url_fragment='topic',
thumbnail_filename='topic.svg',
thumbnail_bg_color=(
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0]),
description='description', canonical_story_ids=None,
additional_story_ids=None, uncategorized_skill_ids=None,
subtopics=None, next_subtopic_id=0,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Creates an Oppia Topic and saves it.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
thumbnail_filename: str|None. The thumbnail filename of the topic.
thumbnail_bg_color: str|None. The thumbnail background color of the
topic.
description: str. The description of the topic.
canonical_story_ids: list(str). The list of ids of canonical stories
that are part of the topic.
additional_story_ids: list(str). The list of ids of additional
stories that are part of the topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
subtopics: list(Subtopic). The different subtopics that are part of
this topic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
Returns:
Topic. A newly-created topic.
"""
canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (canonical_story_ids or [])
]
additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (additional_story_ids or [])
]
uncategorized_skill_ids = uncategorized_skill_ids or []
subtopics = subtopics or []
topic = topic_domain.Topic(
topic_id, name, abbreviated_name, url_fragment, thumbnail_filename,
thumbnail_bg_color, description, canonical_story_references,
additional_story_references, uncategorized_skill_ids, subtopics,
feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION, next_subtopic_id,
language_code, 0, feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION,
meta_tag_content, practice_tab_is_displayed,
page_title_fragment_for_web)
topic_services.save_new_topic(owner_id, topic)
return topic
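# Illustrative sketch (not part of the framework): creating a topic with one
# canonical story reference. The ids and name are hypothetical placeholders,
# and the referenced story must be created separately (e.g. via
# save_new_story) for the reference to point at a real story.
#
#     topic = self.save_new_topic(
#         'topic_id_0', owner_id, name='Fractions',
#         canonical_story_ids=['story_id_0'], next_subtopic_id=1)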
def save_new_topic_with_subtopic_schema_v1(
self, topic_id, owner_id, name, abbreviated_name, url_fragment,
canonical_name, description, thumbnail_filename, thumbnail_bg_color,
canonical_story_references, additional_story_references,
uncategorized_skill_ids, next_subtopic_id,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Saves a new topic with a default version 1 subtopic data dict.
This function should only be used for creating topics in tests involving
migration of datastore topics that use an old subtopic schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating topics. This is because
the latter approach would result in a topic with the *current* subtopic
schema version.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
canonical_name: str. The canonical name (lowercase) of the topic.
description: str. The description of the topic.
thumbnail_filename: str. The thumbnail file name of the topic.
thumbnail_bg_color: str. The thumbnail background color of the
topic.
canonical_story_references: list(StoryReference). A set of story
reference objects representing the canonical stories that are
part of this topic.
additional_story_references: list(StoryReference). A set of story
reference objects representing the additional stories that are
part of this topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
"""
topic_rights_model = topic_models.TopicRightsModel(
id=topic_id, manager_ids=[], topic_is_published=True)
topic_model = topic_models.TopicModel(
id=topic_id, name=name, abbreviated_name=abbreviated_name,
url_fragment=url_fragment, thumbnail_filename=thumbnail_filename,
thumbnail_bg_color=thumbnail_bg_color,
canonical_name=canonical_name, description=description,
language_code=language_code,
canonical_story_references=canonical_story_references,
additional_story_references=additional_story_references,
uncategorized_skill_ids=uncategorized_skill_ids,
subtopic_schema_version=1,
story_reference_schema_version=(
feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
next_subtopic_id=next_subtopic_id,
subtopics=[self.VERSION_1_SUBTOPIC_DICT],
meta_tag_content=meta_tag_content,
practice_tab_is_displayed=practice_tab_is_displayed,
page_title_fragment_for_web=page_title_fragment_for_web)
commit_message = 'New topic created with name \'%s\'.' % name
topic_rights_model.commit(
committer_id=owner_id,
commit_message='Created new topic rights',
commit_cmds=[{'cmd': topic_domain.CMD_CREATE_NEW}])
topic_model.commit(
owner_id, commit_message,
[{'cmd': topic_domain.CMD_CREATE_NEW, 'name': name}])
def save_new_question(
self, question_id, owner_id, question_state_data,
linked_skill_ids, inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Creates an Oppia Question and saves it.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
question_state_data: State. The state data for the question.
linked_skill_ids: list(str). List of skill IDs linked to the
question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconception ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
Returns:
Question. A newly-created question.
"""
# This needs to be done because default arguments cannot be of list
# type.
question = question_domain.Question(
question_id, question_state_data,
feconf.CURRENT_STATE_SCHEMA_VERSION, language_code, 0,
linked_skill_ids, inapplicable_skill_misconception_ids or [])
question_services.add_question(owner_id, question)
return question
def save_new_question_with_state_data_schema_v27(
self, question_id, owner_id, linked_skill_ids,
inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default question with a default version 27 state data
dict.
This function should only be used for creating questions in tests
involving migration of datastore questions that use an old state data
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating questions. This is because
the latter approach would result in a question with the *current* state
data schema version.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
linked_skill_ids: list(str). The skill IDs linked to the question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconception ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
"""
# This needs to be done because default arguments cannot be of list
# type.
question_model = question_models.QuestionModel(
id=question_id, question_state_data=self.VERSION_27_STATE_DICT,
language_code=language_code, version=1,
question_state_data_schema_version=27,
linked_skill_ids=linked_skill_ids,
inapplicable_skill_misconception_ids=(
inapplicable_skill_misconception_ids or []))
question_model.commit(
owner_id, 'New question created',
[{'cmd': question_domain.CMD_CREATE_NEW}])
def save_new_question_suggestion_with_state_data_schema_v27(
self, author_id, skill_id, suggestion_id=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new question suggestion with a default version 27 state data
dict.
This function should only be used for creating question suggestions in
tests involving migration of datastore question suggestions that use an
old state data schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating questions. This is because
the latter approach would result in a question with the *current* state
data schema version.
"""
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + skill_id)
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': 27,
'language_code': language_code,
'linked_skill_ids': [skill_id],
'inapplicable_skill_misconception_ids': []
},
'skill_id': skill_id,
'skill_difficulty': 0.3
}
if suggestion_id is None:
suggestion_id = (
feedback_models.GeneralFeedbackThreadModel.
generate_new_thread_id(
feconf.ENTITY_TYPE_SKILL, skill_id))
suggestion_models.GeneralSuggestionModel.create(
feconf.SUGGESTION_TYPE_ADD_QUESTION,
feconf.ENTITY_TYPE_SKILL, skill_id, 1,
suggestion_models.STATUS_IN_REVIEW, author_id, None, change,
score_category, suggestion_id, language_code)
return suggestion_id
def save_new_skill(
self, skill_id, owner_id, description='description',
misconceptions=None, rubrics=None, skill_contents=None,
language_code=constants.DEFAULT_LANGUAGE_CODE,
prerequisite_skill_ids=None):
"""Creates an Oppia Skill and saves it.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
misconceptions: list(Misconception)|None. A list of Misconception
objects that contains the various misconceptions of the skill.
rubrics: list(Rubric)|None. A list of Rubric objects that contain
the rubric for each difficulty of the skill.
skill_contents: SkillContents|None. A SkillContents object
containing the explanation and examples of the skill.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
prerequisite_skill_ids: list(str)|None. The prerequisite skill IDs
for the skill.
Returns:
Skill. A newly-created skill.
"""
skill = (
skill_domain.Skill.create_default_skill(skill_id, description, []))
if misconceptions is not None:
skill.misconceptions = misconceptions
skill.next_misconception_id = len(misconceptions) + 1
if skill_contents is not None:
skill.skill_contents = skill_contents
if prerequisite_skill_ids is not None:
skill.prerequisite_skill_ids = prerequisite_skill_ids
if rubrics is not None:
skill.rubrics = rubrics
else:
skill.rubrics = [
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[2], ['Explanation 3']),
]
skill.language_code = language_code
skill.version = 0
skill_services.save_new_skill(owner_id, skill)
return skill
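# Illustrative sketch (not part of the framework): creating a default skill
# with auto-generated rubrics. The skill id and description below are
# hypothetical placeholders.
#
#     skill = self.save_new_skill(
#         'skill_id_0', owner_id, description='Adding fractions')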
def save_new_skill_with_defined_schema_versions(
self, skill_id, owner_id, description, next_misconception_id,
misconceptions=None, rubrics=None, skill_contents=None,
misconceptions_schema_version=1, rubric_schema_version=1,
skill_contents_schema_version=1,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default skill with the given versions for misconceptions
and skill contents.
This function should only be used for creating skills in tests involving
migration of datastore skills that use an old schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating skills. This is because
the latter approach would result in a skill with the *current* schema
version.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
next_misconception_id: int. The misconception id to be used by the
next misconception added.
misconceptions: list(Misconception.to_dict()). The list of
misconception dicts associated with the skill.
rubrics: list(Rubric.to_dict()). The list of rubric dicts associated
with the skill.
skill_contents: SkillContents.to_dict(). A SkillContents dict
containing the explanation and examples of the skill.
misconceptions_schema_version: int. The schema version for the
misconceptions object.
rubric_schema_version: int. The schema version for the rubric
object.
skill_contents_schema_version: int. The schema version for the
skill_contents object.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
"""
skill_model = skill_models.SkillModel(
id=skill_id, description=description, language_code=language_code,
misconceptions=misconceptions, rubrics=rubrics,
skill_contents=skill_contents,
next_misconception_id=next_misconception_id,
misconceptions_schema_version=misconceptions_schema_version,
rubric_schema_version=rubric_schema_version,
skill_contents_schema_version=skill_contents_schema_version,
superseding_skill_id=None, all_questions_merged=False)
skill_model.commit(
owner_id, 'New skill created.',
[{'cmd': skill_domain.CMD_CREATE_NEW}])
def _create_valid_question_data(self, default_dest_state_name):
"""Creates a valid question_data dict.
Args:
default_dest_state_name: str. The default destination state.
Returns:
dict. The default question_data dict.
"""
state = state_domain.State.create_default_state(
default_dest_state_name, is_initial_state=True)
state.update_interaction_id('TextInput')
solution_dict = {
'answer_is_exclusive': False,
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>This is a solution.</p>',
},
}
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')),
]
solution = state_domain.Solution.from_dict(
state.interaction.id, solution_dict)
state.update_interaction_solution(solution)
state.update_interaction_hints(hints_list)
state.update_interaction_customization_args({
'placeholder': {
'value': {
'content_id': 'ca_placeholder',
'unicode_str': 'Enter text here',
},
},
'rows': {'value': 1},
})
state.update_next_content_id_index(2)
state.interaction.default_outcome.labelled_as_correct = True
state.interaction.default_outcome.dest = None
return state
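# Illustrative sketch (not part of the framework): combining
# _create_valid_question_data() with save_new_question() to create a question
# linked to a skill. The ids are hypothetical placeholders.
#
#     question_state_data = self._create_valid_question_data('Default')
#     question = self.save_new_question(
#         'question_id_0', owner_id, question_state_data, ['skill_id_0'])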
class LinterTestBase(GenericTestBase):
"""Base class for linter tests."""
def setUp(self):
super(LinterTestBase, self).setUp()
self.linter_stdout = []
def mock_print(*args):
"""Mock for python_utils.PRINT. Append the values to print to
linter_stdout list.
Args:
*args: list(*). Variable length argument list of values to print
in the same line of output.
"""
self.linter_stdout.append(
' '.join(python_utils.UNICODE(arg) for arg in args))
self.print_swap = self.swap(python_utils, 'PRINT', mock_print)
def assert_same_list_elements(self, phrases, stdout):
"""Checks to see if all of the phrases appear in at least one of the
stdout outputs.
Args:
phrases: list(str). A list of phrases we are trying to find in one
of the stdout outputs. For example, python linting outputs a
success string that includes data we don't have easy access to,
like how long the test took, so we may want to search for a
substring of that success string in stdout.
stdout: list(str). A list of the output results from the method's
execution.
"""
self.assertTrue(
any(all(p in output for p in phrases) for output in stdout))
def assert_failed_messages_count(self, stdout, expected_failed_count):
"""Assert number of expected failed checks to actual number of failed
checks.
Args:
stdout: list(str). A list of linter output messages.
expected_failed_count: int. Expected number of failed messages.
"""
failed_count = sum(msg.startswith('FAILED') for msg in stdout)
self.assertEqual(failed_count, expected_failed_count)
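# Illustrative sketch (not part of the framework): a linter test built on
# LinterTestBase typically runs a lint check under self.print_swap and then
# inspects self.linter_stdout. The lint check invocation below is a
# hypothetical placeholder.
#
#     with self.print_swap:
#         run_some_lint_check()  # Hypothetical check that prints output.
#     self.assert_same_list_elements(['FAILED'], self.linter_stdout)
#     self.assert_failed_messages_count(self.linter_stdout, 1)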
class AuditJobsTestBase(GenericTestBase):
"""Base class for audit jobs tests."""
def run_job_and_check_output(
self, expected_output, sort=False, literal_eval=False):
"""Helper function to run job and compare output.
Args:
expected_output: list(*). The expected result of the job.
sort: bool. Whether to sort the outputs before comparison.
literal_eval: bool. Whether to use ast.literal_eval before
comparison.
"""
self.process_and_flush_pending_tasks()
job_id = self.job_class.create_new()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
self.job_class.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
self.process_and_flush_pending_tasks()
actual_output = self.job_class.get_output(job_id)
if literal_eval:
actual_output_dict = {}
expected_output_dict = {}
for item in (ast.literal_eval(value) for value in actual_output):
value = item[1]
if isinstance(value, list):
value = sorted(value)
actual_output_dict[item[0]] = value
for item in (ast.literal_eval(value) for value in expected_output):
value = item[1]
if isinstance(value, list):
value = sorted(value)
expected_output_dict[item[0]] = value
self.assertItemsEqual(actual_output_dict, expected_output_dict)
for key in actual_output_dict:
self.assertEqual(
actual_output_dict[key], expected_output_dict[key])
elif sort:
self.assertEqual(sorted(actual_output), sorted(expected_output))
else:
self.assertEqual(actual_output, expected_output)
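# Illustrative sketch (not part of the framework): an audit job test sets
# self.job_class to the one-off job under audit (hypothetical class name
# below) and then compares the job output against the expected output.
#
#     self.job_class = SomeModelAuditOneOffJob  # Hypothetical audit job.
#     self.run_job_and_check_output(expected_output, sort=True)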
class EmailMessageMock(python_utils.OBJECT):
"""Mock for core.platform.models email services messages."""
def __init__(
self, sender_email, recipient_email, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Inits a mock email message with all the necessary data.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_email: str. The email address of the recipient. Must be
utf-8.
subject: str. The subject line of the email. Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Emails
must be utf-8.
reply_to: str|None. Optional argument. Reply address formatted like
'reply+<reply_id>@<incoming_email_domain_name>', where reply_id is
the unique id of the sender.
recipient_variables: dict|None. Optional argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'[email protected]': {'first': 'Bob', 'id': 1},
'[email protected]': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
"""
self.sender = sender_email
self.to = recipient_email
self.subject = subject
self.body = plaintext_body
self.html = html_body
self.bcc = bcc
self.reply_to = reply_to
self.recipient_variables = recipient_variables
class GenericEmailTestBase(GenericTestBase):
"""Base class for tests requiring email services."""
emails_dict = collections.defaultdict(list)
def run(self, result=None):
"""Adds a context swap on top of the test_utils.run() method so that
test classes extending GenericEmailTestBase will automatically have a
mailgun api key, mailgun domain name and mocked version of
send_email_to_recipients().
"""
with self.swap(
email_services, 'send_email_to_recipients',
self._send_email_to_recipients):
super(EmailTestBase, self).run(result=result)
def setUp(self):
super(GenericEmailTestBase, self).setUp()
self._wipe_emails_dict()
def _wipe_emails_dict(self):
"""Reset email dictionary for a new test."""
self.emails_dict = collections.defaultdict(list)
def _send_email_to_recipients(
self, sender_email, recipient_emails, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Mocks sending an email to each email in recipient_emails.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_emails: list(str). The email addresses of the recipients.
Must be utf-8.
subject: str. The subject line of the email. Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Must be
utf-8.
reply_to: str|None. Optional argument. Reply address formatted like
'reply+<reply_id>@<incoming_email_domain_name>', where reply_id is
the unique id of the sender.
recipient_variables: dict|None. Optional argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'[email protected]': {'first': 'Bob', 'id': 1},
'[email protected]': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
Returns:
bool. Whether the emails are sent successfully.
"""
bcc_emails = None
if bcc:
bcc_emails = bcc[0] if len(bcc) == 1 else bcc
new_email = EmailMessageMock(
sender_email, recipient_emails, subject, plaintext_body, html_body,
bcc=bcc_emails, reply_to=(reply_to if reply_to else None),
recipient_variables=(
recipient_variables if recipient_variables else None))
for recipient_email in recipient_emails:
self.emails_dict[recipient_email].append(new_email)
return True
def _get_sent_email_messages(self, to):
"""Gets messages to a single recipient email.
Args:
to: str. The recipient email address.
Returns:
list(EmailMessageMock). The list of email messages corresponding to
that recipient email.
"""
return self.emails_dict[to] if to in self.emails_dict else []
def _get_all_sent_email_messages(self):
"""Gets the entire messages dictionary.
Returns:
dict(str, list(EmailMessageMock)). The dict keyed by recipient
email. Each value contains a list of EmailMessageMock objects
corresponding to that recipient email; in other words, all
individual emails sent to that specific recipient email.
"""
return self.emails_dict
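# Illustrative sketch (not part of the framework): asserting on emails
# captured by the mocked send_email_to_recipients(). The recipient address
# and subject text are hypothetical placeholders.
#
#     messages = self._get_sent_email_messages('[email protected]')
#     self.assertEqual(len(messages), 1)
#     self.assertIn('expected subject text', messages[0].subject)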
EmailTestBase = GenericEmailTestBase
class ClassifierTestBase(GenericEmailTestBase):
"""Base class for classifier test classes that need common functions
for related to reading classifier data and mocking the flow of the
storing the trained models through post request.
This class is derived from GenericEmailTestBase because the
TrainedClassifierHandlerTests test suite requires email services test
functions in addition to the classifier functions defined below.
"""
def post_blob(self, url, payload, expected_status_int=200):
"""Post a BLOB object to the server; return the received object.
Note that this method should only be used for
classifier.TrainedClassifierHandler handler and for no one else. The
reason being, we don't have any general mechanism for security for
transferring binary data. TrainedClassifierHandler implements a
specific mechanism which is restricted to the handler.
Args:
url: str. The URL to which BLOB object in payload should be sent
through a post request.
payload: bytes. Binary data which needs to be sent.
expected_status_int: int. The status expected as a response of post
request.
Returns:
dict. Parsed JSON response received upon invoking the post request.
"""
data = payload
expect_errors = False
if expected_status_int >= 400:
expect_errors = True
response = self._send_post_request(
self.testapp, url, data,
expect_errors, expected_status_int=expected_status_int,
headers={b'content-type': b'application/octet-stream'})
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
# Reference URL:
# https://github.com/Pylons/webtest/blob/
# bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119 .
self.assertEqual(response.status_int, expected_status_int)
return self._parse_json_response(response, expect_errors)
def _get_classifier_data_from_classifier_training_job(
self, classifier_training_job):
"""Retrieves classifier training job from GCS using metadata stored in
classifier_training_job.
Args:
classifier_training_job: ClassifierTrainingJob. Domain object
containing metadata of the training job which is used to
retrieve the trained model.
Returns:
FrozenModel. Protobuf object containing classifier data.
"""
filename = classifier_training_job.classifier_data_filename
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(
feconf.ENTITY_TYPE_EXPLORATION, classifier_training_job.exp_id))
classifier_data = utils.decompress_from_zlib(fs.get(filename))
classifier_data_proto = text_classifier_pb2.TextClassifierFrozenModel()
classifier_data_proto.ParseFromString(classifier_data)
return classifier_data_proto
class FunctionWrapper(python_utils.OBJECT):
"""A utility for making function wrappers. Create a subclass and override
    either or both of the pre_call_hook and post_call_hook methods. See these
methods for more info.
"""
def __init__(self, func):
"""Creates a new FunctionWrapper instance.
Args:
func: a callable, or data descriptor. If it's a descriptor, then
__get__ should return a bound method. For example, func can be
a function, a method, a static or class method, but not a
@property.
"""
self._func = func
self._instance = None
def __call__(self, *args, **kwargs):
"""Overrides the call method for the function to call pre_call_hook
method which would be called before the function is executed and
post_call_hook which would be called after the function is executed.
"""
if self._instance is not None:
args = [self._instance] + list(args)
args_dict = inspect.getcallargs(self._func, *args, **kwargs)
self.pre_call_hook(args_dict)
result = self._func(*args, **kwargs)
self.post_call_hook(args_dict, result)
return result
def __get__(self, instance, owner):
# We have to implement __get__ because otherwise, we don't have a chance
# to bind to the instance self._func was bound to. See the following SO
# answer: https://stackoverflow.com/a/22555978/675311
self._instance = instance
return self
def pre_call_hook(self, args):
"""Override this to do tasks that should be executed before the actual
function call.
Args:
args: list(*). Set of arguments that the function accepts.
"""
pass
def post_call_hook(self, args, result):
"""Override this to do tasks that should be executed after the actual
function call.
Args:
args: list(*). Set of arguments that the function accepts.
result: *. Result returned from the function.
"""
pass
class CallCounter(FunctionWrapper):
"""A function wrapper that keeps track of how often the function is called.
Note that the counter is incremented before each call, so it is also
increased when the function raises an exception.
"""
def __init__(self, f):
"""Counts the number of times the given function has been called. See
FunctionWrapper for arguments.
"""
super(CallCounter, self).__init__(f)
self._times_called = 0
@property
def times_called(self):
"""Property that returns the number of times the wrapped function has
been called.
Returns:
int. The number of times the wrapped function has been called.
"""
return self._times_called
def pre_call_hook(self, args):
"""Method that is called before each function call to increment the
counter tracking the number of times a function is called. This will
also be called even when the function raises an exception.
Args:
args: list(*). Set of arguments that the function accepts.
"""
self._times_called += 1
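# Illustrative sketch (editor's addition): wrapping a plain function with
# CallCounter. The function `greet` below is hypothetical.
#
#     def greet(name):
#         return 'Hello, %s' % name
#
#     counted_greet = CallCounter(greet)
#     counted_greet('world')
#     assert counted_greet.times_called == 1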
class FailingFunction(FunctionWrapper):
"""A function wrapper that makes a function fail, raising a given exception.
It can be set to succeed after a given number of calls.
"""
INFINITY = 'infinity'
def __init__(self, f, exception, num_tries_before_success):
"""Create a new Failing function.
Args:
f: func. See FunctionWrapper.
exception: Exception. The exception to be raised.
            num_tries_before_success: int. The number of times to raise an
                exception before a call succeeds. If this is 0, all calls
                succeed; if it is FailingFunction.INFINITY, all calls fail.
"""
super(FailingFunction, self).__init__(f)
self._exception = exception
self._num_tries_before_success = num_tries_before_success
self._always_fail = (
self._num_tries_before_success == FailingFunction.INFINITY)
self._times_called = 0
        if not (self._always_fail or self._num_tries_before_success >= 0):
raise ValueError(
'num_tries_before_success should either be an '
'integer greater than or equal to 0, '
'or FailingFunction.INFINITY')
def pre_call_hook(self, args):
"""Method that is called each time before the actual function call to
check if the exception is to be raised based on the number of tries
before success.
Args:
args: list(*). Set of arguments this function accepts.
"""
self._times_called += 1
        call_should_fail = (
            self._always_fail or
            self._num_tries_before_success >= self._times_called)
        if call_should_fail:
            raise self._exception
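# Illustrative sketch (editor's addition): making a hypothetical function fail
# twice before succeeding, e.g. to exercise retry logic in a test.
#
#     def fetch():
#         return 'ok'
#
#     flaky_fetch = FailingFunction(fetch, IOError('boom'), 2)
#     # The first two calls raise IOError('boom'); the third returns 'ok'.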
|
[] |
[] |
[
"USER_IS_ADMIN",
"USER_EMAIL",
"USER_ID"
] |
[]
|
["USER_IS_ADMIN", "USER_EMAIL", "USER_ID"]
|
python
| 3 | 0 | |
cmd/freetsd-meta/run/config.go
|
package run
import (
"fmt"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"github.com/BurntSushi/toml"
"github.com/freetsdb/freetsdb/logger"
"github.com/freetsdb/freetsdb/pkg/tlsconfig"
"github.com/freetsdb/freetsdb/services/meta"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
)
const (
	// DefaultBindAddress is the default address for raft, snapshot, etc.
DefaultBindAddress = ":8088"
// DefaultHostname is the default hostname used if we are unable to determine
// the hostname from the system
DefaultHostname = "localhost"
)
// Config represents the configuration format for the freetsd-meta binary.
type Config struct {
Meta *meta.Config `toml:"meta"`
Logging logger.Config `toml:"logging"`
// BindAddress is the address that all TCP services use (Raft, Snapshot, etc.)
BindAddress string `toml:"bind-address"`
// Hostname is the hostname portion to use when registering local
// addresses. This hostname must be resolvable from other nodes.
Hostname string `toml:"hostname"`
// TLS provides configuration options for all https endpoints.
TLS tlsconfig.Config `toml:"tls"`
}
// NewConfig returns an instance of Config with reasonable defaults.
func NewConfig() *Config {
c := &Config{}
c.Meta = meta.NewConfig()
c.Logging = logger.NewConfig()
c.BindAddress = DefaultBindAddress
return c
}
// NewDemoConfig returns the config that runs when no config is specified.
func NewDemoConfig() (*Config, error) {
c := NewConfig()
var homeDir string
// By default, store meta and data files in current users home directory
u, err := user.Current()
if err == nil {
homeDir = u.HomeDir
} else if os.Getenv("HOME") != "" {
homeDir = os.Getenv("HOME")
} else {
return nil, fmt.Errorf("failed to determine current user for storage")
}
c.Meta.Dir = filepath.Join(homeDir, ".freetsdb/meta")
return c, nil
}
// FromTomlFile loads the config from a TOML file.
func (c *Config) FromTomlFile(fpath string) error {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return err
}
// Handle any potential Byte-Order-Marks that may be in the config file.
// This is for Windows compatibility only.
bom := unicode.BOMOverride(transform.Nop)
bs, _, err = transform.Bytes(bom, bs)
if err != nil {
return err
}
return c.FromToml(string(bs))
}
// FromToml loads the config from TOML.
func (c *Config) FromToml(input string) error {
_, err := toml.Decode(input, c)
return err
}
// Validate returns an error if the config is invalid.
func (c *Config) Validate() error {
if err := c.Meta.Validate(); err != nil {
return err
}
if err := c.TLS.Validate(); err != nil {
return err
}
return nil
}
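// Illustrative sketch (editor's addition): a minimal TOML file accepted by
// FromTomlFile. The top-level keys and section names follow the struct tags on
// Config above; the "dir" key inside [meta] is an assumption based on the
// c.Meta.Dir field set in NewDemoConfig.
//
//	bind-address = ":8088"
//	hostname = "localhost"
//
//	[meta]
//	dir = "/var/lib/freetsdb/meta"
//
//	[logging]
//
//	[tls]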
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
tj-api.py
|
import requests
import os
def setup_tj_api():
    # Populate module-level settings used by the helper functions below.
    global tj_token, tj_api_address, device_token_header_name
    tj_token = os.environ['TJ_TOKEN']
    tj_api_address = 'https://api.tjournal.ru/v1.6/'
    device_token_header_name = 'X-Device-Token'
def add_comment(entry_id, attachments):
response = requests.post(tj_api_address + 'comment/add',
data = {'id': entry_id, 'reply_to':0, 'text':'[я](https://ya.ru/) 😡', 'attachments':attachments},
headers = {device_token_header_name: tj_token})
if response.status_code != 200:
print (response.status_code)
print (response.text)
raise BaseException("error add comment")
def upload_attachment(url):
response = requests.post(tj_api_address + 'uploader/extract',
data = {'url': url},
headers = {device_token_header_name: tj_token})
if response.status_code != 200:
print (response.status_code)
print (response.text)
raise BaseException("error upload attachment")
attachment_fixed_format = str(response.json()['result']).replace('\'','"')
return attachment_fixed_format
def create_entry(attachment):
entry = {
'title': 'Тестовый заголовок',
'subsite_id': 237791,
'text':'текст текст текст текст',
'attachments':attachment}
response = requests.post(tj_api_address + 'entry/create',
data = entry,
headers = {device_token_header_name: tj_token})
if response.status_code != 200:
print (response.status_code)
print (response.text)
raise BaseException("error create entry")
#print(response.json()['result'])
return response.json()['result']['id']
def get_coub():
coub_api_address = 'https://coub.com/api/v2/search/coubs?q=animals&order_by=newest_popular'
response = requests.get(coub_api_address)
if response.status_code != 200:
print (response.status_code)
print (response.text)
raise BaseException("get coub error")
return "https://coub.com/view/" + response.json()['coubs'][0]['permalink']
#setup_tj_api()
#attachment = upload_attachment('https://coub.com/view/1mnj6k')
#entry_id = create_entry(attachment)
#add_comment(entry_id, attachment)
#print("entry ID -", entry_id)
coub_url = get_coub()
print(coub_url)
|
[] |
[] |
[
"TJ_TOKEN"
] |
[]
|
["TJ_TOKEN"]
|
python
| 1 | 0 | |
actions/qemuimg2disk/v1/partprobe.go
|
package main
import (
"fmt"
"os"
"golang.org/x/sys/unix"
)
func main() {
disk := os.Getenv("DEST_DISK")
	fileOut, err := os.OpenFile(disk, os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		fmt.Printf("unable to open the target disk %s: %v\n", disk, err)
		return
	}
	// Close the device once the re-probe is done.
	defer func() { _ = fileOut.Close() }()
// Do the equivalent of partprobe on the device
if err := fileOut.Sync(); err != nil {
fmt.Printf("failed to sync the block device: %v\n", err)
return
}
if err := unix.IoctlSetInt(int(fileOut.Fd()), unix.BLKRRPART, 0); err != nil {
fmt.Printf("error re-probing the partitions for the specified device: %v\n", err)
return
}
}
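// Illustrative usage (editor's addition): the target device is taken from the
// DEST_DISK environment variable, e.g.
//
//	DEST_DISK=/dev/sdb ./partprobe
//
// The device path above is an example only.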
|
[
"\"DEST_DISK\""
] |
[] |
[
"DEST_DISK"
] |
[]
|
["DEST_DISK"]
|
go
| 1 | 0 | |
pictor/wsgi.py
|
"""
WSGI config for pictor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pictor.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
scripts/insert2db/reputation/plugins/dshield_medium.py
|
import sys
import os
import configparser
import requests
import pandas as pd
import hashlib
from io import StringIO
from datetime import datetime, timezone
## Django Setup
import django
import pymysql
pymysql.install_as_MySQLdb()
conffile = os.path.join(os.path.dirname(__file__), "../../conf/insert2db.conf")
conf = configparser.ConfigParser()
conf.read(conffile)
sys.path.append(conf.get('exist', 'syspath'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intelligence.settings')
django.setup()
from apps.reputation.models import blacklist
import django.utils.timezone as tzone
from django.db import IntegrityError
## Logger Setup
from logging import getLogger, DEBUG, NullHandler
logger = getLogger(__name__)
logger.addHandler(NullHandler())
logger.setLevel(DEBUG)
logger.propagate = True
DataDir = os.path.join(os.path.dirname(__file__), '../data/')
class Tracker():
def __init__(self):
self.name = 'Dshield_Medium'
self.ID = 222
self.URL = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt'
self.DataFilePath = DataDir + 'dshield/suspiciousdomains_Medium.txt'
self.header = [
'domain',
]
def cmpFiles(self, oldfile, newtext):
diffline = ''
if not os.path.exists(oldfile):
f = open(oldfile, 'w')
f.close()
oldsets = set(open(oldfile).readlines())
newsets = set(newtext.replace('\r\n','\n').splitlines(True))
results = newsets.difference(oldsets)
for result in results:
diffline += result
return diffline[:-1]
def delComment(self, s):
result = ''
for line in s.splitlines(True):
if not line.startswith('#') \
and line != "Site\n":
result += line
return result
def makeDataframe(self):
df = pd.DataFrame()
newline = ''
try:
res = requests.get(self.URL)
if res.status_code != 200:
return df
newline = self.cmpFiles(self.DataFilePath, res.text)
newline = self.delComment(newline)
except Exception as e:
logger.error(e)
if not newline == '':
open(self.DataFilePath, 'w').write(res.text)
df = pd.read_csv(StringIO(newline), names=self.header)
return df
def parse(self):
logger.info("start parsing: %s", self.name)
df = self.makeDataframe()
queries = []
if not df.empty:
for i, v in df.iterrows():
line = str(self.ID) + ","
line += str(v.values)
md5 = hashlib.md5(line.encode('utf-8')).hexdigest()
try:
query = blacklist(
id = md5,
domain = v.domain,
datetime = tzone.now(),
source = self.ID,
referrer = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt',
)
except Exception as e:
logger.error("%s: %s", e, line)
queries.append(query)
else:
logger.info("no update")
logger.info("done parsing: %s, %s queries were parsed", self.name, len(queries))
return queries
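# Illustrative sketch (editor's addition): how this plugin is typically driven.
# How the calling script persists the parsed rows is an assumption; the snippet
# below uses Django's standard bulk_create on the blacklist model imported above.
#
#     tracker = Tracker()
#     queries = tracker.parse()
#     if queries:
#         try:
#             blacklist.objects.bulk_create(queries)
#         except IntegrityError as e:
#             logger.error(e)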
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
examples/demo/charts/wsgi.py
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "charts.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tractags/web_ui.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Alec Thomas <[email protected]>
# Copyright (C) 2008 Dmitry Dianov
# Copyright (C) 2011 Itamar Ostricher <[email protected]>
# Copyright (C) 2011-2012 Ryan J Ollos <[email protected]>
# Copyright (C) 2011-2014 Steffen Hoffmann <[email protected]>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import re
import math
from genshi.builder import tag as builder
from genshi.core import Markup
from genshi.filters.transform import Transformer
from trac import __version__ as trac_version
from trac.config import BoolOption, ListOption, Option
from trac.core import implements
from trac.mimeview import Context
from trac.resource import Resource, ResourceSystem, get_resource_name
from trac.resource import get_resource_url
from trac.timeline.api import ITimelineEventProvider
from trac.util import to_unicode
from trac.util.text import CRLF, javascript_quote, unicode_quote_plus
from trac.web import IRequestFilter
from trac.web.api import IRequestHandler, ITemplateStreamFilter
from trac.web.chrome import Chrome, INavigationContributor
from trac.web.chrome import add_ctxtnav, add_script, add_stylesheet
from trac.web.chrome import add_warning
from trac.wiki.formatter import Formatter
from trac.wiki.model import WikiPage
from tractags.api import REALM_RE, TagSystem, _, tag_, tagn_
from tractags.macros import TagTemplateProvider, TagWikiMacros, as_int
from tractags.macros import query_realms
from tractags.model import tag_changes
from tractags.query import InvalidQuery, Query
from tractags.util import split_into_tags
class TagInputAutoComplete(TagTemplateProvider):
"""[opt] Provides auto-complete functionality for tag input fields.
This module is based on KeywordSuggestModule from KeywordSuggestPlugin
0.5dev.
"""
implements (IRequestFilter, ITemplateStreamFilter)
field_opt = Option('tags', 'complete_field', 'keywords',
"Ticket field to which a drop-down tag list should be attached.")
help_opt = Option('tags','ticket_help', None,
"If specified, 'keywords' label on ticket view will be turned into a "
"link to this URL.")
helpnewwindow_opt = BoolOption('tags','ticket_help_newwindow', False,
"If true and keywords_help specified, wiki page will open in a new "
"window. Default is false.")
# Needs to be reimplemented, refs th:#8141.
#mustmatch = BoolOption('tags', 'complete_mustmatch', False,
# "If true, input fields accept values from the word list only.")
matchcontains_opt = BoolOption('tags','complete_matchcontains', True,
"Include partial matches in suggestion list. Default is true.")
separator_opt = Option('tags','separator', ' ',
"Character(s) to use as separators between tags. Default is a "
"single whitespace.")
sticky_tags_opt = ListOption('tags', 'complete_sticky_tags', '', ',',
doc="A list of comma separated values available for input.")
def __init__(self):
self.tags_enabled = self.env.is_enabled(TagSystem)
@property
def separator(self):
return self.separator_opt.strip('\'') or ' '
# IRequestFilter methods
def pre_process_request(self, req, handler):
return handler
def post_process_request(self, req, template, data, content_type):
if template is not None and \
(req.path_info.startswith('/ticket/') or
req.path_info.startswith('/newticket') or
(self.tags_enabled and req.path_info.startswith('/wiki/'))):
# In Trac 1.0 and later, jQuery-UI is included from the core.
if trac_version >= '1.0':
Chrome(self.env).add_jquery_ui(req)
else:
add_script(req, 'tags/js/jquery-ui-1.8.16.custom.min.js')
add_stylesheet(req, 'tags/css/jquery-ui-1.8.16.custom.css')
return template, data, content_type
# ITemplateStreamFilter method
def filter_stream(self, req, method, filename, stream, data):
if not (filename == 'ticket.html' or
(self.tags_enabled and filename == 'wiki_edit.html')):
return stream
keywords = self._get_keywords_string(req)
if not keywords:
self.log.debug(
"No keywords found. TagInputAutoComplete is disabled.")
return stream
matchfromstart = '"^" +'
if self.matchcontains_opt:
matchfromstart = ''
js = """
jQuery(document).ready(function($) {
var keywords = [ %(keywords)s ]
var sep = '%(separator)s'.trim() + ' '
function split( val ) {
return val.split( /%(separator)s\s*|\s+/ );
}
function extractLast( term ) {
return split( term ).pop();
}
$('%(field)s')
// don't navigate away from field on tab when selecting
// an item
.bind( "keydown", function( event ) {
if ( event.keyCode === $.ui.keyCode.TAB &&
$( this ).data( "autocomplete" ).menu.active ) {
event.preventDefault();
}
})
.autocomplete({
delay: 0,
minLength: 0,
source: function( request, response ) {
// delegate back to autocomplete, but extract
// the last term
response( $.ui.autocomplete.filter(
keywords, extractLast( request.term ) ) );
},
focus: function() {
// prevent value inserted on focus
return false;
},
select: function( event, ui ) {
var terms = split( this.value );
// remove the current input
terms.pop();
// add the selected item
terms.push( ui.item.value );
// add placeholder to get the comma-and-space at
// the end
terms.push( "" );
this.value = terms.join( sep );
return false;
}
});
});"""
# Inject transient part of JavaScript into ticket.html template.
if req.path_info.startswith('/ticket/') or \
req.path_info.startswith('/newticket'):
js_ticket = js % {'field': '#field-' + self.field_opt,
'keywords': keywords,
'matchfromstart': matchfromstart,
'separator': self.separator}
stream = stream | Transformer('.//head').append\
(builder.script(Markup(js_ticket),
type='text/javascript'))
# Turn keywords field label into link to an arbitrary resource.
if self.help_opt:
link = self._get_help_link(req)
if self.helpnewwindow_opt:
link = builder.a(href=link, target='blank')
else:
link = builder.a(href=link)
stream = stream | Transformer\
('//label[@for="field-keywords"]/text()').wrap(link)
# Inject transient part of JavaScript into wiki.html template.
elif self.tags_enabled and req.path_info.startswith('/wiki/'):
js_wiki = js % {'field': '#tags',
'keywords': keywords,
'matchfromstart': matchfromstart,
'separator': self.separator}
stream = stream | Transformer('.//head').append \
(builder.script(Markup(js_wiki),
type='text/javascript'))
return stream
# Private methods
def _get_keywords_string(self, req):
keywords = set(self.sticky_tags_opt) # prevent duplicates
if self.tags_enabled:
# Use TagsPlugin >= 0.7 performance-enhanced API.
tags = TagSystem(self.env).get_all_tags(req)
keywords.update(tags.keys())
if keywords:
keywords = sorted(keywords)
keywords = ','.join(("'%s'" % javascript_quote(_keyword)
for _keyword in keywords))
else:
keywords = ''
return keywords
def _get_help_link(self, req):
link = realm = resource_id = None
if self.help_opt.startswith('/'):
# Assume valid URL to arbitrary resource inside
# of the current Trac environment.
link = req.href(self.help_opt)
if not link and ':' in self.help_opt:
realm, resource_id = self.help_opt.split(':', 1)
# Validate realm-like prefix against resource realm list,
# but exclude 'wiki' to allow deferred page creation.
rsys = ResourceSystem(self.env)
if realm in set(rsys.get_known_realms()) - set('wiki'):
mgr = rsys.get_resource_manager(realm)
# Handle optional IResourceManager method gracefully.
try:
if mgr.resource_exists(Resource(realm, resource_id)):
link = mgr.get_resource_url(resource_id, req.href)
except AttributeError:
# Assume generic resource URL build rule.
link = req.href(realm, resource_id)
if not link:
if not resource_id:
# Assume wiki page name for backwards-compatibility.
resource_id = self.help_opt
# Preserve anchor without 'path_safe' arg (since Trac 0.12.2dev).
if '#' in resource_id:
path, anchor = resource_id.split('#', 1)
else:
anchor = None
path = resource_id
if hasattr(unicode_quote_plus, "safe"):
# Use method for query string quoting (since Trac 0.13dev).
anchor = unicode_quote_plus(anchor, safe="?!~*'()")
else:
anchor = unicode_quote_plus(anchor)
link = '#'.join([req.href.wiki(path), anchor])
return link
class TagRequestHandler(TagTemplateProvider):
"""[main] Implements the /tags handler."""
implements(INavigationContributor, IRequestHandler)
cloud_mincount = Option('tags', 'cloud_mincount', 1,
doc="""Integer threshold to hide tags with smaller count.""")
default_cols = Option('tags', 'default_table_cols', 'id|description|tags',
doc="""Select columns and order for table format using a "|"-separated
list of column names.
Supported columns: realm, id, description, tags
""")
default_format = Option('tags', 'default_format', 'oldlist',
doc="""Set the default format for the handler of the `/tags` domain.
|| `oldlist` (default value) || The original format with a
bulleted-list of "linked-id description (tags)" ||
|| `compact` || bulleted-list of "linked-description" ||
|| `table` || table... (see corresponding column option) ||
""")
exclude_realms = ListOption('tags', 'exclude_realms', [],
doc="""Comma-separated list of realms to exclude from tags queries
by default, unless specifically included using "realm:realm-name"
in a query.""")
# INavigationContributor methods
def get_active_navigation_item(self, req):
if 'TAGS_VIEW' in req.perm:
return 'tags'
def get_navigation_items(self, req):
if 'TAGS_VIEW' in req.perm:
label = tag_("Tags")
yield ('mainnav', 'tags',
builder.a(label, href=req.href.tags(), accesskey='T'))
# IRequestHandler methods
def match_request(self, req):
return req.path_info.startswith('/tags')
def process_request(self, req):
req.perm.require('TAGS_VIEW')
match = re.match(r'/tags/?(.*)', req.path_info)
tag_id = match.group(1) and match.group(1) or None
query = req.args.get('q', '')
# Consider only providers, that are permitted for display.
tag_system = TagSystem(self.env)
all_realms = tag_system.get_taggable_realms(req.perm)
if not (tag_id or query) or [r for r in all_realms
if r in req.args] == []:
for realm in all_realms:
if not realm in self.exclude_realms:
req.args[realm] = 'on'
checked_realms = [r for r in all_realms if r in req.args]
if query:
# Add permitted realms from query expression.
checked_realms.extend(query_realms(query, all_realms))
realm_args = dict(zip([r for r in checked_realms],
['on' for r in checked_realms]))
# Switch between single tag and tag query expression mode.
if tag_id and not re.match(r"""(['"]?)(\S+)\1$""", tag_id, re.UNICODE):
# Convert complex, invalid tag ID's --> query expression.
req.redirect(req.href.tags(realm_args, q=tag_id))
elif query:
single_page = re.match(r"""(['"]?)(\S+)\1$""", query, re.UNICODE)
if single_page:
# Convert simple query --> single tag.
req.redirect(req.href.tags(single_page.group(2), realm_args))
data = dict(page_title=_("Tags"), checked_realms=checked_realms)
# Populate the TagsQuery form field.
data['tag_query'] = tag_id and tag_id or query
data['tag_realms'] = list(dict(name=realm,
checked=realm in checked_realms)
for realm in all_realms)
if tag_id:
data['tag_page'] = WikiPage(self.env,
tag_system.wiki_page_prefix + tag_id)
if query or tag_id:
macro = 'ListTagged'
# TRANSLATOR: The meta-nav link label.
add_ctxtnav(req, _("Back to Cloud"), req.href.tags())
args = "%s,format=%s,cols=%s" % \
(tag_id and tag_id or query, self.default_format,
self.default_cols)
data['mincount'] = None
else:
macro = 'TagCloud'
mincount = as_int(req.args.get('mincount', None),
self.cloud_mincount)
args = mincount and "mincount=%s" % mincount or None
data['mincount'] = mincount
formatter = Formatter(self.env, Context.from_request(req,
Resource('tag')))
self.env.log.debug("%s macro arguments: %s", macro,
args and args or '(none)')
macros = TagWikiMacros(self.env)
try:
# Query string without realm throws 'NotImplementedError'.
data['tag_body'] = checked_realms and \
macros.expand_macro(formatter, macro, args,
realms=checked_realms) \
or ''
except InvalidQuery, e:
data['tag_query_error'] = to_unicode(e)
data['tag_body'] = macros.expand_macro(formatter, 'TagCloud', '')
add_stylesheet(req, 'tags/css/tractags.css')
return 'tag_view.html', data, None
class TagTimelineEventFilter(TagTemplateProvider):
"""[opt] Filters timeline events by tags associated with listed resources
mentioned in the event.
"""
implements(IRequestFilter, ITemplateStreamFilter)
key = 'tag_query'
# ITemplateStreamFilter method
def filter_stream(self, req, method, filename, stream, data):
if req.path_info == '/timeline':
insert = builder(Markup('<br />'), tag_("matching tags "),
builder.input(type='text', name=self.key,
value=data.get(self.key)))
xpath = '//form[@id="prefs"]/div[1]'
stream = stream | Transformer(xpath).append(insert)
return stream
# IRequestFilter methods
def pre_process_request(self, req, handler):
return handler
def post_process_request(self, req, template, data, content_type):
if data and req.path_info == '/timeline' and \
'TAGS_VIEW' in req.perm(Resource('tags')):
def realm_handler(_, node, context):
return query.match(node, [context.realm])
query_str = req.args.get(self.key)
if query_str is None and req.args.get('format') != 'rss':
query_str = req.session.get('timeline.%s' % self.key)
else:
query_str = (query_str or '').strip()
# Record tag query expression between visits.
req.session['timeline.%s' % self.key] = query_str
if data.get('events') and query_str:
tag_system = TagSystem(self.env)
try:
query = Query(query_str,
attribute_handlers=dict(realm=realm_handler)
)
except InvalidQuery, e:
add_warning(req, _("Tag query syntax error: %s"
% to_unicode(e)))
else:
all_realms = tag_system.get_taggable_realms(req.perm)
query_realms = set()
for m in REALM_RE.finditer(query.as_string()):
query_realms.add(m.group(1))
# Don't care about resources from non-taggable realms.
realms = not query_realms and all_realms or \
query_realms.intersection(all_realms)
events = []
self.log.debug("Filtering timeline events by tags '%s'",
query_str)
for event in data['events']:
resource = event['data'][0]
if resource.realm in realms:
# Shortcut view permission checks here.
tags = tag_system.get_tags(None, resource)
if query(tags, context=resource):
events.append(event)
# Overwrite with filtered list.
data['events'] = events
if query_str:
# Add current value for next form rendering.
data[self.key] = query_str
elif self.key in req.session:
del req.session[self.key]
return template, data, content_type
class TagTimelineEventProvider(TagTemplateProvider):
"""[opt] Delivers recorded tag change events to timeline view."""
implements(ITimelineEventProvider)
# ITimelineEventProvider methods
def get_timeline_filters(self, req):
if 'TAGS_VIEW' in req.perm('tags'):
yield ('tags', _("Tag changes"))
def get_timeline_events(self, req, start, stop, filters):
if 'tags' in filters:
tags_realm = Resource('tags')
if not 'TAGS_VIEW' in req.perm(tags_realm):
return
add_stylesheet(req, 'tags/css/tractags.css')
for time, author, tagspace, name, old_tags, new_tags in \
tag_changes(self.env, None, start, stop):
tagged_resource = Resource(tagspace, name)
if 'TAGS_VIEW' in req.perm(tagged_resource):
yield ('tags', time, author,
(tagged_resource, old_tags, new_tags), self)
def render_timeline_event(self, context, field, event):
resource = event[3][0]
if field == 'url':
return get_resource_url(self.env, resource, context.href)
elif field == 'title':
name = builder.em(get_resource_name(self.env, resource))
return tag_("Tag change on %(resource)s", resource=name)
elif field == 'description':
return render_tag_changes(event[3][1], event[3][2])
def render_tag_changes(old_tags, new_tags):
old_tags = split_into_tags(old_tags or '')
new_tags = split_into_tags(new_tags or '')
added = sorted(new_tags - old_tags)
added = added and \
tagn_("%(tags)s added", "%(tags)s added",
len(added), tags=builder.em(', '.join(added)))
removed = sorted(old_tags - new_tags)
removed = removed and \
tagn_("%(tags)s removed", "%(tags)s removed",
len(removed), tags=builder.em(', '.join(removed)))
# TRANSLATOR: How to delimit added and removed tags.
delim = added and removed and _("; ")
return builder(builder.strong(_("Tags")), ' ', added,
delim, removed)
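# Illustrative sketch (editor's addition): a trac.ini [tags] section exercising
# the options declared by the components above. The values are examples only;
# the option names are taken directly from the Option/BoolOption/ListOption
# declarations in this module.
#
#     [tags]
#     complete_field = keywords
#     ticket_help = TagsHelp
#     ticket_help_newwindow = false
#     complete_matchcontains = true
#     separator = ,
#     complete_sticky_tags = bug, enhancement, task
#     cloud_mincount = 1
#     default_table_cols = id|description|tags
#     default_format = oldlist
#     exclude_realms = ticket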
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
add.go
|
// This script will add the joebunyan slack bot to every public channel in the hackclub slack org
package main
import (
"encoding/json"
"errors"
"io"
"log"
"net/http"
"net/url"
"os"
"strings"
"time"
)
func main() {
client := &http.Client{}
token := "Bearer " + os.Getenv("ADD_OAUTH_TOKEN")
channels, err := getChannels(client, token)
if err != nil {
log.Fatal(err)
}
err = joinChannels(client, channels, token)
if err != nil {
log.Fatal(err)
}
}
func getChannels(client *http.Client, token string) (map[string]string, error) {
channels := map[string]string{}
cursor := ""
for {
req, err := http.NewRequest(
"GET",
"https://slack.com/api/conversations.list?exclude_archived=true&types=public_channel&cursor="+url.QueryEscape(
cursor,
),
strings.NewReader(""),
)
if err != nil {
return map[string]string{}, err
}
req.Header.Add("Authorization", token)
res, err := client.Do(req)
if err != nil {
return map[string]string{}, err
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
return map[string]string{}, err
}
var data struct {
OK bool
Error string
Channels []struct {
ID string
Name string
IsShared bool `json:"is_shared"`
}
ResponseMetadata struct {
NextCursor string `json:"next_cursor"`
} `json:"response_metadata"`
}
err = json.Unmarshal(body, &data)
if err != nil {
return map[string]string{}, err
}
if !data.OK {
if data.Error == "ratelimited" {
log.Println("Currently ratelimited. Resting for one minute")
time.Sleep(time.Minute)
continue
}
return map[string]string{}, errors.New(
"Data returned non OK from slack API: " + string(body),
)
}
		for _, channel := range data.Channels {
			if !channel.IsShared {
				channels[channel.ID] = channel.Name
			}
		}
		log.Printf("Got %v channels so far\n", len(channels))
		// Record the final page before stopping; breaking on an empty cursor
		// first would silently drop the last batch of channels.
		if data.ResponseMetadata.NextCursor == "" {
			break
		}
		cursor = data.ResponseMetadata.NextCursor
}
return channels, nil
}
func joinChannels(client *http.Client, channels map[string]string, token string) error {
stageAdded := 0
added := 0
for id, name := range channels {
if stageAdded >= 50 {
log.Println("Sleeping for 1 minute to prevent rate limiting")
time.Sleep(time.Minute)
stageAdded = 0
}
req, err := http.NewRequest("POST", "https://slack.com/api/conversations.join?channel="+id,
strings.NewReader(""),
)
if err != nil {
return err
}
req.Header.Add("Authorization", token)
res, err := client.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var data struct {
OK bool
Error string
}
err = json.Unmarshal(body, &data)
if err != nil {
return err
}
if data.Error != "" || !data.OK {
return errors.New(data.Error)
}
added++
stageAdded++
log.Printf("Added to #%v - %v/%v\n", name, added, len(channels))
}
return nil
}
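// Illustrative usage (editor's addition): the script reads a Slack OAuth token
// from the ADD_OAUTH_TOKEN environment variable, e.g.
//
//	ADD_OAUTH_TOKEN=xoxb-example-token go run add.go
//
// The token value above is a placeholder.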
|
[
"\"ADD_OAUTH_TOKEN\""
] |
[] |
[
"ADD_OAUTH_TOKEN"
] |
[]
|
["ADD_OAUTH_TOKEN"]
|
go
| 1 | 0 | |
pkg/utils/resolveAdmin.go
|
package utils
import (
"errors"
"os"
"strings"
"github.com/Bendomey/goutilities/pkg/validatetoken"
"github.com/dgrijalva/jwt-go"
"github.com/graphql-go/graphql"
)
// AdminFromToken unmarshals claims from the JWT to get the admin id and role.
type AdminFromToken struct {
ID string `json:"id"`
Role string `json:"role"`
}
func extractAdminToken(unattendedToken string) (string, error) {
//remove bearer
strArr := strings.Split(unattendedToken, " ")
if len(strArr) != 2 {
return "", errors.New("AuthorizationFailed")
}
return strArr[1], nil
}
func validateAdmin(unattendedToken string) (*AdminFromToken, error) {
//extract token
token, extractTokenErr := extractAdminToken(unattendedToken)
if extractTokenErr != nil {
return nil, extractTokenErr
}
//extract token metadata
rawToken, validateError := validatetoken.ValidateJWTToken(token, os.Getenv("ADMIN_SECRET"))
if validateError != nil {
return nil, errors.New("AuthorizationFailed")
}
claims, ok := rawToken.Claims.(jwt.MapClaims)
var adminFromTokenImplementation AdminFromToken
if ok && rawToken.Valid {
adminFromTokenImplementation.ID = claims["id"].(string)
adminFromTokenImplementation.Role = claims["role"].(string)
}
//check if its exists in db
// _, err := userService.GetUser(ctx, userFromTokenImplementation.ID)
// if err != nil {
// return nil, err
// }
return &adminFromTokenImplementation, nil
}
type manipulateAnythingFromAdmin func(params graphql.ResolveParams, adminData *AdminFromToken) (interface{}, error)
// AuthenticateAdmin checks if the user trying to access that resource is truly an Admin
func AuthenticateAdmin(fn manipulateAnythingFromAdmin) func(params graphql.ResolveParams) (interface{}, error) {
return func(params graphql.ResolveParams) (interface{}, error) {
token, tokenExtractionErr := GetContextInjected(params.Context)
if tokenExtractionErr != nil {
return nil, tokenExtractionErr
}
validated, validateError := validateAdmin(token)
if validateError != nil {
return nil, validateError
}
return fn(params, validated)
}
}
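// Illustrative sketch (editor's addition): wrapping a graphql-go resolver so it
// only runs for authenticated admins. The resolver body and field are hypothetical.
//
//	var getSystemStats = utils.AuthenticateAdmin(
//		func(params graphql.ResolveParams, adminData *utils.AdminFromToken) (interface{}, error) {
//			// adminData.ID and adminData.Role come from the validated JWT claims.
//			return map[string]string{"requestedBy": adminData.ID}, nil
//		},
//	)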
|
[
"\"ADMIN_SECRET\""
] |
[] |
[
"ADMIN_SECRET"
] |
[]
|
["ADMIN_SECRET"]
|
go
| 1 | 0 | |
calvin/runtime/south/plugins/async/twistedimpl/tests/test_server_connection.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.plugins.async import async
from calvin.runtime.south.plugins.async import server_connection
from calvin.runtime.south.plugins.async import threads
from calvin.utilities.calvinlogger import get_logger
import pytest
import socket
_log = get_logger(__name__)
def data_available(conn):
first_print = True
while conn.data_available is False:
if first_print:
print "waiting for conn.data_available ... ",
first_print = False
print ""
return True
def connection_made(factory):
first_print = True
while not factory.connections:
if first_print:
print "waiting for connection ... ",
first_print = False
print ""
return True
def hundred_connection_made(factory):
first_print = True
while not len(factory.connections) == 100:
if first_print:
print "waiting for 100 connection ... ",
first_print = False
print ""
return True
def no_more_connections(factory):
first_print = True
while factory.connections:
if first_print:
print "waiting for connections to close ... ",
first_print = False
print ""
return True
def print_header(string):
_log.info("\n\n### %s ###", string)
# Stub
class Scheduler_stub(object):
def trigger_loop(self):
""" Trigger the loop_once """
async.DelayedCall(0, self.trigger_loop)
return
@pytest.mark.essential
class TestServer(object):
@pytest.inlineCallbacks
def test_default_line_mode(self):
print_header("TEST_DEFAULT_LINE_MODE")
print_header("Setup")
scheduler = Scheduler_stub()
self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop)
self.factory.start('localhost', 8123)
self.conn = None
self.client_socket = None
print_header("Test_Connection")
##################################################################
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
assert not self.factory.connections
assert not self.factory.pending_connections
yield threads.defer_to_thread(self.client_socket.connect, ('localhost', 8123))
yield threads.defer_to_thread(connection_made, self.factory)
assert self.factory.pending_connections
_, self.conn = self.factory.accept()
####################################################################
####################################################################
print_header("Test_Line_Received")
####################################################################
assert self.conn.data_available is False
yield threads.defer_to_thread(self.client_socket.send, "sending string \r\n")
yield threads.defer_to_thread(data_available, self.conn)
assert self.conn.data_get() == "sending string "
print_header("Teardown")
self.factory.stop()
yield threads.defer_to_thread(no_more_connections, self.factory)
@pytest.inlineCallbacks
def test_args_in_line_mode(self):
print_header("TEST_ARGS_IN_LINE_MODE")
print_header("Setup")
scheduler = Scheduler_stub()
self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop, delimiter='end', max_length=3)
self.factory.start('localhost', 8123)
self.conn = None
self.client_socket = None
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
yield threads.defer_to_thread(self.client_socket.connect, ('localhost', 8123))
yield threads.defer_to_thread(connection_made, self.factory)
_, self.conn = self.factory.accept()
print_header("Test_Short_Line_Received")
####################################################################
yield threads.defer_to_thread(self.client_socket.send, "123end")
yield threads.defer_to_thread(data_available, self.conn)
assert self.conn.data_get() == "123"
print_header("Test_Long_Line_Received")
####################################################################
yield threads.defer_to_thread(self.client_socket.send, "1234end")
yield threads.defer_to_thread(data_available, self.conn)
assert self.conn.data_get() == "1234"
print_header("Teardown")
self.factory.stop()
yield threads.defer_to_thread(no_more_connections, self.factory)
@pytest.inlineCallbacks
def test_raw_mode(self):
print_header("TEST_RAW_MODE")
print_header("Setup")
scheduler = Scheduler_stub()
self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop, mode='raw', max_length=10)
self.factory.start('localhost', 8123)
self.conn = None
self.client_socket = None
print_header("Test_Connection")
##################################################################
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
yield threads.defer_to_thread(self.client_socket.connect, ('localhost', 8123))
yield threads.defer_to_thread(connection_made, self.factory)
assert self.factory.pending_connections
_, self.conn = self.factory.accept()
assert not self.factory.pending_connections
print_header("Test_Data_Received")
####################################################################
assert self.conn.data_available is False
yield threads.defer_to_thread(self.client_socket.send, "abcdefghijklmnopqrstuvxyz123456789")
yield threads.defer_to_thread(data_available, self.conn)
assert self.conn.data_get() == "abcdefghij"
assert self.conn.data_get() == "klmnopqrst"
assert self.conn.data_get() == "uvxyz12345"
assert self.conn.data_get() == "6789"
print_header("Teardown")
self.factory.stop()
yield threads.defer_to_thread(no_more_connections, self.factory)
@pytest.inlineCallbacks
def test_many_clients(self):
print_header("TEST_MANY_CLIENTS")
print_header("Setup")
scheduler = Scheduler_stub()
self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop, mode='raw', max_length=10)
self.factory.start('localhost', 8123)
self.conn = None
self.client_socket = None
print_header("Test_Connection")
##################################################################
clients = []
for i in range(100):
clients.append(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
for c in clients:
yield threads.defer_to_thread(c.connect, ('localhost', 8123))
yield threads.defer_to_thread(hundred_connection_made, self.factory)
assert len(self.factory.pending_connections) == 100
for i in range(100):
_, self.conn = self.factory.accept()
assert not self.factory.pending_connections
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
kensu/utils/kensu.py
|
import datetime
import getpass
import json
import logging
import os
import time
from kensu.client import *
from kensu.utils.dsl.extractors.external_lineage_dtos import KensuDatasourceAndSchema
from kensu.utils.dsl import mapping_strategies
from kensu.utils.dsl.extractors import Extractors
from kensu.utils.dsl.lineage_builder import LineageBuilder
from kensu.utils.helpers import to_hash_key
from kensu.utils.injection import Injection
from kensu.pandas import DataFrame,Series
class Kensu(object):
UNKNOWN_PHYSICAL_LOCATION = PhysicalLocation(name="Unknown", lat=0.12341234, lon=0.12341234,
pk=PhysicalLocationPK(city="Unknown", country="Unknown"))
@staticmethod
def discover_user_name():
return getpass.getuser()
@staticmethod
def get_git_repo():
cur_dir = os.path.realpath(os.path.curdir)
try:
import git
try:
git_repo = git.Repo(cur_dir, search_parent_directories=True)
return git_repo
except git.GitError as e:
logging.warn("kensu-py was unable to identify a git repo. The working dir is not a git repo?")
pass
except ImportError as e:
logging.warn("Install GitPython for a maximum context about the GIT code repo if any")
pass
@staticmethod
def discover_code_location():
cur_dir = os.path.realpath(os.path.curdir)
code_location = cur_dir
git_repo = Kensu.get_git_repo()
if git_repo is not None:
remote = git_repo.remote()
code_location = next(remote.urls, code_location)
return code_location
@staticmethod
def discover_code_version():
code_version = datetime.datetime.now().isoformat()
git_repo = Kensu.get_git_repo()
if git_repo is not None:
code_version = git_repo.head.commit.hexsha
if git_repo.is_dirty():
code_version = code_version + " (dirty)"
return code_version
def get_conf_path(self, default = "conf.ini"):
return os.environ["CONF_FILE"] if "CONF_FILE" in os.environ else default
def __init__(self, api_url=None, auth_token=None, process_name=None,
user_name=None, code_location=None, init_context=True,
do_report=None, report_to_file=None, offline_file_name=None, reporter=None, **kwargs):
"""
"""
from configparser import ConfigParser, ExtendedInterpolation
config = ConfigParser(interpolation=ExtendedInterpolation())
# TODO... path to conf there are so many args in the function here, so adding it will require a good migration plan (it doesn't land in kwargs...)
config.read(self.get_conf_path("conf.ini"))
kensu_conf = config['kensu'] if config.has_section('kensu') else config['DEFAULT']
self.conf = kensu_conf
kensu_host = self.get_kensu_host(api_url)
if kensu_host is None:
kensu_host = kensu_conf.get("api_url")
if auth_token is None:
kensu_auth_token = kensu_conf.get("api_token")
else:
kensu_auth_token = auth_token
def kwargs_or_conf_or_default(key, default, kw=kwargs, conf=kensu_conf):
if key in kw and kw[key] is not None:
return kw[key]
elif key in conf and conf.get(key) is not None:
r = conf.get(key)
if isinstance(default, list):
r = r.replace(" ","").split(",")
elif isinstance(default, bool):
r = conf.getboolean(key)
return r
else:
return default
self.extractors = Extractors()
pandas_support = kwargs_or_conf_or_default("pandas_support", True)
sklearn_support = kwargs_or_conf_or_default("sklearn_support", True)
bigquery_support = kwargs_or_conf_or_default("bigquery_support", False)
tensorflow_support = kwargs_or_conf_or_default("tensorflow_support", False)
self.extractors.add_default_supports(pandas_support=pandas_support, sklearn_support=sklearn_support,bigquery_support=bigquery_support,tensorflow_support=tensorflow_support)
project_names = kwargs_or_conf_or_default("project_names", [])
environment = kwargs_or_conf_or_default("environment", None)
timestamp = kwargs_or_conf_or_default("timestamp", None)
logical_naming = kwargs_or_conf_or_default("logical_naming", None)
mapping = kwargs_or_conf_or_default("mapping", None)
report_in_mem = kwargs_or_conf_or_default("report_in_mem", False)
if "get_code_version" in kwargs and kwargs["get_code_version"] is not None:
get_code_version = kwargs["get_code_version"]
else:
get_code_version = Kensu.discover_code_version
def default_if_arg_none(arg, default):
if arg is None:
return default
else:
return arg
process_name = default_if_arg_none(process_name, kensu_conf.get("process_name"))
user_name = default_if_arg_none(user_name, kensu_conf.get("user_name"))
code_location = default_if_arg_none(code_location, kensu_conf.get("code_location"))
do_report = default_if_arg_none(do_report, kensu_conf.getboolean("do_report", True))
report_to_file = default_if_arg_none(report_to_file, kensu_conf.getboolean("report_to_file", False))
offline_file_name = default_if_arg_none(offline_file_name, kensu_conf.get("offline_file_name", None))
self.kensu_api = KensuEntitiesApi()
self.kensu_api.api_client.host = kensu_host
self.kensu_api.api_client.default_headers["X-Auth-Token"] = kensu_auth_token
# add function to Kensu entities
injection = Injection()
injection.set_reporter(reporter)
injection.set_do_report(do_report, offline_file_name=offline_file_name, report_to_file=report_to_file)
injection.set_kensu_api(self.kensu_api)
self.logical_naming = logical_naming
self.mapping = mapping
self.report_in_mem = report_in_mem
self.set_default_physical_location(Kensu.UNKNOWN_PHYSICAL_LOCATION)
# can be updated using set_default_physical_location
self.init_context(process_name=process_name, user_name=user_name, code_location=code_location,
get_code_version=get_code_version, project_names=project_names, environment=environment, timestamp=timestamp)
# sets the api url using host if passed, otherwise gets KENSU_API_URL
def get_kensu_host(self, host=None):
if host is None:
if "KENSU_API_URL" in os.environ:
kensu_host = os.environ["KENSU_API_URL"]
else:
kensu_host = None
else:
kensu_host = host
return kensu_host
def register_schema_name(self, ds, schema):
name = ds.name
if "in-mem" in name and ds.format is not None:
name = name + " of format=" + str(ds.format or '?')
self.schema_name_by_guid[schema.to_guid()] = name
return schema
def to_schema_name(self, s_guid):
return self.schema_name_by_guid.get(s_guid) or s_guid
def to_schema_names(self, s_guids):
return [self.to_schema_name(s_guid) for s_guid in s_guids]
def init_context(self, process_name=None, user_name=None, code_location=None, get_code_version=None, project_names=None,environment=None,timestamp=None):
# list of triples i, o, mapping strategy
# i and o are either one or a list of triples (object, DS, SC)
self.dependencies = []
self.dependencies_mapping = []
self.dependencies_per_columns = {}
self.real_schema_df = {}
self.schema_name_by_guid = {}
self.sent_runs = []
self.data_collectors = {}
self.model={}
self.set_timestamp(timestamp)
self.inputs_ds = []
self.write_reinit = False
if user_name is None:
user_name = Kensu.discover_user_name()
if code_location is None:
code_location = Kensu.discover_code_location()
self.user = User(pk=UserPK(user_name))._report()
self.code_base = CodeBase(pk=CodeBasePK(code_location))._report()
if get_code_version is None:
if timestamp is not None: # this is weird though...
version = datetime.datetime.fromtimestamp(timestamp/1000).isoformat()
else:
version = Kensu.discover_code_version()
else:
version = get_code_version()
self.code_version = CodeVersion(maintainers_refs=[self.user.to_ref()],
pk=CodeVersionPK(version=version,
codebase_ref=self.code_base.to_ref()))._report()
if process_name is None:
if "__file__" in globals():
process_name = os.path.basename(os.path.realpath(__file__))
else:
raise Exception("Can't determine `process_name`, maybe is this running from a Notebook?")
self.process = Process(pk=ProcessPK(qualified_name=process_name))._report()
if project_names is None:
self.project_refs = []
else:
self.project_refs = [Project(pk=ProjectPK(name=n))._report().to_ref() for n in project_names]
process_run_name = process_name + "@" + datetime.datetime.now().isoformat()
self.process_run = ProcessRun(
pk=ProcessRunPK(process_ref=self.process.to_ref(), qualified_name=process_run_name)
, launched_by_user_ref=self.user.to_ref()
, executed_code_version_ref=self.code_version.to_ref()
, projects_refs=self.project_refs
, environment = environment
)._report()
def set_reinit(self, bool = True):
self.write_reinit = bool
def add_input_ref(self, entities):
if self.write_reinit == True:
self.inputs_ds = []
self.write_reinit = False
self.inputs_ds.append(entities)
def set_timestamp(self, timestamp):
if timestamp is not None:
self.kensu_api.api_client.default_headers["X-Entity-Creation-Time"] = timestamp
else:
timestamp = datetime.datetime.now().timestamp()*1000
self.timestamp = timestamp
def set_default_physical_location(self, pl):
self.default_physical_location = pl
pl._report()
self.default_physical_location_ref = pl.to_ref()
def get_dependencies_mapping(self):
return self.dependencies_mapping
def add_dependencies_mapping(self, guid, col, from_guid, from_col, type):
dep = {'GUID': guid,
'COLUMNS': col,
'FROM_ID': from_guid,
'FROM_COLUMNS': from_col,
'TYPE': type}
self.dependencies_mapping.append(dep)
def in_mem(self, var_name):
return "in-memory-data://" + self.process.pk.qualified_name + "/" + var_name
def report_with_mapping(self):
self.set_reinit()
import pandas as pd
deps = self.dependencies_mapping
ddf = pd.DataFrame(deps)
df = ddf.set_index(['GUID', 'COLUMNS', 'FROM_ID']).groupby(['GUID', 'COLUMNS', 'FROM_ID']).agg(list)
df = df[~df.index.duplicated(keep='first')].reset_index()
unique_ids = list(df['GUID'].unique())
if self.report_in_mem:
for element in unique_ids:
dataflow = []
dependencies = df[df['GUID'] == element]
data = {}
for row in dependencies.iterrows():
info = row[1]
from_columns = [str(x) for x in info['FROM_COLUMNS']]
data[str(info['COLUMNS'])] = from_columns
schema_dep = SchemaLineageDependencyDef(from_schema_ref=SchemaRef(by_guid=info['FROM_ID']),
to_schema_ref=SchemaRef(by_guid=info['GUID']),
column_data_dependencies=data)
dataflow.append(schema_dep)
lineage = ProcessLineage(name=self.get_lineage_name(dataflow),
operation_logic='APPEND',
pk=ProcessLineagePK(process_ref=ProcessRef(by_guid=self.process.to_guid()),
data_flow=dataflow))._report()
if lineage.to_guid() not in self.sent_runs:
lineage_run = LineageRun(pk=LineageRunPK(lineage_ref=ProcessLineageRef(by_guid=lineage.to_guid()),
process_run_ref=ProcessRunRef(
by_guid=self.process_run.to_guid()),
timestamp=round(self.timestamp)))._report()
self.sent_runs.append(lineage.to_guid())
else:
dependencies_per_columns = {}
for element in unique_ids:
if element in self.real_schema_df:
sub_df = df[df['GUID'] == element]
for row in sub_df.iterrows():
info = row[1]
destination_guid = info['GUID']
guid = info['GUID']
origin_column = info['COLUMNS']
column = info['COLUMNS']
all_deps = df
self.create_dependencies(destination_guid, guid, origin_column, column, all_deps,
dependencies_per_columns)
dataflows = {}
for destination_guid in dependencies_per_columns:
if dependencies_per_columns[destination_guid] != {}:
dataflows[destination_guid] = {}
for column in dependencies_per_columns[destination_guid]:
for origin_guid in dependencies_per_columns[destination_guid][column]:
if origin_guid not in dataflows[destination_guid] and origin_guid!=destination_guid:
dataflows[destination_guid][origin_guid] = {}
dataflows[destination_guid][origin_guid][column]=list(set(dependencies_per_columns[destination_guid][column][origin_guid]))
elif origin_guid!=destination_guid:
dataflows[destination_guid][origin_guid][column] = \
list(set(dependencies_per_columns[destination_guid][column][origin_guid]))
for to_guid in dataflows:
schemas_pk = set()
from_pks = set()
dataflow = []
is_ml_model = False
for from_guid in dataflows[to_guid]:
schema_dep = SchemaLineageDependencyDef(from_schema_ref=SchemaRef(by_guid=from_guid),
to_schema_ref=SchemaRef(by_guid=to_guid),
column_data_dependencies=dataflows[to_guid][from_guid])
if to_guid in self.model:
is_ml_model = True
dataflow.append(schema_dep)
schemas_pk.add(from_guid)
from_pks.add(from_guid)
schemas_pk.add(to_guid)
lineage = ProcessLineage(name=self.get_lineage_name(dataflow),
operation_logic='APPEND',
pk=ProcessLineagePK(
process_ref=ProcessRef(by_guid=self.process.to_guid()),
data_flow=dataflow))._report()
if lineage.to_guid() not in self.sent_runs:
lineage_run = LineageRun(
pk=LineageRunPK(lineage_ref=ProcessLineageRef(by_guid=lineage.to_guid()),
process_run_ref=ProcessRunRef(by_guid=self.process_run.to_guid()),
timestamp=round(self.timestamp)))._report()
self.sent_runs.append(lineage.to_guid())
for schema in schemas_pk:
stats_df = self.real_schema_df[schema]
try:
stats = self.extractors.extract_stats(stats_df)
except:
from kensu.requests.models import ksu_str
# FIXME weird... should be fine to delete (and try,except too)
if isinstance(stats_df, pd.DataFrame) or isinstance(stats_df, DataFrame) or isinstance(stats_df,Series) or isinstance(stats_df,pd.Series) :
stats = self.extractors.extract_stats(stats_df)
elif isinstance(stats_df, ksu_str):
stats = None
elif isinstance(stats_df, dict):
stats = stats_df
else:
#TODO Support ndarray
stats = None
if stats is not None:
DataStats(pk=DataStatsPK(schema_ref=SchemaRef(by_guid=schema),
lineage_run_ref=LineageRunRef(by_guid=lineage_run.to_guid())),
stats=stats,
extra_as_json=None)._report()
elif isinstance(stats_df, KensuDatasourceAndSchema):
stats_df.f_publish_stats(lineage_run.to_guid())
#FIXME should be using extractors instead
if is_ml_model:
model_name = self.model[to_guid][1]
metrics = self.model[to_guid][2]
import json
hyperparams = json.dumps(self.model[to_guid][3])
model = Model(ModelPK(name=model_name))._report()
train = ModelTraining(pk=ModelTrainingPK(model_ref=ModelRef(by_guid=model.to_guid()),
process_lineage_ref=ProcessLineageRef(
by_guid=lineage.to_guid())))._report()
r=ModelMetrics(pk=ModelMetricsPK(model_training_ref=ModelTrainingRef(by_guid=train.to_guid()),
lineage_run_ref=LineageRunRef(by_guid=lineage_run.to_guid()),
stored_in_schema_ref=SchemaRef(by_guid=to_guid)),
metrics=metrics, hyper_params_as_json=hyperparams)._report()
def create_dependencies(self,destination_guid, guid, origin_column, column, all_deps,
dependencies_per_columns_rt):
visited = list()
visited.append((guid,column))
self.dependencies_per_columns = dependencies_per_columns_rt
filtered_dependencies = all_deps[all_deps['GUID'] == guid]
filtered_dependencies = filtered_dependencies[filtered_dependencies['COLUMNS'] == str(column)]
if destination_guid in self.dependencies_per_columns:
for row in filtered_dependencies.iterrows():
row = row[1]
if row['FROM_ID'] in self.real_schema_df:
if origin_column in self.dependencies_per_columns[destination_guid]:
if row['FROM_ID'] in self.dependencies_per_columns[destination_guid][origin_column]:
self.dependencies_per_columns[destination_guid][origin_column][row['FROM_ID']] = \
self.dependencies_per_columns[destination_guid][origin_column][row['FROM_ID']] + \
row['FROM_COLUMNS']
else:
self.dependencies_per_columns[destination_guid][origin_column][row['FROM_ID']] = row[
'FROM_COLUMNS']
else:
self.dependencies_per_columns[destination_guid][origin_column] = {}
self.dependencies_per_columns[destination_guid][origin_column][row['FROM_ID']] = row[
'FROM_COLUMNS']
# dependencies_per_columns[guid][row['FROM_ID']] = row['FROM_COLUMNS']
else:
guid = row['FROM_ID']
columns = row['FROM_COLUMNS']
for column in columns:
if (guid,column) not in visited:
self.create_dependencies(destination_guid, guid, origin_column, column, all_deps,
self.dependencies_per_columns)
else:
self.dependencies_per_columns[destination_guid] = {}
self.create_dependencies(destination_guid, guid, origin_column, column, all_deps,
self.dependencies_per_columns)
def get_dependencies(self):
return self.dependencies
def add_dependency(self, i, o, mapping_strategy=mapping_strategies.FULL):
if not isinstance(i, tuple):
(ids, isc) = self.extractors.extract_data_source_and_schema(i, self.default_physical_location_ref)
i = (i, ids, isc)
if not isinstance(o, tuple):
(ods, osc) = self.extractors.extract_data_source_and_schema(o, self.default_physical_location_ref)
o = (o, ods, osc)
self.dependencies.append((i, o, mapping_strategy))
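# Illustrative usage only (variable names are hypothetical, not from this file):
# assuming `tracker` is an instance of this class and `input_df` / `output_df` are
# pandas DataFrames, a dependency can be recorded with
#
#   tracker.add_dependency(input_df, output_df, mapping_strategy=mapping_strategies.FULL)
#
# The extractors resolve each DataFrame to an (object, datasource, schema) triple
# before the dependency is appended.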
def add_dependencies(self, ins, outs, mapping_strategy=mapping_strategies.FULL):
new_ins = []
for i in ins:
if not isinstance(i, tuple):
(ids, isc) = self.extractors.extract_data_source_and_schema(i, self.default_physical_location_ref)
i = (i, ids, isc)
new_ins.append(i)
new_outs = []
for o in outs:
if not isinstance(o, tuple):
(ods, osc) = self.extractors.extract_data_source_and_schema(o, self.default_physical_location_ref)
o = (o, ods, osc)
new_outs.append(o)
self.dependencies.append((new_ins, new_outs, mapping_strategy))
def get_lineage_name(self,
data_flow # type: list[SchemaLineageDependencyDef]
):
inputs = ",".join(sorted(self.to_schema_names([d.from_schema_ref.by_guid for d in data_flow])))
outputs = ",".join(sorted(self.to_schema_names([d.to_schema_ref.by_guid for d in data_flow])))
return "Lineage to {} from {}".format(outputs, inputs)
@property
def s(self):
return self.start_lineage(True)
def start_lineage(self, report_stats=True):
lineage_builder = LineageBuilder(self, report_stats)
return lineage_builder
def new_lineage(self, process_lineage_dependencies, report_stats=True, **kwargs):
# if the new_lineage has a model training in it (output),
# then kwargs will be passed to the function to compute metrics
# ex: kwargs["y_test"] can refer to the test set to compute CV metrics
data_flow = [d.toSchemaLineageDependencyDef() for d in process_lineage_dependencies]
lineage = ProcessLineage(name=self.get_lineage_name(data_flow),
operation_logic="APPEND",
# FIXME? => add control at the function level like report_stats
pk=ProcessLineagePK(
process_ref=self.process.to_ref(),
data_flow=data_flow
)
)._report()
if self.timestamp is None:
self.timestamp=int(time.time()) * 1000
lineage_run = LineageRun(pk=LineageRunPK(
lineage_ref=lineage.to_ref(),
process_run_ref=self.process_run.to_ref(),
timestamp=self.timestamp
)
)._report()
data_flow_inputs = list(
{to_hash_key(d.input_schema): (d.input_schema, d.input) for d in process_lineage_dependencies}.values())
data_flow_outputs = list(
{to_hash_key(d.output_schema): (d.output_schema, d.output) for d in process_lineage_dependencies}.values())
for (schema, df) in (data_flow_inputs + data_flow_outputs):
stats = self.extractors.extract_stats(df)
if report_stats and stats is not None:
DataStats(pk=DataStatsPK(schema_ref=schema.to_ref(),
lineage_run_ref=lineage_run.to_ref()),
stats=stats,
extra_as_json=None)._report()
# TODO Machine Learning part for OUTPUTS ONLY (right ?)
for (schema, df) in data_flow_outputs:
ml = self.extractors.extract_machine_learning_info(df)
if ml is not None:
model = Model(ModelPK(ml["name"]))._report()
model_training = ModelTraining(ModelTrainingPK(model_ref=model.to_ref(),
process_lineage_ref=lineage.to_ref())
)._report()
metrics = self.extractors.extract_machine_learning_metrics(df, **kwargs)
if len(metrics) > 0:
hp = self.extractors.extract_machine_learning_hyper_parameters(df)
ModelMetrics(pk=ModelMetricsPK(
model_training_ref=model_training.to_ref(),
lineage_run_ref=lineage_run.to_ref(),
stored_in_schema_ref=schema.to_ref()
),
metrics=metrics,
hyper_params_as_json=json.dumps(hp)
)._report()
|
[] |
[] |
[
"KENSU_API_URL",
"CONF_FILE"
] |
[]
|
["KENSU_API_URL", "CONF_FILE"]
|
python
| 2 | 0 | |
server/bert_multitask_serving/server/helper.py
|
import argparse
import logging
import os
import sys
import uuid
import numpy as np
import zmq
from zmq.utils import jsonapi
__all__ = ['set_logger', 'send_ndarray', 'get_args_parser',
'check_tf_version', 'auto_bind', 'import_tf', 'send_dict_ndarray']
def set_logger(context, verbose=False):
logger = logging.getLogger(context)
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
formatter = logging.Formatter(
'%(levelname)-.1s:' + context + ':[%(filename).3s:%(funcName).3s:%(lineno)3d]:%(message)s', datefmt='%m-%d %H:%M:%S')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG if verbose else logging.INFO)
console_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(console_handler)
return logger
def send_ndarray(src, dest, X, req_id=b'', flags=0, copy=True, track=False):
"""send a numpy array with metadata"""
md = dict(dtype=str(X.dtype), shape=X.shape)
return src.send_multipart([dest, jsonapi.dumps(md), X, req_id], flags, copy=copy, track=track)
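# Illustration only (not part of the original module): whichever socket receives the
# frames produced above, the array can be rebuilt from the metadata frame, e.g.
#
#   md = jsonapi.loads(md_frame)          # md_frame / buf are the received frames
#   arr = np.frombuffer(memoryview(buf), dtype=md['dtype']).reshape(md['shape'])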
def send_dict_ndarray(src, dest, X, req_id=b'', flags=0, copy=True, track=False):
"""send a dict of numpy array with metadata"""
md = {}
for problem in X:
md[problem] = dict(
dtype='str',
shape=[])
if not isinstance(X[problem], list):
X[problem] = X[problem].tolist()
# md = dict(dtype=str(X.dtype), shape=X.shape)
return src.send_multipart(
[dest, jsonapi.dumps(md), jsonapi.dumps(X), req_id], flags, copy=copy, track=track)
def get_args_parser():
from . import __version__
# from .graph import PoolingStrategy
parser = argparse.ArgumentParser()
group1 = parser.add_argument_group('File Paths',
'config the path, checkpoint and filename of a pretrained/fine-tuned BERT model')
group1.add_argument('-model_dir', type=str, required=True,
help='directory of a pretrained BERT model')
group1.add_argument('-tuned_model_dir', type=str,
help='directory of a fine-tuned BERT model')
group1.add_argument('-ckpt_name', type=str, default='bert_model.ckpt',
help='filename of the checkpoint file. By default it is "bert_model.ckpt", but \
for a fine-tuned model the name could be different.')
group1.add_argument('-config_name', type=str, default='bert_config.json',
help='filename of the JSON config file for BERT model.')
group2 = parser.add_argument_group('BERT Parameters',
'config how BERT model and pooling works')
group2.add_argument('-max_seq_len', type=int, default=25,
help='maximum length of a sequence')
group2.add_argument('-pooling_layer', type=int, nargs='+', default=[-2],
help='the encoder layer(s) that receives pooling. \
Give a list in order to concatenate several layers into one')
# group2.add_argument('-pooling_strategy', type=PoolingStrategy.from_string,
# default=PoolingStrategy.REDUCE_MEAN, choices=list(
# PoolingStrategy),
# help = 'the pooling strategy for generating encoding vectors')
group2.add_argument('-mask_cls_sep', action='store_true', default=False,
help='masking the embedding on [CLS] and [SEP] with zero. \
When pooling_strategy is in {CLS_TOKEN, FIRST_TOKEN, SEP_TOKEN, LAST_TOKEN} \
then the embedding is preserved, otherwise the embedding is masked to zero before pooling')
group3 = parser.add_argument_group('Serving Configs',
'config how server utilizes GPU/CPU resources')
group3.add_argument('-port', '-port_in', '-port_data', type=int, default=5555,
help='server port for receiving data from client')
group3.add_argument('-port_out', '-port_result', type=int, default=5556,
help='server port for sending result to client')
group3.add_argument('-num_worker', type=int, default=1,
help='number of server instances')
group3.add_argument('-max_batch_size', type=int, default=256,
help='maximum number of sequences handled by each worker')
group3.add_argument('-priority_batch_size', type=int, default=16,
help='batch smaller than this size will be labeled as high priority,'
'and jumps forward in the job queue')
group3.add_argument('-cpu', action='store_true', default=False,
help='running on CPU (default on GPU)')
group3.add_argument('-xla', action='store_true', default=False,
help='enable XLA compiler (experimental)')
group3.add_argument('-gpu_memory_fraction', type=float, default=0.5,
help='determine the fraction of the overall amount of memory \
that each visible GPU should be allocated per worker. \
Should be in range [0.0, 1.0]')
group3.add_argument('-device_map', type=int, nargs='+', default=[],
help='specify the list of GPU device ids that will be used (id starts from 0). \
If num_worker > len(device_map), then device will be reused; \
if num_worker < len(device_map), then device_map[:num_worker] will be used')
group3.add_argument('-prefetch_size', type=int, default=10,
help='the number of batches to prefetch on each worker. When running on a CPU-only machine, \
this is set to 0 for compatibility')
parser.add_argument('-verbose', action='store_true', default=False,
help='turn on tensorflow logging for debug')
parser.add_argument('-version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-problem', type=str,
default='CWS|NER|POS', help='Problems to serve.')
return parser
def check_tf_version():
import tensorflow as tf
tf_ver = tf.__version__.split('.')
assert int(tf_ver[0]) >= 1 and int(
tf_ver[1]) >= 10, 'Tensorflow >=1.10 is required!'
return tf_ver
def import_tf(device_id=-1, verbose=False):
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if device_id < 0 else str(
device_id)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' if verbose else '3'
import tensorflow as tf
tf.logging.set_verbosity(
tf.logging.DEBUG if verbose else tf.logging.ERROR)
return tf
def auto_bind(socket):
if os.name == 'nt': # for Windows
socket.bind_to_random_port('tcp://*')
else:
# Get the location for tmp file for sockets
try:
tmp_dir = os.environ['ZEROMQ_SOCK_TMP_DIR']
if not os.path.exists(tmp_dir):
raise ValueError(
'This directory for sockets ({}) does not seem to exist.'.format(tmp_dir))
tmp_dir = os.path.join(tmp_dir, str(uuid.uuid1())[:8])
except KeyError:
tmp_dir = '*'
socket.bind('ipc://{}'.format(tmp_dir))
return socket.getsockopt(zmq.LAST_ENDPOINT).decode('ascii')
def get_run_args(parser_fn=get_args_parser, printed=True):
args = parser_fn().parse_args()
if printed:
param_str = '\n'.join(['%20s = %s' % (k, v)
for k, v in sorted(vars(args).items())])
print('usage: %s\n%20s %s\n%s\n%s\n' %
(' '.join(sys.argv), 'ARG', 'VALUE', '_' * 50, param_str))
return args
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"ZEROMQ_SOCK_TMP_DIR",
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "ZEROMQ_SOCK_TMP_DIR", "TF_CPP_MIN_LOG_LEVEL"]
|
python
| 3 | 0 | |
providers/terraform-provider-aws/aws/resource_aws_pinpoint_gcm_channel_test.go
|
package aws
import (
"fmt"
"os"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/pinpoint"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
/**
Before running this test, the following ENV variable must be set:
GCM_API_KEY - Google Cloud Messaging Api Key
**/
func TestAccAWSPinpointGCMChannel_basic(t *testing.T) {
var channel pinpoint.GCMChannelResponse
resourceName := "aws_pinpoint_gcm_channel.test_gcm_channel"
if os.Getenv("GCM_API_KEY") == "" {
t.Skipf("GCM_API_KEY env missing, skip test")
}
apiKey := os.Getenv("GCM_API_KEY")
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSPinpointApp(t) },
IDRefreshName: resourceName,
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSPinpointGCMChannelDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSPinpointGCMChannelConfig_basic(apiKey),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSPinpointGCMChannelExists(resourceName, &channel),
resource.TestCheckResourceAttr(resourceName, "enabled", "false"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"api_key"},
},
{
Config: testAccAWSPinpointGCMChannelConfig_basic(apiKey),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSPinpointGCMChannelExists(resourceName, &channel),
resource.TestCheckResourceAttr(resourceName, "enabled", "false"),
),
},
},
})
}
func testAccCheckAWSPinpointGCMChannelExists(n string, channel *pinpoint.GCMChannelResponse) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No Pinpoint GCM Channel with that application ID exists")
}
conn := testAccProvider.Meta().(*AWSClient).pinpointconn
// Check if the GCM channel exists
params := &pinpoint.GetGcmChannelInput{
ApplicationId: aws.String(rs.Primary.ID),
}
output, err := conn.GetGcmChannel(params)
if err != nil {
return err
}
*channel = *output.GCMChannelResponse
return nil
}
}
func testAccAWSPinpointGCMChannelConfig_basic(apiKey string) string {
return fmt.Sprintf(`
resource "aws_pinpoint_app" "test_app" {}
resource "aws_pinpoint_gcm_channel" "test_gcm_channel" {
application_id = aws_pinpoint_app.test_app.application_id
enabled = "false"
api_key = "%s"
}
`, apiKey)
}
func testAccCheckAWSPinpointGCMChannelDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).pinpointconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_pinpoint_gcm_channel" {
continue
}
// Check if the GCM channel still exists
params := &pinpoint.GetGcmChannelInput{
ApplicationId: aws.String(rs.Primary.ID),
}
_, err := conn.GetGcmChannel(params)
if err != nil {
if isAWSErr(err, pinpoint.ErrCodeNotFoundException, "") {
continue
}
return err
}
return fmt.Errorf("GCM Channel exists when it should be destroyed!")
}
return nil
}
|
[
"\"GCM_API_KEY\"",
"\"GCM_API_KEY\""
] |
[] |
[
"GCM_API_KEY"
] |
[]
|
["GCM_API_KEY"]
|
go
| 1 | 0 | |
pkg/cmd/serviceaccount/auth/provider.go
|
package auth
import (
"os"
"path/filepath"
"runtime"
"github.com/Azure/azure-workload-identity/pkg/cloud"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/google/uuid"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
ini "gopkg.in/ini.v1"
)
const (
clientSecretAuthMethod = "client_secret"
clientCertificateAuthMethod = "client_certificate"
cliAuthMethod = "cli"
)
// Provider is an interface for getting an Azure client
type Provider interface {
AddFlags(f *pflag.FlagSet)
GetAzureClient() cloud.Interface
GetAzureTenantID() string
Validate() error
}
// authArgs is an implementation of the Provider interface
type authArgs struct {
rawAzureEnvironment string
rawSubscriptionID string
subscriptionID uuid.UUID
authMethod string
rawClientID string
tenantID string
clientID uuid.UUID
clientSecret string
certificatePath string
privateKeyPath string
azureClient cloud.Interface
}
// NewProvider returns a new authArgs
func NewProvider() Provider {
return &authArgs{}
}
// AddFlags adds the flags for this package to the specified FlagSet
func (a *authArgs) AddFlags(f *pflag.FlagSet) {
f.StringVar(&a.rawAzureEnvironment, "azure-env", "AzurePublicCloud", "the target Azure cloud")
f.StringVarP(&a.rawSubscriptionID, "subscription-id", "s", "", "azure subscription id (required)")
f.StringVar(&a.authMethod, "auth-method", cliAuthMethod, "auth method to use. Supported values: cli, client_secret, client_certificate")
f.StringVar(&a.rawClientID, "client-id", "", "client id (used with --auth-method=[client_secret|client_certificate])")
f.StringVar(&a.clientSecret, "client-secret", "", "client secret (used with --auth-method=client_secret)")
f.StringVar(&a.certificatePath, "certificate-path", "", "path to client certificate (used with --auth-method=client_certificate)")
f.StringVar(&a.privateKeyPath, "private-key-path", "", "path to private key (used with --auth-method=client_certificate)")
}
// GetAzureClient returns an Azure client
func (a *authArgs) GetAzureClient() cloud.Interface {
return a.azureClient
}
// GetAzureTenantID returns the Azure tenant ID
func (a *authArgs) GetAzureTenantID() string {
return a.tenantID
}
// Validate validates the authArgs
func (a *authArgs) Validate() error {
var err error
if a.authMethod == "" {
return errors.New("--auth-method is a required parameter")
}
if a.authMethod == cliAuthMethod && a.rawClientID != "" && a.clientSecret != "" {
a.authMethod = clientSecretAuthMethod
}
if a.authMethod == clientSecretAuthMethod || a.authMethod == clientCertificateAuthMethod {
if a.clientID, err = uuid.Parse(a.rawClientID); err != nil {
return errors.Wrap(err, "parsing --client-id")
}
if a.authMethod == clientSecretAuthMethod {
if a.clientSecret == "" {
return errors.New(`--client-secret must be specified when --auth-method="client_secret"`)
}
} else if a.authMethod == clientCertificateAuthMethod {
if a.certificatePath == "" || a.privateKeyPath == "" {
return errors.New(`--certificate-path and --private-key-path must be specified when --auth-method="client_certificate"`)
}
}
}
a.subscriptionID, _ = uuid.Parse(a.rawSubscriptionID)
if a.subscriptionID.String() == "00000000-0000-0000-0000-000000000000" {
var subID uuid.UUID
subID, err = getSubFromAzDir(filepath.Join(getHomeDir(), ".azure"))
if err != nil || subID.String() == "00000000-0000-0000-0000-000000000000" {
return errors.New("--subscription-id is required (and must be a valid UUID)")
}
log.Infoln("No subscription provided, using selected subscription from Azure CLI:", subID.String())
a.subscriptionID = subID
}
env, err := azure.EnvironmentFromName(a.rawAzureEnvironment)
if err != nil {
return errors.Wrap(err, "failed to parse --azure-env as a valid target Azure cloud environment")
}
if a.tenantID, err = cloud.GetTenantID(env.ResourceManagerEndpoint, a.subscriptionID.String()); err != nil {
return err
}
switch a.authMethod {
case cliAuthMethod:
a.azureClient, err = cloud.NewAzureClientWithCLI(env, a.subscriptionID.String(), a.tenantID)
case clientSecretAuthMethod:
a.azureClient, err = cloud.NewAzureClientWithClientSecret(env, a.subscriptionID.String(), a.clientID.String(), a.clientSecret, a.tenantID)
case clientCertificateAuthMethod:
a.azureClient, err = cloud.NewAzureClientWithClientCertificateFile(env, a.subscriptionID.String(), a.clientID.String(), a.tenantID, a.certificatePath, a.privateKeyPath)
default:
err = errors.Errorf("--auth-method: ERROR: method unsupported. method=%q", a.authMethod)
}
return err
}
// getSubFromAzDir returns the subscription ID from the Azure CLI directory
func getSubFromAzDir(root string) (uuid.UUID, error) {
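// Purely illustrative (hypothetical values, inferred from the keys read below): the
// two Azure CLI files this function parses are expected to look roughly like
//
//   ~/.azure/config            ~/.azure/clouds.config
//   [cloud]                    [AzureCloud]
//   name = AzureCloud          subscription = 00000000-0000-0000-0000-000000000000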
subConfig, err := ini.Load(filepath.Join(root, "clouds.config"))
if err != nil {
return uuid.UUID{}, errors.Wrap(err, "error decoding cloud subscription config")
}
cloudConfig, err := ini.Load(filepath.Join(root, "config"))
if err != nil {
return uuid.UUID{}, errors.Wrap(err, "error decoding cloud config")
}
cloud := getSelectedCloudFromAzConfig(cloudConfig)
return getCloudSubFromAzConfig(cloud, subConfig)
}
// getSelectedCloudFromAzConfig returns the selected cloud from the Azure CLI config
func getSelectedCloudFromAzConfig(f *ini.File) string {
selectedCloud := "AzureCloud"
if cloud, err := f.GetSection("cloud"); err == nil {
if name, err := cloud.GetKey("name"); err == nil {
if s := name.String(); s != "" {
selectedCloud = s
}
}
}
return selectedCloud
}
// getCloudSubFromAzConfig returns the subscription ID from the Azure CLI config
func getCloudSubFromAzConfig(cloud string, f *ini.File) (uuid.UUID, error) {
cfg, err := f.GetSection(cloud)
if err != nil {
return uuid.UUID{}, errors.Wrap(err, "could not find user defined subscription id")
}
sub, err := cfg.GetKey("subscription")
if err != nil {
return uuid.UUID{}, errors.Wrap(err, "error reading subscription id from cloud config")
}
return uuid.Parse(sub.String())
}
// getHomeDir attempts to get the home dir from env
func getHomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
return home
}
return os.Getenv("HOME")
}
|
[
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
src/test/test_util.py
|
#!/opt/anaconda/bin/python
import sys
import os
import unittest
import string
import numpy as np
from StringIO import StringIO
import py_compile
from osgeo import gdal, ogr
sys.path.append('/workspace/wfp-01-03-01/src/main/app-resources/notebook/libexec')
from aux_functions import matrix_sum, crop_image, write_output_image, calc_max_matrix, calc_average
# Simulating the Runtime environment
os.environ['TMPDIR'] = '/tmp'
os.environ['_CIOP_APPLICATION_PATH'] = '/application'
os.environ['ciop_job_nodeid'] = 'dummy'
os.environ['ciop_wf_run_root'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'artifacts')
#sys.path.append('../main/app-resources/util/')
#from util import log_input
class NodeATestCase(unittest.TestCase):
def setUp(self):
self.mat1 = np.matrix('1, 1; 1, 1')
self.mat2 = np.matrix('2, 2; 2, 2')
self.mat3 = np.matrix('-9999, -9999; -9999, -9999')
self.mat4 = np.matrix('-9999, 2; 3, -9999')
self.mat5 = 0
self.mat6 = np.matrix('1, 2; 3, 4')
self.mat7 = np.matrix('2, 3; 1, 3')
self.test_img = "/workspace/data/test_image_chirps.tif"
def test_matrix_sum(self):
sum1 = matrix_sum(self.mat1, self.mat2)
self.assertTrue((sum1 == np.matrix('3, 3; 3, 3')).all())
def test_matrix_sum_with_no_data_value(self):
sum1 = matrix_sum(self.mat1, self.mat4, -9999)
self.assertTrue((sum1 == np.matrix('1, 3; 4, 1')).all())
def test_matrix_sum_with_different_sizes(self):
sum1 = matrix_sum(self.mat1, self.mat5, -9999)
self.assertTrue((sum1 == self.mat1).all())
'''def test_crop_image(self):
polygon = 'POLYGON((-30 -10, 20 -10, 20 40, -30 40, -30 -10))'
cropped_image_path = "output.tif"
crop_image(self.test_img, polygon, cropped_image_path)
self.assertGreaterEqual(os.path.getsize(cropped_image_path), 0)
os.remove('output.tif')
'''
'''
def test_write_image(self):
matrix_rand = np.random.rand(30,30)
mask_rand = np.random.randint(2, size=(30,30))
filepath = "/workspace/wfp-01-03-01/src/test/output_test.tif"
write_output_image(filepath, matrix_rand, "GTiff", mask_rand, gdal.GDT_Float32)
self.assertGreaterEqual(os.path.getsize(filepath), 0)
os.remove('output_test.tif')'''
def test_max_matrix(self):
max_matrix = calc_max_matrix(self.mat6, self.mat7)
self.assertTrue((max_matrix == np.matrix('2, 3; 3, 4')).all())
def test_calc_average(self):
mat_list = [self.mat1, self.mat2, self.mat6, self.mat7]
average_matrix = calc_average(mat_list, 4)
print(average_matrix)
self.assertTrue((average_matrix == np.matrix('1.5, 2; 1.75, 2.5')).all())
def test_max_matrix_with_zero(self):
max_matrix = calc_max_matrix(self.mat5, self.mat1)
print(max_matrix)
self.assertTrue((max_matrix == self.mat1).all())
def test_compile(self):
try:
py_compile.compile('../main/app-resources/notebook/run', doraise=True)
except:
self.fail('failed to compile src/main/app-resources/notebook/run')
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"_CIOP_APPLICATION_PATH",
"ciop_wf_run_root",
"ciop_job_nodeid",
"TMPDIR"
] |
[]
|
["_CIOP_APPLICATION_PATH", "ciop_wf_run_root", "ciop_job_nodeid", "TMPDIR"]
|
python
| 4 | 0 | |
dofile.go
|
package redux
import (
"fmt"
"github.com/gyepisam/fileutils"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
)
/*
findDofile searches for the most specific .do file for the target and returns a DoInfo structure.
The structure's Missing field contains paths to more specific .do files, if any, that were not found.
If a file is found the structure's Name and Arg2 fields are also set appropriately.
*/
func (f *File) findDoFile() (*DoInfo, error) {
relPath := &RelPath{}
var missing []string
dir := f.Dir
candidates := f.DoInfoCandidates()
TOP:
for {
for _, do := range candidates {
path := filepath.Join(dir, do.Name)
exists, err := fileutils.FileExists(path)
f.Debug("%s %t %v\n", path, exists, err)
if err != nil {
return nil, err
} else if exists {
do.Dir = dir
do.RelDir = relPath.Join()
do.Missing = missing
return do, nil
}
missing = append(missing, path)
}
if dir == f.RootDir {
break TOP
}
relPath.Add(filepath.Base(dir))
dir = filepath.Dir(dir)
}
return &DoInfo{Missing: missing}, nil
}
const shell = "/bin/sh"
// RunDoFile executes the do file script, records the metadata for the resulting output, then
// saves the resulting output to the target file, if applicable.
// The execution is equivalent to:
// sh target.ext.do target.ext target outfn > out0
// A well behaved .do file writes to stdout (out0) or to the $3 file (outfn), but not both.
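// Purely illustrative (a hypothetical script, not shipped with this package): a
// default.o.do file following the convention above could be
//
//   # $1 = foo.o (target), $2 = foo (target without extension), $3 = temp output file
//   cc -c -o "$3" "$2.c"
//
// writing only to "$3", which is one of the two output channels checked below.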
func (target *File) RunDoFile(doInfo *DoInfo) (err error) {
// out0 is an open file connected to the subprocess stdout.
// For a task target (one run purely for its side effects), out0 is the real
// stdout; otherwise it is a temporary Output that is renamed into place later.
var out0 *Output
if target.IsTask() {
out0 = NewOutput(os.Stdout)
} else {
out0, err = target.NewOutput()
if err != nil {
return
}
defer out0.Cleanup()
}
// outfn is the arg3 filename argument to the do script.
var outfn *Output
outfn, err = target.NewOutput()
if err != nil {
return
}
defer outfn.Cleanup()
// Arg3 file should not exist prior to script execution
// so its subsequent existence can be significant.
if err = outfn.SetupArg3(); err != nil {
return
}
if err = target.runCmd(out0.File, outfn.Name(), doInfo); err != nil {
return
}
file, err := os.Open(outfn.Name())
if err != nil {
if os.IsNotExist(err) {
if target.IsTask() {
return nil
}
} else {
return
}
}
if target.IsTask() {
// Task files should not write to the temp file.
return target.Errorf("Task do file %s unexpectedly wrote to $3", target.DoFile)
}
if err = out0.Close(); err != nil {
return
}
outputs := make([]*Output, 0)
finfo, err := os.Stat(out0.Name())
if err != nil {
return
}
if finfo.Size() > 0 {
outputs = append(outputs, out0)
}
if file != nil {
outfn.File = file // for consistency
if err = outfn.Close(); err != nil {
return
}
outputs = append(outputs, outfn)
}
if n := len(outputs); n == 0 {
return target.Errorf("Do file %s generated no output or file activity", target.DoFile)
} else if n == 2 {
return target.Errorf("Do file %s wrote to stdout and to file $3", target.DoFile)
}
out := outputs[0]
err = os.Rename(out.Name(), target.Fullpath())
if err != nil && strings.Index(err.Error(), "cross-device") > -1 {
// The rename failed due to a cross-device error because the output file
// tmp dir is on a different device from the target file.
// Copy the tmp file across the device to the target directory and try again.
var path string
path, err = out.Copy(target.Dir)
if err != nil {
return
}
err = os.Rename(path, target.Fullpath())
if err != nil {
_ = os.Remove(path)
}
}
return
}
func (target *File) runCmd(out0 *os.File, outfn string, doInfo *DoInfo) error {
args := []string{"-e"}
if ShellArgs != "" {
if ShellArgs[0] != '-' {
ShellArgs = "-" + ShellArgs
}
args = append(args, ShellArgs)
}
pending := os.Getenv("REDO_PENDING")
pendingID := ";" + string(target.FullPathHash)
target.Debug("Current: [%s]. Pending: [%s].\n", pendingID, pending)
if strings.Contains(pending, pendingID) {
return fmt.Errorf("Loop detected on pending target: %s", target.Target)
}
pending += pendingID
relTarget := doInfo.RelPath(target.Name)
args = append(args, doInfo.Name, relTarget, doInfo.RelPath(doInfo.Arg2), outfn)
target.Debug("@sh %s $3\n", strings.Join(args[0:len(args)-1], " "))
cmd := exec.Command(shell, args...)
cmd.Dir = doInfo.Dir
cmd.Stdout = out0
cmd.Stderr = os.Stderr
depth := 0
if i64, err := strconv.ParseInt(os.Getenv("REDO_DEPTH"), 10, 32); err == nil {
depth = int(i64)
}
parent := os.Getenv("REDO_PARENT")
// Add environment variables, replacing existing entries if necessary.
cmdEnv := os.Environ()
env := map[string]string{
"REDO_PARENT": relTarget,
"REDO_DEPTH": strconv.Itoa(depth + 1),
"REDO_PENDING": pending,
}
// Update environment values if they exist and append them when they don't.
TOP:
for key, value := range env {
prefix := key + "="
for i, entry := range cmdEnv {
if strings.HasPrefix(entry, prefix) {
cmdEnv[i] = prefix + value
continue TOP
}
}
cmdEnv = append(cmdEnv, prefix+value)
}
cmd.Env = cmdEnv
if Verbose() {
prefix := strings.Repeat(" ", depth)
if parent != "" {
prefix += parent + " => "
}
target.Log("%s%s (%s)\n", prefix, target.Rel(target.Fullpath()), target.Rel(doInfo.Path()))
}
err := cmd.Run()
if err == nil {
return nil
}
if Verbose() {
return target.Errorf("%s %s: %s", shell, strings.Join(args, " "), err)
}
return target.Errorf("%s", err)
}
|
[
"\"REDO_PENDING\"",
"\"REDO_DEPTH\"",
"\"REDO_PARENT\""
] |
[] |
[
"REDO_PENDING",
"REDO_PARENT",
"REDO_DEPTH"
] |
[]
|
["REDO_PENDING", "REDO_PARENT", "REDO_DEPTH"]
|
go
| 3 | 0 | |
test/docker_single_test.go
|
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//
package test
import (
"fmt"
"os"
"strings"
"testing"
"time"
)
// TestDockerSingle runs the arangodb starter in docker with `--starter.mode=single`
func TestDockerSingle(t *testing.T) {
needTestMode(t, testModeDocker)
needStarterMode(t, starterModeSingle)
if os.Getenv("IP") == "" {
t.Fatal("IP envvar must be set to IP address of this machine")
}
/*
docker volume create arangodb1
docker run -i --name=adb1 --rm -p 8528:8528 \
-v arangodb1:/data \
-v /var/run/docker.sock:/var/run/docker.sock \
arangodb/arangodb-starter \
--docker.container=adb1 \
--starter.address=$IP \
--starter.mode=single
*/
volID := createDockerID("vol-starter-test-single-")
createDockerVolume(t, volID)
defer removeDockerVolume(t, volID)
// Cleanup of left over tests
removeDockerContainersByLabel(t, "starter-test=true")
removeStarterCreatedDockerContainers(t)
start := time.Now()
cID := createDockerID("starter-test-single-")
dockerRun := Spawn(t, strings.Join([]string{
"docker run -i",
"--label starter-test=true",
"--name=" + cID,
"--rm",
fmt.Sprintf("-p %d:%d", basePort, basePort),
fmt.Sprintf("-v %s:/data", volID),
"-v /var/run/docker.sock:/var/run/docker.sock",
"arangodb/arangodb-starter",
"--docker.container=" + cID,
"--starter.address=$IP",
"--starter.mode=single",
createEnvironmentStarterOptions(),
}, " "))
defer dockerRun.Close()
defer removeDockerContainer(t, cID)
if ok := WaitUntilStarterReady(t, whatSingle, dockerRun); ok {
t.Logf("Single server start took %s", time.Since(start))
testSingle(t, insecureStarterEndpoint(0*portIncrement), false)
}
if isVerbose {
t.Log("Waiting for termination")
}
ShutdownStarter(t, insecureStarterEndpoint(0*portIncrement))
}
// TestDockerSingleAutoKeyFile runs the arangodb starter in docker with `--starter.mode=single` && `--ssl.auto-key`
func TestDockerSingleAutoKeyFile(t *testing.T) {
needTestMode(t, testModeDocker)
needStarterMode(t, starterModeSingle)
if os.Getenv("IP") == "" {
t.Fatal("IP envvar must be set to IP address of this machine")
}
/*
docker volume create arangodb1
docker run -i --name=adb1 --rm -p 8528:8528 \
-v arangodb1:/data \
-v /var/run/docker.sock:/var/run/docker.sock \
arangodb/arangodb-starter \
--docker.container=adb1 \
--starter.address=$IP \
--starter.mode=single \
--ssl.auto-key
*/
volID := createDockerID("vol-starter-test-single-")
createDockerVolume(t, volID)
defer removeDockerVolume(t, volID)
// Cleanup of left over tests
removeDockerContainersByLabel(t, "starter-test=true")
removeStarterCreatedDockerContainers(t)
start := time.Now()
cID := createDockerID("starter-test-single-")
dockerRun := Spawn(t, strings.Join([]string{
"docker run -i",
"--label starter-test=true",
"--name=" + cID,
"--rm",
fmt.Sprintf("-p %d:%d", basePort, basePort),
fmt.Sprintf("-v %s:/data", volID),
"-v /var/run/docker.sock:/var/run/docker.sock",
"arangodb/arangodb-starter",
"--docker.container=" + cID,
"--starter.address=$IP",
"--starter.mode=single",
"--ssl.auto-key",
createEnvironmentStarterOptions(),
}, " "))
defer dockerRun.Close()
defer removeDockerContainer(t, cID)
if ok := WaitUntilStarterReady(t, whatSingle, dockerRun); ok {
t.Logf("Single server start took %s", time.Since(start))
testSingle(t, secureStarterEndpoint(0*portIncrement), true)
}
if isVerbose {
t.Log("Waiting for termination")
}
ShutdownStarter(t, secureStarterEndpoint(0*portIncrement))
}
// TestDockerSingleAutoContainerName runs the arangodb starter in docker with `--starter.mode=single` automatic detection of its container name.
func TestDockerSingleAutoContainerName(t *testing.T) {
needTestMode(t, testModeDocker)
needStarterMode(t, starterModeSingle)
if os.Getenv("IP") == "" {
t.Fatal("IP envvar must be set to IP address of this machine")
}
/*
docker volume create arangodb1
docker run -i --name=adb1 --rm -p 8528:8528 \
-v arangodb1:/data \
-v /var/run/docker.sock:/var/run/docker.sock \
arangodb/arangodb-starter \
--starter.address=$IP \
--starter.mode=single
*/
volID := createDockerID("vol-starter-test-single-")
createDockerVolume(t, volID)
defer removeDockerVolume(t, volID)
// Cleanup of left over tests
removeDockerContainersByLabel(t, "starter-test=true")
removeStarterCreatedDockerContainers(t)
start := time.Now()
cID := createDockerID("starter-test-single-")
dockerRun := Spawn(t, strings.Join([]string{
"docker run -i",
"--label starter-test=true",
"--name=" + cID,
"--rm",
fmt.Sprintf("-p %d:%d", basePort, basePort),
fmt.Sprintf("-v %s:/data", volID),
"-v /var/run/docker.sock:/var/run/docker.sock",
"arangodb/arangodb-starter",
"--starter.address=$IP",
"--starter.mode=single",
createEnvironmentStarterOptions(),
}, " "))
defer dockerRun.Close()
defer removeDockerContainer(t, cID)
if ok := WaitUntilStarterReady(t, whatSingle, dockerRun); ok {
t.Logf("Single server start took %s", time.Since(start))
testSingle(t, insecureStarterEndpoint(0*portIncrement), false)
}
if isVerbose {
t.Log("Waiting for termination")
}
ShutdownStarter(t, insecureStarterEndpoint(0*portIncrement))
}
// TestDockerSingleAutoRocksdb runs the arangodb starter in docker with `--server.storage-engine=rocksdb` and a 3.2 image.
func TestDockerSingleAutoRocksdb(t *testing.T) {
needTestMode(t, testModeDocker)
needStarterMode(t, starterModeSingle)
if os.Getenv("IP") == "" {
t.Fatal("IP envvar must be set to IP address of this machine")
}
/*
docker volume create arangodb1
docker run -i --name=adb1 --rm -p 8528:8528 \
-v arangodb1:/data \
-v /var/run/docker.sock:/var/run/docker.sock \
arangodb/arangodb-starter \
--starter.address=$IP \
--starter.mode=single \
--server.storage-engine=rocksdb \
--docker.image=arangodb/arangodb-preview:3.2.devel
*/
volID := createDockerID("vol-starter-test-single-")
createDockerVolume(t, volID)
defer removeDockerVolume(t, volID)
// Cleanup of left over tests
removeDockerContainersByLabel(t, "starter-test=true")
removeStarterCreatedDockerContainers(t)
start := time.Now()
skipDockerImage := true
cID := createDockerID("starter-test-single-")
dockerRun := Spawn(t, strings.Join([]string{
"docker run -i",
"--label starter-test=true",
"--name=" + cID,
"--rm",
fmt.Sprintf("-p %d:%d", basePort, basePort),
fmt.Sprintf("-v %s:/data", volID),
"-v /var/run/docker.sock:/var/run/docker.sock",
"arangodb/arangodb-starter",
"--starter.address=$IP",
"--starter.mode=single",
"--server.storage-engine=rocksdb",
"--docker.image=arangodb/arangodb-preview:3.2.devel",
createEnvironmentStarterOptions(skipDockerImage),
}, " "))
defer dockerRun.Close()
defer removeDockerContainer(t, cID)
if ok := WaitUntilStarterReady(t, whatSingle, dockerRun); ok {
t.Logf("Single server start took %s", time.Since(start))
testSingle(t, insecureStarterEndpoint(0*portIncrement), false)
}
if isVerbose {
t.Log("Waiting for termination")
}
ShutdownStarter(t, insecureStarterEndpoint(0*portIncrement))
}
// TestOldDockerSingleAutoKeyFile runs the arangodb starter in docker with `--mode=single` && `--sslAutoKeyFile`
func TestOldDockerSingleAutoKeyFile(t *testing.T) {
needTestMode(t, testModeDocker)
needStarterMode(t, starterModeSingle)
if os.Getenv("IP") == "" {
t.Fatal("IP envvar must be set to IP address of this machine")
}
/*
docker volume create arangodb1
docker run -i --name=adb1 --rm -p 8528:8528 \
-v arangodb1:/data \
-v /var/run/docker.sock:/var/run/docker.sock \
arangodb/arangodb-starter \
--dockerContainer=adb1 --ownAddress=$IP \
--mode=single --sslAutoKeyFile
*/
volID := createDockerID("vol-starter-test-single-")
createDockerVolume(t, volID)
defer removeDockerVolume(t, volID)
// Cleanup of left over tests
removeDockerContainersByLabel(t, "starter-test=true")
removeStarterCreatedDockerContainers(t)
start := time.Now()
cID := createDockerID("starter-test-single-")
dockerRun := Spawn(t, strings.Join([]string{
"docker run -i",
"--label starter-test=true",
"--name=" + cID,
"--rm",
fmt.Sprintf("-p %d:%d", basePort, basePort),
fmt.Sprintf("-v %s:/data", volID),
"-v /var/run/docker.sock:/var/run/docker.sock",
"arangodb/arangodb-starter",
"--dockerContainer=" + cID,
"--ownAddress=$IP",
"--mode=single",
"--sslAutoKeyFile",
createEnvironmentStarterOptions(),
}, " "))
defer dockerRun.Close()
defer removeDockerContainer(t, cID)
if ok := WaitUntilStarterReady(t, whatSingle, dockerRun); ok {
t.Logf("Single server start took %s", time.Since(start))
testSingle(t, secureStarterEndpoint(0*portIncrement), true)
}
if isVerbose {
t.Log("Waiting for termination")
}
ShutdownStarter(t, secureStarterEndpoint(0*portIncrement))
}
|
[
"\"IP\"",
"\"IP\"",
"\"IP\"",
"\"IP\"",
"\"IP\""
] |
[] |
[
"IP"
] |
[]
|
["IP"]
|
go
| 1 | 0 | |
test/fixtures.py
|
import os
from cachetools.func import lru_cache
from collections import namedtuple
from datetime import datetime, timedelta
import pytest
import shutil
import inspect
from flask import Flask, jsonify
from flask_login import LoginManager
from flask_principal import identity_loaded, Permission, Identity, identity_changed, Principal
from flask_mail import Mail
from peewee import SqliteDatabase, InternalError
from mock import patch
from app import app as application
from auth.permissions import on_identity_loaded
from data import model
from data.database import close_db_filter, db, configure
from data.model.user import LoginWrappedDBUser, create_robot, lookup_robot, create_user_noverify
from data.userfiles import Userfiles
from endpoints.api import api_bp
from endpoints.appr import appr_bp
from endpoints.web import web
from endpoints.v1 import v1_bp
from endpoints.v2 import v2_bp
from endpoints.verbs import verbs as verbs_bp
from endpoints.webhooks import webhooks
from initdb import initialize_database, populate_database
from path_converters import APIRepositoryPathConverter, RegexConverter, RepositoryPathConverter
from test.testconfig import FakeTransaction
INIT_DB_PATH = 0
@pytest.fixture(scope="session")
def init_db_path(tmpdir_factory):
"""
Creates a new database and appropriate configuration.
Note that the initial database is created *once* per session. In the non-full-db-test case, the
database_uri fixture makes a copy of the SQLite database file on disk and passes a new copy to
each test.
"""
# NOTE: We use a global here because pytest runs this code multiple times, due to the fixture
# being imported instead of being in a conftest. Moving to conftest has its own issues, and this
# call is quite slow, so we simply cache it here.
global INIT_DB_PATH
INIT_DB_PATH = INIT_DB_PATH or _init_db_path(tmpdir_factory)
return INIT_DB_PATH
def _init_db_path(tmpdir_factory):
if os.environ.get("TEST_DATABASE_URI"):
return _init_db_path_real_db(os.environ.get("TEST_DATABASE_URI"))
return _init_db_path_sqlite(tmpdir_factory)
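# Illustration only (the URI below is hypothetical): exporting TEST_DATABASE_URI makes
# the suite populate and reuse a real database instead of the per-test SQLite copy, e.g.
#
#   TEST_DATABASE_URI="postgresql://quay:quay@localhost/quay_test" pytest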
def _init_db_path_real_db(db_uri):
"""
Initializes a real database for testing by populating it from scratch. Note that this does.
*not* add the tables (merely data). Callers must have migrated the database before calling
the test suite.
"""
configure(
{
"DB_URI": db_uri,
"SECRET_KEY": "superdupersecret!!!1",
"DB_CONNECTION_ARGS": {"threadlocals": True, "autorollback": True,},
"DB_TRANSACTION_FACTORY": _create_transaction,
"DATABASE_SECRET_KEY": "anothercrazykey!",
}
)
populate_database()
return db_uri
def _init_db_path_sqlite(tmpdir_factory):
"""
Initializes a SQLite database for testing by populating it from scratch and placing it into a
temp directory file.
"""
sqlitedbfile = str(tmpdir_factory.mktemp("data").join("test.db"))
sqlitedb = "sqlite:///{0}".format(sqlitedbfile)
conf = {
"TESTING": True,
"DEBUG": True,
"SECRET_KEY": "superdupersecret!!!1",
"DATABASE_SECRET_KEY": "anothercrazykey!",
"DB_URI": sqlitedb,
}
os.environ["DB_URI"] = str(sqlitedb)
db.initialize(SqliteDatabase(sqlitedbfile))
application.config.update(conf)
application.config.update({"DB_URI": sqlitedb})
initialize_database()
db.obj.execute_sql("PRAGMA foreign_keys = ON;")
db.obj.execute_sql('PRAGMA encoding="UTF-8";')
populate_database()
close_db_filter(None)
return str(sqlitedbfile)
@pytest.yield_fixture()
def database_uri(monkeypatch, init_db_path, sqlitedb_file):
"""
Returns the database URI to use for testing.
In the SQLite case, a new, distinct copy of the SQLite database is created by copying the
initialized database file (sqlitedb_file) on a per-test basis. In the non-SQLite case, a
reference to the existing database URI is returned.
"""
if os.environ.get("TEST_DATABASE_URI"):
db_uri = os.environ["TEST_DATABASE_URI"]
monkeypatch.setenv("DB_URI", db_uri)
yield db_uri
else:
# Copy the golden database file to a new path.
shutil.copy2(init_db_path, sqlitedb_file)
# Monkeypatch the DB_URI.
db_path = "sqlite:///{0}".format(sqlitedb_file)
monkeypatch.setenv("DB_URI", db_path)
yield db_path
# Delete the DB copy.
assert ".." not in sqlitedb_file
assert "test.db" in sqlitedb_file
os.remove(sqlitedb_file)
@pytest.fixture()
def sqlitedb_file(tmpdir):
"""
Returns the path at which the initialized, golden SQLite database file will be placed.
"""
test_db_file = tmpdir.mkdir("quaydb").join("test.db")
return str(test_db_file)
def _create_transaction(db):
return FakeTransaction()
@pytest.fixture()
def appconfig(database_uri):
"""
Returns application configuration for testing that references the proper database URI.
"""
conf = {
"TESTING": True,
"DEBUG": True,
"DB_URI": database_uri,
"SECRET_KEY": "superdupersecret!!!1",
"DATABASE_SECRET_KEY": "anothercrazykey!",
"DB_CONNECTION_ARGS": {"threadlocals": True, "autorollback": True,},
"DB_TRANSACTION_FACTORY": _create_transaction,
"DATA_MODEL_CACHE_CONFIG": {"engine": "inmemory",},
"USERFILES_PATH": "userfiles/",
"MAIL_SERVER": "",
"MAIL_DEFAULT_SENDER": "[email protected]",
"DATABASE_SECRET_KEY": "anothercrazykey!",
}
return conf
AllowedAutoJoin = namedtuple("AllowedAutoJoin", ["frame_start_index", "pattern_prefixes"])
ALLOWED_AUTO_JOINS = [
AllowedAutoJoin(0, ["test_"]),
AllowedAutoJoin(0, ["<", "test_"]),
]
CALLER_FRAMES_OFFSET = 3
FRAME_NAME_INDEX = 3
@pytest.fixture()
def initialized_db(appconfig):
"""
Configures the database for the database found in the appconfig.
"""
under_test_real_database = bool(os.environ.get("TEST_DATABASE_URI"))
# Configure the database.
configure(appconfig)
# Initialize caches.
model._basequery._lookup_team_roles()
model._basequery.get_public_repo_visibility()
model.log.get_log_entry_kinds()
if not under_test_real_database:
# Make absolutely sure foreign key constraints are on.
db.obj.execute_sql("PRAGMA foreign_keys = ON;")
db.obj.execute_sql('PRAGMA encoding="UTF-8";')
assert db.obj.execute_sql("PRAGMA foreign_keys;").fetchone()[0] == 1
assert db.obj.execute_sql("PRAGMA encoding;").fetchone()[0] == "UTF-8"
# If under a test *real* database, setup a savepoint.
if under_test_real_database:
with db.transaction():
test_savepoint = db.savepoint()
test_savepoint.__enter__()
yield # Run the test.
try:
test_savepoint.rollback()
test_savepoint.__exit__(None, None, None)
except InternalError:
# If postgres fails with an exception (like IntegrityError) mid-transaction, it terminates
# it immediately, so when we go to remove the savepoint, it complains. We can safely ignore
# this case.
pass
else:
if os.environ.get("DISALLOW_AUTO_JOINS", "false").lower() == "true":
# Patch get_rel_instance to fail if we try to load any non-joined foreign key. This will allow
# us to catch missing joins when running tests.
def get_rel_instance(self, instance):
value = instance.__data__.get(self.name)
if value is not None or self.name in instance.__rel__:
if self.name not in instance.__rel__:
# NOTE: We only raise an exception if this auto-lookup occurs from non-testing code.
# Testing code can be a bit inefficient.
lookup_allowed = False
try:
outerframes = inspect.getouterframes(inspect.currentframe())
except IndexError:
# Happens due to a bug in Jinja.
outerframes = []
for allowed_auto_join in ALLOWED_AUTO_JOINS:
if lookup_allowed:
break
if (
len(outerframes)
>= allowed_auto_join.frame_start_index + CALLER_FRAMES_OFFSET
):
found_match = True
for index, pattern_prefix in enumerate(
allowed_auto_join.pattern_prefixes
):
frame_info = outerframes[index + CALLER_FRAMES_OFFSET]
if not frame_info[FRAME_NAME_INDEX].startswith(pattern_prefix):
found_match = False
break
if found_match:
lookup_allowed = True
break
if not lookup_allowed:
raise Exception(
"Missing join on instance `%s` for field `%s`", instance, self.name
)
obj = self.rel_model.get(self.field.rel_field == value)
instance.__rel__[self.name] = obj
return instance.__rel__[self.name]
elif not self.field.null:
raise self.rel_model.DoesNotExist
return value
with patch("peewee.ForeignKeyAccessor.get_rel_instance", get_rel_instance):
yield
else:
yield
@pytest.fixture()
def app(appconfig, initialized_db):
"""
Used by pytest-flask plugin to inject a custom app instance for testing.
"""
app = Flask(__name__)
login_manager = LoginManager(app)
@app.errorhandler(model.DataModelException)
def handle_dme(ex):
response = jsonify({"message": str(ex)})
response.status_code = 400
return response
@login_manager.user_loader
def load_user(user_uuid):
return LoginWrappedDBUser(user_uuid)
@identity_loaded.connect_via(app)
def on_identity_loaded_for_test(sender, identity):
on_identity_loaded(sender, identity)
Principal(app, use_sessions=False)
app.url_map.converters["regex"] = RegexConverter
app.url_map.converters["apirepopath"] = APIRepositoryPathConverter
app.url_map.converters["repopath"] = RepositoryPathConverter
app.register_blueprint(api_bp, url_prefix="/api")
app.register_blueprint(appr_bp, url_prefix="/cnr")
app.register_blueprint(web, url_prefix="/")
app.register_blueprint(verbs_bp, url_prefix="/c1")
app.register_blueprint(v1_bp, url_prefix="/v1")
app.register_blueprint(v2_bp, url_prefix="/v2")
app.register_blueprint(webhooks, url_prefix="/webhooks")
app.config.update(appconfig)
Userfiles(app)
Mail(app)
return app
|
[] |
[] |
[
"DISALLOW_AUTO_JOINS",
"DB_URI",
"TEST_DATABASE_URI"
] |
[]
|
["DISALLOW_AUTO_JOINS", "DB_URI", "TEST_DATABASE_URI"]
|
python
| 3 | 0 | |
project/server/config.py
|
# project/server/config.py
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfig(object):
"""Base configuration."""
APP_NAME = os.getenv("APP_NAME", "SA Strat db")
BCRYPT_LOG_ROUNDS = 4
DEBUG_TB_ENABLED = False
SECRET_KEY = os.getenv("SECRET_KEY", "my_precious")
SQLALCHEMY_TRACK_MODIFICATIONS = False
WTF_CSRF_ENABLED = False
MAIL_SERVER = os.getenv("MAIL_SERVER", "localhost")
MAIL_PORT = os.getenv("MAIL_PORT", '8025')
MAIL_USE_TLS = os.getenv("MAIL_USE_TLS")
MAIL_USERNAME = os.getenv("MAIL_USERNAME")
MAIL_PASSWORD = os.getenv("MAIL_PASSWORD")
ADMIN_EMAIL = os.getenv("ADMIN_EMAIL")
class DevelopmentConfig(BaseConfig):
"""Development configuration."""
DEBUG_TB_ENABLED = True
DEBUG_TB_INTERCEPT_REDIRECTS = False
MAIL_SERVER = "localhost"
MAIL_PORT = '8025'
SQLALCHEMY_DATABASE_URI = os.environ.get(
"DATABASE_URL", "sqlite:///{0}".format(os.path.join(basedir, "dev.db"))
)
class TestingConfig(BaseConfig):
"""Testing configuration."""
PRESERVE_CONTEXT_ON_EXCEPTION = False
SQLALCHEMY_DATABASE_URI = "sqlite:///"
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_TEST_URL", "sqlite:///")
TESTING = True
class ProductionConfig(BaseConfig):
"""Production configuration."""
BCRYPT_LOG_ROUNDS = 13
SQLALCHEMY_DATABASE_URI = os.environ.get(
"DATABASE_URL",
"sqlite:///{0}".format(os.path.join(basedir, "prod.db")),
)
WTF_CSRF_ENABLED = True
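# Illustrative usage (not part of this module): a Flask app factory would typically
# select one of these classes by its import path, e.g.
#
#   app.config.from_object("project.server.config.DevelopmentConfig")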
|
[] |
[] |
[
"MAIL_SERVER",
"MAIL_PASSWORD",
"DATABASE_URL",
"MAIL_PORT",
"ADMIN_EMAIL",
"SECRET_KEY",
"MAIL_USERNAME",
"APP_NAME",
"MAIL_USE_TLS",
"DATABASE_TEST_URL"
] |
[]
|
["MAIL_SERVER", "MAIL_PASSWORD", "DATABASE_URL", "MAIL_PORT", "ADMIN_EMAIL", "SECRET_KEY", "MAIL_USERNAME", "APP_NAME", "MAIL_USE_TLS", "DATABASE_TEST_URL"]
|
python
| 10 | 0 | |
services/resourcemanager/struct_region_statuses_in_create_resource_group.go
|
package resourcemanager
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// RegionStatusesInCreateResourceGroup is a nested struct in resourcemanager response
type RegionStatusesInCreateResourceGroup struct {
RegionStatus []RegionStatus `json:"RegionStatus" xml:"RegionStatus"`
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
cli/cli.go
|
package cli
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/spf13/cobra"
)
// SD is the main interface to running sd
type SD interface {
Run() error
}
type sd struct {
root *cobra.Command
initialized bool
}
// New returns an instance of SD
func New(version string) SD {
s := &sd{
root: &cobra.Command{
Use: "sd",
Version: version,
},
}
s.init()
return s
}
func (s *sd) init() {
s.initAliasing()
s.initCompletions()
s.initDebugging()
s.initEditing()
s.initialized = true
}
func showUsage(cmd *cobra.Command, _ []string) error {
return cmd.Usage()
}
func (s *sd) Run() error {
if !s.initialized {
return fmt.Errorf("init() not called")
}
err := s.loadCommands()
if err != nil {
logrus.Debugf("Error loading commands: %v", err)
return err
}
err = s.root.Execute()
if err != nil {
logrus.Debugf("Error executing command: %v", err)
return err
}
return nil
}
func (s *sd) initAliasing() {
s.root.PersistentFlags().StringP("alias", "a", "sd", "Use an alias in help text and completions")
err := s.root.PersistentFlags().MarkHidden("alias")
if err != nil {
panic(err)
}
s.root.Use = "sd"
// Flags haven't been parsed yet, we need to do it ourselves
for i, arg := range os.Args {
if (arg == "-a" || arg == "--alias") && len(os.Args) >= i+2 {
alias := os.Args[i+1]
if alias == "" {
break
}
s.root.Use = alias
s.root.Version = fmt.Sprintf("%s (aliased to %s)", s.root.Version, alias)
logrus.Debug("Aliasing: sd replaced with ", alias, " in help text")
}
}
s.root.RunE = showUsage
}
func (s *sd) initCompletions() {
c := &cobra.Command{
Use: "completions",
Short: "Generate completion scripts",
RunE: showUsage,
}
c.AddCommand(&cobra.Command{
Use: "bash",
Short: "Generate completions for bash",
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Root().GenBashCompletion(os.Stdout)
},
})
c.AddCommand(&cobra.Command{
Use: "zsh",
Short: "Generate completions for zsh",
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Root().GenZshCompletion(os.Stdout)
},
})
logrus.Debug("Completions (bash/zsh) commands added")
s.root.AddCommand(c)
}
func (s *sd) initDebugging() {
s.root.PersistentFlags().BoolP("debug", "d", false, "Turn debugging on/off")
// Flags haven't been parsed yet, we need to do it ourselves
for _, arg := range os.Args {
if arg == "-d" || arg == "--debug" {
logrus.SetLevel(logrus.DebugLevel)
}
}
}
func (s *sd) initEditing() {
s.root.PersistentFlags().BoolP("edit", "e", false, "Edit command")
}
func (s *sd) loadCommands() error {
logrus.Debug("Loading commands started")
home := filepath.Join(os.Getenv("HOME"), ".sd")
logrus.Debug("HOME is set to: ", home)
wd, err := os.Getwd()
if err != nil {
return err
}
logrus.Debug("Current working dir is set to: ", wd)
current := filepath.Join(wd, "scripts")
logrus.Debug("Looking for ./scripts in: ", current)
sdPath := os.Getenv("SD_PATH")
paths := filepath.SplitList(sdPath)
logrus.Debug("SD_PATH is set to:", sdPath, ", parsed as: ", paths)
for _, path := range deduplicate(append([]string{home, current}, paths...)) {
cmds, err := visitDir(path)
if err != nil {
return err
}
for _, c := range cmds {
s.root.AddCommand(c)
}
}
logrus.Debug("Loading commands done")
return nil
}
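// Purely illustrative (hypothetical layout, not from this repository): visitDir below
// turns a tree such as
//
//   ~/.sd/deploy/README    -> "sd deploy" group (first README line becomes the short help)
//   ~/.sd/deploy/staging   -> "sd deploy staging" (executable script)
//
// into nested cobra commands.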
func visitDir(path string) ([]*cobra.Command, error) {
logrus.Debug("Visiting path: ", path)
var cmds []*cobra.Command
if _, err := os.Stat(path); os.IsNotExist(err) {
logrus.Debug("Path does not exist: ", path)
return cmds, nil
}
items, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
for _, item := range items {
switch {
case strings.HasPrefix(item.Name(), "."):
logrus.Debug("Ignoring hidden path: ", filepath.Join(path, item.Name()))
continue
case item.IsDir():
logrus.Debug("Found directory: ", filepath.Join(path, item.Name()))
cmd := &cobra.Command{
Use: fmt.Sprintf("%s [command]", item.Name()),
}
readmePath := filepath.Join(path, item.Name(), "README")
readme, err := ioutil.ReadFile(readmePath)
if err == nil {
logrus.Debug("Found README at: ", readmePath)
cmd.Short = strings.Split(string(readme), "\n")[0]
cmd.Long = string(readme)
cmd.Args = cobra.NoArgs
cmd.RunE = showUsage
}
subcmds, err := visitDir(filepath.Join(path, item.Name()))
if err != nil {
return nil, err
}
for _, i := range subcmds {
cmd.AddCommand(i)
}
if cmd.HasSubCommands() {
logrus.Debug("Directory has scripts (subcommands) inside it: ", filepath.Join(path, item.Name()))
cmd.RunE = showUsage
}
cmds = append(cmds, cmd)
case item.Mode()&0100 != 0:
logrus.Debug("Script found: ", filepath.Join(path, item.Name()))
cmd, err := commandFromScript(filepath.Join(path, item.Name()))
if err != nil {
return nil, err
}
cmds = append(cmds, cmd)
}
}
return cmds, nil
}
func commandFromScript(path string) (*cobra.Command, error) {
shortDesc, err := shortDescriptionFrom(path)
if err != nil {
return nil, err
}
usage, args, err := usageFrom(path)
if err != nil {
return nil, err
}
cmd := &cobra.Command{
Use: usage,
Short: shortDesc,
Annotations: map[string]string{
"Source": path,
},
Args: args,
RunE: execCommand,
}
example, err := exampleFrom(path)
if err != nil {
return nil, err
}
cmd.Example = example
logrus.Debug("Created command: ", filepath.Base(path))
return cmd, nil
}
// these get mocked in tests
var (
syscallExec = syscall.Exec
env = os.Getenv
)
func execCommand(cmd *cobra.Command, args []string) error {
src := cmd.Annotations["Source"]
edit, err := cmd.Root().PersistentFlags().GetBool("edit")
if err != nil {
return err
}
if edit {
editor := env("VISUAL")
if editor == "" {
logrus.Debug("$VISUAL not set, trying $EDITOR...")
editor = env("EDITOR")
if editor == "" {
logrus.Debug("$EDITOR not set, trying $(which vim)...")
editor = "$(command -v vim)"
}
}
cmdline := []string{"sh", "-c", strings.Join([]string{editor, src}, " ")}
logrus.Debug("Running ", cmdline)
return syscallExec("/bin/sh", cmdline, os.Environ())
}
logrus.Debug("Exec: ", src, " with args: ", args)
return syscallExec(src, append([]string{src}, args...), makeEnv(cmd))
}
func makeEnv(cmd *cobra.Command) []string {
out := os.Environ()
out = append(out, fmt.Sprintf("SD_ALIAS=%s", cmd.Root().Use))
if debug, _ := cmd.Root().PersistentFlags().GetBool("debug"); debug {
out = append(out, "DEBUG=true")
}
return out
}
|
[
"\"HOME\"",
"\"SD_PATH\""
] |
[] |
[
"HOME",
"SD_PATH"
] |
[]
|
["HOME", "SD_PATH"]
|
go
| 2 | 0 | |
springdoc-openapi-webmvc-core/src/test/java/test/org/springdoc/api/app105/api/ApiUtil.java
|
/*
*
* * Copyright 2019-2020 the original author or authors.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * https://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package test.org.springdoc.api.app105.api;
import java.io.IOException;
import javax.servlet.http.HttpServletResponse;
import org.springframework.http.HttpStatus;
import org.springframework.web.context.request.NativeWebRequest;
import org.springframework.web.server.ResponseStatusException;
public class ApiUtil {
public static void setExampleResponse(NativeWebRequest req, String contentType, String example) {
try {
req.getNativeResponse(HttpServletResponse.class).addHeader("Content-Type", contentType);
req.getNativeResponse(HttpServletResponse.class).getOutputStream().print(example);
}
catch (IOException e) {
throw new RuntimeException(e);
}
}
public static void checkApiKey(NativeWebRequest req) {
if (!"1".equals(System.getenv("DISABLE_API_KEY")) && !"special-key".equals(req.getHeader("api_key"))) {
throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, "Missing API key!");
}
}
}
|
[
"\"DISABLE_API_KEY\""
] |
[] |
[
"DISABLE_API_KEY"
] |
[]
|
["DISABLE_API_KEY"]
|
java
| 1 | 0 | |
crawler/login.go
|
package crawler
import (
"auto-report/model"
"context"
"errors"
"github.com/devfeel/mapper"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"regexp"
"strings"
)
var (
LTURL = "http://xgsm.hitsz.edu.cn/zhxy-xgzs/xg_mobile/shsj/common"
LOGINURL = "https://sso.hitsz.edu.cn:7002/cas/login?service=http://xgsm.hitsz.edu.cn/zhxy-xgzs/common/casLogin?params=L3hnX21vYmlsZS94c0hvbWU="
POSTID = "http://xgsm.hitsz.edu.cn/zhxy-xgzs/xg_mobile/xs/csh"
REPORT = "http://xgsm.hitsz.edu.cn/zhxy-xgzs/xg_mobile/xs/getYqxx"
COMMITURL = "http://xgsm.hitsz.edu.cn/zhxy-xgzs/xg_mobile/xs/saveYqxx"
JW_Mirror = "https://yes.mzz.pub:7002"
XGSM_Mirror = "http://yes.mzz.pub:7004"
)
func init() {
mapper.Register(&model.ReportData{})
mapper.Register(&model.ModelData{})
}
func getLt(client http.Client) (string, error) {
resp, err := client.Get(LTURL)
if err != nil {
log.Println(err)
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Println(err)
return "", err
}
template := regexp.MustCompile(`<input.*?type="hidden".*?value="(.*?)".*?/>`)
lt := template.FindStringSubmatch(string(body))[1]
return lt, nil
}
// setMirror reroutes outgoing connections to the mirror hosts by rewriting the dial target based on the destination port
func setMirror(client http.Client) http.Client {
client.Transport = &http.Transport{DialContext: func(ctx context.Context, network string, addr string) (net.Conn, error) {
_, port, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
var target string
// tricky
if port == "7002" {
target = JW_Mirror
} else {
target = XGSM_Mirror
}
u, err := url.Parse(target)
if err != nil {
panic(err)
}
ip := u.Hostname()
port = u.Port()
if port == "" {
if u.Scheme == "https" {
port = "443"
} else {
port = "80"
}
}
if net.ParseIP(ip) == nil {
ips, err := net.LookupHost(ip)
if err != nil {
return nil, err
}
ip = ips[0]
}
return net.Dial(network, net.JoinHostPort(ip, port))
}}
return client
}
func login(account, password string) (http.Client, error) {
jar, _ := cookiejar.New(nil)
var client = http.Client{
Jar: jar,
}
// GitHub Actions sets CI=true; route requests through the mirrors when running there
if value := os.Getenv("CI"); value == "true" {
client = setMirror(client)
}
lt, err := getLt(client)
if err != nil {
log.Println(err)
return client, err
}
params := url.Values{
"username": {account},
"password": {password},
"rememberMe": {"on"},
"lt": {lt},
"execution": {"e1s1"},
"_eventId": {"submit"},
"vc_username": {""},
"vc_password": {""},
}
resp, err := client.Post(LOGINURL, "application/x-www-form-urlencoded", strings.NewReader(params.Encode()))
if err != nil {
log.Println(err)
return client, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Println(err)
return client, err
}
bodyContent := string(body)
if strings.Contains(bodyContent, "每日上报") { // the "daily report" entry is only shown after a successful login
log.Println("登录成功!") // "Login succeeded!"
} else {
log.Println("登录失败!用户名或密码错误!") // "Login failed! Wrong username or password!"
return client, errors.New("login error")
}
return client, nil
}
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 |