filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
test_pc_vox_refine.py | from __future__ import print_function
import argparse
import random
import torch.backends.cudnn as cudnn
from dataset_img2vox import *
from model import *
from torchvision.utils import save_image
from common import *
parser = argparse.ArgumentParser()
parser.add_argument('--dataRoot', type=str, default='/data/dudong/ShapeNetCore.v1', help='data root path')
parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
parser.add_argument('--thres', type=float, default=0.5, help='voxel binarization threshold')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=1)
parser.add_argument('--model', type=str, default='checkpoint', help='model path')
parser.add_argument('--test', type=str, default='test_pc_vox_refine', help='test results path')
parser.add_argument('--cat', type=str, default='03001627')
parser.add_argument('--cuda', type=str, default='0')
opt = parser.parse_args()
# cat_set: '03001627', '02691156', '02958343', '04090263', '04256520', '04379243'
os.environ["CUDA_VISIBLE_DEVICES"] = opt.cuda
opt.manualSeed = random.randint(1, 10000) # fix seed
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
# create path
model_path = os.path.join(opt.model, opt.cat)
test_path = os.path.join(opt.test, opt.cat)
if not os.path.exists(test_path):
os.makedirs(test_path)
# Create testing dataloader
# using different point cloud data
dataset_test = ShapeNet(img_root=os.path.join(opt.dataRoot, 'renderingimg'),
vox_root=os.path.join(opt.dataRoot, 'vox64'),
filelist_root=os.path.join(opt.dataRoot, 'train_val_test_list'),
cat=opt.cat, mode='test', view_pick=True)
dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=opt.batchSize,
shuffle=False, num_workers=int(opt.workers))
len_dataset = len(dataset_test)
print('testing set num', len_dataset)
# Create network
network_img2vox = VoxelNetwork64(z_dim=128, gf_dim=128)
network_img2vox.cuda()
network_img2vox.load_state_dict(torch.load(os.path.join(model_path, 'img2vox64.pt')))
network_img2vox.eval()
network_occnet = OccupancyNetwork()
network_occnet.cuda()
network_occnet.load_state_dict(torch.load(os.path.join(opt.model, 'occnet.pt')))
network_occnet.eval()
network_img2pc = PointSetGenerationNetwork(z_dim=256, n_pc=1024)
network_img2pc.cuda()
network_img2pc.load_state_dict(torch.load(os.path.join(model_path, 'img2pc.pt')))
network_img2pc.eval()
network_vox_refine = VoxelRefineNetwork()
network_vox_refine.cuda()
network_vox_refine.load_state_dict(torch.load(os.path.join(model_path, 'vox64_refine.pt')))
network_vox_refine.eval()
# Create Loss Module
criterion = nn.BCEWithLogitsLoss(pos_weight=torch.FloatTensor([2.]).cuda()) # nn.BCEWithLogitsLoss()
with torch.no_grad():
for i, data in enumerate(dataloader_test, 0):
img, vox_gt, name, view_id = data
img = img.cuda()
vox_gt = vox_gt.cuda()
pc = network_img2pc(img)
vox_init, vox_init_sigmoid = network_img2vox(img)
pc_occ_sigmoid = network_occnet.predict(img, pc)
vox_update = network_vox_refine.voxel_updater(vox_init_sigmoid, pc, pc_occ_sigmoid)
vox_ref, vox_ref_sigmoid = network_vox_refine(vox_update)
loss_init = criterion(vox_init, vox_gt)
loss_ref = criterion(vox_ref, vox_gt)
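# Binarize: adding (1 - thres) before truncating to uint8 maps sigmoid values >= thres to 1 and the rest to 0.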
vox_init_sigmoid = vox_init_sigmoid[0, 0, :, :, :]
vox_init_sigmoid = vox_init_sigmoid.cpu().data.squeeze().numpy() + (1-opt.thres)
vox_init_sigmoid = np.array(vox_init_sigmoid, dtype=np.uint8).reshape((64, 64, 64))
vox_ref_sigmoid = vox_ref_sigmoid[0, 0, :, :, :]
vox_ref_sigmoid = vox_ref_sigmoid.cpu().data.squeeze().numpy() + (1-opt.thres)
vox_ref_sigmoid = np.array(vox_ref_sigmoid, dtype=np.uint8).reshape((64, 64, 64))
vox_gt = vox_gt.cpu().data.squeeze().numpy()
vox_gt = np.array(vox_gt, dtype=np.uint8).reshape((64, 64, 64))
save_image(img.squeeze(0).cpu(), os.path.join(test_path, name[0] + '_' + view_id[0] + '.png'))
pc = np.array(pc.cpu().data.squeeze().numpy()).reshape((-1, 3))
write_pts_obj(pc, os.path.join(test_path, name[0] + '_' + view_id[0] + '_pre.obj'))
write_binvox_file(vox_init_sigmoid, os.path.join(test_path, name[0] + '_' + view_id[0] + '_init.binvox'))
write_binvox_file(vox_ref_sigmoid, os.path.join(test_path, name[0] + '_' + view_id[0] + '_ref.binvox'))
write_binvox_file(vox_gt, os.path.join(test_path, name[0] + '_' + view_id[0] + '_gt.binvox'))
print('testing %s, view name %s, loss_init %f, loss_ref %f' %
(name[0], view_id[0], loss_init.item(), loss_ref.item()))
if i > 19:
break
print('Testing done!')
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
qa/rpc-tests/util.py | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The Agon developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
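# Offset ports by the process id so parallel test runs on the same machine pick different ports.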
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "agon.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
agond and agon-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run agond:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "agond"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "agon-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in agon.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a agond and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "agond"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "agon-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple agonds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
| [] | [] | ["BITCOINCLI", "BITCOIND"] | [] | ["BITCOINCLI", "BITCOIND"] | python | 2 | 0 | |
tests/zpill.py | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, timedelta, tzinfo
import fnmatch
import json
import unittest
import os
import shutil
import zipfile
import boto3
from botocore.response import StreamingBody
import jmespath
from placebo import pill
import placebo
from six import StringIO
###########################################################################
# BEGIN PLACEBO MONKEY PATCH
#
# Placebo is effectively abandoned upstream, since mitch went back to work at AWS, irony...
# These monkeypatches represent fixes on trunk of that repo that have not been released
# in an extant version, so we carry them here. We can drop this when this issue is resolved
#
# https://github.com/garnaat/placebo/issues/63
#
# License - Apache 2.0
# Copyright (c) 2015 Mitch Garnaat
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
utc = UTC()
def deserialize(obj):
"""Convert JSON dicts back into objects."""
# Be careful of shallow copy here
target = dict(obj)
class_name = None
if "__class__" in target:
class_name = target.pop("__class__")
if "__module__" in obj:
obj.pop("__module__")
# Use getattr(module, class_name) for custom types if needed
if class_name == "datetime":
return datetime(tzinfo=utc, **target)
if class_name == "StreamingBody":
return StringIO(target["body"])
# Return unrecognized structures as-is
return obj
def serialize(obj):
"""Convert objects into JSON structures."""
# Record class and module information for deserialization
result = {"__class__": obj.__class__.__name__}
try:
result["__module__"] = obj.__module__
except AttributeError:
pass
# Convert objects to dictionary representation based on type
if isinstance(obj, datetime):
result["year"] = obj.year
result["month"] = obj.month
result["day"] = obj.day
result["hour"] = obj.hour
result["minute"] = obj.minute
result["second"] = obj.second
result["microsecond"] = obj.microsecond
return result
if isinstance(obj, StreamingBody):
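# Reading consumes the stream, so restore an in-memory copy afterwards so callers can still read the body.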
result["body"] = obj.read()
obj._raw_stream = StringIO(result["body"])
obj._amount_read = 0
return result
if isinstance(obj, bytes):
return obj.decode('utf8')
# Raise a TypeError if the object isn't recognized
raise TypeError("Type not serializable")
placebo.pill.serialize = serialize
placebo.pill.deserialize = deserialize
# END PLACEBO MONKEY
##########################################################################
class BluePill(pill.Pill):
def playback(self):
super(BluePill, self).playback()
self._avail = self.get_available()
def get_available(self):
return set(
[
os.path.join(self.data_path, n)
for n in fnmatch.filter(os.listdir(self.data_path), "*.json")
]
)
def get_next_file_path(self, service, operation):
fn = super(BluePill, self).get_next_file_path(service, operation)
# couple of double use cases
if fn in self._avail:
self._avail.remove(fn)
else:
print("\ndouble use %s\n" % fn)
return fn
def stop(self):
result = super(BluePill, self).stop()
if self._avail:
print("Unused json files \n %s" % ("\n".join(sorted(self._avail))))
return result
class ZippedPill(pill.Pill):
def __init__(self, path, prefix=None, debug=False):
super(ZippedPill, self).__init__(prefix, debug)
self.path = path
self._used = set()
self.archive = None
def playback(self):
self.archive = zipfile.ZipFile(self.path, "r")
self._files = set(self.archive.namelist())
return super(ZippedPill, self).playback()
def record(self):
self.archive = zipfile.ZipFile(self.path, "a", zipfile.ZIP_DEFLATED)
self._files = set()
files = set([n for n in self.archive.namelist() if n.startswith(self.prefix)])
if not files:
return super(ZippedPill, self).record()
# We can't update files in a zip, so copy
self.archive.close()
os.rename(self.path, "%s.tmp" % self.path)
src = zipfile.ZipFile("%s.tmp" % self.path, "r")
self.archive = zipfile.ZipFile(self.path, "w", zipfile.ZIP_DEFLATED)
for n in src.namelist():
if n in files:
continue
self.archive.writestr(n, src.read(n))
os.remove("%s.tmp" % self.path)
return super(ZippedPill, self).record()
def stop(self):
super(ZippedPill, self).stop()
if self.archive:
self.archive.close()
def save_response(self, service, operation, response_data, http_response=200):
filepath = self.get_new_file_path(service, operation)
pill.LOG.debug("save_response: path=%s", filepath)
json_data = {"status_code": http_response, "data": response_data}
self.archive.writestr(
filepath,
json.dumps(json_data, indent=4, default=pill.serialize),
zipfile.ZIP_DEFLATED,
)
self._files.add(filepath)
def load_response(self, service, operation):
response_file = self.get_next_file_path(service, operation)
self._used.add(response_file)
pill.LOG.debug("load_responses: %s", response_file)
response_data = json.loads(
self.archive.read(response_file), object_hook=pill.deserialize
)
return (
pill.FakeHttpResponse(response_data["status_code"]), response_data["data"]
)
def get_new_file_path(self, service, operation):
base_name = "{0}.{1}".format(service, operation)
if self.prefix:
base_name = "{0}.{1}".format(self.prefix, base_name)
pill.LOG.debug("get_new_file_path: %s", base_name)
index = 0
glob_pattern = os.path.join(self._data_path, base_name + "*")
for file_path in fnmatch.filter(self._files, glob_pattern):
file_name = os.path.basename(file_path)
m = self.filename_re.match(file_name)
if m:
i = int(m.group("index"))
if i > index:
index = i
index += 1
return os.path.join(self._data_path, "{0}_{1}.json".format(base_name, index))
def get_next_file_path(self, service, operation):
base_name = "{0}.{1}".format(service, operation)
if self.prefix:
base_name = "{0}.{1}".format(self.prefix, base_name)
pill.LOG.debug("get_next_file_path: %s", base_name)
next_file = None
while next_file is None:
index = self._index.setdefault(base_name, 1)
fn = os.path.join(self._data_path, base_name + "_{0}.json".format(index))
if fn in self._files:
next_file = fn
self._index[base_name] += 1
self._files.add(fn)
elif index != 1:
self._index[base_name] = 1
else:
# we are looking for the first index and it's not here
raise IOError("response file ({0}) not found".format(fn))
return fn
def attach(session, data_path, prefix=None, debug=False):
pill = ZippedPill(data_path, prefix=prefix, debug=debug)
pill.attach(session, prefix)
return pill
class PillTest(unittest.TestCase):
archive_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "placebo_data.zip"
)
placebo_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "placebo"
)
output_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "output"
)
recording = False
def assertJmes(self, expr, instance, expected):
value = jmespath.search(expr, instance)
self.assertEqual(value, expected)
def cleanUp(self):
self.pill = None
def record_flight_data(self, test_case, zdata=False, augment=False):
self.recording = True
test_dir = os.path.join(self.placebo_dir, test_case)
if not (zdata or augment):
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.makedirs(test_dir)
session = boto3.Session()
default_region = session.region_name
if not zdata:
pill = placebo.attach(session, test_dir)
else:
pill = attach(session, self.archive_path, test_case, debug=True)
pill.record()
self.pill = pill
self.addCleanup(pill.stop)
self.addCleanup(self.cleanUp)
def factory(region=None, assume=None):
if region and region != default_region:
new_session = boto3.Session(region_name=region)
assert not zdata
new_pill = placebo.attach(new_session, test_dir, debug=True)
new_pill.record()
self.addCleanup(new_pill.stop)
return new_session
return session
return factory
def replay_flight_data(self, test_case, zdata=False, region=None):
"""
The `region` argument is to allow functional tests to override the
default region. It is unused when replaying stored data.
"""
if os.environ.get("C7N_FUNCTIONAL") == "yes":
self.recording = True
return lambda region=region, assume=None: boto3.Session(region_name=region)
if not zdata:
test_dir = os.path.join(self.placebo_dir, test_case)
if not os.path.exists(test_dir):
raise RuntimeError("Invalid Test Dir for flight data %s" % test_dir)
session = boto3.Session()
if not zdata:
pill = placebo.attach(session, test_dir)
# pill = BluePill()
# pill.attach(session, test_dir)
else:
pill = attach(session, self.archive_path, test_case, False)
pill.playback()
self.addCleanup(pill.stop)
self.addCleanup(self.cleanUp)
return lambda region=None, assume=None: session
| [] | [] | ["C7N_FUNCTIONAL"] | [] | ["C7N_FUNCTIONAL"] | python | 1 | 0 | |
core/git.go | package core
import (
"fmt"
"github.com/j-martin/bub/utils"
"github.com/manifoldco/promptui"
"github.com/pkg/errors"
"io/ioutil"
"log"
"os"
"path"
"regexp"
"strings"
"sync"
"text/tabwriter"
)
type Git struct {
cfg *Configuration
dir string
}
type GitCommit struct {
Hash, Committer, Subject, Body string
}
type RepoOperation func(string) (string, error)
func InitGit() *Git {
return &Git{}
}
func MustInitGit(repoDir string) *Git {
if repoDir == "" {
repoDir = "."
}
return &Git{dir: repoDir}
}
func (g *Git) RunGit(args ...string) error {
if g.dir != "" {
args = append([]string{"-C", g.dir}, args...)
}
log.Printf("Running: 'git %v'", strings.Join(args, " "))
return utils.RunCmd("git", args...)
}
func (g *Git) RunGitWithStdout(args ...string) (string, error) {
if g.dir != "" {
args = append([]string{"-C", g.dir}, args...)
}
return utils.RunCmdWithStdout("git", args...)
}
func (g *Git) RunGitWithFullOutput(args ...string) (string, error) {
if g.dir != "" {
args = append([]string{"-C", g.dir}, args...)
}
return utils.RunCmdWithFullOutput("git", args...)
}
func (g *Git) MustRunGitWithStdout(args ...string) string {
output, err := g.RunGitWithStdout(args...)
if err != nil {
log.Fatalf("Git failed: %v", err)
}
return output
}
func (g *Git) GetCurrentRepositoryName() string {
repositoryUri := g.MustRunGitWithStdout("config", "--get", "remote.origin.url")
return strings.TrimSuffix(path.Base(repositoryUri), path.Ext(repositoryUri))
}
func (g *Git) GetCurrentBranch() string {
result, err := g.RunGitWithStdout("symbolic-ref", "--short", "-q", "HEAD")
if err != nil {
// if on jenkins the HEAD is usually detached, but you can infer the branch name.
branchEnv := os.Getenv("BRANCH_NAME")
if branchEnv != "" {
log.Printf("Could not get branch name from git: %v", err)
log.Printf("Inferring from environment variables: %v", branchEnv)
}
return branchEnv
}
return strings.Trim(string(result), "\n ")
}
func (g *Git) GetRepositoryRootPath() (string, error) {
return g.RunGitWithStdout("rev-parse", "--show-toplevel")
}
func (g *Git) GetTitleFromBranchName() string {
branch := g.GetCurrentBranch()
return strings.Replace(strings.Replace(strings.Replace(branch, "-", "_", 1), "-", " ", -1), "_", "-", -1)
}
func (g *Git) Clone() (string, error) {
log.Printf("Cloning: %v", g.dir)
return utils.RunCmdWithFullOutput("git", "clone", "[email protected]:benchlabs/"+g.dir+".git")
}
func (g *Git) Push(cfg *Configuration) error {
args := []string{"push", "--set-upstream", "origin", g.GetCurrentBranch()}
if cfg.Git.NoVerify {
args = append(args, "--no-verify")
}
return g.RunGit(args...)
}
func (g *Git) Sync(unStash bool) (string, error) {
commands := [][]string{
{"reset", "HEAD", g.dir},
}
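// "git diff-index --quiet HEAD --" exits non-zero when the working tree has uncommitted changes.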
dirtyTree := g.RunGit("diff-index", "--quiet", "HEAD", "--") != nil
if dirtyTree {
commands = append(commands, [][]string{
{"checkout", "master", "-f"},
{"stash", "save", "pre-update-" + utils.CurrentTimeForFilename()},
}...)
}
commands = append(commands, [][]string{
{"checkout", "master", "-f"},
{"clean", "-fd"},
{"checkout", "master", "."},
{"pull"},
{"pull", "--tags"},
}...)
if dirtyTree && unStash {
commands = append(commands, []string{"stash", "apply"})
}
for _, cmd := range commands {
out, err := g.RunGitWithFullOutput(cmd...)
if err != nil {
return out, err
}
}
return "", nil
}
type ConcurrentResult struct {
Output string
Err error
}
type ConcurrentResults map[string]ConcurrentResult
func ConcurrentRepositoryOperations(repos []string, fn RepoOperation) error {
var wg sync.WaitGroup
var mutex sync.Mutex
errs := ConcurrentResults{}
for _, r := range repos {
log.Printf("Sync: %v", r)
wg.Add(1)
go func(repo string) {
defer wg.Done()
output, err := fn(repo)
mutex.Lock()
errs[repo] = ConcurrentResult{Output: output, Err: err}
mutex.Unlock()
log.Printf("%v: done.", repo)
}(r)
}
wg.Wait()
errorCount := 0
for repo, result := range errs {
fmt.Println(result.Output)
if result.Err != nil {
errorCount++
log.Printf("%v failed to be updated: %v", repo, result.Err)
}
}
if errorCount > 0 {
log.Printf("%v repos failed to be updated.", errorCount)
return errors.New("some repos failed to update")
}
log.Print("All Done.")
return nil
}
func (g *Git) syncRepository() (string, error) {
repositoryExists, _ := utils.PathExists(g.dir)
if repositoryExists {
return g.Sync(true)
} else {
return g.Clone()
}
}
func (g *Git) Log() (commits []*GitCommit) {
output := strings.Split(g.MustRunGitWithStdout("log", "--pretty=format:%h||~||%an||~||%s||~||%b|~~~~~|"), "|~~~~~|\n")
for _, line := range output {
if len(line) == 0 {
continue
}
fields := strings.Split(line, "||~||")
commits = append(commits, &GitCommit{Hash: fields[0], Committer: fields[1], Subject: fields[2], Body: fields[3]})
}
return commits
}
func (g *Git) PendingChanges(cfg *Configuration, manifest *Manifest, previousVersion, currentVersion string, formatForSlack bool, noAt bool) {
table := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
output := g.MustRunGitWithStdout("log", "--first-parent", "--pretty=format:%h\t\t%an\t%s", previousVersion+"..."+currentVersion)
if formatForSlack {
re := g.GetIssueRegex()
output = re.ReplaceAllString(output, "<https://"+cfg.JIRA.Server+"/browse/$1|$1>")
re = g.GetPRRegex()
output = re.ReplaceAllString(output, "<https://github.com/"+cfg.GitHub.Organization+"/"+manifest.Repository+"/pull/$2|PR#$2> ")
re = regexp.MustCompile("(?m:^)([a-z0-9]{6,})")
output = re.ReplaceAllString(output, "<https://github.com/"+cfg.GitHub.Organization+"/"+manifest.Repository+"/commit/$1|$1>")
}
fmt.Fprintln(table, output)
table.Flush()
if !noAt {
committerSlackArr := g.committerSlackReference(cfg, previousVersion, currentVersion)
if formatForSlack {
fmt.Print("\n" + strings.Join(committerSlackArr, ", "))
}
}
}
func (g *Git) GetPRRegex() *regexp.Regexp {
return regexp.MustCompile("(Merge pull request #)(\\d+) from \\w+/")
}
func (g *Git) GetIssueRegex() *regexp.Regexp {
return regexp.MustCompile("([A-Z]{2,}-\\d+)")
}
func (g *Git) PickCommit(commits []*GitCommit) (*GitCommit, error) {
templates := &promptui.SelectTemplates{
Label: "{{ . }}:",
Active: "▶ {{ .Hash }} {{ .Subject }}",
Inactive: " {{ .Hash }} {{ .Subject }}",
Selected: "▶ {{ .Hash }} {{ .Subject }}",
Details: `
{{ .Hash }}
{{ .Committer }}
{{ .Subject }}
{{ .Body }}
`,
}
searcher := func(input string, index int) bool {
i := commits[index]
name := strings.Replace(strings.ToLower(i.Subject), " ", "", -1)
input = strings.Replace(strings.ToLower(input), " ", "", -1)
return strings.Contains(name, input)
}
prompt := promptui.Select{
Size: 20,
Label: "Pick commit",
Items: commits,
Templates: templates,
Searcher: searcher,
StartInSearchMode: true,
}
i, _, err := prompt.Run()
return commits[i], err
}
func (g *Git) FetchTags() error {
return g.RunGit("fetch", "--tags")
}
func (g *Git) Fetch() error {
return g.RunGit("fetch")
}
func (g *Git) sanitizeBranchName(name string) string {
r := regexp.MustCompile("[^a-zA-Z0-9]+")
r2 := regexp.MustCompile("-+")
return strings.Trim(r2.ReplaceAllString(r.ReplaceAllString(name, "-"), "-"), "-")
}
func (g *Git) LogNotInMasterSubjects() []string {
return strings.Split(g.MustRunGitWithStdout("log", "HEAD", "--not", "origin/master", "--no-merges", "--pretty=format:%s"), "\n")
}
func (g *Git) LogNotInMasterBody() string {
return g.MustRunGitWithStdout("log", "HEAD", "--not", "origin/master", "--no-merges", "--pretty=format:-> %B")
}
func (g *Git) ListFileChanged() []string {
return strings.Split(g.MustRunGitWithStdout("diff", "HEAD", "--not", "origin/master", "--name-only"), "\n")
}
func (g *Git) GetIssueKeyFromBranch() string {
return g.extractIssueKeyFromName(g.GetCurrentBranch())
}
func (g *Git) CommitWithBranchName() error {
return g.RunGit("commit", "-m", g.GetTitleFromBranchName(), "--all")
}
func (g *Git) CurrentHEAD() (string, error) {
return g.RunGitWithStdout("rev-parse", "HEAD")
}
func (g *Git) CommitWithIssueKey(cfg *Configuration, message string, extraArgs []string) error {
issueKey := g.GetIssueKeyFromBranch()
if message == "" {
title := g.GetTitleFromBranchName()
pos := strings.Index(title, " ")
if pos < 0 {
return errors.New("commit message could not be inferred from branch name")
}
message = title[pos:]
}
message = strings.Trim(message, " ")
if len(message) == 0 {
return errors.New("no commit message passed or could not be inferred from branch name")
}
if issueKey != "" {
message = issueKey + " " + message
}
args := []string{
"commit", "-m", message,
}
if cfg.Git.NoVerify {
args = append(args, "--no-verify")
}
args = append(args, extraArgs...)
return g.RunGit(args...)
}
func (g *Git) extractIssueKeyFromName(name string) string {
return g.GetIssueRegex().FindString(name)
}
func (g *Git) CreateBranch(name string) error {
name = g.sanitizeBranchName(name)
return g.RunGit("checkout", "-b", name, "origin/master")
}
func (g *Git) ForceCreateBranch(name string) error {
name = g.sanitizeBranchName(name)
return g.RunGit("checkout", "-B", name, "origin/master")
}
func (g *Git) CheckoutBranch() error {
item, err := utils.PickItem("Pick a branch", g.getBranches())
if err != nil {
return err
}
return g.RunGit("checkout", item)
}
func ForEachRepo(fn RepoOperation) error {
var repos []string
files, err := ioutil.ReadDir("./")
if err != nil {
return err
}
for _, value := range files {
if !value.IsDir() {
continue
}
if !utils.IsRepository(value.Name()) {
continue
}
repos = append(repos, value.Name())
}
return ConcurrentRepositoryOperations(repos, fn)
}
func (g *Git) getBranches() []string {
output := g.MustRunGitWithStdout("branch", "--all", "--sort=-committerdate")
var branches []string
for _, b := range strings.Split(output, "\n") {
b = strings.TrimPrefix(strings.Trim(b, " "), "* ")
if b == "" {
continue
}
branches = append(branches, b)
}
return branches
}
func (g *Git) committerSlackReference(cfg *Configuration, previousVersion string, currentVersion string) []string {
committerMapping := make(map[string]string)
for _, i := range cfg.Users {
committerMapping[i.Name] = i.Slack
}
committersStdout := g.MustRunGitWithStdout("log", "--first-parent", "--pretty=format:%an", previousVersion+"..."+currentVersion)
committersSlackMapping := make(map[string]string)
for _, commiterName := range strings.Split(committersStdout, "\n") {
slackUserName := committerMapping[commiterName]
if slackUserName == "" {
slackUserName = commiterName
} else {
slackUserName = "@" + slackUserName
}
committersSlackMapping[commiterName] = slackUserName
}
var committerSlackArr []string
for _, v := range committersSlackMapping {
committerSlackArr = append(committerSlackArr, v)
}
return committerSlackArr
}
func (g *Git) ContainedUncommittedChanges() bool {
return utils.HasNonEmptyLines(strings.Split(g.MustRunGitWithStdout("status", "--short"), "\n"))
}
func (g *Git) IsDifferentFromMaster() bool {
return utils.HasNonEmptyLines(g.LogNotInMasterSubjects())
}
func (g *Git) ISDirty() bool {
return g.RunGit("diff-index", "--quiet", "HEAD", "--") != nil
}
func (g *Git) Diff() (string, error) {
if !g.IsDifferentFromMaster() {
return "", nil
}
return g.RunGitWithFullOutput("--no-pager", "diff")
}
| ["\"BRANCH_NAME\""] | [] | ["BRANCH_NAME"] | [] | ["BRANCH_NAME"] | go | 1 | 0 | |
cmd/km/main.go | package main
import (
"context"
"flag"
"fmt"
"github.com/bsycorp/keymaster/km/api"
"github.com/bsycorp/keymaster/km/workflow"
"github.com/pkg/errors"
"gopkg.in/ini.v1"
"io/ioutil"
"log"
"os"
"runtime"
"time"
)
var targetFlag = flag.String("target", "", "target issuer")
var roleFlag = flag.String("role", "", "target role")
var debugFlag = flag.Int("debug", 0, "enable debugging")
var debugLevel = 0
func main() {
flag.Parse()
// create km directory
kmDirectory := fmt.Sprintf("%s/.km", UserHomeDir())
if err := os.MkdirAll(kmDirectory, 0700); err != nil {
log.Println("Failed to create ~/.km directory: ", err)
}
if *roleFlag == "" {
log.Fatalln("Required argument role missing (need -role)")
}
if *targetFlag == "" {
log.Fatalln("Required argument taget is missing (need -target)")
}
debugLevel = *debugFlag
// Draft workflow
// First, get the config
// target := "arn:aws:lambda:ap-southeast-2:062921715532:function:km2"
//target := "arn:aws:apigateway:ap-southeast-2:lambda:path/2015-03-31/functions/arn:aws:lambda:ap-southeast-2:218296299700:function:km2/invocations"
target := *targetFlag
kmApi := api.NewClient(target)
kmApi.Debug = debugLevel
discoveryReq := new(api.DiscoveryRequest)
_, err := kmApi.Discovery(discoveryReq)
if err != nil {
log.Fatal(errors.Wrap(err, "error calling kmApi.Discovery"))
}
configReq := new(api.ConfigRequest)
configResp, err := kmApi.GetConfig(configReq)
if err != nil {
log.Fatal(errors.Wrap(err, "error calling kmApi.GetConfig"))
}
// Now start workflow to get nonce
kmWorkflowStartResponse, err := kmApi.WorkflowStart(&api.WorkflowStartRequest{})
if err != nil {
log.Fatal(errors.Wrap(err, "error calling kmApi.WorkflowStart"))
}
log.Println("Started workflow with km api")
log.Println("Target role for authentication:", *roleFlag)
targetRole := configResp.Config.FindRoleByName(*roleFlag)
if targetRole == nil {
log.Fatalf("Target role #{roleFlag} not found in config")
}
workflowPolicyName := targetRole.Workflow
configWorkflowPolicy := configResp.Config.Workflow.FindPolicyByName(workflowPolicyName)
if configWorkflowPolicy == nil {
log.Fatalf("workflow policy %s not found in config", workflowPolicyName)
}
workflowPolicy := workflow.Policy{
Name: configWorkflowPolicy.Name,
IdpName: configWorkflowPolicy.IdpName,
RequesterCanApprove: configWorkflowPolicy.RequesterCanApprove,
IdentifyRoles: configWorkflowPolicy.IdentifyRoles,
ApproverRoles: configWorkflowPolicy.ApproverRoles,
}
workflowBaseUrl := configResp.Config.Workflow.BaseUrl
log.Println("Using workflow engine:", workflowBaseUrl)
workflowApi, err := workflow.NewClient(workflowBaseUrl)
if err != nil {
log.Fatal(err)
}
workflowApi.Debug = debugLevel
// And start a workflow session
startResult, err := workflowApi.Create(context.Background(), &workflow.CreateRequest{
IdpNonce: kmWorkflowStartResponse.IdpNonce,
Requester: workflow.Requester{
Name: "Blair Strang",
Username: "strangb",
Email: "[email protected]",
},
Source: workflow.Source{
Description: "Deploy a new version 3.2 with amazing features",
DetailsURI: "https://gitlab.com/platform/keymaster",
},
Target: workflow.Target{
EnvironmentName: configResp.Config.Name,
EnvironmentDiscoveryURI: target,
},
Policy: workflowPolicy,
})
if err != nil {
log.Fatal(err)
}
// Now fix up the workflow URL
log.Printf("------------------------------------------------------------------")
log.Printf("******************************************************************")
log.Printf("APPROVAL URL: %s", startResult.WorkflowUrl)
log.Printf("******************************************************************")
log.Printf("------------------------------------------------------------------")
// Poll for assertions
var getAssertionsResult *workflow.GetAssertionsResponse
for {
getAssertionsResult, err = workflowApi.GetAssertions(context.Background(), &workflow.GetAssertionsRequest{
WorkflowId: startResult.WorkflowId,
WorkflowNonce: startResult.WorkflowNonce,
})
if err != nil {
log.Println(errors.Wrap(err, "error calling workflowApi.GetAssertions"))
}
log.Printf("workflow state: %s", getAssertionsResult.Status)
if getAssertionsResult.Status == "CREATED" {
time.Sleep(5 * time.Second)
} else if getAssertionsResult.Status == "COMPLETED" {
break
} else if getAssertionsResult.Status == "REJECTED" {
log.Fatal("Your change request was REJECTED by a workflow approver. Exiting.")
} else {
log.Fatal("unexpected assertions result status:", getAssertionsResult.Status)
}
}
log.Printf("got: %d assertions from workflow", len(getAssertionsResult.Assertions))
creds, err := kmApi.WorkflowAuth(&api.WorkflowAuthRequest{
Username: "gitlab", // TODO
Role: "deployment",
IdpNonce: kmWorkflowStartResponse.IdpNonce,
IssuingNonce: kmWorkflowStartResponse.IssuingNonce,
Assertions: getAssertionsResult.Assertions,
})
if err != nil {
log.Fatal(errors.Wrap(err, "error calling kmApi.WorkflowAuth"))
}
var iamCred *api.Cred
for _, cred := range creds.Credentials {
if cred.Type == "iam" {
iamCred = &cred
break
}
}
if iamCred == nil {
log.Fatal("Got creds but no IAM cred?")
}
iamCredValue, ok := iamCred.Value.(*api.IAMCred)
if !ok {
log.Fatal("oops IAM cred is wrong type?")
}
awsCredsFmt := `[%s]
aws_access_key_id = %s
aws_secret_access_key = %s
aws_session_token = %s
# Keymaster issued, expires: %s
`
exp := time.Unix(iamCred.Expiry, 0)
localAwsCreds := fmt.Sprintf(
awsCredsFmt,
iamCredValue.ProfileName,
iamCredValue.AccessKeyId,
iamCredValue.SecretAccessKey,
iamCredValue.SessionToken,
exp,
)
awsCredentialsPath := UserHomeDir() + "/.aws/credentials"
existingCreds, err := ioutil.ReadFile(awsCredentialsPath)
if err != nil {
fmt.Printf("Failed to update local credentials: %v", err)
} else {
log.Printf("Found existing credentials file, appending..")
awsCredentialsIni, err := ini.Load(existingCreds, []byte(localAwsCreds))
if err != nil {
fmt.Printf("Failed to read existing local credentials: %v", err)
} else {
err = awsCredentialsIni.SaveTo(awsCredentialsPath)
if err != nil {
fmt.Printf("Failed to update local credentials: %v", err)
}
}
}
}
func UserHomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
return home
}
return os.Getenv("HOME")
}
func WriteFile(data []byte, localPath string, perm os.FileMode) {
log.Printf("Writing local file: %s", localPath)
err := ioutil.WriteFile(localPath, data, perm)
if err != nil {
log.Fatalf("Failed to write local file: %s: %s", localPath, err)
}
if err := FixWindowsPerms(localPath); err != nil {
log.Fatalf("Failed to set file permissions: %s: %s", localPath, err)
}
}
| ["\"HOMEDRIVE\"", "\"HOMEPATH\"", "\"USERPROFILE\"", "\"HOME\""] | [] | ["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"] | [] | ["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"] | go | 4 | 0 | |
pkg/controllers/provisioningv2/rke2/machineprovision/args.go | package machineprovision
import (
"fmt"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1"
"github.com/rancher/rancher/pkg/controllers/management/drivers"
"github.com/rancher/rancher/pkg/controllers/provisioningv2/rke2"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
namespace2 "github.com/rancher/rancher/pkg/namespace"
"github.com/rancher/rancher/pkg/settings"
"github.com/rancher/wrangler/pkg/data"
"github.com/rancher/wrangler/pkg/data/convert"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/kv"
name2 "github.com/rancher/wrangler/pkg/name"
corev1 "k8s.io/api/core/v1"
apierror "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
capi "sigs.k8s.io/cluster-api/api/v1beta1"
)
var (
regExHyphen = regexp.MustCompile("([a-z])([A-Z])")
envNameOverride = map[string]string{
"amazonec2": "AWS",
"rackspace": "OS",
"openstack": "OS",
"vmwarevsphere": "VSPHERE",
"vmwarefusion": "FUSION",
"vmwarevcloudair": "VCLOUDAIR",
}
)
type driverArgs struct {
rkev1.RKEMachineStatus
DriverName string
ImageName string
MachineName string
MachineNamespace string
MachineGVK schema.GroupVersionKind
ImagePullPolicy corev1.PullPolicy
EnvSecret *corev1.Secret
FilesSecret *corev1.Secret
StateSecretName string
BootstrapSecretName string
BootstrapRequired bool
Args []string
BackoffLimit int32
}
func (h *handler) getArgsEnvAndStatus(infraObj *infraObject, args map[string]interface{}, driver string, create bool) (driverArgs, error) {
var (
url, hash, cloudCredentialSecretName string
jobBackoffLimit int32
filesSecret *corev1.Secret
)
if infraObj.data.String("spec", "providerID") != "" && !infraObj.data.Bool("status", "jobComplete") {
// If the providerID is set, but jobComplete is false, then we need to re-enqueue the job so the proper status is set from that handler.
job, err := h.getJobFromInfraMachine(infraObj)
if err != nil {
return driverArgs{}, err
}
h.jobController.Enqueue(infraObj.meta.GetNamespace(), job.Name)
return driverArgs{}, generic.ErrSkip
}
nd, err := h.nodeDriverCache.Get(driver)
if !create && apierror.IsNotFound(err) {
url = infraObj.data.String("status", "driverURL")
hash = infraObj.data.String("status", "driverHash")
} else if err != nil {
return driverArgs{}, err
} else if !strings.HasPrefix(nd.Spec.URL, "local://") {
url = getDriverDownloadURL(nd)
hash = nd.Spec.Checksum
}
envSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name2.SafeConcatName(infraObj.meta.GetName(), "machine", "driver", "secret"),
Namespace: infraObj.meta.GetNamespace(),
},
Data: map[string][]byte{
"HTTP_PROXY": []byte(os.Getenv("HTTP_PROXY")),
"HTTPS_PROXY": []byte(os.Getenv("HTTPS_PROXY")),
"NO_PROXY": []byte(os.Getenv("NO_PROXY")),
},
}
machine, err := rke2.GetMachineByOwner(h.machines, infraObj.meta)
if err != nil {
return driverArgs{}, err
}
bootstrapName, cloudCredentialSecretName, secrets, err := h.getSecretData(machine, infraObj.data, create)
if err != nil {
return driverArgs{}, err
}
for k, v := range secrets {
_, k = kv.RSplit(k, "-")
envName := envNameOverride[driver]
if envName == "" {
envName = driver
}
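// Build docker-machine style env var names from camelCase keys, e.g. accessKey -> AWS_ACCESS_KEY for the amazonec2 driver.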
k := strings.ToUpper(envName + "_" + regExHyphen.ReplaceAllString(k, "${1}_${2}"))
envSecret.Data[k] = []byte(v)
}
secretName := rke2.MachineStateSecretName(infraObj.meta.GetName())
cmd := []string{
fmt.Sprintf("--driver-download-url=%s", url),
fmt.Sprintf("--driver-hash=%s", hash),
fmt.Sprintf("--secret-namespace=%s", infraObj.meta.GetNamespace()),
fmt.Sprintf("--secret-name=%s", secretName),
}
if create {
cmd = append(cmd, "create",
fmt.Sprintf("--driver=%s", driver),
fmt.Sprintf("--custom-install-script=/run/secrets/machine/value"))
rancherCluster, err := h.rancherClusterCache.Get(infraObj.meta.GetNamespace(), infraObj.meta.GetLabels()[capi.ClusterLabelName])
if err != nil {
return driverArgs{}, err
}
// The files secret must be constructed before toArgs is called because
// constructFilesSecret replaces file contents and creates a secret to be passed as a volume.
filesSecret = constructFilesSecret(driver, args)
cmd = append(cmd, toArgs(driver, args, rancherCluster.Status.ClusterName)...)
} else {
cmd = append(cmd, "rm", "-y")
jobBackoffLimit = 3
}
// cloud-init will split the hostname on '.' and set the hostname to the first chunk. This causes an issue where all
// nodes in a machine pool may have the same node name in Kubernetes. Converting the '.' to '-' here prevents this.
cmd = append(cmd, strings.ReplaceAll(infraObj.meta.GetName(), ".", "-"))
return driverArgs{
DriverName: driver,
MachineName: infraObj.meta.GetName(),
MachineNamespace: infraObj.meta.GetNamespace(),
MachineGVK: infraObj.obj.GetObjectKind().GroupVersionKind(),
ImageName: settings.PrefixPrivateRegistry(settings.MachineProvisionImage.Get()),
ImagePullPolicy: corev1.PullAlways,
EnvSecret: envSecret,
FilesSecret: filesSecret,
StateSecretName: secretName,
BootstrapSecretName: bootstrapName,
BootstrapRequired: create,
Args: cmd,
BackoffLimit: jobBackoffLimit,
RKEMachineStatus: rkev1.RKEMachineStatus{
Ready: infraObj.data.String("spec", "providerID") != "" && infraObj.data.Bool("status", "jobComplete"),
DriverHash: hash,
DriverURL: url,
CloudCredentialSecretName: cloudCredentialSecretName,
},
}, nil
}
func (h *handler) getBootstrapSecret(machine *capi.Machine) (string, error) {
if machine == nil || machine.Spec.Bootstrap.ConfigRef == nil {
return "", nil
}
gvk := schema.FromAPIVersionAndKind(machine.Spec.Bootstrap.ConfigRef.APIVersion,
machine.Spec.Bootstrap.ConfigRef.Kind)
bootstrap, err := h.dynamic.Get(gvk, machine.Namespace, machine.Spec.Bootstrap.ConfigRef.Name)
if apierror.IsNotFound(err) {
return "", nil
} else if err != nil {
return "", err
}
d, err := data.Convert(bootstrap)
if err != nil {
return "", err
}
return d.String("status", "dataSecretName"), nil
}
func (h *handler) getSecretData(machine *capi.Machine, obj data.Object, create bool) (string, string, map[string]string, error) {
var (
err error
result = map[string]string{}
)
oldCredential := obj.String("status", "cloudCredentialSecretName")
cloudCredentialSecretName := obj.String("spec", "common", "cloudCredentialSecretName")
if machine == nil && create {
return "", "", nil, generic.ErrSkip
}
if cloudCredentialSecretName == "" {
cloudCredentialSecretName = oldCredential
}
if cloudCredentialSecretName != "" && machine != nil {
secret, err := GetCloudCredentialSecret(h.secrets, machine.GetNamespace(), cloudCredentialSecretName)
if err != nil {
return "", "", nil, err
}
for k, v := range secret.Data {
result[k] = string(v)
}
}
bootstrapName, err := h.getBootstrapSecret(machine)
if err != nil {
return "", "", nil, err
}
return bootstrapName, cloudCredentialSecretName, result, nil
}
func GetCloudCredentialSecret(secrets corecontrollers.SecretCache, namespace, name string) (*corev1.Secret, error) {
globalNS, globalName := kv.Split(name, ":")
if globalName != "" && globalNS == namespace2.GlobalNamespace {
return secrets.Get(globalNS, globalName)
}
return secrets.Get(namespace, name)
}
func toArgs(driverName string, args map[string]interface{}, clusterID string) (cmd []string) {
if driverName == "amazonec2" {
tagValue := fmt.Sprintf("kubernetes.io/cluster/%s,owned", clusterID)
if tags, ok := args["tags"]; !ok || convert.ToString(tags) == "" {
args["tags"] = tagValue
} else {
args["tags"] = convert.ToString(tags) + "," + tagValue
}
}
for k, v := range args {
dmField := "--" + driverName + "-" + strings.ToLower(regExHyphen.ReplaceAllString(k, "${1}-${2}"))
if v == nil {
continue
}
switch v.(type) {
case float64:
cmd = append(cmd, fmt.Sprintf("%s=%v", dmField, v))
case string:
if v.(string) != "" {
cmd = append(cmd, fmt.Sprintf("%s=%s", dmField, v.(string)))
}
case bool:
if v.(bool) {
cmd = append(cmd, dmField)
}
case []interface{}:
for _, s := range v.([]interface{}) {
if _, ok := s.(string); ok {
cmd = append(cmd, fmt.Sprintf("%s=%s", dmField, s.(string)))
}
}
}
}
if driverName == "amazonec2" &&
convert.ToString(args["securityGroup"]) != "rancher-nodes" &&
args["securityGroupReadonly"] == nil {
cmd = append(cmd, "--amazonec2-security-group-readonly")
}
sort.Strings(cmd)
return
}
func getNodeDriverName(typeMeta meta.Type) string {
return strings.ToLower(strings.TrimSuffix(typeMeta.GetKind(), "Machine"))
}
// getDriverDownloadURL checks for a local version of the driver to download for air-gapped installs.
// If no local version is found or CATTLE_DEV_MODE is set, then the URL from the node driver is returned.
func getDriverDownloadURL(nd *v3.NodeDriver) string {
if os.Getenv("CATTLE_DEV_MODE") != "" {
return nd.Spec.URL
}
driverName := nd.Name
if !strings.HasPrefix(driverName, drivers.DockerMachineDriverPrefix) {
driverName = drivers.DockerMachineDriverPrefix + driverName
}
if _, err := os.Stat(filepath.Join(settings.UIPath.Get(), "assets", driverName)); err != nil {
return nd.Spec.URL
}
return fmt.Sprintf("%s/assets/%s", settings.ServerURL.Get(), driverName)
}
| ["\"HTTP_PROXY\"", "\"HTTPS_PROXY\"", "\"NO_PROXY\"", "\"CATTLE_DEV_MODE\""] | [] | ["HTTP_PROXY", "HTTPS_PROXY", "CATTLE_DEV_MODE", "NO_PROXY"] | [] | ["HTTP_PROXY", "HTTPS_PROXY", "CATTLE_DEV_MODE", "NO_PROXY"] | go | 4 | 0 | |
CycleGAN-Pix2Pix/.ipynb_checkpoints/train-checkpoint.py | """General-purpose training script for image-to-image translation.
This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
It first creates model, dataset, and visualizer given the option.
It then does standard network training. During training, it also visualizes/saves the images, prints/saves the loss plot, and saves models.
The script supports continue/resume training. Use '--continue_train' to resume your previous training.
Example:
Train a CycleGAN model:
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Train a pix2pix model:
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/train_options.py for more training options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import time
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
if __name__ == '__main__':
opt = TrainOptions().parse() # get training options
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset) # get the number of images in the dataset.
print('The number of training images = %d' % dataset_size)
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
total_iters = 0 # the total number of training iterations
# epoch_count = 1, n_epochs=100, n_epoch_decay=100
# The initial learning rate is 0.0002 and the total number of epochs is 200; for epochs [0, 100] the learning rate stays at 0.0002, and for epochs [100, 200] it decays linearly to 0.
for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
model.update_learning_rate() # update learning rates in the beginning of every epoch.
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_iters += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data) # unpack data from dataset and apply preprocessing
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
t_comp = (time.time() - iter_start_time) / opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
if opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
tester/examples/kms/kms_test.go | package kms
import (
"os"
"testing"
"github.com/GSA/grace-tftest/aws/kms"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
)
func TestKm(t *testing.T) {
port := os.Getenv("MOTO_PORT")
if len(port) == 0 {
t.Skipf("skipping testing, MOTO_PORT not set in environment variables")
}
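	// Note (assumption, not from this repo's docs): MOTO_PORT should point at a locally
	// running moto server, e.g.: MOTO_PORT=5000 go test ./tester/examples/kms/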
url := "http://localhost:" + os.Getenv("MOTO_PORT")
t.Logf("connecting to: %s\n", url)
sess, err := session.NewSession(&aws.Config{
Endpoint: aws.String(url),
DisableSSL: aws.Bool(true),
S3ForcePathStyle: aws.Bool(true),
})
if err != nil {
t.Fatalf("failed to connect to moto: %s -> %v", url, err)
}
svc := kms.New(sess)
alias := svc.
Alias.
Name("alias/key").
Assert(t)
alias.
Policy(t).
Statement(t, nil).
Principal("AWS", "arn:aws:iam::*:root").
Effect("Allow").
Resource("*").
Action("kms:*").
Assert(t)
}
| [
"\"MOTO_PORT\"",
"\"MOTO_PORT\""
]
| []
| [
"MOTO_PORT"
]
| [] | ["MOTO_PORT"] | go | 1 | 0 | |
scripts/pixel_classification_zarr.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# Copyright (c) 2020 University of Dundee.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Version: 1.0
#
import time
import tempfile
import tarfile
import numpy
import os
import zarr
import dask.array as da
import omero.clients
from omero.gateway import BlitzGateway
from getpass import getpass
from ilastik import app
from ilastik.applets.dataSelection.opDataSelection import PreloadedArrayDatasetInfo # noqa
import vigra
# Connect to the server
def connect(hostname, username, password):
conn = BlitzGateway(username, password,
host=hostname, secure=True)
conn.connect()
return conn
# Load-images
def load_images(conn, dataset_id):
return conn.getObjects('Image', opts={'dataset': dataset_id})
# Create-dataset
def create_dataset(conn, dataset_id):
dataset = omero.model.DatasetI()
v = "ilastik_probabilities_from_dataset_%s" % dataset_id
dataset.setName(omero.rtypes.rstring(v))
v = "ilatisk results probabilities from Dataset:%s" % dataset_id
dataset.setDescription(omero.rtypes.rstring(v))
return conn.getUpdateService().saveAndReturnObject(dataset)
# Load-data
def load_numpy_array(image, path, extension=".tar", resolution=0):
# load annotation linked to the image. Download in a tmp dir
for ann in image.listAnnotations():
if isinstance(ann, omero.gateway.FileAnnotationWrapper):
name = ann.getFile().getName()
ns = ann.getNs()
if (name.endswith(".zip") or name.endswith(".tar")) and ns is None:
file_path = os.path.join(path, name)
f_path = os.path.join(path, name.strip(extension))
with open(str(file_path), 'wb') as f:
for chunk in ann.getFileInChunks():
f.write(chunk)
# extract the file
if extension == ".tar":
tf = tarfile.open(file_path)
tf.extractall(path)
tf.close()
data = zarr.open(f_path)
values = data[resolution][:]
# from tczyx to tzyxc
values = values.swapaxes(1, 2).swapaxes(2, 3)
values = values.swapaxes(3, 4)
return values
else:
data = zarr.open(file_path)
return data[:]
return None
def load_from_s3(image, resolution='0'):
id = image.getId()
endpoint_url = 'https://minio-dev.openmicroscopy.org/'
root = 'idr/outreach/%s.zarr/' % id
# data.shape is (t, c, z, y, x) by convention
data = da.from_zarr(endpoint_url + root)
values = data[:]
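    # reorder axes from (t, c, z, y, x) to (t, z, y, x, c), matching the "tzyxc" axistags used when the data is handed to ilastik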
values = values.swapaxes(1, 2).swapaxes(2, 3).swapaxes(3, 4)
return numpy.asarray(values)
# Analyze-data
def analyze(conn, images, model, new_dataset, extension=".tar", resolution=0):
# Prepare ilastik
# temporary directory where to download files
path = tempfile.mkdtemp()
if not os.path.exists(path):
os.makedirs(path)
os.environ["LAZYFLOW_THREADS"] = "2"
os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"
args = app.parse_args([])
args.headless = True
args.project = model
args.readonly = True
shell = app.main(args)
start = time.time()
for image in images:
input_data = load_from_s3(image, path)
# run ilastik headless
print('running ilastik using %s and %s' % (model, image.getName()))
data = [ {"Raw Data": PreloadedArrayDatasetInfo(preloaded_array=input_data, axistags=vigra.defaultAxistags("tzyxc"))}] # noqa
shell.workflow.batchProcessingApplet.run_export(data, export_to_array=True) # noqa
elapsed = time.time() - start
print(elapsed)
# Save-results
def save_results(conn, image, data, dataset, path):
filename, file_extension = os.path.splitext(image.getName())
# Save the probabilities file as an image
print("Saving Probabilities as zarr file attached to the original Image")
name = filename + "_Probabilities_zarr.zip"
desc = "ilastik probabilities from Image:%s" % image.getId()
# Re-organise array from tzyxc to zctyx order expected by OMERO
# data = data.swapaxes(0, 1).swapaxes(3, 4).swapaxes(2, 3).swapaxes(1, 2)
namespace = "ilastik.zarr.demo"
fp = os.path.join(path, name)
with zarr.ZipStore(fp, mode='w') as store:
zarr.array(data, store=store, dtype='int16',
compressor=zarr.Blosc(cname='zstd'))
ann = conn.createFileAnnfromLocalFile(fp, mimetype="application/zip",
ns=namespace, desc=desc)
image.linkAnnotation(ann)
# Disconnect
def disconnect(conn):
conn.close()
# main
def main():
# Collect user credentials
try:
host = input("Host [wss://outreach.openmicroscopy.org/omero-ws]: ") or 'wss://outreach.openmicroscopy.org/omero-ws' # noqa
username = input("Username [trainer-1]: ") or 'trainer-1'
password = getpass("Password: ")
dataset_id = input("Dataset ID [6161]: ") or '6161'
# Connect to the server
conn = connect(host, username, password)
conn.c.enableKeepAlive(60)
# path to the ilastik project
ilastik_project = "../notebooks/pipelines/pixel-class-133.ilp"
# Load the images in the dataset
images = load_images(conn, dataset_id)
new_dataset = create_dataset(conn, dataset_id)
analyze(conn, images, ilastik_project, new_dataset)
finally:
disconnect(conn)
print("done")
if __name__ == "__main__":
main()
| []
| []
| [
"LAZYFLOW_THREADS",
"LAZYFLOW_TOTAL_RAM_MB"
]
| [] | ["LAZYFLOW_THREADS", "LAZYFLOW_TOTAL_RAM_MB"] | python | 2 | 0 | |
distribution/push_v2.go | package distribution // import "github.com/docker/docker/distribution"
import (
"context"
"fmt"
"io"
"os"
"runtime"
"sort"
"strings"
"sync"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/client"
apitypes "github.com/docker/docker/api/types"
"github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/registry"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
smallLayerMaximumSize = 100 * (1 << 10) // 100KB
middleLayerMaximumSize = 10 * (1 << 20) // 10MB
)
type v2Pusher struct {
v2MetadataService metadata.V2MetadataService
ref reference.Named
endpoint registry.APIEndpoint
repoInfo *registry.RepositoryInfo
config *ImagePushConfig
repo distribution.Repository
// pushState is state built by the Upload functions.
pushState pushState
}
type pushState struct {
sync.Mutex
// remoteLayers is the set of layers known to exist on the remote side.
// This avoids redundant queries when pushing multiple tags that
// involve the same layers. It is also used to fill in digest and size
// information when building the manifest.
remoteLayers map[layer.DiffID]distribution.Descriptor
hasAuthInfo bool
}
func (p *v2Pusher) Push(ctx context.Context) (err error) {
p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)
p.repo, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
p.pushState.hasAuthInfo = p.config.AuthConfig.RegistryToken != "" || (p.config.AuthConfig.Username != "" && p.config.AuthConfig.Password != "")
if err != nil {
logrus.Debugf("Error getting v2 registry: %v", err)
return err
}
if err = p.pushV2Repository(ctx); err != nil {
if continueOnError(err, p.endpoint.Mirror) {
return fallbackError{
err: err,
transportOK: true,
}
}
}
return err
}
func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
imageID, err := p.config.ReferenceStore.Get(p.ref)
if err != nil {
return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref))
}
return p.pushV2Tag(ctx, namedTagged, imageID)
}
if !reference.IsNameOnly(p.ref) {
return errors.New("cannot push a digest reference")
}
// Push all tags
pushed := 0
for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
pushed++
if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil {
return err
}
}
}
if pushed == 0 {
return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name))
}
return nil
}
func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error {
logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref))
imgConfig, err := p.config.ImageStore.Get(ctx, id)
if err != nil {
return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err)
}
rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig)
if err != nil {
return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err)
}
l, err := p.config.LayerStores.Get(rootfs.ChainID())
if err != nil {
return fmt.Errorf("failed to get top layer from image: %v", err)
}
defer l.Release()
hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig)
if err != nil {
return fmt.Errorf("failed to compute hmac key of auth config: %v", err)
}
var descriptors []xfer.UploadDescriptor
descriptorTemplate := v2PushDescriptor{
v2MetadataService: p.v2MetadataService,
hmacKey: hmacKey,
repoInfo: p.repoInfo.Name,
ref: p.ref,
endpoint: p.endpoint,
repo: p.repo,
pushState: &p.pushState,
}
// Loop bounds condition is to avoid pushing the base layer on Windows.
for range rootfs.DiffIDs {
descriptor := descriptorTemplate
descriptor.layer = l
descriptor.checkedDigests = make(map[digest.Digest]struct{})
descriptors = append(descriptors, &descriptor)
l = l.Parent()
}
if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
return err
}
// Try schema2 first
builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig)
manifest, err := manifestFromBuilder(ctx, builder, descriptors)
if err != nil {
return err
}
manSvc, err := p.repo.Manifests(ctx)
if err != nil {
return err
}
putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())}
if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 {
logrus.Warnf("failed to upload schema2 manifest: %v", err)
return err
}
		// This is a temporary environment variable used in CI to allow pushing
// manifest v2 schema 1 images to test-registries used for testing *pulling*
// these images.
if os.Getenv("DOCKER_ALLOW_SCHEMA1_PUSH_DONOTUSE") == "" {
if err.Error() == "tag invalid" {
msg := "[DEPRECATED] support for pushing manifest v2 schema1 images has been removed. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/"
logrus.WithError(err).Error(msg)
return errors.Wrap(err, msg)
}
return err
}
logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)
// Note: this fallback is deprecated, see log messages below
manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag())
if err != nil {
return err
}
builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig)
manifest, err = manifestFromBuilder(ctx, builder, descriptors)
if err != nil {
return err
}
if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
return err
}
// schema2 failed but schema1 succeeded
msg := fmt.Sprintf("[DEPRECATION NOTICE] support for pushing manifest v2 schema1 images will be removed in an upcoming release. Please contact admins of the %s registry NOW to avoid future disruption. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", reference.Domain(ref))
logrus.Warn(msg)
progress.Message(p.config.ProgressOutput, "", msg)
}
var canonicalManifest []byte
switch v := manifest.(type) {
case *schema1.SignedManifest:
canonicalManifest = v.Canonical
case *schema2.DeserializedManifest:
_, canonicalManifest, err = v.Payload()
if err != nil {
return err
}
}
manifestDigest := digest.FromBytes(canonicalManifest)
progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))
if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
return err
}
// Signal digest to the trust client so it can sign the
// push, if appropriate.
progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)})
return nil
}
func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) {
// descriptors is in reverse order; iterate backwards to get references
// appended in the right order.
for i := len(descriptors) - 1; i >= 0; i-- {
if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
return nil, err
}
}
return builder.Build(ctx)
}
type v2PushDescriptor struct {
layer PushLayer
v2MetadataService metadata.V2MetadataService
hmacKey []byte
repoInfo reference.Named
ref reference.Named
endpoint registry.APIEndpoint
repo distribution.Repository
pushState *pushState
remoteDescriptor distribution.Descriptor
// a set of digests whose presence has been checked in a target repository
checkedDigests map[digest.Digest]struct{}
}
func (pd *v2PushDescriptor) Key() string {
return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String()
}
func (pd *v2PushDescriptor) ID() string {
return stringid.TruncateID(pd.layer.DiffID().String())
}
func (pd *v2PushDescriptor) DiffID() layer.DiffID {
return pd.layer.DiffID()
}
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
// Skip foreign layers unless this registry allows nondistributable artifacts.
if !pd.endpoint.AllowNondistributableArtifacts {
if fs, ok := pd.layer.(distribution.Describable); ok {
if d := fs.Descriptor(); len(d.URLs) > 0 {
progress.Update(progressOutput, pd.ID(), "Skipped foreign layer")
return d, nil
}
}
}
diffID := pd.DiffID()
pd.pushState.Lock()
if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
// it is already known that the push is not needed and
// therefore doing a stat is unnecessary
pd.pushState.Unlock()
progress.Update(progressOutput, pd.ID(), "Layer already exists")
return descriptor, nil
}
pd.pushState.Unlock()
maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer)
// Do we have any metadata associated with this layer's DiffID?
v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
if err == nil {
// check for blob existence in the target repository
descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, v2Metadata)
if exists || err != nil {
return descriptor, err
}
}
// if digest was empty or not saved, or if blob does not exist on the remote repository,
// then push the blob.
bs := pd.repo.Blobs(ctx)
var layerUpload distribution.BlobWriter
// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata)
isUnauthorizedError := false
for _, mc := range candidates {
mountCandidate := mc
logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository)
createOpts := []distribution.BlobCreateOption{}
if len(mountCandidate.SourceRepository) > 0 {
namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository)
if err != nil {
logrus.Errorf("failed to parse source repository reference %v: %v", reference.FamiliarString(namedRef), err)
pd.v2MetadataService.Remove(mountCandidate)
continue
}
// Candidates are always under same domain, create remote reference
// with only path to set mount from with
remoteRef, err := reference.WithName(reference.Path(namedRef))
if err != nil {
logrus.Errorf("failed to make remote reference out of %q: %v", reference.Path(namedRef), err)
continue
}
canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest)
if err != nil {
logrus.Errorf("failed to make canonical reference: %v", err)
continue
}
createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
}
// send the layer
lu, err := bs.Create(ctx, createOpts...)
switch err := err.(type) {
case nil:
// noop
case distribution.ErrBlobMounted:
progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
err.Descriptor.MediaType = schema2.MediaTypeLayer
pd.pushState.Lock()
pd.pushState.remoteLayers[diffID] = err.Descriptor
pd.pushState.Unlock()
// Cache mapping from this layer's DiffID to the blobsum
if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
Digest: err.Descriptor.Digest,
SourceRepository: pd.repoInfo.Name(),
}); err != nil {
return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
}
return err.Descriptor, nil
case errcode.Errors:
for _, e := range err {
switch e := e.(type) {
case errcode.Error:
if e.Code == errcode.ErrorCodeUnauthorized {
						// an unauthorized error indicates that the user does not have the right to push the layer to the registry
logrus.Debugln("failed to push layer to registry because unauthorized error")
isUnauthorizedError = true
}
default:
}
}
default:
logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err)
}
		// When the error is an unauthorized error and the user has no auth info, the user has no right to push
		// the layer to the registry and is not logged in either; in that case the candidate cache entry should be removed.
if len(mountCandidate.SourceRepository) > 0 &&
!(isUnauthorizedError && !pd.pushState.hasAuthInfo) &&
(metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) ||
len(mountCandidate.HMAC) == 0) {
cause := "blob mount failure"
if err != nil {
cause = fmt.Sprintf("an error: %v", err.Error())
}
logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause)
pd.v2MetadataService.Remove(mountCandidate)
}
if lu != nil {
// cancel previous upload
cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload)
layerUpload = lu
}
}
if maxExistenceChecks-len(pd.checkedDigests) > 0 {
// do additional layer existence checks with other known digests if any
descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata)
if exists || err != nil {
return descriptor, err
}
}
logrus.Debugf("Pushing layer: %s", diffID)
if layerUpload == nil {
layerUpload, err = bs.Create(ctx)
if err != nil {
return distribution.Descriptor{}, retryOnError(err)
}
}
defer layerUpload.Close()
// upload the blob
return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
}
func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
pd.remoteDescriptor = descriptor
}
func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
return pd.remoteDescriptor
}
func (pd *v2PushDescriptor) uploadUsingSession(
ctx context.Context,
progressOutput progress.Output,
diffID layer.DiffID,
layerUpload distribution.BlobWriter,
) (distribution.Descriptor, error) {
var reader io.ReadCloser
contentReader, err := pd.layer.Open()
if err != nil {
return distribution.Descriptor{}, retryOnError(err)
}
reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, pd.layer.Size(), pd.ID(), "Pushing")
switch m := pd.layer.MediaType(); m {
case schema2.MediaTypeUncompressedLayer:
compressedReader, compressionDone := compress(reader)
defer func(closer io.Closer) {
closer.Close()
<-compressionDone
}(reader)
reader = compressedReader
case schema2.MediaTypeLayer:
default:
reader.Close()
return distribution.Descriptor{}, xfer.DoNotRetry{Err: fmt.Errorf("unsupported layer media type %s", m)}
}
digester := digest.Canonical.Digester()
tee := io.TeeReader(reader, digester.Hash())
nn, err := layerUpload.ReadFrom(tee)
reader.Close()
if err != nil {
return distribution.Descriptor{}, retryOnError(err)
}
pushDigest := digester.Digest()
if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
return distribution.Descriptor{}, retryOnError(err)
}
logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
progress.Update(progressOutput, pd.ID(), "Pushed")
// Cache mapping from this layer's DiffID to the blobsum
if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
Digest: pushDigest,
SourceRepository: pd.repoInfo.Name(),
}); err != nil {
return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
}
desc := distribution.Descriptor{
Digest: pushDigest,
MediaType: schema2.MediaTypeLayer,
Size: nn,
}
pd.pushState.Lock()
pd.pushState.remoteLayers[diffID] = desc
pd.pushState.Unlock()
return desc, nil
}
// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata"
// slice. If it finds one that the registry knows about, it returns the known digest and "true". If
// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository
// (not just the target one).
func (pd *v2PushDescriptor) layerAlreadyExists(
ctx context.Context,
progressOutput progress.Output,
diffID layer.DiffID,
checkOtherRepositories bool,
maxExistenceCheckAttempts int,
v2Metadata []metadata.V2Metadata,
) (desc distribution.Descriptor, exists bool, err error) {
// filter the metadata
candidates := []metadata.V2Metadata{}
for _, meta := range v2Metadata {
if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() {
continue
}
candidates = append(candidates, meta)
}
// sort the candidates by similarity
sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates)
digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata)
// an array of unique blob digests ordered from the best mount candidates to worst
layerDigests := []digest.Digest{}
for i := 0; i < len(candidates); i++ {
if len(layerDigests) >= maxExistenceCheckAttempts {
break
}
meta := &candidates[i]
if _, exists := digestToMetadata[meta.Digest]; exists {
// keep reference just to the first mapping (the best mount candidate)
continue
}
if _, exists := pd.checkedDigests[meta.Digest]; exists {
// existence of this digest has already been tested
continue
}
digestToMetadata[meta.Digest] = meta
layerDigests = append(layerDigests, meta.Digest)
}
attempts:
for _, dgst := range layerDigests {
meta := digestToMetadata[dgst]
logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst)
pd.checkedDigests[meta.Digest] = struct{}{}
switch err {
case nil:
if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) {
// cache mapping from this layer's DiffID to the blobsum
if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
Digest: desc.Digest,
SourceRepository: pd.repoInfo.Name(),
}); err != nil {
return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err}
}
}
desc.MediaType = schema2.MediaTypeLayer
exists = true
break attempts
case distribution.ErrBlobUnknown:
if meta.SourceRepository == pd.repoInfo.Name() {
// remove the mapping to the target repository
pd.v2MetadataService.Remove(*meta)
}
default:
logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
}
}
if exists {
progress.Update(progressOutput, pd.ID(), "Layer already exists")
pd.pushState.Lock()
pd.pushState.remoteLayers[diffID] = desc
pd.pushState.Unlock()
}
return desc, exists, nil
}
// getMaxMountAndExistenceCheckAttempts returns a maximum number of cross repository mount attempts from
// source repositories of target registry, maximum number of layer existence checks performed on the target
// repository and whether the check shall be done also with digests mapped to different repositories. The
// decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the cost
// of upload does not outweigh a latency.
func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
size := layer.Size()
switch {
// big blob
case size > middleLayerMaximumSize:
// 1st attempt to mount the blob few times
// 2nd few existence checks with digests associated to any repository
// then fallback to upload
return 4, 3, true
// middle sized blobs; if we could not get the size, assume we deal with middle sized blob
case size > smallLayerMaximumSize:
// 1st attempt to mount blobs of average size few times
// 2nd try at most 1 existence check if there's an existing mapping to the target repository
// then fallback to upload
return 3, 1, false
// small blobs, do a minimum number of checks
default:
return 1, 1, false
}
}
// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The
// array is sorted from youngest to oldest. If requireRegistryMatch is true, the resulting array will contain
// only metadata entries having registry part of SourceRepository matching the part of repoInfo.
func getRepositoryMountCandidates(
repoInfo reference.Named,
hmacKey []byte,
max int,
v2Metadata []metadata.V2Metadata,
) []metadata.V2Metadata {
candidates := []metadata.V2Metadata{}
for _, meta := range v2Metadata {
sourceRepo, err := reference.ParseNamed(meta.SourceRepository)
if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) {
continue
}
// target repository is not a viable candidate
if meta.SourceRepository == repoInfo.Name() {
continue
}
candidates = append(candidates, meta)
}
sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates)
if max >= 0 && len(candidates) > max {
// select the youngest metadata
candidates = candidates[:max]
}
return candidates
}
// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The
// candidate "a" is preferred over "b":
//
// 1. if it was hashed using the same AuthConfig as the one used to authenticate to target repository and the
// "b" was not
// 2. if a number of its repository path components exactly matching path components of target repository is higher
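//
// For example (hypothetical names), with target repository "registry.example.com/team/app",
// a candidate from "registry.example.com/team/base" (two matching path components) is preferred
// over one from "registry.example.com/other/base" (one matching component), all else being equal.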
type byLikeness struct {
arr []metadata.V2Metadata
hmacKey []byte
pathComponents []string
}
func (bla byLikeness) Less(i, j int) bool {
aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey)
bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey)
if aMacMatch != bMacMatch {
return aMacMatch
}
aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents)
bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents)
return aMatch > bMatch
}
func (bla byLikeness) Swap(i, j int) {
bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i]
}
func (bla byLikeness) Len() int { return len(bla.arr) }
func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) {
// reverse the metadata array to shift the newest entries to the beginning
for i := 0; i < len(marr)/2; i++ {
marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i]
}
// keep equal entries ordered from the youngest to the oldest
sort.Stable(byLikeness{
arr: marr,
hmacKey: hmacKey,
pathComponents: getPathComponents(repoInfo.Name()),
})
}
// numOfMatchingPathComponents returns a number of path components in "pth" that exactly match "matchComponents".
func numOfMatchingPathComponents(pth string, matchComponents []string) int {
pthComponents := getPathComponents(pth)
i := 0
for ; i < len(pthComponents) && i < len(matchComponents); i++ {
if matchComponents[i] != pthComponents[i] {
return i
}
}
return i
}
func getPathComponents(path string) []string {
return strings.Split(path, "/")
}
func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) {
if layerUpload != nil {
logrus.Debugf("cancelling upload of blob %s", dgst)
err := layerUpload.Cancel(ctx)
if err != nil {
logrus.Warnf("failed to cancel upload: %v", err)
}
}
}
| [
"\"DOCKER_ALLOW_SCHEMA1_PUSH_DONOTUSE\""
]
| []
| [
"DOCKER_ALLOW_SCHEMA1_PUSH_DONOTUSE"
]
| [] | ["DOCKER_ALLOW_SCHEMA1_PUSH_DONOTUSE"] | go | 1 | 0 | |
packages/girder/plugins/dicom_viewer/plugin_tests/dicom_viewer_test.py | import os
import json
import six
from girder.models.collection import Collection
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.upload import Upload
from girder.models.user import User
import pydicom
from tests import base
from girder_dicom_viewer import _removeUniqueMetadata, _extractFileData
from girder_dicom_viewer.event_helper import _EventHelper
def setUpModule():
base.enabledPlugins.append('dicom_viewer')
base.startServer()
global _removeUniqueMetadata
global _extractFileData
def tearDownModule():
base.stopServer()
class DicomViewerTest(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
self.dataDir = os.path.join(
os.environ['GIRDER_TEST_DATA_PREFIX'], 'plugins', 'dicom_viewer')
self.users = [User().createUser(
'usr%s' % num, 'passwd', 'tst', 'usr', 'u%[email protected]' % num)
for num in [0, 1]]
def testRemoveUniqueMetadata(self):
dicomMeta = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 35,
'key5': 54,
'key6': 'commonVal',
'uniqueKey1': 'commonVal'
}
additionalMeta = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 35,
'key5': 54,
'key6': 'uniqueVal',
'uniqueKey2': 'commonVal',
}
commonMeta = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 35,
'key5': 54
}
self.assertEqual(_removeUniqueMetadata(dicomMeta, additionalMeta), commonMeta)
def testExtractFileData(self):
dicomFile = {
'_id': '599c4cf3c9c5cb11f1ff5d97',
'assetstoreId': '599c4a19c9c5cb11f1ff5d32',
'creatorId': '5984b9fec9c5cb370447068c',
'exts': ['dcm'],
'itemId': '599c4cf3c9c5cb11f1ff5d96',
'mimeType': 'application/dicom',
'name': '000000.dcm',
'size': 133356
}
dicomMeta = {
'SeriesNumber': 1,
'InstanceNumber': 1,
'SliceLocation': 0
}
result = {
'_id': '599c4cf3c9c5cb11f1ff5d97',
'name': '000000.dcm',
'dicom': {
'SeriesNumber': 1,
'InstanceNumber': 1,
'SliceLocation': 0
}
}
self.assertEqual(_extractFileData(dicomFile, dicomMeta), result)
def testFileProcessHandler(self):
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection1', admin, public=True)
folder = Folder().createFolder(collection, 'folder1', parentType='collection', public=True)
item = Item().createItem('item1', admin, folder)
# Upload non-DICOM files
self._uploadNonDicomFiles(item, admin)
nonDicomItem = Item().load(item['_id'], force=True)
self.assertIsNone(nonDicomItem.get('dicom'))
# Upload DICOM files
self._uploadDicomFiles(item, admin)
# Check if the 'dicomItem' is well processed
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
# Check if the files list contain the good keys and all the file are well sorted
for i in range(0, 4):
self.assertTrue('_id' in dicomItem['dicom']['files'][i])
self.assertTrue('name' in dicomItem['dicom']['files'][i])
self.assertEqual(dicomItem['dicom']['files'][i]['name'], 'dicomFile{}.dcm'.format(i))
self.assertTrue('SeriesNumber' in dicomItem['dicom']['files'][i]['dicom'])
self.assertTrue('InstanceNumber' in dicomItem['dicom']['files'][i]['dicom'])
self.assertTrue('SliceLocation' in dicomItem['dicom']['files'][i]['dicom'])
# Check the common metadata
self.assertIsNotNone(dicomItem['dicom']['meta'])
def testMakeDicomItem(self):
admin, user = self.users
# create a collection, folder, and item
collection = Collection().createCollection('collection2', admin, public=True)
folder = Folder().createFolder(collection, 'folder2', parentType='collection', public=True)
item = Item().createItem('item2', admin, folder)
# Upload files
self._uploadDicomFiles(item, admin)
# Check the endpoint 'parseDicom' for an admin user
dicomItem = Item().load(item['_id'], force=True)
dicomItem = self._purgeDicomItem(dicomItem)
path = '/item/%s/parseDicom' % dicomItem.get('_id')
resp = self.request(path=path, method='POST', user=admin)
self.assertStatusOk(resp)
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
# Check the endpoint 'parseDicom' for an non admin user
dicomItem = Item().load(item['_id'], force=True)
dicomItem = self._purgeDicomItem(dicomItem)
path = '/item/%s/parseDicom' % dicomItem.get('_id')
resp = self.request(path=path, method='POST', user=user)
self.assertStatus(resp, 403)
def _uploadNonDicomFiles(self, item, user):
# Upload a fake file to check that the item is not traited
nonDicomContent = b'hello world\n'
ndcmFile = Upload().uploadFromFile(
obj=six.BytesIO(nonDicomContent),
size=len(nonDicomContent),
name='nonDicom.txt',
parentType='item',
parent=item,
mimeType='text/plain',
user=user
)
self.assertIsNotNone(ndcmFile)
def _uploadDicomFiles(self, item, user):
# Upload the files in the reverse order to check if they're well sorted
for i in [1, 3, 0, 2]:
file = os.path.join(self.dataDir, '00000%i.dcm' % i)
with open(file, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
dcmFile = Upload().uploadFromFile(
obj=fp,
size=os.path.getsize(file),
name='dicomFile{}.dcm'.format(i),
parentType='item',
parent=item,
mimeType='application/dicom',
user=user
)
self.assertIsNotNone(dcmFile)
# Wait for handler success event
handled = helper.wait()
self.assertTrue(handled)
def _purgeDicomItem(self, item):
item.pop('dicom')
return item
def testSearchForDicomItem(self):
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection3', admin, public=True)
folder = Folder().createFolder(collection, 'folder3', parentType='collection', public=True)
item = Item().createItem('item3', admin, folder)
# Upload files
self._uploadDicomFiles(item, admin)
# Search for DICOM item with 'brain research' as common key/value
resp = self.request(path='/resource/search', params={
'q': 'brain research',
'mode': 'dicom',
'types': json.dumps(["item"])
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['item']), 1)
self.assertEqual(resp.json['item'][0]['name'], 'item3')
# Search for DICOM item with substring 'in resea' as common key/value
resp = self.request(path='/resource/search', params={
'q': 'in resea',
'mode': 'dicom',
'types': json.dumps(["item"])
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['item']), 1)
self.assertEqual(resp.json['item'][0]['name'], 'item3')
# TODO: Add test to search for a private DICOM item with an other user
# this test should not found anything
def testDicomWithIOError(self):
# One of the test files in the pydicom module will throw an IOError
# when parsing metadata. We should work around that and still be able
# to import the file
samplePath = os.path.join(os.path.dirname(os.path.abspath(
pydicom.__file__)), 'data', 'test_files', 'CT_small.dcm')
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection4', admin, public=True)
folder = Folder().createFolder(collection, 'folder4', parentType='collection', public=True)
item = Item().createItem('item4', admin, folder)
# Upload this dicom file
with open(samplePath, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
dcmFile = Upload().uploadFromFile(
obj=fp,
size=os.path.getsize(samplePath),
name=os.path.basename(samplePath),
parentType='item',
parent=item,
mimeType='application/dicom',
user=user
)
self.assertIsNotNone(dcmFile)
# Wait for handler success event
handled = helper.wait()
self.assertTrue(handled)
# Check if the 'dicomItem' is well processed
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
def testDicomWithBinaryValues(self):
        # One of the test files in the pydicom module contains binary tag
        # values. We should work around that and still be able to import
        # the file
samplePath = os.path.join(os.path.dirname(os.path.abspath(
pydicom.__file__)), 'data', 'test_files', 'OBXXXX1A.dcm')
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection5', admin, public=True)
folder = Folder().createFolder(collection, 'folder5', parentType='collection', public=True)
item = Item().createItem('item5', admin, folder)
# Upload this dicom file
with open(samplePath, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
dcmFile = Upload().uploadFromFile(
obj=fp,
size=os.path.getsize(samplePath),
name=os.path.basename(samplePath),
parentType='item',
parent=item,
mimeType='application/dicom',
user=user
)
self.assertIsNotNone(dcmFile)
# Wait for handler success event
handled = helper.wait()
self.assertTrue(handled)
# Check if the 'dicomItem' is well processed
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
| []
| []
| [
"GIRDER_TEST_DATA_PREFIX"
]
| [] | ["GIRDER_TEST_DATA_PREFIX"] | python | 1 | 0 | |
alktrace.go | // Alktrace: Tries to get as much info as it can about a k8s service to help
// troubleshoot networking issues
package main
import (
"errors"
"flag"
"fmt"
"log"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
typev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/clientcmd"
)
func main() {
// Handling User input
flag.Usage = func() {
fmt.Printf("Usage: \n sudo ./alktrace [options] <domain/ip> \nOptions:\n")
flag.PrintDefaults()
}
protoToUse := flag.String("proto", "", "Set the protocol to use. By default use udp on Linux. \n Can be tcp, icmp")
portDest := flag.String("p", "80", "Set the destination port to use. Using 80 by default")
kconfig := flag.String("kconf", filepath.Join(os.Getenv("HOME"), ".kube", "config"), "Path to the kubeconfig. Defaults to ~/.kube/config")
namespace := flag.String("ns", "", "Specify the namespace in which the service reside. Seeks into all namespaces by default")
svcName := flag.String("svc", "", "Specify the service name for which you want more infos")
auto := flag.Bool("auto", false, "Passing this flag instead of svcName to try to find automatically infos on the service traced")
recurse := flag.Bool("recurse", false, "Tries to trace the pods found if there is any")
flag.Parse()
if flag.NArg() != 1 {
flag.Usage()
return
}
host := dns(flag.Args()[0])
proto := proto(*protoToUse)
var wg sync.WaitGroup
// Trace to the dest given
trace(host, proto, *portDest, false, wg)
if *svcName != "" || *auto {
// Get all infos from k8s about the destination
pods, err := getK8sInfos(*kconfig, host, *svcName, *namespace)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if *recurse {
for _, pod := range pods.Items {
fmt.Println("Tracing the pod : ", pod.Status.PodIP)
wg.Add(1)
go trace(pod.Status.PodIP, proto, *portDest, true, wg)
}
wg.Wait()
}
}
}
func dns(host string) string {
addresses, err := net.LookupHost(host)
if err != nil {
fmt.Println(err)
panic(err)
}
return addresses[0]
}
func proto(proto string) string {
switch proto {
case "tcp":
return "-T"
case "icmp":
return "-I"
default:
return "-U"
}
}
func trace(host string, proto string, port string, pod bool, wg sync.WaitGroup) {
if pod {
defer wg.Done()
}
result, err := exec.Command("sudo", "traceroute", "-w", "1", "-q", "1", proto, "-p", port, host).Output()
if err != nil {
fmt.Println(err)
fmt.Fprintln(os.Stdout, "Please check that you have traceroute (not the one from inetutils which is not powerful enough)")
fmt.Fprintln(os.Stdout, "Please check also your firewall rules")
return
}
fmt.Fprintf(os.Stdout, "%s", result)
}
func getK8sInfos(kconfig string, host string, svcName string, namespace string) (*corev1.PodList, error) {
//fmt.Fprintf(os.Stdout, "Using config from : %s\n", kconfig)
k8sClient, err := getClient(kconfig)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
var svc *corev1.Service
if svcName != "" {
svc, err = getServiceForDeployment(svcName, namespace, k8sClient)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(2)
}
} else {
svc, err = findService(host, namespace, k8sClient)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(2)
}
}
pods, err := getPodsForSvc(svc, namespace, k8sClient)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(2)
}
for _, pod := range pods.Items {
fmt.Fprintf(os.Stdout, " %v (%v) on Node : %v \n", pod.Name, pod.Status.PodIP, pod.Status.HostIP)
}
return pods, nil
}
func getClient(configLocation string) (typev1.CoreV1Interface, error) {
kubeconfig := filepath.Clean(configLocation)
//fmt.Fprintf(os.Stdout, "Cleaned location : %s\n", kubeconfig)
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
log.Fatal(err)
}
//fmt.Fprintf(os.Stdout, "Config built: %v\n", config)
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return clientset.CoreV1(), nil
}
func getServiceForDeployment(deployment string, namespace string, k8sClient typev1.CoreV1Interface) (*corev1.Service, error) {
listOptions := metav1.ListOptions{}
svcs, err := k8sClient.Services(namespace).List(listOptions)
if err != nil {
log.Fatal(err)
}
for _, svc := range svcs.Items {
if strings.Contains(svc.Name, deployment) {
fmt.Fprintf(os.Stdout, "\n The service reached (%v) serves the pods : \n", svc.Name)
return &svc, nil
}
}
return nil, errors.New("cannot find service for deployment")
}
func findService(host string, namespace string, k8sClient typev1.CoreV1Interface) (*corev1.Service, error) {
listOptions := metav1.ListOptions{}
svcs, err := k8sClient.Services(namespace).List(listOptions)
if err != nil {
log.Fatal(err)
}
for _, svc := range svcs.Items {
if svc.Spec.ClusterIP == host {
fmt.Fprintf(os.Stdout, "\n The service reached (%v) serves the pods : \n", svc.Name)
return &svc, nil
}
}
return nil, errors.New("cannot find service for deployment")
}
func getPodsForSvc(svc *corev1.Service, namespace string, k8sClient typev1.CoreV1Interface) (*corev1.PodList, error) {
set := labels.Set(svc.Spec.Selector)
listOptions := metav1.ListOptions{LabelSelector: set.AsSelector().String()}
pods, err := k8sClient.Pods(namespace).List(listOptions)
return pods, err
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
arcade/application.py | """
The main window class that all object-oriented applications should
derive from.
"""
import logging
import os
import time
from numbers import Number
from typing import Tuple, Optional
import pyglet
import pyglet.gl as gl
import arcade
from arcade import get_display_size
from arcade import get_viewport
from arcade import set_viewport
from arcade import set_window
from arcade.context import ArcadeContext
from arcade.arcade_types import Color
LOG = logging.getLogger(__name__)
MOUSE_BUTTON_LEFT = 1
MOUSE_BUTTON_MIDDLE = 2
MOUSE_BUTTON_RIGHT = 4
_window: 'Window'
def get_screens():
"""
Return a list of screens. So for a two-monitor setup, this should return
a list of two screens. Can be used with arcade.Window to select which
window we full-screen on.
:returns: List of screens, one for each monitor.
:rtype: List
"""
display = pyglet.canvas.get_display()
return display.get_screens()
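# A short usage sketch (illustrative only): open a full-screen window on the second
# monitor when one is available, falling back to the primary screen otherwise:
#
#   screens = get_screens()
#   window = Window(800, 600, "Demo", fullscreen=True,
#                   screen=screens[1] if len(screens) > 1 else screens[0])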
class NoOpenGLException(Exception):
"""
Exception when we can't get an OpenGL 3.3+ context
"""
pass
class Window(pyglet.window.Window):
"""
The Window class forms the basis of most advanced games that use Arcade.
It represents a window on the screen, and manages events.
"""
def __init__(self,
width: int = 800,
height: int = 600,
title: str = 'Arcade Window',
fullscreen: bool = False,
resizable: bool = False,
update_rate: Optional[float] = 1 / 60,
antialiasing: bool = True,
gl_version: Tuple[int, int] = (3, 3),
screen: pyglet.canvas.Screen = None,
style: Optional[str] = pyglet.window.Window.WINDOW_STYLE_DEFAULT,
visible: bool = True,
vsync: bool = False,
gc_mode: str = "auto",
center_window: bool = False):
"""
Construct a new window
:param int width: Window width
:param int height: Window height
:param str title: Title (appears in title bar)
:param bool fullscreen: Should this be full screen?
:param bool resizable: Can the user resize the window?
:param float update_rate: How frequently to update the window.
:param bool antialiasing: Should OpenGL's anti-aliasing be enabled?
:param Tuple[int,int] gl_version: What OpenGL version to request. This is ``(3, 3)`` by default
and can be overridden when using more advanced OpenGL features.
:param bool visible: Should the window be visible immediately
:param bool vsync: Wait for vertical screen refresh before swapping buffer
This can make animations and movement look smoother.
        :param str gc_mode: Decides how opengl objects should be garbage collected
:param bool center_window: If true, will center the window.
"""
# In certain environments (mainly headless) we can't have antialiasing/MSAA enabled.
# TODO: Detect other headless environments
# Detect replit environment
if os.environ.get("REPL_ID"):
antialiasing = False
if antialiasing:
config = pyglet.gl.Config(major_version=gl_version[0],
minor_version=gl_version[1],
double_buffer=True,
sample_buffers=1,
samples=4)
else:
config = pyglet.gl.Config(major_version=3,
minor_version=3,
double_buffer=True)
try:
super().__init__(width=width, height=height, caption=title,
resizable=resizable, config=config, vsync=vsync, visible=visible, style=style)
self.register_event_type('update')
self.register_event_type('on_update')
except pyglet.window.NoSuchConfigException:
raise NoOpenGLException("Unable to create an OpenGL 3.3+ context. "
"Check to make sure your system supports OpenGL 3.3 or higher.")
if antialiasing:
try:
gl.glEnable(gl.GL_MULTISAMPLE_ARB)
except pyglet.gl.GLException:
print("Warning: Anti-aliasing not supported on this computer.")
if update_rate:
self.set_update_rate(update_rate)
self.set_vsync(vsync)
super().set_fullscreen(fullscreen, screen)
# This used to be necessary on Linux, but no longer appears to be.
# With Pyglet 2.0+, setting this to false will not allow the screen to
# update. It does, however, cause flickering if creating a window that
# isn't derived from the Window class.
# self.invalid = False
set_window(self)
self._current_view: Optional[View] = None
self.textbox_time = 0.0
self.key: Optional[int] = None
self._ctx: ArcadeContext = ArcadeContext(self, gc_mode=gc_mode)
set_viewport(0, self.width, 0, self.height)
self._background_color: Color = (0, 0, 0, 0)
# See if we should center the window
if center_window:
self.center_window()
@property
def current_view(self):
"""
This property returns the current view being shown.
To set a different view, call the
:py:meth:`arcade.Window.show_view` method.
:rtype: arcade.View
"""
return self._current_view
@property
def ctx(self) -> ArcadeContext:
"""
The OpenGL context for this window.
:type: :py:class:`arcade.ArcadeContext`
"""
return self._ctx
def clear(self, color: Optional[Color] = None):
"""Clears the window with the configured background color
set through :py:attr:`arcade.Window.background_color`.
:param Color color: Optional color overriding the current background color
"""
color = color if color is not None else self.background_color
self.ctx.screen.clear(color)
@property
def background_color(self):
"""Get or set the background color for this window.
If the background color is an ``RGB`` value instead of ``RGBA```
we assume alpha value 255.
:type: Color
"""
return self._background_color
@background_color.setter
def background_color(self, value: Color):
self._background_color = value
def close(self):
""" Close the Window. """
super().close()
pyglet.clock.unschedule(self._dispatch_updates)
def set_fullscreen(self, fullscreen=True, screen=None, mode=None,
width=None, height=None):
"""
Set if we are full screen or not.
:param bool fullscreen:
:param screen: Which screen should we display on? See :func:`get_screens`
:param mode:
:param int width:
:param int height:
"""
super().set_fullscreen(fullscreen, screen, mode, width, height)
def center_window(self):
"""
Center the window on the screen.
"""
# Get the display screen using pyglet
screen_width, screen_height = get_display_size()
window_width, window_height = self.get_size()
# Center the window
self.set_location((screen_width - window_width) // 2, (screen_height - window_height) // 2)
def update(self, delta_time: float):
"""
Move everything. For better consistency in naming, use ``on_update`` instead.
:param float delta_time: Time interval since the last time the function was called in seconds.
"""
pass
def on_update(self, delta_time: float):
"""
Move everything. Perform collision checks. Do all the game logic here.
:param float delta_time: Time interval since the last time the function was called.
"""
pass
def _dispatch_updates(self, delta_time: float):
self.dispatch_event('update', delta_time)
self.dispatch_event('on_update', delta_time)
def set_update_rate(self, rate: float):
"""
Set how often the screen should be updated.
For example, self.set_update_rate(1 / 60) will set the update rate to 60 fps
:param float rate: Update frequency in seconds
"""
pyglet.clock.unschedule(self._dispatch_updates)
pyglet.clock.schedule_interval(self._dispatch_updates, rate)
def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):
"""
Override this function to add mouse functionality.
:param float x: x position of mouse
:param float y: y position of mouse
:param float dx: Change in x since the last time this method was called
:param float dy: Change in y since the last time this method was called
"""
pass
def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):
"""
Override this function to add mouse button functionality.
:param float x: x position of the mouse
:param float y: y position of the mouse
:param int button: What button was hit. One of:
arcade.MOUSE_BUTTON_LEFT, arcade.MOUSE_BUTTON_RIGHT,
arcade.MOUSE_BUTTON_MIDDLE
:param int modifiers: Bitwise 'and' of all modifiers (shift, ctrl, num lock)
pressed during this event. See :ref:`keyboard_modifiers`.
"""
pass
def on_mouse_drag(self, x: float, y: float, dx: float, dy: float, buttons: int, modifiers: int):
"""
Override this function to add mouse button functionality.
:param float x: x position of mouse
:param float y: y position of mouse
:param float dx: Change in x since the last time this method was called
:param float dy: Change in y since the last time this method was called
:param int buttons: Which button is pressed
:param int modifiers: Bitwise 'and' of all modifiers (shift, ctrl, num lock)
pressed during this event. See :ref:`keyboard_modifiers`.
"""
self.on_mouse_motion(x, y, dx, dy)
def on_mouse_release(self, x: float, y: float, button: int,
modifiers: int):
"""
Override this function to add mouse button functionality.
:param float x:
:param float y:
:param int button:
:param int modifiers: Bitwise 'and' of all modifiers (shift, ctrl, num lock)
pressed during this event. See :ref:`keyboard_modifiers`.
"""
pass
def on_mouse_scroll(self, x: int, y: int, scroll_x: int, scroll_y: int):
"""
User moves the scroll wheel.
:param int x:
:param int y:
:param int scroll_x:
:param int scroll_y:
"""
pass
def set_mouse_visible(self, visible: bool = True):
"""
        If true, the user can see the mouse cursor while it is over the window. If false,
        the mouse cursor is not visible. Default is true.
:param bool visible:
"""
super().set_mouse_visible(visible)
def on_key_press(self, symbol: int, modifiers: int):
"""
Override this function to add key press functionality.
:param int symbol: Key that was hit
:param int modifiers: Bitwise 'and' of all modifiers (shift, ctrl, num lock)
pressed during this event. See :ref:`keyboard_modifiers`.
"""
try:
self.key = symbol
except AttributeError:
pass
# TEMP HACK
if symbol == arcade.key.F12:
self.ctx.default_atlas.show()
def on_key_release(self, symbol: int, modifiers: int):
"""
Override this function to add key release functionality.
:param int symbol: Key that was hit
:param int modifiers: Bitwise 'and' of all modifiers (shift, ctrl, num lock)
pressed during this event. See :ref:`keyboard_modifiers`.
"""
try:
self.key = None
except AttributeError:
pass
def on_draw(self):
"""
Override this function to add your custom drawing code.
"""
pass
def on_resize(self, width: float, height: float):
"""
Override this function to add custom code to be called any time the window
is resized. The only responsibility here is to update the viewport.
:param int width: New width
:param int height: New height
"""
# Retain projection scrolling if applied
original_viewport = self.get_viewport()
self.set_viewport(original_viewport[0],
original_viewport[0] + width,
original_viewport[2],
original_viewport[2] + height)
def set_min_size(self, width: int, height: int):
""" Wrap the Pyglet window call to set minimum size
        :param int width: width in pixels.
        :param int height: height in pixels.
"""
if self._resizable:
super().set_minimum_size(width, height)
else:
raise ValueError('Cannot set min size on non-resizable window')
def set_max_size(self, width: int, height: int):
""" Wrap the Pyglet window call to set maximum size
:param int width: width in pixels.
:param int height: height in pixels.
:Raises ValueError:
"""
if self._resizable:
super().set_maximum_size(width, height)
else:
raise ValueError('Cannot set max size on non-resizable window')
def set_size(self, width: int, height: int):
"""
Ignore the resizable flag and set the size
:param int width:
:param int height:
"""
super().set_size(width, height)
def get_size(self) -> Tuple[int, int]:
"""
Get the size of the window.
:returns: (width, height)
"""
return super().get_size()
def get_location(self) -> Tuple[int, int]:
"""
Return the X/Y coordinates of the window
:returns: x, y of window location
"""
return super().get_location()
def set_visible(self, visible=True):
"""
Set if the window is visible or not. Normally, a program's window is visible.
:param bool visible:
"""
super().set_visible(visible)
# noinspection PyMethodMayBeStatic
def set_viewport(self, left: float, right: float, bottom: float, top: float):
"""
Set the viewport. (What coordinates we can see.
Used to scale and/or scroll the screen).
See :py:func:`arcade.set_viewport` for more detailed information.
:param Number left:
:param Number right:
:param Number bottom:
:param Number top:
"""
set_viewport(left, right, bottom, top)
# noinspection PyMethodMayBeStatic
def get_viewport(self) -> Tuple[float, float, float, float]:
""" Get the viewport. (What coordinates we can see.) """
return get_viewport()
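    # Illustrative use of the two viewport helpers above (numbers are hypothetical):
    # scroll the visible world 100 units to the right without changing its size.
    #
    #   left, right, bottom, top = window.get_viewport()
    #   window.set_viewport(left + 100, right + 100, bottom, top)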
def use(self):
"""Bind the window's framebuffer for rendering commands"""
self.ctx.screen.use()
def test(self, frames: int = 10):
"""
Used by unit test cases. Runs the event loop a few times and stops.
:param int frames:
"""
start_time = time.time()
for i in range(frames):
self.switch_to()
self.dispatch_events()
self.dispatch_event('on_draw')
self.flip()
current_time = time.time()
elapsed_time = current_time - start_time
start_time = current_time
if elapsed_time < 1. / 60.:
sleep_time = (1. / 60.) - elapsed_time
time.sleep(sleep_time)
self._dispatch_updates(1 / 60)
def show_view(self, new_view: 'View'):
"""
Select the view to show. Calling this function is the same as setting the
:py:meth:`arcade.Window.current_view` attribute.
:param View new_view: View to show
"""
if not isinstance(new_view, View):
raise ValueError("Must pass an arcade.View object to "
"Window.show_view()")
# Store the Window that is showing the "new_view" View.
if new_view.window is None:
new_view.window = self
elif new_view.window != self:
raise RuntimeError("You are attempting to pass the same view "
"object between multiple windows. A single "
"view object can only be used in one window.")
# remove previously shown view's handlers
if self._current_view is not None:
self._current_view.on_hide_view()
self.remove_handlers(self._current_view)
# push new view's handlers
self._current_view = new_view
self.push_handlers(
**{
et: getattr(new_view, et, None)
for et in self.event_types
if et != 'on_show' and hasattr(new_view, et)
}
)
self._current_view.on_show()
self._current_view.on_show_view()
# Note: After the View has been pushed onto pyglet's stack of event handlers (via push_handlers()), pyglet
# will still call the Window's event handlers. (See pyglet's EventDispatcher.dispatch_event() implementation
# for details)
def hide_view(self):
"""
        Hide the currently active view (if any), returning control
        to the window's own ``on_draw`` and ``on_update`` functions.
This is not necessary to call if you are switching views.
Simply call ``show_view`` again.
"""
if self._current_view is None:
return
self._current_view.on_hide_view()
self.remove_handlers(self._current_view)
self._current_view = None
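    # Minimal usage sketch for show_view()/hide_view() (MenuView is a hypothetical
    # arcade.View subclass):
    #
    #   window.show_view(MenuView())   # pushes the view's event handlers
    #   window.hide_view()             # drops back to the window's own handlers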
def _create(self):
super()._create()
def _recreate(self, changes):
super()._recreate(changes)
def flip(self):
""" Swap OpenGL and backing buffers for double-buffered windows. """
super().flip()
def switch_to(self):
""" Switch the this window. """
super().switch_to()
def set_caption(self, caption):
""" Set the caption for the window. """
super().set_caption(caption)
def set_minimum_size(self, width: int, height: int):
""" Set smallest window size. """
super().set_minimum_size(width, height)
def set_maximum_size(self, width, height):
""" Set largest window size. """
super().set_maximum_size(width, height)
def set_location(self, x, y):
""" Set location of the window. """
super().set_location(x, y)
def activate(self):
""" Activate this window. """
super().activate()
def minimize(self):
""" Minimize the window. """
super().minimize()
def maximize(self):
""" Maximize the window. """
super().maximize()
def set_vsync(self, vsync: bool):
""" Set if we sync our draws to the monitors vertical sync rate. """
super().set_vsync(vsync)
def set_mouse_platform_visible(self, platform_visible=None):
""" This does something. """
super().set_mouse_platform_visible(platform_visible)
def set_exclusive_mouse(self, exclusive=True):
""" Capture the mouse. """
super().set_exclusive_mouse(exclusive)
def set_exclusive_keyboard(self, exclusive=True):
""" Capture all keyboard input. """
super().set_exclusive_keyboard(exclusive)
def get_system_mouse_cursor(self, name):
""" Get the system mouse cursor """
return super().get_system_mouse_cursor(name)
def dispatch_events(self):
""" Dispatch events """
super().dispatch_events()
def open_window(width: int, height: int, window_title: str, resizable: bool = False,
antialiasing: bool = True) -> Window:
"""
This function opens a window. For ease-of-use we assume there will only be one window, and the
programmer does not need to keep a handle to the window. This isn't the best architecture, because
the window handle is stored in a global, but it makes things easier for programmers if they don't
have to track a window pointer.
:param Number width: Width of the window.
:param Number height: Height of the window.
:param str window_title: Title of the window.
:param bool resizable: Whether the window can be user-resizable.
:param bool antialiasing: Smooth the graphics?
:returns: Handle to window
:rtype: Window
"""
global _window
_window = Window(width, height, window_title, resizable=resizable, update_rate=None,
antialiasing=antialiasing)
_window.invalid = False
return _window
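# Usage sketch for open_window() (illustrative values; assumes the usual arcade
# event-loop entry point, arcade.run(), defined elsewhere in the library):
#
#   window = open_window(800, 600, "My Game", resizable=True)
#   arcade.run()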
class View:
"""
Support different views/screens in a window.
"""
def __init__(self,
window: Window = None):
if window is None:
self.window = arcade.get_window()
else:
self.window = window
self.key: Optional[int] = None
def update(self, delta_time: float):
"""To be overridden"""
pass
def on_update(self, delta_time: float):
"""To be overridden"""
pass
def on_draw(self):
"""Called when this view should draw"""
pass
def on_show(self):
"""Called when this view is shown and if window dispatches a on_show event.
(first time showing window or resize)
"""
pass
def on_show_view(self):
"""Called when this view is shown"""
pass
def on_hide_view(self):
"""Called when this view is not shown anymore"""
pass
def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):
"""
Override this function to add mouse functionality.
:param float x: x position of mouse
:param float y: y position of mouse
:param float dx: Change in x since the last time this method was called
:param float dy: Change in y since the last time this method was called
"""
pass
def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):
"""
Override this function to add mouse button functionality.
:param float x: x position of the mouse
:param float y: y position of the mouse
:param int button: What button was hit. One of:
arcade.MOUSE_BUTTON_LEFT, arcade.MOUSE_BUTTON_RIGHT,
arcade.MOUSE_BUTTON_MIDDLE
:param int modifiers: Bitwise 'and' of all modifiers (shift, ctrl, num lock)
pressed during this event. See :ref:`keyboard_modifiers`.
"""
pass
def on_mouse_drag(self, x: float, y: float, dx: float, dy: float, _buttons: int, _modifiers: int):
"""
Override this function to add mouse button functionality.
:param float x: x position of mouse
:param float y: y position of mouse
:param float dx: Change in x since the last time this method was called
:param float dy: Change in y since the last time this method was called
:param int _buttons: Which button is pressed
:param int _modifiers: Bitwise 'and' of all modifiers (shift, ctrl, num lock)
pressed during this event. See :ref:`keyboard_modifiers`.
"""
self.on_mouse_motion(x, y, dx, dy)
def on_mouse_release(self, x: float, y: float, button: int,
modifiers: int):
"""
Override this function to add mouse button functionality.
:param float x:
:param float y:
:param int button:
:param int modifiers: Bitwise 'and' of all modifiers (shift, ctrl, num lock)
pressed during this event. See :ref:`keyboard_modifiers`.
"""
pass
def on_mouse_scroll(self, x: int, y: int, scroll_x: int, scroll_y: int):
"""
User moves the scroll wheel.
:param int x:
:param int y:
:param int scroll_x:
:param int scroll_y:
"""
pass
def on_key_press(self, symbol: int, modifiers: int):
"""
Override this function to add key press functionality.
:param int symbol: Key that was hit
:param int modifiers: Bitwise 'and' of all modifiers (shift, ctrl, num lock)
pressed during this event. See :ref:`keyboard_modifiers`.
"""
try:
self.key = symbol
except AttributeError:
pass
def on_key_release(self, _symbol: int, _modifiers: int):
"""
Override this function to add key release functionality.
:param int _symbol: Key that was hit
:param int _modifiers: Bitwise 'and' of all modifiers (shift, ctrl, num lock)
pressed during this event. See :ref:`keyboard_modifiers`.
"""
try:
self.key = None
except AttributeError:
pass
| []
| []
| [
"REPL_ID"
]
| [] | ["REPL_ID"] | python | 1 | 0 | |
django_skeleton/asgi.py | """
ASGI config for django_skeleton project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_skeleton.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
kythe/proto/filetree_go_proto/filetree.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: kythe/proto/filetree.proto
package filetree_go_proto
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type DirectoryReply_Kind int32
const (
DirectoryReply_UNKNOWN DirectoryReply_Kind = 0
DirectoryReply_FILE DirectoryReply_Kind = 1
DirectoryReply_DIRECTORY DirectoryReply_Kind = 2
)
var DirectoryReply_Kind_name = map[int32]string{
0: "UNKNOWN",
1: "FILE",
2: "DIRECTORY",
}
var DirectoryReply_Kind_value = map[string]int32{
"UNKNOWN": 0,
"FILE": 1,
"DIRECTORY": 2,
}
func (x DirectoryReply_Kind) String() string {
return proto.EnumName(DirectoryReply_Kind_name, int32(x))
}
func (DirectoryReply_Kind) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_filetree_2585d68aef5af310, []int{3, 0}
}
type CorpusRootsRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CorpusRootsRequest) Reset() { *m = CorpusRootsRequest{} }
func (m *CorpusRootsRequest) String() string { return proto.CompactTextString(m) }
func (*CorpusRootsRequest) ProtoMessage() {}
func (*CorpusRootsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_filetree_2585d68aef5af310, []int{0}
}
func (m *CorpusRootsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CorpusRootsRequest.Unmarshal(m, b)
}
func (m *CorpusRootsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CorpusRootsRequest.Marshal(b, m, deterministic)
}
func (dst *CorpusRootsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CorpusRootsRequest.Merge(dst, src)
}
func (m *CorpusRootsRequest) XXX_Size() int {
return xxx_messageInfo_CorpusRootsRequest.Size(m)
}
func (m *CorpusRootsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CorpusRootsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CorpusRootsRequest proto.InternalMessageInfo
type CorpusRootsReply struct {
Corpus []*CorpusRootsReply_Corpus `protobuf:"bytes,1,rep,name=corpus,proto3" json:"corpus,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CorpusRootsReply) Reset() { *m = CorpusRootsReply{} }
func (m *CorpusRootsReply) String() string { return proto.CompactTextString(m) }
func (*CorpusRootsReply) ProtoMessage() {}
func (*CorpusRootsReply) Descriptor() ([]byte, []int) {
return fileDescriptor_filetree_2585d68aef5af310, []int{1}
}
func (m *CorpusRootsReply) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CorpusRootsReply.Unmarshal(m, b)
}
func (m *CorpusRootsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CorpusRootsReply.Marshal(b, m, deterministic)
}
func (dst *CorpusRootsReply) XXX_Merge(src proto.Message) {
xxx_messageInfo_CorpusRootsReply.Merge(dst, src)
}
func (m *CorpusRootsReply) XXX_Size() int {
return xxx_messageInfo_CorpusRootsReply.Size(m)
}
func (m *CorpusRootsReply) XXX_DiscardUnknown() {
xxx_messageInfo_CorpusRootsReply.DiscardUnknown(m)
}
var xxx_messageInfo_CorpusRootsReply proto.InternalMessageInfo
func (m *CorpusRootsReply) GetCorpus() []*CorpusRootsReply_Corpus {
if m != nil {
return m.Corpus
}
return nil
}
type CorpusRootsReply_Corpus struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Root []string `protobuf:"bytes,2,rep,name=root,proto3" json:"root,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CorpusRootsReply_Corpus) Reset() { *m = CorpusRootsReply_Corpus{} }
func (m *CorpusRootsReply_Corpus) String() string { return proto.CompactTextString(m) }
func (*CorpusRootsReply_Corpus) ProtoMessage() {}
func (*CorpusRootsReply_Corpus) Descriptor() ([]byte, []int) {
return fileDescriptor_filetree_2585d68aef5af310, []int{1, 0}
}
func (m *CorpusRootsReply_Corpus) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CorpusRootsReply_Corpus.Unmarshal(m, b)
}
func (m *CorpusRootsReply_Corpus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CorpusRootsReply_Corpus.Marshal(b, m, deterministic)
}
func (dst *CorpusRootsReply_Corpus) XXX_Merge(src proto.Message) {
xxx_messageInfo_CorpusRootsReply_Corpus.Merge(dst, src)
}
func (m *CorpusRootsReply_Corpus) XXX_Size() int {
return xxx_messageInfo_CorpusRootsReply_Corpus.Size(m)
}
func (m *CorpusRootsReply_Corpus) XXX_DiscardUnknown() {
xxx_messageInfo_CorpusRootsReply_Corpus.DiscardUnknown(m)
}
var xxx_messageInfo_CorpusRootsReply_Corpus proto.InternalMessageInfo
func (m *CorpusRootsReply_Corpus) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *CorpusRootsReply_Corpus) GetRoot() []string {
if m != nil {
return m.Root
}
return nil
}
type DirectoryRequest struct {
Corpus string `protobuf:"bytes,1,opt,name=corpus,proto3" json:"corpus,omitempty"`
Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"`
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DirectoryRequest) Reset() { *m = DirectoryRequest{} }
func (m *DirectoryRequest) String() string { return proto.CompactTextString(m) }
func (*DirectoryRequest) ProtoMessage() {}
func (*DirectoryRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_filetree_2585d68aef5af310, []int{2}
}
func (m *DirectoryRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DirectoryRequest.Unmarshal(m, b)
}
func (m *DirectoryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DirectoryRequest.Marshal(b, m, deterministic)
}
func (dst *DirectoryRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DirectoryRequest.Merge(dst, src)
}
func (m *DirectoryRequest) XXX_Size() int {
return xxx_messageInfo_DirectoryRequest.Size(m)
}
func (m *DirectoryRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DirectoryRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DirectoryRequest proto.InternalMessageInfo
func (m *DirectoryRequest) GetCorpus() string {
if m != nil {
return m.Corpus
}
return ""
}
func (m *DirectoryRequest) GetRoot() string {
if m != nil {
return m.Root
}
return ""
}
func (m *DirectoryRequest) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
type DirectoryReply struct {
Corpus string `protobuf:"bytes,3,opt,name=corpus,proto3" json:"corpus,omitempty"`
Root string `protobuf:"bytes,4,opt,name=root,proto3" json:"root,omitempty"`
Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"`
Entry []*DirectoryReply_Entry `protobuf:"bytes,6,rep,name=entry,proto3" json:"entry,omitempty"`
Subdirectory []string `protobuf:"bytes,1,rep,name=subdirectory,proto3" json:"subdirectory,omitempty"` // Deprecated: Do not use.
File []string `protobuf:"bytes,2,rep,name=file,proto3" json:"file,omitempty"` // Deprecated: Do not use.
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DirectoryReply) Reset() { *m = DirectoryReply{} }
func (m *DirectoryReply) String() string { return proto.CompactTextString(m) }
func (*DirectoryReply) ProtoMessage() {}
func (*DirectoryReply) Descriptor() ([]byte, []int) {
return fileDescriptor_filetree_2585d68aef5af310, []int{3}
}
func (m *DirectoryReply) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DirectoryReply.Unmarshal(m, b)
}
func (m *DirectoryReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DirectoryReply.Marshal(b, m, deterministic)
}
func (dst *DirectoryReply) XXX_Merge(src proto.Message) {
xxx_messageInfo_DirectoryReply.Merge(dst, src)
}
func (m *DirectoryReply) XXX_Size() int {
return xxx_messageInfo_DirectoryReply.Size(m)
}
func (m *DirectoryReply) XXX_DiscardUnknown() {
xxx_messageInfo_DirectoryReply.DiscardUnknown(m)
}
var xxx_messageInfo_DirectoryReply proto.InternalMessageInfo
func (m *DirectoryReply) GetCorpus() string {
if m != nil {
return m.Corpus
}
return ""
}
func (m *DirectoryReply) GetRoot() string {
if m != nil {
return m.Root
}
return ""
}
func (m *DirectoryReply) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *DirectoryReply) GetEntry() []*DirectoryReply_Entry {
if m != nil {
return m.Entry
}
return nil
}
// Deprecated: Do not use.
func (m *DirectoryReply) GetSubdirectory() []string {
if m != nil {
return m.Subdirectory
}
return nil
}
// Deprecated: Do not use.
func (m *DirectoryReply) GetFile() []string {
if m != nil {
return m.File
}
return nil
}
type DirectoryReply_Entry struct {
Kind DirectoryReply_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=kythe.proto.DirectoryReply_Kind" json:"kind,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DirectoryReply_Entry) Reset() { *m = DirectoryReply_Entry{} }
func (m *DirectoryReply_Entry) String() string { return proto.CompactTextString(m) }
func (*DirectoryReply_Entry) ProtoMessage() {}
func (*DirectoryReply_Entry) Descriptor() ([]byte, []int) {
return fileDescriptor_filetree_2585d68aef5af310, []int{3, 0}
}
func (m *DirectoryReply_Entry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DirectoryReply_Entry.Unmarshal(m, b)
}
func (m *DirectoryReply_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DirectoryReply_Entry.Marshal(b, m, deterministic)
}
func (dst *DirectoryReply_Entry) XXX_Merge(src proto.Message) {
xxx_messageInfo_DirectoryReply_Entry.Merge(dst, src)
}
func (m *DirectoryReply_Entry) XXX_Size() int {
return xxx_messageInfo_DirectoryReply_Entry.Size(m)
}
func (m *DirectoryReply_Entry) XXX_DiscardUnknown() {
xxx_messageInfo_DirectoryReply_Entry.DiscardUnknown(m)
}
var xxx_messageInfo_DirectoryReply_Entry proto.InternalMessageInfo
func (m *DirectoryReply_Entry) GetKind() DirectoryReply_Kind {
if m != nil {
return m.Kind
}
return DirectoryReply_UNKNOWN
}
func (m *DirectoryReply_Entry) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func init() {
proto.RegisterType((*CorpusRootsRequest)(nil), "kythe.proto.CorpusRootsRequest")
proto.RegisterType((*CorpusRootsReply)(nil), "kythe.proto.CorpusRootsReply")
proto.RegisterType((*CorpusRootsReply_Corpus)(nil), "kythe.proto.CorpusRootsReply.Corpus")
proto.RegisterType((*DirectoryRequest)(nil), "kythe.proto.DirectoryRequest")
proto.RegisterType((*DirectoryReply)(nil), "kythe.proto.DirectoryReply")
proto.RegisterType((*DirectoryReply_Entry)(nil), "kythe.proto.DirectoryReply.Entry")
proto.RegisterEnum("kythe.proto.DirectoryReply_Kind", DirectoryReply_Kind_name, DirectoryReply_Kind_value)
}
func init() {
proto.RegisterFile("kythe/proto/filetree.proto", fileDescriptor_filetree_2585d68aef5af310)
}
var fileDescriptor_filetree_2585d68aef5af310 = []byte{
// 418 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
0x10, 0xcd, 0x3a, 0x8e, 0xc1, 0x63, 0x28, 0xd6, 0x08, 0x55, 0x96, 0x51, 0x55, 0xd7, 0x42, 0xa8,
0x07, 0xe4, 0xa2, 0x80, 0xc4, 0x85, 0x53, 0xdb, 0x54, 0x8a, 0x8a, 0x12, 0xb1, 0x14, 0x21, 0x8e,
0xad, 0x3d, 0xb4, 0x56, 0x5d, 0xaf, 0x59, 0x6f, 0x2a, 0xf9, 0xca, 0xff, 0xf0, 0x23, 0x7c, 0x15,
0xda, 0x75, 0xd2, 0xd8, 0xa8, 0xc9, 0xc9, 0x33, 0x6f, 0xdf, 0x3c, 0xcf, 0xbc, 0x19, 0x08, 0x6f,
0x1b, 0x75, 0x43, 0x47, 0x95, 0x14, 0x4a, 0x1c, 0xfd, 0xcc, 0x0b, 0x52, 0x92, 0x28, 0x31, 0x29,
0x7a, 0xe6, 0xad, 0x4d, 0xe2, 0x97, 0x80, 0x27, 0x42, 0x56, 0x8b, 0x9a, 0x0b, 0xa1, 0x6a, 0x4e,
0xbf, 0x16, 0x54, 0xab, 0xf8, 0x37, 0x03, 0xbf, 0x07, 0x57, 0x45, 0x83, 0x9f, 0xc0, 0x49, 0x0d,
0x16, 0xb0, 0x68, 0x78, 0xe8, 0x8d, 0x5f, 0x27, 0x1d, 0xa1, 0xe4, 0x7f, 0xfa, 0x0a, 0x58, 0xd6,
0x84, 0xef, 0xc0, 0x69, 0x11, 0x44, 0xb0, 0xcb, 0xcb, 0x3b, 0x0a, 0x58, 0xc4, 0x0e, 0x5d, 0x6e,
0x62, 0x8d, 0x49, 0x21, 0x54, 0x60, 0x45, 0x43, 0x8d, 0xe9, 0x38, 0xe6, 0xe0, 0x9f, 0xe6, 0x92,
0x52, 0x25, 0x64, 0xb3, 0x6c, 0x0c, 0x77, 0x3b, 0x3d, 0xe8, 0xea, 0x65, 0xd6, 0xa9, 0x67, 0xab,
0x7a, 0x8d, 0x55, 0x97, 0xea, 0x26, 0x18, 0xb6, 0x98, 0x8e, 0xe3, 0xbf, 0x16, 0xec, 0x74, 0x44,
0xf5, 0x58, 0x6b, 0xc9, 0xe1, 0xa3, 0x92, 0xf6, 0x23, 0x92, 0xa3, 0xb5, 0x24, 0x7e, 0x84, 0x11,
0x95, 0x4a, 0x36, 0x81, 0x63, 0x5c, 0x39, 0xe8, 0xb9, 0xd2, 0xff, 0x57, 0x32, 0xd1, 0x44, 0xde,
0xf2, 0xf1, 0x0d, 0x3c, 0xab, 0x17, 0x57, 0xd9, 0x8a, 0x61, 0x5c, 0x75, 0x8f, 0xad, 0x80, 0xf1,
0x1e, 0x8e, 0xbb, 0x60, 0xeb, 0x0d, 0xb6, 0xde, 0x98, 0x77, 0x93, 0x87, 0x5f, 0x60, 0x64, 0xf4,
0xf0, 0x03, 0xd8, 0xb7, 0x79, 0x99, 0x19, 0x4b, 0x76, 0xc6, 0xd1, 0xb6, 0x06, 0xce, 0xf3, 0x32,
0xe3, 0x86, 0xfd, 0xb0, 0x06, 0x6b, 0xbd, 0x86, 0xf8, 0x2d, 0xd8, 0x9a, 0x81, 0x1e, 0x3c, 0xf9,
0x36, 0x3b, 0x9f, 0xcd, 0xbf, 0xcf, 0xfc, 0x01, 0x3e, 0x05, 0xfb, 0x6c, 0xfa, 0x79, 0xe2, 0x33,
0x7c, 0x0e, 0xee, 0xe9, 0x94, 0x4f, 0x4e, 0x2e, 0xe6, 0xfc, 0x87, 0x6f, 0x8d, 0xff, 0x30, 0x78,
0x71, 0x96, 0x17, 0x74, 0x21, 0x89, 0xbe, 0x92, 0xbc, 0xcf, 0x53, 0xc2, 0x39, 0x78, 0x9d, 0x4b,
0xc0, 0xfd, 0xcd, 0x37, 0x62, 0x16, 0x1a, 0xee, 0x6d, 0x3d, 0xa2, 0x78, 0x80, 0x53, 0x70, 0x1f,
0x66, 0xc0, 0xbd, 0x4d, 0xb3, 0xb5, 0x62, 0xaf, 0xb6, 0x8c, 0x1e, 0x0f, 0x8e, 0x0f, 0x60, 0x3f,
0x15, 0x77, 0xc9, 0xb5, 0x10, 0xd7, 0x05, 0x25, 0x19, 0xdd, 0x2b, 0x21, 0x8a, 0xba, 0x5b, 0x73,
0xe5, 0x98, 0xcf, 0xfb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x15, 0x08, 0x3d, 0x24, 0x40, 0x03,
0x00, 0x00,
}
| []
| []
| []
| [] | [] | go | null | null | null |
internal/pkg/storage/bson/db/services/db_service.go | package services
import (
"context"
"fmt"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"os"
"time"
)
const (
VideoCollection = "videos"
CategoriesCollection = "categories"
)
type DatabaseService struct {
*mongo.Database
}
func ProvideDatabaseService() DatabaseService {
server := mountServerConnection(os.Getenv("ENV"),
os.Getenv("APP_DB_USERNAME"),
os.Getenv("APP_DB_PASSWORD"),
os.Getenv("APP_DB_HOST"),
os.Getenv("APP_DB_NAME"))
clientOptions := options.Client().ApplyURI(server)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
client, err := mongo.Connect(ctx, clientOptions)
if err != nil {
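		// Connection errors are swallowed here: callers receive a zero-value
		// DatabaseService whose embedded *mongo.Database is nil.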
return DatabaseService{}
}
return DatabaseService{client.Database(os.Getenv("APP_DB_NAME"))}
}
func mountServerConnection(env, user, password, hostname, dbname string) string {
if env == "dev" || env == "" {
return "mongodb://mongo:27017/dev_env"
}
return fmt.Sprintf("mongodb+srv://%s:%s@%s/%s?retryWrites=true&w=majority", user, password, hostname, dbname)
}
func makeFindOptions(filter string, page int64, pageSize int64) (bson.M, *options.FindOptions) {
collectionFilter := bson.M{}
findOptions := options.Find()
findOptions.SetLimit(pageSize)
findOptions.SetSkip((page - 1) * pageSize)
if filter != "" {
collectionFilter = bson.M{"titulo": bson.M{"$regex": fmt.Sprintf(".*%s.*", filter)}}
}
return collectionFilter, findOptions
}
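// Usage sketch (hypothetical caller elsewhere in this package; values are
// illustrative): combine the service with the filter/pagination helper when
// listing the videos collection.
//
//	svc := ProvideDatabaseService()
//	filter, opts := makeFindOptions("tutorial", 1, 20)
//	cursor, err := svc.Collection(VideoCollection).Find(context.Background(), filter, opts)
//	if err == nil {
//		defer cursor.Close(context.Background())
//	}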
| [
"\"ENV\"",
"\"APP_DB_USERNAME\"",
"\"APP_DB_PASSWORD\"",
"\"APP_DB_HOST\"",
"\"APP_DB_NAME\"",
"\"APP_DB_NAME\""
]
| []
| [
"ENV",
"APP_DB_NAME",
"APP_DB_PASSWORD",
"APP_DB_HOST",
"APP_DB_USERNAME"
]
| [] | ["ENV", "APP_DB_NAME", "APP_DB_PASSWORD", "APP_DB_HOST", "APP_DB_USERNAME"] | go | 5 | 0 | |
dovetail/run.py | #!/usr/bin/env python
#
# [email protected]
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
import click
import os
import copy
import time
import uuid
import utils.dovetail_logger as dt_logger
import utils.dovetail_utils as dt_utils
from parser import Parser
from container import Container
from testcase import Testcase
from testcase import Testsuite
from report import Report
from report import FunctestCrawler, YardstickCrawler, BottlenecksCrawler
from report import FunctestChecker, YardstickChecker, BottlenecksChecker
from utils.dovetail_config import DovetailConfig as dt_cfg
from test_runner import DockerRunner, ShellRunner
def load_testsuite(testsuite):
Testsuite.load()
return Testsuite.get(testsuite)
def load_testcase():
Testcase.load()
def run_test(testsuite, testarea, logger):
testcase_list = Testcase.get_testcase_list(testsuite, testarea)
duration = 0
start_time = time.time()
for testcase_name in testcase_list:
logger.info('>>[testcase]: {}'.format(testcase_name))
testcase = Testcase.get(testcase_name)
if testcase is None:
logger.error('Test case {} is not defined in testcase folder, '
'skipping.'.format(testcase_name))
continue
run_testcase = True
# if testcase.exceed_max_retry_times():
# run_testcase = False
# if testcase.script_result_acquired():
# run_testcase = False
if run_testcase:
testcase.run()
check_tc_result(testcase, logger)
end_time = time.time()
duration = end_time - start_time
return duration
def check_tc_result(testcase, logger):
result_dir = dt_cfg.dovetail_config['result_dir']
validate_type = testcase.validate_type()
functest_result = dt_cfg.dovetail_config['functest']['result']['file_path']
dovetail_result = os.path.join(result_dir,
dt_cfg.dovetail_config['result_file'])
if dt_cfg.dovetail_config['report_dest'].startswith("http"):
if dt_utils.store_db_results(dt_cfg.dovetail_config['report_dest'],
dt_cfg.dovetail_config['build_tag'],
testcase.name(), dovetail_result,
logger):
logger.info("Results have been pushed to database and stored "
"with local file {}.".format(dovetail_result))
else:
logger.error("Failed to push results to database.")
if dt_cfg.dovetail_config['report_dest'] == "file":
if validate_type.lower() == 'yardstick':
result_file = os.path.join(result_dir, testcase.name() + '.out')
elif validate_type.lower() == 'functest':
result_file = os.path.join(result_dir, functest_result)
elif validate_type.lower() == 'bottlenecks':
result_file = os.path.join(result_dir, testcase.name() + '.out')
else:
logger.error("Don't support {} now.".format(validate_type))
return
if os.path.isfile(result_file):
logger.info(
"Results have been stored with file {}.".format(result_file))
else:
logger.error(
"Failed to store results with file {}.".format(result_file))
result = Report.get_result(testcase)
Report.check_result(testcase, result)
def validate_input(input_dict, check_dict, logger):
# for 'func_tag' and 'yard_tag' options
func_tag = input_dict['func_tag']
yard_tag = input_dict['yard_tag']
bott_tag = input_dict['bott_tag']
valid_tag = check_dict['valid_docker_tag']
if func_tag is not None and func_tag not in valid_tag:
logger.error("The input option 'func_tag' can't be {}, "
"valid values are {}.".format(func_tag, valid_tag))
raise SystemExit(1)
if yard_tag is not None and yard_tag not in valid_tag:
logger.error("The input option 'yard_tag' can't be {}, "
"valid values are {}.".format(yard_tag, valid_tag))
raise SystemExit(1)
if bott_tag is not None and bott_tag not in valid_tag:
logger.error("The input option 'bott_tag' can't be {}, "
"valid values are {}.".format(bott_tag, valid_tag))
raise SystemExit(1)
# for 'report' option
report = input_dict['report']
if report:
if not (report.startswith("http") or report == "file"):
logger.error("Report type can't be {}, valid types are 'file' "
"and 'http'.".format(input_dict['report']))
raise SystemExit(1)
def filter_config(input_dict, logger):
cli_dict = dt_cfg.dovetail_config['cli']
configs = {}
for key in cli_dict:
if not cli_dict[key]:
continue
try:
cli_config = cli_dict[key]['config']
if cli_config is None:
continue
except KeyError:
continue
for key, value in input_dict.items():
for config_key, config_value in cli_config.items():
value_dict = {}
value_dict['value'] = value
try:
value_dict['path'] = config_value['path']
if key == config_key:
configs[key] = value_dict
break
if key.upper() == config_key:
configs[key.upper()] = value_dict
break
except KeyError as e:
logger.exception('KeyError {}.'.format(e))
raise SystemExit(1)
if not configs:
return None
return configs
def create_logs():
Container.create_log()
Parser.create_log()
Report.create_log()
FunctestCrawler.create_log()
YardstickCrawler.create_log()
BottlenecksCrawler.create_log()
FunctestChecker.create_log()
YardstickChecker.create_log()
BottlenecksChecker.create_log()
Testcase.create_log()
Testsuite.create_log()
DockerRunner.create_log()
ShellRunner.create_log()
def clean_results_dir():
result_path = dt_cfg.dovetail_config['result_dir']
if os.path.exists(result_path):
if os.path.isdir(result_path):
cmd = 'sudo rm -rf %s/*' % (result_path)
dt_utils.exec_cmd(cmd, exit_on_error=False, exec_msg_on=False)
else:
print "result_dir in dovetail_config.yml is not a directory."
raise SystemExit(1)
def get_result_path():
try:
dovetail_home = os.environ["DOVETAIL_HOME"]
except Exception:
print("ERROR: mandatory env variable 'DOVETAIL_HOME' is not found, "
"please set in env_config.sh and source this file before "
"running.")
return None
result_path = os.path.join(dovetail_home, 'results')
dt_cfg.dovetail_config['result_dir'] = result_path
pre_config_path = os.path.join(dovetail_home, 'pre_config')
patch_set_path = os.path.join(dovetail_home, 'patch')
dt_cfg.dovetail_config['config_dir'] = pre_config_path
dt_cfg.dovetail_config['patch_dir'] = patch_set_path
return dovetail_home
def copy_userconfig_files(logger):
dovetail_home = os.path.dirname(os.path.abspath(__file__))
userconfig_path = os.path.join(dovetail_home, 'userconfig')
pre_config_path = dt_cfg.dovetail_config['config_dir']
if not os.path.isdir(pre_config_path):
os.makedirs(pre_config_path)
cmd = 'sudo cp -r %s/* %s' % (userconfig_path, pre_config_path)
dt_utils.exec_cmd(cmd, logger, exit_on_error=False)
def copy_patch_files(logger):
dovetail_home = os.path.dirname(os.path.abspath(__file__))
patch_path = os.path.join(dovetail_home, 'patch')
patch_set_path = dt_cfg.dovetail_config['patch_dir']
if not os.path.isdir(patch_set_path):
os.makedirs(patch_set_path)
cmd = 'sudo cp -r %s/* %s' % (patch_path, patch_set_path)
dt_utils.exec_cmd(cmd, logger, exit_on_error=False)
# env_init can source some env variables used in dovetail, such as
# OS_CACERT when https + credential is used.
def env_init(logger):
openrc = os.path.join(dt_cfg.dovetail_config['config_dir'],
dt_cfg.dovetail_config['env_file'])
if not os.path.isfile(openrc):
logger.error("File {} does not exist.".format(openrc))
dt_utils.source_env(openrc)
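# Illustrative content of the sourced env file (an OpenStack-style openrc /
# env_config.sh; values are examples only):
#   export OS_AUTH_URL=https://192.168.105.231:5000/v3
#   export OS_USERNAME=admin
#   export OS_PASSWORD=secret
#   export OS_CACERT=/path/to/cacert.pem   # the https + credential case mentioned above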
def check_hosts_file(logger):
hosts_file = os.path.join(dt_cfg.dovetail_config['config_dir'],
'hosts.yaml')
if not os.path.isfile(hosts_file):
logger.warn("There is no hosts file {}, may be some issues with "
"domain name resolution.".format(hosts_file))
def main(*args, **kwargs):
"""Dovetail compliance test entry!"""
build_tag = "daily-master-%s" % str(uuid.uuid1())
dt_cfg.dovetail_config['build_tag'] = build_tag
if not get_result_path():
return
clean_results_dir()
if kwargs['debug']:
os.environ['DEBUG'] = 'true'
create_logs()
logger = dt_logger.Logger('run').getLogger()
logger.info('================================================')
logger.info('Dovetail compliance: {}!'.format(kwargs['testsuite']))
logger.info('================================================')
logger.info('Build tag: {}'.format(dt_cfg.dovetail_config['build_tag']))
env_init(logger)
copy_userconfig_files(logger)
copy_patch_files(logger)
dt_utils.check_docker_version(logger)
validate_input(kwargs, dt_cfg.dovetail_config['validate_input'], logger)
check_hosts_file(logger)
configs = filter_config(kwargs, logger)
if configs is not None:
dt_cfg.update_config(configs)
if kwargs['report']:
if(kwargs['report'].endswith('/')):
kwargs['report'] = kwargs['report'][0:kwargs['report'].rfind('/')]
dt_cfg.dovetail_config['report_dest'] = kwargs['report']
dt_cfg.update_cmds()
if kwargs['offline']:
dt_cfg.dovetail_config['offline'] = True
else:
dt_cfg.dovetail_config['offline'] = False
origin_testarea = kwargs['testarea']
testsuite_validation = False
if kwargs['testsuite'] in dt_cfg.dovetail_config['testsuite_supported']:
testsuite_validation = True
testarea_validation, testarea = Testcase.check_testarea(origin_testarea)
if testsuite_validation and testarea_validation:
testsuite_yaml = load_testsuite(kwargs['testsuite'])
load_testcase()
duration = run_test(testsuite_yaml, testarea, logger)
if dt_cfg.dovetail_config['report_dest'] == "file":
Report.generate(testsuite_yaml, testarea, duration)
if dt_cfg.dovetail_config['report_dest'].startswith("http"):
Report.save_logs()
else:
logger.error('Invalid input commands, testsuite {} testarea {}'
.format(kwargs['testsuite'], origin_testarea))
dt_cfg.load_config_files()
dovetail_config = copy.deepcopy(dt_cfg.dovetail_config)
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
if dovetail_config['cli']['options'] is not None:
for key, value in dovetail_config['cli']['options'].items():
if value is not None:
for k, v in value.items():
flags = v['flags']
v.pop('flags')
v.pop('path', None)
main = click.option(*flags, **v)(main)
if dovetail_config['cli']['arguments'] is not None:
for key, value in dovetail_config['cli']['arguments'].items():
if value is not None:
for k, v in value.items():
flags = v['flags']
v.pop('flags')
v.pop('path', None)
main = click.argument(flags, **v)(main)
main = click.command(context_settings=CONTEXT_SETTINGS)(main)
if __name__ == '__main__':
main()
| []
| []
| [
"DOVETAIL_HOME",
"DEBUG"
]
| [] | ["DOVETAIL_HOME", "DEBUG"] | python | 2 | 0 | |
app/__init__.py | from flask.ext.sqlalchemy import SQLAlchemy
import os
db = SQLAlchemy()
def create_app():
from flask import Flask
from .api_1_0 import api_blueprint as api_1_0_blueprint
from .api_1_0 import api
app = Flask(__name__)
app.register_blueprint(api_1_0_blueprint)
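    # The connection URI below is assembled from four environment variables
    # (DB_USER, DB_PASS, DB_HOST, DB); if any of them is unset, os.environ.get()
    # returns None and the string concatenation raises a TypeError at startup.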
app.config['SQLALCHEMY_DATABASE_URI'] = \
'postgresql://' + os.environ.get('DB_USER') + \
':' + os.environ.get('DB_PASS') + \
'@' + os.environ.get('DB_HOST') + \
'/' + os.environ.get('DB')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db.init_app(app)
api.init_app(app)
return app | []
| []
| [
"DB_PASS",
"DB",
"DB_USER",
"DB_HOST"
]
| [] | ["DB_PASS", "DB", "DB_USER", "DB_HOST"] | python | 4 | 0 | |
Algorithms/Java/com/graphtheory/EvenTree.java | // Author: Sagar Malik
// https://github.com/SagarMalik
package com.graphtheory;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.IntStream;
public class EvenTree {
static class Solution {
static class TreeNode {
private static Map<Integer, TreeNode> keeper = new HashMap<>();
int val;
List<TreeNode> children;
public TreeNode(int v) {
val = v;
children = new ArrayList<>();
keeper.put(val, this);
}
public void addChild(TreeNode t) {
children.add(t);
}
public static void AddConnection(int child, int parent) {
TreeNode p = keeper.get(parent);
if (p == null)
p = new TreeNode(parent);
TreeNode c = keeper.get(child);
if (c == null)
c = new TreeNode(child);
p.addChild(c);
}
public static TreeNode getRoot() {
return keeper.get(1);
}
static int count = 0;
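            // getCount() returns the size of the subtree rooted at this node.
            // Every child subtree with an even node count can be cut away from its
            // parent (each such cut keeps all components even), so it bumps the
            // shared removable-edge counter instead of being added to the total.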
public int getCount() {
int ct = 1;
for (TreeNode c : children) {
int childCount = c.getCount();
if (childCount % 2 != 0)
ct += childCount;
else
count++;
}
return ct;
}
public static int populate() {
TreeNode root = getRoot();
root.getCount();
return count;
}
}
// Complete the evenForest function below.
public static void main(String[] args) throws IOException {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
BufferedWriter bufferedWriter =
new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
String[] tNodesEdges = bufferedReader.readLine().replaceAll("\\s+$", "").split(" ");
// int tNodes = Integer.parseInt(tNodesEdges[0]);
int tEdges = Integer.parseInt(tNodesEdges[1]);
// List<Integer> tFrom = new ArrayList<>();
// List<Integer> tTo = new ArrayList<>();
IntStream.range(0, tEdges).forEach(i -> {
try {
String[] tFromTo = bufferedReader.readLine().replaceAll("\\s+$", "").split(" ");
TreeNode.AddConnection(Integer.parseInt(tFromTo[0]), Integer.parseInt(tFromTo[1]));
} catch (IOException ex) {
throw new RuntimeException(ex);
}
});
int res = TreeNode.populate();
bufferedWriter.write(String.valueOf(res));
bufferedWriter.newLine();
bufferedReader.close();
bufferedWriter.close();
}
}
public static void main(String[] args) {
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
test/com/xhaus/modjy/ModjyTestBase.java | package com.xhaus.modjy;
import java.io.File;
import java.util.Map;
import java.util.Iterator;
import junit.framework.*;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
import com.mockrunner.servlet.BasicServletTestCaseAdapter;
import com.mockrunner.mock.web.WebMockObjectFactory;
import com.mockrunner.mock.web.MockServletConfig;
import com.mockrunner.mock.web.MockServletContext;
import com.mockrunner.mock.web.MockHttpServletRequest;
import com.mockrunner.mock.web.MockHttpServletResponse;
import org.jdom.Element;
import org.jdom.output.XMLOutputter;
import org.python.core.PyObject;
import org.python.util.PythonInterpreter;
import com.xhaus.modjy.ModjyJServlet;
/**
*
*/
public class ModjyTestBase extends BasicServletTestCaseAdapter
{
final static String DEFAULT_APP_DIR = "test_apps_dir";
final static String LIB_PYTHON_DIR = "lib-python";
final static String LIB_PYTHON_TEST_PATH = "lib_python_folder";
final static String DEFAULT_APP_FILE = "simple_app.py";
final static String DEFAULT_APP_NAME = "simple_app";
public WebMockObjectFactory factory;
public MockServletConfig servletConfig;
public MockServletContext servletContext;
public WebMockObjectFactory getFactory ()
{
if (factory == null)
factory = getWebMockObjectFactory();
return factory;
}
public MockServletConfig getConfig ()
{
if (servletConfig == null)
servletConfig = getFactory().getMockServletConfig();
return servletConfig;
}
public MockServletContext getContext ()
{
if (servletContext == null)
servletContext = getFactory().getMockServletContext();
return servletContext;
}
// public void dumpContextRealPaths ( )
// {
// Map pathMap = ((LoggingMockServletContext)getContext()).actualPaths;
// Iterator it = pathMap.keySet().iterator();
// while (it.hasNext())
// {
// String pathName = (String) it.next();
// System.out.println("Path '"+pathName+"'-->'"+pathMap.get(pathName)+"'");
// }
// }
public void setInitParameter ( String name, String value )
{
getConfig().setInitParameter(name, value);
}
public void setRealPath ( String source, String target )
{
getContext().setRealPath(source, target);
}
public void addHeader(String headerName, String headerValue)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
request.addHeader(headerName, headerValue);
getFactory().addRequestWrapper(request);
}
public void setBodyContent(String content)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
request.setBodyContent(content);
getFactory().addRequestWrapper(request);
}
public void setServletContextPath(String path)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
request.setContextPath(path);
getFactory().addRequestWrapper(request);
}
public void setServletPath(String path)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
request.setServletPath(path);
getFactory().addRequestWrapper(request);
}
public void setRequestURI(String uri)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
request.setRequestURI(uri);
getFactory().addRequestWrapper(request);
}
public void setScheme(String scheme)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
request.setScheme(scheme);
getFactory().addRequestWrapper(request);
}
public void setPathInfo(String pathInfo)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
request.setPathInfo(pathInfo);
getFactory().addRequestWrapper(request);
}
public void setQueryString(String qString)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
request.setQueryString(qString);
getFactory().addRequestWrapper(request);
}
public void setProtocol(String protocol)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
request.setProtocol(protocol);
getFactory().addRequestWrapper(request);
}
public void setServerName(String name)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
// Using setLocalName() here: See here for more: http://docs.sun.com/source/819-0077/J2EE.html
request.setLocalName(name);
getFactory().addRequestWrapper(request);
}
public void setServerPort(int port)
{
MockHttpServletRequest request = (MockHttpServletRequest) getFactory().getWrappedRequest();
request.setLocalPort(port);
getFactory().addRequestWrapper(request);
}
public void setPythonHome(String app_dir)
{
setInitParameter("python.home", app_dir);
}
public void setAppDir(String app_dir)
{
setInitParameter("app_directory", app_dir);
}
public void setAppFile(String app_file)
{
setInitParameter("app_filename", app_file);
}
public void setAppName(String app_name)
{
setInitParameter("app_callable_name", app_name);
}
public void setAppImportable(String app_path)
{
setAppDir("");
setAppFile("");
setAppName("");
setInitParameter("app_import_name", app_path);
}
public MockHttpServletResponse getResponse()
{
MockHttpServletResponse response = (MockHttpServletResponse) getFactory().getWrappedResponse();
return response;
}
public int getStatus()
{
MockHttpServletResponse response = (MockHttpServletResponse) getFactory().getWrappedResponse();
return response.getStatusCode();
}
protected void baseSetUp()
throws Exception
{
super.setUp();
String jythonHome = System.getenv("JYTHON_HOME");
setRealPath(jythonHome, jythonHome);
setRealPath("/WEB-INF/"+LIB_PYTHON_DIR, LIB_PYTHON_TEST_PATH);
setRealPath("/WEB-INF/lib/modjy.jar", "../modjy.jar");
setPythonHome(jythonHome);
setAppDir(DEFAULT_APP_DIR);
setAppFile(DEFAULT_APP_FILE);
setAppName(DEFAULT_APP_NAME);
setInitParameter("exc_handler", "testing");
// dumpContextRealPaths();
}
protected PyObject evalPythonString(String pyString)
{
// Efficiency be damned: it's a testing phase
PythonInterpreter interp = new PythonInterpreter();
try
{
return interp.eval(pyString);
}
catch (Exception x)
{
System.err.println("Exception evaling '"+pyString+"': " + x);
return null;
}
}
protected void createServlet()
{
createServlet(ModjyJServlet.class);
// Set zero content: this can be overridden later
setBodyContent("");
clearOutput();
}
// Leave this here as a simple template for a test
public void testHelloWorld() throws Exception
{
baseSetUp();
createServlet();
doGet();
String result = new XMLOutputter().outputString(getOutputAsJDOMDocument());
}
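    // A hypothetical variant of the template above (not part of the original
    // suite): drive a GET through the servlet and assert on the HTTP status via
    // the helpers defined in this class.
    public void testSimpleAppStatus() throws Exception
    {
        baseSetUp();
        createServlet();
        doGet();
        assertEquals(200, getStatus());
    }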
public static void main(String args[])
{
TestSuite suite = new TestSuite();
suite.addTestSuite(ModjyTestBase.class);
suite.addTestSuite(ModjyTestAppInvocation.class);
suite.addTestSuite(ModjyTestEnviron.class);
suite.addTestSuite(ModjyTestHeaders.class);
suite.addTestSuite(ModjyTestContentHeaders.class);
suite.addTestSuite(ModjyTestReturnIterable.class);
suite.addTestSuite(ModjyTestWebInf.class);
suite.addTestSuite(ModjyTestWSGIStreams.class);
junit.textui.TestRunner.run(suite);
}
}
| [
"\"JYTHON_HOME\""
]
| []
| [
"JYTHON_HOME"
]
| [] | ["JYTHON_HOME"] | java | 1 | 0 | |
Code/Python/Apps/GSOC_JMB/App/PersonalWsgiServer/plugins/WsgiApps/django_app.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: JMB
"""
This is a WSGI app for serving Django apps simply. Unfortunately, it doesn't do
that just yet and and won't work as you expect it. Thus, it's not going to go in
the main Kamaelia tree just yet, but I'll leave it in the Kamaelia Publish distribution
for all the masochists out there. :)
"""
import os, sys
from static import static_app
import django.core.handlers.wsgi
_paths_set = set([])
def application(environ = {}, start_response = None):
if not environ['kp.project_path'] in _paths_set:
_paths_set.add(environ['kp.project_path'])
sys.path.append(environ['kp.project_path'])
#django doesn't handle PATH_INFO or SCRIPT_NAME variables properly in the current version
if environ.get('kp.django_path_handling', False):
environ['PATH_INFO'] = environ['SCRIPT_NAME'] + environ['PATH_INFO']
#from pprint import pprint
#pprint(environ)
os.environ['DJANGO_SETTINGS_MODULE'] = environ['kp.django_settings_module']
_application = django.core.handlers.wsgi.WSGIHandler()
return _application(environ, start_response)
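# Example of the custom 'kp.*' environ keys this handler reads (values are
# illustrative; the Kamaelia Publish / modjy layer is expected to supply them):
#   environ['kp.project_path'] = '/srv/my_django_project'    # appended to sys.path once
#   environ['kp.django_settings_module'] = 'mysite.settings'
#   environ['kp.django_path_handling'] = True                 # re-join SCRIPT_NAME + PATH_INFO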
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
mars/services/task/supervisor/manager.py | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import importlib
import time
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List, Type, Union, Optional
from .... import oscar as mo
from ....core import TileableGraph, TileableType, enter_mode
from ....core.context import set_context
from ....core.operand import Fetch
from ...cluster.api import ClusterAPI
from ...context import ThreadedServiceContext
from ...lifecycle.api import LifecycleAPI
from ...meta import MetaAPI
from ...scheduling import SchedulingAPI
from ...subtask import SubtaskResult
from ..config import task_options
from ..core import Task, new_task_id, TaskStatus
from ..errors import TaskNotExist
from .preprocessor import TaskPreprocessor
from .processor import TaskProcessorActor
class TaskConfigurationActor(mo.Actor):
def __init__(self,
task_conf: Dict[str, Any],
task_preprocessor_cls: Type[TaskPreprocessor] = None):
for name, value in task_conf.items():
setattr(task_options, name, value)
self._task_preprocessor_cls = task_preprocessor_cls
def get_config(self):
return {
'task_options': task_options,
'task_preprocessor_cls': self._task_preprocessor_cls
}
@dataclass
class ResultTileableInfo:
tileable: TileableType
processor_ref: Union[TaskProcessorActor, mo.ActorRef]
class TaskManagerActor(mo.Actor):
_task_name_to_parent_task_id: Dict[str, str]
_task_name_to_task_ids: Dict[str, List[str]]
_task_id_to_processor_ref: Dict[str, Union[TaskProcessorActor, mo.ActorRef]]
_tileable_key_to_info: Dict[str, List[ResultTileableInfo]]
_cluster_api: Optional[ClusterAPI]
_meta_api: Optional[MetaAPI]
_lifecycle_api: Optional[LifecycleAPI]
def __init__(self, session_id: str):
self._session_id = session_id
self._config = None
self._task_preprocessor_cls = None
self._last_idle_time = None
self._task_name_to_parent_task_id = dict()
self._task_name_to_task_ids = defaultdict(list)
self._task_id_to_processor_ref = dict()
self._tileable_key_to_info = defaultdict(list)
self._cluster_api = None
self._meta_api = None
self._lifecycle_api = None
self._scheduling_api = None
async def __post_create__(self):
self._cluster_api = await ClusterAPI.create(self.address)
self._scheduling_api = await SchedulingAPI.create(self._session_id, self.address)
self._meta_api = await MetaAPI.create(self._session_id, self.address)
self._lifecycle_api = await LifecycleAPI.create(
self._session_id, self.address)
# get config
configuration_ref = await mo.actor_ref(
TaskConfigurationActor.default_uid(),
address=self.address)
task_conf = await configuration_ref.get_config()
self._config, self._task_preprocessor_cls = \
task_conf['task_options'], task_conf['task_preprocessor_cls']
self._task_preprocessor_cls = self._get_task_preprocessor_cls()
# init context
await self._init_context()
async def __pre_destroy__(self):
for processor_ref in self._task_id_to_processor_ref.values():
await processor_ref.destroy()
async def _init_context(self):
loop = asyncio.get_running_loop()
context = ThreadedServiceContext(
self._session_id, self.address, self.address, loop=loop)
await context.init()
set_context(context)
@staticmethod
def gen_uid(session_id):
return f'{session_id}_task_manager'
@enter_mode(kernel=True)
async def submit_tileable_graph(self,
graph: TileableGraph,
task_name: str = None,
fuse_enabled: bool = None,
extra_config: dict = None) -> str:
self._last_idle_time = None
if task_name is None:
# new task without task name
task_id = task_name = new_task_id()
parent_task_id = new_task_id()
elif task_name in self._task_name_to_parent_task_id:
# task with the same name submitted before
parent_task_id = self._task_name_to_parent_task_id[task_name]
task_id = new_task_id()
else:
# new task with task_name
task_id = new_task_id()
parent_task_id = new_task_id()
uid = TaskProcessorActor.gen_uid(self._session_id, parent_task_id)
if task_name not in self._task_name_to_parent_task_id:
            # gen main task, which represents each submission from the user
processor_ref = await mo.create_actor(
TaskProcessorActor, self._session_id, parent_task_id,
task_name=task_name, address=self.address, uid=uid)
self._task_name_to_parent_task_id[task_name] = parent_task_id
else:
processor_ref = await mo.actor_ref(mo.ActorRef(self.address, uid))
self._task_name_to_task_ids[task_name].append(task_id)
self._task_id_to_processor_ref[task_id] = processor_ref
if fuse_enabled is None:
fuse_enabled = self._config.fuse_enabled
# gen task
task = Task(task_id, self._session_id,
graph, task_name,
parent_task_id=parent_task_id,
fuse_enabled=fuse_enabled,
extra_config=extra_config)
# gen task processor
tiled_context = await self._gen_tiled_context(graph)
await processor_ref.add_task(
task, tiled_context, self._config, self._task_preprocessor_cls)
for tileable in graph.result_tileables:
info = ResultTileableInfo(tileable=tileable,
processor_ref=processor_ref)
self._tileable_key_to_info[tileable.key].append(info)
return task_id
async def get_tileable_graph_dict_by_task_id(self, task_id):
try:
processor_ref = self._task_id_to_processor_ref[task_id]
except KeyError:
raise TaskNotExist(f'Task {task_id} does not exist')
res = await processor_ref.get_tileable_graph_as_dict()
return res
async def get_tileable_details(self, task_id):
try:
processor_ref = self._task_id_to_processor_ref[task_id]
except KeyError:
raise TaskNotExist(f'Task {task_id} does not exist')
return await processor_ref.get_tileable_details()
async def _gen_tiled_context(self, graph: TileableGraph) -> \
Dict[TileableType, TileableType]:
# process graph, add fetch node to tiled context
tiled_context = dict()
for tileable in graph:
if isinstance(tileable.op, Fetch) and tileable.is_coarse():
info = self._tileable_key_to_info[tileable.key][-1]
tiled_context[tileable] = \
await info.processor_ref.get_result_tileable(tileable.key)
return tiled_context
def _get_task_preprocessor_cls(self):
if self._task_preprocessor_cls is not None:
assert isinstance(self._task_preprocessor_cls, str)
module, name = self._task_preprocessor_cls.rsplit('.', 1)
return getattr(importlib.import_module(module), name)
else:
return TaskPreprocessor
async def wait_task(self,
task_id: str,
timeout: int = None):
try:
processor_ref = self._task_id_to_processor_ref[task_id]
except KeyError: # pragma: no cover
raise TaskNotExist(f'Task {task_id} does not exist')
return processor_ref.wait(timeout)
async def cancel_task(self, task_id: str):
try:
processor_ref = self._task_id_to_processor_ref[task_id]
except KeyError: # pragma: no cover
raise TaskNotExist(f'Task {task_id} does not exist')
yield processor_ref.cancel()
async def get_task_results(self, progress: bool = False):
if not self._task_id_to_processor_ref:
raise mo.Return([])
results = yield asyncio.gather(*[
ref.result() for ref in self._task_id_to_processor_ref.values()
])
if progress:
task_to_result = {res.task_id: res for res in results}
progress_task_ids = []
for res in results:
if res.status != TaskStatus.terminated:
progress_task_ids.append(res.task_id)
else:
res.progress = 1.0
progresses = yield asyncio.gather(*[
self._task_id_to_processor_ref[task_id].progress()
for task_id in progress_task_ids
])
for task_id, progress in zip(progress_task_ids, progresses):
task_to_result[task_id].progress = progress
raise mo.Return(results)
async def get_task_result(self, task_id: str):
try:
processor_ref = self._task_id_to_processor_ref[task_id]
except KeyError: # pragma: no cover
raise TaskNotExist(f'Task {task_id} does not exist')
return await processor_ref.result()
async def get_task_result_tileables(self, task_id: str):
try:
processor_ref = self._task_id_to_processor_ref[task_id]
except KeyError: # pragma: no cover
raise TaskNotExist(f'Task {task_id} does not exist')
return await processor_ref.get_result_tileables()
async def set_subtask_result(self, subtask_result: SubtaskResult):
task_id = subtask_result.task_id
try:
processor_ref = self._task_id_to_processor_ref[task_id]
except KeyError: # pragma: no cover
raise TaskNotExist(f'Task {task_id} does not exist')
yield processor_ref.set_subtask_result(subtask_result)
@mo.extensible
async def get_task_progress(self, task_id: str) -> float:
try:
processor_ref = self._task_id_to_processor_ref[task_id]
except KeyError: # pragma: no cover
raise TaskNotExist(f'Task {task_id} does not exist')
return await processor_ref.progress()
async def get_last_idle_time(self):
if self._last_idle_time is None:
for processor_ref in self._task_id_to_processor_ref.values():
if not await processor_ref.is_done():
break
else:
self._last_idle_time = time.time()
return self._last_idle_time
| []
| []
| []
| [] | [] | python | null | null | null |
plugins/youtube_dl_button.py | import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logging.getLogger("pyrogram").setLevel(logging.WARNING)
import asyncio
import json
import os
import shutil
import time
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from datetime import datetime
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
from pyrogram.types import InputMediaPhoto
from translation import Translation
from helper_funcs.help_Nekmo_ffmpeg import generate_screen_shots
from helper_funcs.display_progress import progress_for_pyrogram, humanbytes
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from PIL import Image
async def youtube_dl_call_back(bot, update):
cb_data = update.data
tg_send_type, youtube_dl_format, youtube_dl_ext = cb_data.split("|")
thumb_image_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + ".jpg"
save_ytdl_json_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + ".json"
try:
with open(save_ytdl_json_path, "r", encoding="utf8") as f:
response_json = json.load(f)
except (FileNotFoundError) as e:
await bot.delete_messages(
chat_id=update.message.chat.id,
message_ids=update.message.message_id,
revoke=True
)
return False
youtube_dl_url = update.message.reply_to_message.text
custom_file_name = str(response_json.get("title")) + \
"_" + youtube_dl_format + "." + youtube_dl_ext
youtube_dl_username = None
youtube_dl_password = None
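    # Illustrative: a replied message such as
    #   https://example.com/video.mp4|myvideo.mp4|user|password
    # is split below into the URL, a custom file name and optional credentials.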
if "|" in youtube_dl_url:
url_parts = youtube_dl_url.split("|")
if len(url_parts) == 2:
youtube_dl_url = url_parts[0]
custom_file_name = url_parts[1]
elif len(url_parts) == 4:
youtube_dl_url = url_parts[0]
custom_file_name = url_parts[1]
youtube_dl_username = url_parts[2]
youtube_dl_password = url_parts[3]
else:
for entity in update.message.reply_to_message.entities:
if entity.type == "text_link":
youtube_dl_url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
youtube_dl_url = youtube_dl_url[o:o + l]
if youtube_dl_url is not None:
youtube_dl_url = youtube_dl_url.strip()
if custom_file_name is not None:
custom_file_name = custom_file_name.strip()
if youtube_dl_username is not None:
youtube_dl_username = youtube_dl_username.strip()
if youtube_dl_password is not None:
youtube_dl_password = youtube_dl_password.strip()
else:
for entity in update.message.reply_to_message.entities:
if entity.type == "text_link":
youtube_dl_url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
youtube_dl_url = youtube_dl_url[o:o + l]
await bot.edit_message_text(
text=Translation.DOWNLOAD_START,
chat_id=update.message.chat.id,
message_id=update.message.message_id
)
description = Translation.CUSTOM_CAPTION_UL_FILE
if "fulltitle" in response_json:
description = response_json["fulltitle"][0:1021]
tmp_directory_for_each_user = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id)
if not os.path.isdir(tmp_directory_for_each_user):
os.makedirs(tmp_directory_for_each_user)
download_directory = tmp_directory_for_each_user + "/" + custom_file_name
command_to_exec = []
if tg_send_type == "audio":
command_to_exec = [
"youtube-dl",
"-c",
"--max-filesize", str(Config.TG_MAX_FILE_SIZE),
"--prefer-ffmpeg",
"--extract-audio",
"--audio-format", youtube_dl_ext,
"--audio-quality", youtube_dl_format,
youtube_dl_url,
"-o", download_directory
]
else:
minus_f_format = youtube_dl_format
if "youtu" in youtube_dl_url:
minus_f_format = youtube_dl_format + "+bestaudio"
command_to_exec = [
"youtube-dl",
"-c",
"--max-filesize", str(Config.TG_MAX_FILE_SIZE),
"--embed-subs",
"-f", minus_f_format,
"--hls-prefer-ffmpeg", youtube_dl_url,
"-o", download_directory
]
if Config.HTTP_PROXY != "":
command_to_exec.append("--proxy")
command_to_exec.append(Config.HTTP_PROXY)
if youtube_dl_username is not None:
command_to_exec.append("--username")
command_to_exec.append(youtube_dl_username)
if youtube_dl_password is not None:
command_to_exec.append("--password")
command_to_exec.append(youtube_dl_password)
command_to_exec.append("--no-warnings")
if "hotstar" in youtube_dl_url:
command_to_exec.append("--geo-bypass-country")
command_to_exec.append("IN")
start = datetime.now()
process = await asyncio.create_subprocess_exec(
*command_to_exec,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
# logger.info(e_response)
# logger.info(t_response)
ad_string_to_replace = "please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output."
if e_response and ad_string_to_replace in e_response:
error_message = e_response.replace(ad_string_to_replace, "")
await bot.edit_message_text(
chat_id=update.message.chat.id,
message_id=update.message.message_id,
text=error_message
)
return False
if t_response:
try:
os.remove(save_ytdl_json_path)
except:
pass
end_one = datetime.now()
        time_taken_for_download = (end_one - start).seconds
file_size = Config.TG_MAX_FILE_SIZE + 1
try:
file_size = os.stat(download_directory).st_size
except FileNotFoundError as exc:
try:
download_directory = os.path.splitext(download_directory)[0] + "." + "mkv"
file_size = os.stat(download_directory).st_size
except Exception as e:
await bot.edit_message_text(
chat_id=update.message.chat.id,
text="Some errors occured while downloading video!",
message_id=update.message.message_id
)
logger.info("FnF error - " + str(e))
return
if file_size > Config.TG_MAX_FILE_SIZE:
await bot.edit_message_text(
chat_id=update.message.chat.id,
text=Translation.RCHD_TG_API_LIMIT.format(time_taken_for_download, humanbytes(file_size)),
message_id=update.message.message_id
)
else:
if Config.SCREENSHOTS:
is_w_f = False
images = await generate_screen_shots(
download_directory,
tmp_directory_for_each_user,
is_w_f,
Config.DEF_WATER_MARK_FILE,
300,
9
)
try:
await bot.edit_message_text(text=Translation.UPLOAD_START, chat_id=update.message.chat.id, message_id=update.message.message_id)
except:
pass
# get the correct width, height, and duration for videos greater than 10MB
width = 0
height = 0
duration = 0
if tg_send_type != "file":
metadata = extractMetadata(createParser(download_directory))
if metadata is not None:
if metadata.has("duration"):
duration = metadata.get('duration').seconds
if os.path.exists(thumb_image_path):
width = 0
height = 0
metadata = extractMetadata(createParser(thumb_image_path))
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
if tg_send_type == "vm":
height = width
Image.open(thumb_image_path).convert(
"RGB").save(thumb_image_path)
img = Image.open(thumb_image_path)
if tg_send_type == "file":
                    # resize() returns a new image; keep the result
                    img = img.resize((320, height))
else:
                    img = img.resize((90, height))
img.save(thumb_image_path, "JPEG")
else:
thumb_image_path = None
start_time = time.time()
if tg_send_type == "audio":
await update.message.reply_to_message.reply_chat_action("upload_audio")
await bot.send_audio(
chat_id=update.message.chat.id,
audio=download_directory,
caption=description,
parse_mode="HTML",
duration=duration,
# performer=response_json["uploader"],
# title=response_json["title"],
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('⚙ Join Updates Channel ⚙', url='https://telegram.me/TMC_BOTX')]]),
thumb=thumb_image_path,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "file":
await update.message.reply_to_message.reply_chat_action("upload_document")
await bot.send_document(
chat_id=update.message.chat.id,
document=download_directory,
thumb=thumb_image_path,
caption=description,
parse_mode="HTML",
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('⚙ Join Updates Channel ⚙', url='https://telegram.me/TMC_BOTX')]]),
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "vm":
await update.message.reply_to_message.reply_chat_action("upload_video_note")
await bot.send_video_note(
chat_id=update.message.chat.id,
video_note=download_directory,
duration=duration,
length=width,
thumb=thumb_image_path,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "video":
await update.message.reply_to_message.reply_chat_action("upload_video")
await bot.send_video(
chat_id=update.message.chat.id,
video=download_directory,
caption=description,
parse_mode="HTML",
duration=duration,
width=width,
height=height,
supports_streaming=True,
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('⚙ Join Updates Channel ⚙', url='https://telegram.me/TMC_BOTX')]]),
thumb=thumb_image_path,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
else:
logger.info("Did this happen? :\\")
end_two = datetime.now()
time_taken_for_upload = (end_two - end_one).seconds
media_album_p = []
if Config.SCREENSHOTS:
if images is not None:
i = 0
caption = ""
if is_w_f:
caption = ""
for image in images:
if os.path.exists(image):
if i == 0:
media_album_p.append(
InputMediaPhoto(
media=image,
caption=caption,
parse_mode="html"
)
)
else:
media_album_p.append(
InputMediaPhoto(
media=image
)
)
i = i + 1
await bot.send_media_group(
chat_id=update.message.chat.id,
disable_notification=True,
reply_to_message_id=update.message.message_id,
media=media_album_p
)
try:
shutil.rmtree(tmp_directory_for_each_user)
except:
pass
try:
os.remove(thumb_image_path)
except:
pass
await bot.edit_message_text(
text=Translation.AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS.format(time_taken_for_download, time_taken_for_upload),
chat_id=update.message.chat.id,
message_id=update.message.message_id,
disable_web_page_preview=True
)
| []
| []
| [
"WEBHOOK"
]
| [] | ["WEBHOOK"] | python | 1 | 0 | |
discovery/engine/abc.py | import abc
import os
class Engine(abc.ABC):
def __init__(self, host: str = "localhost", port: int = 8500, scheme: str = "http"):
self._host = str(os.getenv("CONSUL_HOST", host))
self._port = int(os.getenv("CONSUL_PORT", port))
self._scheme = str(os.getenv("CONSUL_SCHEMA", scheme))
@property
def host(self):
return self._host
@property
def port(self):
return self._port
@property
def scheme(self):
return self._scheme
@property
def url(self):
return f"{self.scheme}://{self.host}:{self.port}"
async def get(self, *args, **kwargs):
raise NotImplementedError
async def put(self, *args, **kwargs):
raise NotImplementedError
async def delete(self, *args, **kwargs):
raise NotImplementedError
async def post(self, *args, **kwargs):
raise NotImplementedError
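# Illustrative sketch (not part of the library): a concrete Engine would
# override the HTTP verbs with a real async client, e.g.:
#
#   class AioHttpEngine(Engine):
#       async def get(self, *args, **kwargs):
#           async with aiohttp.ClientSession() as session:   # assumes aiohttp is available
#               async with session.get(f"{self.url}{kwargs.get('path', '')}") as resp:
#                   return await resp.text()
#
# Host, port and scheme fall back to the CONSUL_HOST, CONSUL_PORT and
# CONSUL_SCHEMA environment variables when not passed explicitly.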
| []
| []
| [
"CONSUL_SCHEMA",
"CONSUL_PORT",
"CONSUL_HOST"
]
| [] | ["CONSUL_SCHEMA", "CONSUL_PORT", "CONSUL_HOST"] | python | 3 | 0 | |
examples/stock/price/getAStockPrice/main.go | package main
import (
"fmt"
"os"
"go.m3o.com"
"go.m3o.com/stock"
)
func main() {
client := m3o.New(os.Getenv("M3O_API_TOKEN"))
rsp, err := client.Stock.Price(&stock.PriceRequest{
Symbol: "AAPL",
})
fmt.Println(rsp, err)
}
| [
"\"M3O_API_TOKEN\""
]
| []
| [
"M3O_API_TOKEN"
]
| [] | ["M3O_API_TOKEN"] | go | 1 | 0 | |
backend/mobile_app_33660/wsgi.py | """
WSGI config for mobile_app_33660 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_app_33660.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cogkge/modules/gnn/helper.py | import numpy as np, sys, os, random, pdb, json, uuid, time, argparse
from pprint import pprint
import logging, logging.config
from collections import defaultdict as ddict
# from ordered_set import OrderedSet
# PyTorch related imports
import torch
from torch.nn import functional as F
from torch.nn.init import xavier_normal_
from torch.utils.data import DataLoader
from torch.nn import Parameter
# from torch_scatter import scatter_add
from .util_scatter import scatter_add
try:
from torch import irfft
from torch import rfft
except ImportError:
from torch.fft import irfft2
from torch.fft import rfft2
def rfft(x, d):
t = rfft2(x, dim=(-d))
return torch.stack((t.real, t.imag), -1)
def irfft(x, d, signal_sizes):
return irfft2(torch.complex(x[:, :, 0], x[:, :, 1]), s=signal_sizes, dim=(-d))
np.set_printoptions(precision=4)
def set_gpu(gpus):
"""
Sets the GPU to be used for the run
Parameters
----------
gpus: List of GPUs to be used for the run
Returns
-------
"""
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = gpus
def get_logger(name, log_dir, config_dir):
"""
Creates a logger object
Parameters
----------
name: Name of the logger file
log_dir: Directory where logger file needs to be stored
config_dir: Directory from where log_config.json needs to be read
Returns
-------
A logger object which writes to both file and stdout
"""
config_dict = json.load(open(config_dir + 'log_config.json'))
config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')
logging.config.dictConfig(config_dict)
logger = logging.getLogger(name)
std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logging.Formatter(std_out_format))
logger.addHandler(consoleHandler)
return logger
def get_combined_results(left_results, right_results):
results = {}
count = float(left_results['count'])
results['left_mr'] = round(left_results['mr'] / count, 5)
results['left_mrr'] = round(left_results['mrr'] / count, 5)
results['right_mr'] = round(right_results['mr'] / count, 5)
results['right_mrr'] = round(right_results['mrr'] / count, 5)
results['mr'] = round((left_results['mr'] + right_results['mr']) / (2 * count), 5)
results['mrr'] = round((left_results['mrr'] + right_results['mrr']) / (2 * count), 5)
for k in range(10):
results['left_hits@{}'.format(k + 1)] = round(left_results['hits@{}'.format(k + 1)] / count, 5)
results['right_hits@{}'.format(k + 1)] = round(right_results['hits@{}'.format(k + 1)] / count, 5)
results['hits@{}'.format(k + 1)] = round(
(left_results['hits@{}'.format(k + 1)] + right_results['hits@{}'.format(k + 1)]) / (2 * count), 5)
return results
def get_param(shape):
    param = Parameter(torch.Tensor(*shape))
xavier_normal_(param.data)
return param
def com_mult(a, b):
r1, i1 = a[..., 0], a[..., 1]
r2, i2 = b[..., 0], b[..., 1]
return torch.stack([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim=-1)
def conj(a):
a[..., 1] = -a[..., 1]
return a
def cconv(a, b):
return irfft(com_mult(rfft(a, 1), rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))
def ccorr(a, b):
return irfft(com_mult(conj(rfft(a, 1)), rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))
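# Illustrative usage: cconv/ccorr implement circular convolution/correlation
# over the last dimension (HolE/CompGCN-style composition); for tensors of
# shape (batch, dim) the output keeps the same shape, e.g.
#   a, b = torch.randn(4, 200), torch.randn(4, 200)
#   out = ccorr(a, b)   # -> shape (4, 200)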
def construct_adj(train_dataset, relation_dict_len):
edge_index, edge_type = [], []
if train_dataset.data.shape[1] == 3: # score_based
for sub, rel, obj in train_dataset.data:
edge_index.append((sub, obj))
edge_type.append(rel)
for sub, rel, obj in train_dataset.data:
edge_index.append((obj, sub))
edge_type.append(rel + relation_dict_len)
else: # classification-based
label = train_dataset.label_data
for j,(sub, rel) in enumerate(train_dataset.data):
for elem in torch.nonzero(label[j]):
e2_idx = elem.item()
edge_index.append((sub,e2_idx))
edge_type.append(rel)
for j,(sub, rel) in enumerate(train_dataset.data):
for elem in torch.nonzero(label[j]):
e2_idx = elem.item()
edge_index.append((e2_idx,sub))
edge_type.append(rel + relation_dict_len)
return edge_index,edge_type | []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
discovery/cmd/openstack-discoverer/main.go | // Copyright © 2018 Heptio
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"flag"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"time"
"github.com/gophercloud/gophercloud"
gopheropenstack "github.com/gophercloud/gophercloud/openstack"
"github.com/heptio/gimbal/discovery/pkg/buildinfo"
"github.com/heptio/gimbal/discovery/pkg/k8s"
localmetrics "github.com/heptio/gimbal/discovery/pkg/metrics"
"github.com/heptio/gimbal/discovery/pkg/openstack"
"github.com/heptio/gimbal/discovery/pkg/signals"
"github.com/heptio/gimbal/discovery/pkg/util"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
)
var (
printVersion bool
gimbalKubeCfgFile string
discoverStackCfgFile string
backendName string
numProcessThreads int
debug bool
reconciliationPeriod time.Duration
httpClientTimeout time.Duration
openstackCertificateAuthorityFile string
prometheusListenPort int
discovererMetrics localmetrics.DiscovererMetrics
log *logrus.Logger
gimbalKubeClientQPS float64
gimbalKubeClientBurst int
)
const (
clusterType = "openstack"
defaultUserDomainName = "Default"
)
func init() {
flag.BoolVar(&printVersion, "version", false, "Show version and quit")
flag.StringVar(&gimbalKubeCfgFile, "gimbal-kubecfg-file", "", "Location of kubecfg file for access to gimbal system kubernetes api, defaults to service account tokens")
flag.StringVar(&backendName, "backend-name", "", "Name of cluster (must be unique)")
flag.IntVar(&numProcessThreads, "num-threads", 2, "Specify number of threads to use when processing queue items.")
flag.BoolVar(&debug, "debug", false, "Enable debug logging.")
flag.DurationVar(&reconciliationPeriod, "reconciliation-period", 30*time.Second, "The interval of time between reconciliation loop runs.")
flag.DurationVar(&httpClientTimeout, "http-client-timeout", 5*time.Second, "The HTTP client request timeout.")
flag.StringVar(&openstackCertificateAuthorityFile, "openstack-certificate-authority", "", "Path to cert file of the OpenStack API certificate authority.")
flag.IntVar(&prometheusListenPort, "prometheus-listen-address", 8080, "The address to listen on for Prometheus HTTP requests")
flag.Float64Var(&gimbalKubeClientQPS, "gimbal-client-qps", 5, "The maximum queries per second (QPS) that can be performed on the Gimbal Kubernetes API server")
flag.IntVar(&gimbalKubeClientBurst, "gimbal-client-burst", 10, "The maximum number of queries that can be performed on the Gimbal Kubernetes API server during a burst")
flag.Parse()
}
func main() {
if printVersion {
fmt.Println("openstack-discoverer")
fmt.Printf("Version: %s\n", buildinfo.Version)
fmt.Printf("Git commit: %s\n", buildinfo.GitSHA)
fmt.Printf("Git tree state: %s\n", buildinfo.GitTreeState)
os.Exit(0)
}
log = logrus.New()
log.Formatter = util.GetFormatter()
if debug {
log.Level = logrus.DebugLevel
}
log.Info("Gimbal OpenStack Discoverer Starting up...")
log.Infof("Version: %s", buildinfo.Version)
log.Infof("Backend name: %s", backendName)
log.Infof("Number of queue worker threads: %d", numProcessThreads)
log.Infof("Reconciliation period: %v", reconciliationPeriod)
log.Infof("Gimbal kubernetes client QPS: %v", gimbalKubeClientQPS)
log.Infof("Gimbal kubernetes client burst: %d", gimbalKubeClientBurst)
// Init prometheus metrics
discovererMetrics = localmetrics.NewMetrics("openstack", backendName)
discovererMetrics.RegisterPrometheus(true)
// Log info metric
discovererMetrics.DiscovererInfoMetric(buildinfo.Version)
// Validate cluster name
if util.IsInvalidBackendName(backendName) {
log.Fatalf("The Kubernetes cluster name must be provided using the `--backend-name` flag or the one passed is invalid")
}
log.Infof("BackendName is: %s", backendName)
gimbalKubeClient, err := k8s.NewClientWithQPS(gimbalKubeCfgFile, log, float32(gimbalKubeClientQPS), gimbalKubeClientBurst)
if err != nil {
log.Fatal("Failed to create kubernetes client", err)
}
username := os.Getenv("OS_USERNAME")
if username == "" {
log.Fatal("The OpenStack username must be provided using the OS_USERNAME environment variable.")
}
password := os.Getenv("OS_PASSWORD")
if password == "" {
log.Fatal("The OpenStack password must be provided using the OS_PASSWORD environment variable.")
}
identityEndpoint := os.Getenv("OS_AUTH_URL")
if identityEndpoint == "" {
log.Fatal("The OpenStack Authentication URL must be provided using the OS_AUTH_URL environment variable.")
}
tenantName := os.Getenv("OS_TENANT_NAME")
if tenantName == "" {
log.Fatal("The OpenStack tenant name must be provided using the OS_TENANT_NAME environment variable")
}
userDomainName := os.Getenv("OS_USER_DOMAIN_NAME")
if userDomainName == "" {
log.Warnf("The OS_USER_DOMAIN_NAME environment variable was not set. Using %q as the OpenStack user domain name.", defaultUserDomainName)
userDomainName = defaultUserDomainName
}
// Create and configure client
osClient, err := gopheropenstack.NewClient(identityEndpoint)
if err != nil {
log.Fatalf("Failed to create OpenStack client: %v", err)
}
transport := &openstack.LogRoundTripper{
RoundTripper: http.DefaultTransport,
Log: log,
BackendName: backendName,
Metrics: &discovererMetrics,
}
if openstackCertificateAuthorityFile != "" {
transport.RoundTripper = httpTransportWithCA(log, openstackCertificateAuthorityFile)
}
osClient.HTTPClient = http.Client{
Transport: transport,
Timeout: httpClientTimeout,
}
osAuthOptions := gophercloud.AuthOptions{
IdentityEndpoint: identityEndpoint,
Username: username,
Password: password,
DomainName: userDomainName,
TenantName: tenantName,
}
if err := gopheropenstack.Authenticate(osClient, osAuthOptions); err != nil {
log.Fatalf("Failed to authenticate with OpenStack: %v", err)
}
identity, err := openstack.NewIdentityV3(osClient)
if err != nil {
log.Fatalf("Failed to create Identity V3 API client: %v", err)
}
lbv2, err := openstack.NewLoadBalancerV2(osClient)
if err != nil {
log.Fatalf("Failed to create Network V2 API client: %v", err)
}
reconciler := openstack.NewReconciler(
backendName,
clusterType,
gimbalKubeClient,
reconciliationPeriod,
lbv2,
identity,
log,
numProcessThreads,
discovererMetrics,
)
stopCh := signals.SetupSignalHandler()
go func() {
// Expose the registered metrics via HTTP.
http.Handle("/metrics", promhttp.HandlerFor(discovererMetrics.Registry, promhttp.HandlerOpts{}))
srv := &http.Server{Addr: fmt.Sprintf(":%d", prometheusListenPort)}
log.Info("Listening for Prometheus metrics on port: ", prometheusListenPort)
if err := srv.ListenAndServe(); err != nil {
log.Fatal(err)
}
<-stopCh
log.Info("Shutting down Prometheus server...")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := srv.Shutdown(ctx); err != nil {
log.Fatal(err)
}
}()
log.Info("Starting reconciler")
go reconciler.Run(stopCh)
<-stopCh
log.Info("Stopped OpenStack discoverer")
}
func httpTransportWithCA(log *logrus.Logger, caFile string) http.RoundTripper {
ca, err := ioutil.ReadFile(caFile)
if err != nil {
log.Fatalf("Error reading certificate authority for OpenStack: %v", err)
}
pool := x509.NewCertPool()
if ok := pool.AppendCertsFromPEM(ca); !ok {
log.Fatalf("Failed to add certificate authority to CA pool. Verify certificate is a valid, PEM-encoded certificate.")
}
// Use default transport with CA
// TODO(abrand): Is there a better way to do this?
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
RootCAs: pool,
},
}
}
| [
"\"OS_USERNAME\"",
"\"OS_PASSWORD\"",
"\"OS_AUTH_URL\"",
"\"OS_TENANT_NAME\"",
"\"OS_USER_DOMAIN_NAME\""
]
| []
| [
"OS_USER_DOMAIN_NAME",
"OS_AUTH_URL",
"OS_PASSWORD",
"OS_USERNAME",
"OS_TENANT_NAME"
]
| [] | ["OS_USER_DOMAIN_NAME", "OS_AUTH_URL", "OS_PASSWORD", "OS_USERNAME", "OS_TENANT_NAME"] | go | 5 | 0 | |
main.go | package main
import (
"os"
"github.com/fatih/color"
"github.com/gruntwork-io/terragrunt/cli"
"github.com/gruntwork-io/terragrunt/errors"
"github.com/gruntwork-io/terragrunt/shell"
"github.com/gruntwork-io/terragrunt/util"
)
// This variable is set at build time using -ldflags parameters. For more info, see:
// http://stackoverflow.com/a/11355611/483528
var VERSION string
// The main entrypoint for Terragrunt
func main() {
defer errors.Recover(checkForErrorsAndExit)
app := cli.CreateTerragruntCli(VERSION, os.Stdout, os.Stderr)
err := app.Run(os.Args)
checkForErrorsAndExit(err)
}
// If there is an error, display it in the console and exit with a non-zero exit code. Otherwise, exit 0.
func checkForErrorsAndExit(err error) {
if err == nil {
os.Exit(0)
} else {
logger := util.CreateLogger("")
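		// Illustrative: running with TERRAGRUNT_DEBUG set to any non-empty value
		// (e.g. `TERRAGRUNT_DEBUG=true terragrunt apply`) takes the stack-trace branch below.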
if os.Getenv("TERRAGRUNT_DEBUG") != "" {
logger.Println(errors.PrintErrorWithStackTrace(err))
} else {
// Log error in red so that it is highlighted
util.ColorLogf(logger, color.New(color.FgRed), err.Error())
}
// exit with the underlying error code
exitCode, exitCodeErr := shell.GetExitCode(err)
if exitCodeErr != nil {
exitCode = 1
logger.Println("Unable to determine underlying exit code, so Terragrunt will exit with error code 1")
}
os.Exit(exitCode)
}
}
| [
"\"TERRAGRUNT_DEBUG\""
]
| []
| [
"TERRAGRUNT_DEBUG"
]
| [] | ["TERRAGRUNT_DEBUG"] | go | 1 | 0 | |
cluster-autoscaler/cloudprovider/azure/azure_manager.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate go run azure_instance_types/gen.go
package azure
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
"k8s.io/klog"
providerazure "k8s.io/legacy-cloud-providers/azure"
azclients "k8s.io/legacy-cloud-providers/azure/clients"
"k8s.io/legacy-cloud-providers/azure/retry"
)
const (
vmTypeVMSS = "vmss"
vmTypeStandard = "standard"
vmTypeAKS = "aks"
scaleToZeroSupportedStandard = false
scaleToZeroSupportedVMSS = true
refreshInterval = 1 * time.Minute
// The path of deployment parameters for standard vm.
deploymentParametersPath = "/var/lib/azure/azuredeploy.parameters.json"
vmssTagMin = "min"
vmssTagMax = "max"
autoDiscovererTypeLabel = "label"
labelAutoDiscovererKeyMinNodes = "min"
labelAutoDiscovererKeyMaxNodes = "max"
metadataURL = "http://169.254.169.254/metadata/instance"
// backoff
backoffRetriesDefault = 6
backoffExponentDefault = 1.5
backoffDurationDefault = 5 // in seconds
backoffJitterDefault = 1.0
// rate limit
rateLimitQPSDefault = 1.0
rateLimitBucketDefault = 5
)
var validLabelAutoDiscovererKeys = strings.Join([]string{
labelAutoDiscovererKeyMinNodes,
labelAutoDiscovererKeyMaxNodes,
}, ", ")
// A labelAutoDiscoveryConfig specifies how to autodiscover Azure scale sets.
type labelAutoDiscoveryConfig struct {
// Key-values to match on.
Selector map[string]string
}
// AzureManager handles Azure communication and data caching.
type AzureManager struct {
config *Config
azClient *azClient
env azure.Environment
asgCache *asgCache
lastRefresh time.Time
asgAutoDiscoverySpecs []labelAutoDiscoveryConfig
explicitlyConfigured map[string]bool
}
// CloudProviderRateLimitConfig indicates the rate limit config for each clients.
type CloudProviderRateLimitConfig struct {
// The default rate limit config options.
azclients.RateLimitConfig
// Rate limit config for each clients. Values would override default settings above.
InterfaceRateLimit *azclients.RateLimitConfig `json:"interfaceRateLimit,omitempty" yaml:"interfaceRateLimit,omitempty"`
VirtualMachineRateLimit *azclients.RateLimitConfig `json:"virtualMachineRateLimit,omitempty" yaml:"virtualMachineRateLimit,omitempty"`
StorageAccountRateLimit *azclients.RateLimitConfig `json:"storageAccountRateLimit,omitempty" yaml:"storageAccountRateLimit,omitempty"`
DiskRateLimit *azclients.RateLimitConfig `json:"diskRateLimit,omitempty" yaml:"diskRateLimit,omitempty"`
VirtualMachineScaleSetRateLimit *azclients.RateLimitConfig `json:"virtualMachineScaleSetRateLimit,omitempty" yaml:"virtualMachineScaleSetRateLimit,omitempty"`
}
// Config holds the configuration parsed from the --cloud-config flag
type Config struct {
CloudProviderRateLimitConfig
Cloud string `json:"cloud" yaml:"cloud"`
Location string `json:"location" yaml:"location"`
TenantID string `json:"tenantId" yaml:"tenantId"`
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"`
VMType string `json:"vmType" yaml:"vmType"`
AADClientID string `json:"aadClientId" yaml:"aadClientId"`
AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"`
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension" yaml:"useManagedIdentityExtension"`
UserAssignedIdentityID string `json:"userAssignedIdentityID" yaml:"userAssignedIdentityID"`
// Configs only for standard vmType (agent pools).
Deployment string `json:"deployment" yaml:"deployment"`
DeploymentParameters map[string]interface{} `json:"deploymentParameters" yaml:"deploymentParameters"`
//Configs only for AKS
ClusterName string `json:"clusterName" yaml:"clusterName"`
//Config only for AKS
NodeResourceGroup string `json:"nodeResourceGroup" yaml:"nodeResourceGroup"`
// VMSS metadata cache TTL in seconds, only applies for vmss type
VmssCacheTTL int64 `json:"vmssCacheTTL" yaml:"vmssCacheTTL"`
// VMSS instances cache TTL in seconds, only applies for vmss type
VmssVmsCacheTTL int64 `json:"vmssVmsCacheTTL" yaml:"vmssVmsCacheTTL"`
// Jitter in seconds subtracted from the VMSS cache TTL before the first refresh
VmssVmsCacheJitter int `json:"vmssVmsCacheJitter" yaml:"vmssVmsCacheJitter"`
// number of latest deployments that will not be deleted
MaxDeploymentsCount int64 `json:"maxDeploymentsCount" yaml:"maxDeploymentsCount"`
// Enable exponential backoff to manage resource request retries
CloudProviderBackoff bool `json:"cloudProviderBackoff,omitempty" yaml:"cloudProviderBackoff,omitempty"`
CloudProviderBackoffRetries int `json:"cloudProviderBackoffRetries,omitempty" yaml:"cloudProviderBackoffRetries,omitempty"`
CloudProviderBackoffExponent float64 `json:"cloudProviderBackoffExponent,omitempty" yaml:"cloudProviderBackoffExponent,omitempty"`
CloudProviderBackoffDuration int `json:"cloudProviderBackoffDuration,omitempty" yaml:"cloudProviderBackoffDuration,omitempty"`
CloudProviderBackoffJitter float64 `json:"cloudProviderBackoffJitter,omitempty" yaml:"cloudProviderBackoffJitter,omitempty"`
}
// InitializeCloudProviderRateLimitConfig initializes rate limit configs.
func InitializeCloudProviderRateLimitConfig(config *CloudProviderRateLimitConfig) {
if config == nil {
return
}
// Assign read rate limit defaults if no configuration was passed in.
if config.CloudProviderRateLimitQPS == 0 {
config.CloudProviderRateLimitQPS = rateLimitQPSDefault
}
if config.CloudProviderRateLimitBucket == 0 {
config.CloudProviderRateLimitBucket = rateLimitBucketDefault
}
	// Assign write rate limit defaults if no configuration was passed in.
if config.CloudProviderRateLimitQPSWrite == 0 {
config.CloudProviderRateLimitQPSWrite = config.CloudProviderRateLimitQPS
}
if config.CloudProviderRateLimitBucketWrite == 0 {
config.CloudProviderRateLimitBucketWrite = config.CloudProviderRateLimitBucket
}
config.InterfaceRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.InterfaceRateLimit)
config.VirtualMachineRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineRateLimit)
config.StorageAccountRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.StorageAccountRateLimit)
config.DiskRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.DiskRateLimit)
config.VirtualMachineScaleSetRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineScaleSetRateLimit)
}
// overrideDefaultRateLimitConfig overrides the default CloudProviderRateLimitConfig.
func overrideDefaultRateLimitConfig(defaults, config *azclients.RateLimitConfig) *azclients.RateLimitConfig {
// If config not set, apply defaults.
if config == nil {
return defaults
}
// Remain disabled if it's set explicitly.
if !config.CloudProviderRateLimit {
return &azclients.RateLimitConfig{CloudProviderRateLimit: false}
}
// Apply default values.
if config.CloudProviderRateLimitQPS == 0 {
config.CloudProviderRateLimitQPS = defaults.CloudProviderRateLimitQPS
}
if config.CloudProviderRateLimitBucket == 0 {
config.CloudProviderRateLimitBucket = defaults.CloudProviderRateLimitBucket
}
if config.CloudProviderRateLimitQPSWrite == 0 {
config.CloudProviderRateLimitQPSWrite = defaults.CloudProviderRateLimitQPSWrite
}
if config.CloudProviderRateLimitBucketWrite == 0 {
config.CloudProviderRateLimitBucketWrite = defaults.CloudProviderRateLimitBucketWrite
}
return config
}
func (cfg *Config) getAzureClientConfig(servicePrincipalToken *adal.ServicePrincipalToken, env *azure.Environment) *azclients.ClientConfig {
azClientConfig := &azclients.ClientConfig{
Location: cfg.Location,
SubscriptionID: cfg.SubscriptionID,
ResourceManagerEndpoint: env.ResourceManagerEndpoint,
Authorizer: autorest.NewBearerAuthorizer(servicePrincipalToken),
Backoff: &retry.Backoff{Steps: 1},
}
if cfg.CloudProviderBackoff {
azClientConfig.Backoff = &retry.Backoff{
Steps: cfg.CloudProviderBackoffRetries,
Factor: cfg.CloudProviderBackoffExponent,
Duration: time.Duration(cfg.CloudProviderBackoffDuration) * time.Second,
Jitter: cfg.CloudProviderBackoffJitter,
}
}
return azClientConfig
}
// TrimSpace removes all leading and trailing white spaces.
func (cfg *Config) TrimSpace() {
cfg.Cloud = strings.TrimSpace(cfg.Cloud)
cfg.Location = strings.TrimSpace(cfg.Location)
cfg.TenantID = strings.TrimSpace(cfg.TenantID)
cfg.SubscriptionID = strings.TrimSpace(cfg.SubscriptionID)
cfg.ResourceGroup = strings.TrimSpace(cfg.ResourceGroup)
cfg.VMType = strings.TrimSpace(cfg.VMType)
cfg.AADClientID = strings.TrimSpace(cfg.AADClientID)
cfg.AADClientSecret = strings.TrimSpace(cfg.AADClientSecret)
cfg.AADClientCertPath = strings.TrimSpace(cfg.AADClientCertPath)
cfg.AADClientCertPassword = strings.TrimSpace(cfg.AADClientCertPassword)
cfg.Deployment = strings.TrimSpace(cfg.Deployment)
cfg.ClusterName = strings.TrimSpace(cfg.ClusterName)
cfg.NodeResourceGroup = strings.TrimSpace(cfg.NodeResourceGroup)
}
// CreateAzureManager creates Azure Manager object to work with Azure.
func CreateAzureManager(configReader io.Reader, discoveryOpts cloudprovider.NodeGroupDiscoveryOptions) (*AzureManager, error) {
var err error
cfg := &Config{}
if configReader != nil {
body, err := ioutil.ReadAll(configReader)
if err != nil {
return nil, fmt.Errorf("failed to read config: %v", err)
}
err = json.Unmarshal(body, cfg)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal config body: %v", err)
}
} else {
cfg.Cloud = os.Getenv("ARM_CLOUD")
cfg.Location = os.Getenv("LOCATION")
cfg.ResourceGroup = os.Getenv("ARM_RESOURCE_GROUP")
cfg.TenantID = os.Getenv("ARM_TENANT_ID")
cfg.AADClientID = os.Getenv("ARM_CLIENT_ID")
cfg.AADClientSecret = os.Getenv("ARM_CLIENT_SECRET")
cfg.VMType = strings.ToLower(os.Getenv("ARM_VM_TYPE"))
cfg.AADClientCertPath = os.Getenv("ARM_CLIENT_CERT_PATH")
cfg.AADClientCertPassword = os.Getenv("ARM_CLIENT_CERT_PASSWORD")
cfg.Deployment = os.Getenv("ARM_DEPLOYMENT")
cfg.ClusterName = os.Getenv("AZURE_CLUSTER_NAME")
cfg.NodeResourceGroup = os.Getenv("AZURE_NODE_RESOURCE_GROUP")
subscriptionID, err := getSubscriptionIdFromInstanceMetadata()
if err != nil {
return nil, err
}
cfg.SubscriptionID = subscriptionID
useManagedIdentityExtensionFromEnv := os.Getenv("ARM_USE_MANAGED_IDENTITY_EXTENSION")
if len(useManagedIdentityExtensionFromEnv) > 0 {
cfg.UseManagedIdentityExtension, err = strconv.ParseBool(useManagedIdentityExtensionFromEnv)
if err != nil {
return nil, err
}
}
userAssignedIdentityIDFromEnv := os.Getenv("ARM_USER_ASSIGNED_IDENTITY_ID")
if userAssignedIdentityIDFromEnv != "" {
cfg.UserAssignedIdentityID = userAssignedIdentityIDFromEnv
}
if vmssCacheTTL := os.Getenv("AZURE_VMSS_CACHE_TTL"); vmssCacheTTL != "" {
cfg.VmssCacheTTL, err = strconv.ParseInt(vmssCacheTTL, 10, 0)
if err != nil {
return nil, fmt.Errorf("failed to parse AZURE_VMSS_CACHE_TTL %q: %v", vmssCacheTTL, err)
}
}
if vmssVmsCacheTTL := os.Getenv("AZURE_VMSS_VMS_CACHE_TTL"); vmssVmsCacheTTL != "" {
cfg.VmssVmsCacheTTL, err = strconv.ParseInt(vmssVmsCacheTTL, 10, 0)
if err != nil {
return nil, fmt.Errorf("failed to parse AZURE_VMSS_VMS_CACHE_TTL %q: %v", vmssVmsCacheTTL, err)
}
}
if vmssVmsCacheJitter := os.Getenv("AZURE_VMSS_VMS_CACHE_JITTER"); vmssVmsCacheJitter != "" {
cfg.VmssVmsCacheJitter, err = strconv.Atoi(vmssVmsCacheJitter)
if err != nil {
return nil, fmt.Errorf("failed to parse AZURE_VMSS_VMS_CACHE_JITTER %q: %v", vmssVmsCacheJitter, err)
}
}
if threshold := os.Getenv("AZURE_MAX_DEPLOYMENT_COUNT"); threshold != "" {
cfg.MaxDeploymentsCount, err = strconv.ParseInt(threshold, 10, 0)
if err != nil {
return nil, fmt.Errorf("failed to parse AZURE_MAX_DEPLOYMENT_COUNT %q: %v", threshold, err)
}
}
if enableBackoff := os.Getenv("ENABLE_BACKOFF"); enableBackoff != "" {
cfg.CloudProviderBackoff, err = strconv.ParseBool(enableBackoff)
if err != nil {
return nil, fmt.Errorf("failed to parse ENABLE_BACKOFF %q: %v", enableBackoff, err)
}
}
if cfg.CloudProviderBackoff {
if backoffRetries := os.Getenv("BACKOFF_RETRIES"); backoffRetries != "" {
retries, err := strconv.ParseInt(backoffRetries, 10, 0)
if err != nil {
return nil, fmt.Errorf("failed to parse BACKOFF_RETRIES %q: %v", retries, err)
}
cfg.CloudProviderBackoffRetries = int(retries)
} else {
cfg.CloudProviderBackoffRetries = backoffRetriesDefault
}
if backoffExponent := os.Getenv("BACKOFF_EXPONENT"); backoffExponent != "" {
cfg.CloudProviderBackoffExponent, err = strconv.ParseFloat(backoffExponent, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse BACKOFF_EXPONENT %q: %v", backoffExponent, err)
}
} else {
cfg.CloudProviderBackoffExponent = backoffExponentDefault
}
if backoffDuration := os.Getenv("BACKOFF_DURATION"); backoffDuration != "" {
duration, err := strconv.ParseInt(backoffDuration, 10, 0)
if err != nil {
return nil, fmt.Errorf("failed to parse BACKOFF_DURATION %q: %v", backoffDuration, err)
}
cfg.CloudProviderBackoffDuration = int(duration)
} else {
cfg.CloudProviderBackoffDuration = backoffDurationDefault
}
if backoffJitter := os.Getenv("BACKOFF_JITTER"); backoffJitter != "" {
cfg.CloudProviderBackoffJitter, err = strconv.ParseFloat(backoffJitter, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse BACKOFF_JITTER %q: %v", backoffJitter, err)
}
} else {
cfg.CloudProviderBackoffJitter = backoffJitterDefault
}
}
}
cfg.TrimSpace()
if cloudProviderRateLimit := os.Getenv("CLOUD_PROVIDER_RATE_LIMIT"); cloudProviderRateLimit != "" {
cfg.CloudProviderRateLimit, err = strconv.ParseBool(cloudProviderRateLimit)
if err != nil {
return nil, fmt.Errorf("failed to parse CLOUD_PROVIDER_RATE_LIMIT: %q, %v", cloudProviderRateLimit, err)
}
}
InitializeCloudProviderRateLimitConfig(&cfg.CloudProviderRateLimitConfig)
// Defaulting vmType to vmss.
if cfg.VMType == "" {
cfg.VMType = vmTypeVMSS
}
// Read parameters from deploymentParametersPath if it is not set.
if cfg.VMType == vmTypeStandard && len(cfg.DeploymentParameters) == 0 {
parameters, err := readDeploymentParameters(deploymentParametersPath)
if err != nil {
klog.Errorf("readDeploymentParameters failed with error: %v", err)
return nil, err
}
cfg.DeploymentParameters = parameters
}
if cfg.MaxDeploymentsCount == 0 {
cfg.MaxDeploymentsCount = int64(defaultMaxDeploymentsCount)
}
// Defaulting env to Azure Public Cloud.
env := azure.PublicCloud
if cfg.Cloud != "" {
env, err = azure.EnvironmentFromName(cfg.Cloud)
if err != nil {
return nil, err
}
}
if err := validateConfig(cfg); err != nil {
return nil, err
}
klog.Infof("Starting azure manager with subscription ID %q", cfg.SubscriptionID)
azClient, err := newAzClient(cfg, &env)
if err != nil {
return nil, err
}
// Create azure manager.
manager := &AzureManager{
config: cfg,
env: env,
azClient: azClient,
explicitlyConfigured: make(map[string]bool),
}
cache, err := newAsgCache()
if err != nil {
return nil, err
}
manager.asgCache = cache
specs, err := parseLabelAutoDiscoverySpecs(discoveryOpts)
if err != nil {
return nil, err
}
manager.asgAutoDiscoverySpecs = specs
if err := manager.fetchExplicitAsgs(discoveryOpts.NodeGroupSpecs); err != nil {
return nil, err
}
if err := manager.forceRefresh(); err != nil {
return nil, err
}
return manager, nil
}
func (m *AzureManager) fetchExplicitAsgs(specs []string) error {
changed := false
for _, spec := range specs {
asg, err := m.buildAsgFromSpec(spec)
if err != nil {
return fmt.Errorf("failed to parse node group spec: %v", err)
}
if m.RegisterAsg(asg) {
changed = true
}
m.explicitlyConfigured[asg.Id()] = true
}
if changed {
if err := m.regenerateCache(); err != nil {
return err
}
}
return nil
}
func (m *AzureManager) buildAsgFromSpec(spec string) (cloudprovider.NodeGroup, error) {
scaleToZeroSupported := scaleToZeroSupportedStandard
if strings.EqualFold(m.config.VMType, vmTypeVMSS) {
scaleToZeroSupported = scaleToZeroSupportedVMSS
}
s, err := dynamic.SpecFromString(spec, scaleToZeroSupported)
if err != nil {
return nil, fmt.Errorf("failed to parse node group spec: %v", err)
}
switch m.config.VMType {
case vmTypeStandard:
return NewAgentPool(s, m)
case vmTypeVMSS:
return NewScaleSet(s, m, -1)
case vmTypeAKS:
return NewAKSAgentPool(s, m)
default:
return nil, fmt.Errorf("vmtype %s not supported", m.config.VMType)
}
}
// Refresh is called before every main loop and can be used to dynamically update cloud provider state.
// In particular the list of node groups returned by NodeGroups can change as a result of CloudProvider.Refresh().
func (m *AzureManager) Refresh() error {
if m.lastRefresh.Add(refreshInterval).After(time.Now()) {
return nil
}
return m.forceRefresh()
}
func (m *AzureManager) forceRefresh() error {
// TODO: Refactor some of this logic out of forceRefresh and
// consider merging the list call with the Nodes() call
if err := m.fetchAutoAsgs(); err != nil {
klog.Errorf("Failed to fetch ASGs: %v", err)
}
if err := m.regenerateCache(); err != nil {
klog.Errorf("Failed to regenerate ASG cache: %v", err)
return err
}
m.lastRefresh = time.Now()
klog.V(2).Infof("Refreshed ASG list, next refresh after %v", m.lastRefresh.Add(refreshInterval))
return nil
}
// Fetch automatically discovered ASGs. These ASGs should be unregistered if
// they no longer exist in Azure.
func (m *AzureManager) fetchAutoAsgs() error {
groups, err := m.getFilteredAutoscalingGroups(m.asgAutoDiscoverySpecs)
if err != nil {
return fmt.Errorf("cannot autodiscover ASGs: %s", err)
}
changed := false
exists := make(map[string]bool)
for _, asg := range groups {
asgID := asg.Id()
exists[asgID] = true
if m.explicitlyConfigured[asgID] {
// This ASG was explicitly configured, but would also be
// autodiscovered. We want the explicitly configured min and max
// nodes to take precedence.
klog.V(3).Infof("Ignoring explicitly configured ASG %s for autodiscovery.", asg.Id())
continue
}
if m.RegisterAsg(asg) {
klog.V(3).Infof("Autodiscovered ASG %s using tags %v", asg.Id(), m.asgAutoDiscoverySpecs)
changed = true
}
}
for _, asg := range m.getAsgs() {
asgID := asg.Id()
if !exists[asgID] && !m.explicitlyConfigured[asgID] {
m.UnregisterAsg(asg)
changed = true
}
}
if changed {
if err := m.regenerateCache(); err != nil {
return err
}
}
return nil
}
func (m *AzureManager) getAsgs() []cloudprovider.NodeGroup {
return m.asgCache.get()
}
// RegisterAsg registers an ASG.
func (m *AzureManager) RegisterAsg(asg cloudprovider.NodeGroup) bool {
return m.asgCache.Register(asg)
}
// UnregisterAsg unregisters an ASG.
func (m *AzureManager) UnregisterAsg(asg cloudprovider.NodeGroup) bool {
return m.asgCache.Unregister(asg)
}
// GetAsgForInstance returns AsgConfig of the given Instance
func (m *AzureManager) GetAsgForInstance(instance *azureRef) (cloudprovider.NodeGroup, error) {
return m.asgCache.FindForInstance(instance, m.config.VMType)
}
func (m *AzureManager) regenerateCache() error {
m.asgCache.mutex.Lock()
defer m.asgCache.mutex.Unlock()
return m.asgCache.regenerate()
}
// Cleanup the ASG cache.
func (m *AzureManager) Cleanup() {
m.asgCache.Cleanup()
}
func (m *AzureManager) getFilteredAutoscalingGroups(filter []labelAutoDiscoveryConfig) (asgs []cloudprovider.NodeGroup, err error) {
if len(filter) == 0 {
return nil, nil
}
switch m.config.VMType {
case vmTypeVMSS:
asgs, err = m.listScaleSets(filter)
case vmTypeStandard:
asgs, err = m.listAgentPools(filter)
case vmTypeAKS:
return nil, nil
default:
err = fmt.Errorf("vmType %q not supported", m.config.VMType)
}
if err != nil {
return nil, err
}
return asgs, nil
}
// listScaleSets gets a list of scale sets and instanceIDs.
func (m *AzureManager) listScaleSets(filter []labelAutoDiscoveryConfig) ([]cloudprovider.NodeGroup, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, rerr := m.azClient.virtualMachineScaleSetsClient.List(ctx, m.config.ResourceGroup)
if rerr != nil {
klog.Errorf("VirtualMachineScaleSetsClient.List for %v failed: %v", m.config.ResourceGroup, rerr)
return nil, rerr.Error()
}
var asgs []cloudprovider.NodeGroup
for _, scaleSet := range result {
if len(filter) > 0 {
if scaleSet.Tags == nil || len(scaleSet.Tags) == 0 {
continue
}
if !matchDiscoveryConfig(scaleSet.Tags, filter) {
continue
}
}
spec := &dynamic.NodeGroupSpec{
Name: *scaleSet.Name,
MinSize: 1,
MaxSize: -1,
SupportScaleToZero: scaleToZeroSupportedVMSS,
}
if val, ok := scaleSet.Tags["min"]; ok {
if minSize, err := strconv.Atoi(*val); err == nil {
spec.MinSize = minSize
} else {
return asgs, fmt.Errorf("invalid minimum size specified for vmss: %s", err)
}
} else {
return asgs, fmt.Errorf("no minimum size specified for vmss: %s", *scaleSet.Name)
}
if spec.MinSize < 0 {
return asgs, fmt.Errorf("minimum size must be a non-negative number of nodes")
}
if val, ok := scaleSet.Tags["max"]; ok {
if maxSize, err := strconv.Atoi(*val); err == nil {
spec.MaxSize = maxSize
} else {
return asgs, fmt.Errorf("invalid maximum size specified for vmss: %s", err)
}
} else {
return asgs, fmt.Errorf("no maximum size specified for vmss: %s", *scaleSet.Name)
}
if spec.MaxSize < 1 {
return asgs, fmt.Errorf("maximum size must be greater than 1 node")
}
if spec.MaxSize < spec.MinSize {
return asgs, fmt.Errorf("maximum size must be greater than minimum size")
}
curSize := int64(-1)
if scaleSet.Sku != nil && scaleSet.Sku.Capacity != nil {
curSize = *scaleSet.Sku.Capacity
}
asg, err := NewScaleSet(spec, m, curSize)
if err != nil {
klog.Warningf("ignoring nodegroup %q %s", *scaleSet.Name, err)
continue
}
asgs = append(asgs, asg)
}
return asgs, nil
}
// listAgentPools gets a list of agent pools and instanceIDs.
// Note: filter won't take effect for agent pools.
func (m *AzureManager) listAgentPools(filter []labelAutoDiscoveryConfig) (asgs []cloudprovider.NodeGroup, err error) {
ctx, cancel := getContextWithCancel()
defer cancel()
deploy, err := m.azClient.deploymentsClient.Get(ctx, m.config.ResourceGroup, m.config.Deployment)
if err != nil {
klog.Errorf("deploymentsClient.Get(%s, %s) failed: %v", m.config.ResourceGroup, m.config.Deployment, err)
return nil, err
}
parameters := deploy.Properties.Parameters.(map[string]interface{})
for k := range parameters {
if k == "masterVMSize" || !strings.HasSuffix(k, "VMSize") {
continue
}
poolName := strings.TrimRight(k, "VMSize")
spec := &dynamic.NodeGroupSpec{
Name: poolName,
MinSize: 1,
MaxSize: -1,
SupportScaleToZero: scaleToZeroSupportedStandard,
}
asg, _ := NewAgentPool(spec, m)
asgs = append(asgs, asg)
}
return asgs, nil
}
// ParseLabelAutoDiscoverySpecs returns any provided NodeGroupAutoDiscoverySpecs
// parsed into configuration appropriate for ASG autodiscovery.
func parseLabelAutoDiscoverySpecs(o cloudprovider.NodeGroupDiscoveryOptions) ([]labelAutoDiscoveryConfig, error) {
cfgs := make([]labelAutoDiscoveryConfig, len(o.NodeGroupAutoDiscoverySpecs))
var err error
for i, spec := range o.NodeGroupAutoDiscoverySpecs {
cfgs[i], err = parseLabelAutoDiscoverySpec(spec)
if err != nil {
return nil, err
}
}
return cfgs, nil
}
// parseLabelAutoDiscoverySpec parses a single spec and returns the corresponding node group spec.
func parseLabelAutoDiscoverySpec(spec string) (labelAutoDiscoveryConfig, error) {
cfg := labelAutoDiscoveryConfig{
Selector: make(map[string]string),
}
tokens := strings.Split(spec, ":")
if len(tokens) != 2 {
return cfg, fmt.Errorf("spec \"%s\" should be discoverer:key=value,key=value", spec)
}
discoverer := tokens[0]
if discoverer != autoDiscovererTypeLabel {
return cfg, fmt.Errorf("unsupported discoverer specified: %s", discoverer)
}
for _, arg := range strings.Split(tokens[1], ",") {
kv := strings.Split(arg, "=")
if len(kv) != 2 {
return cfg, fmt.Errorf("invalid key=value pair %s", kv)
}
k, v := kv[0], kv[1]
if k == "" || v == "" {
return cfg, fmt.Errorf("empty value not allowed in key=value tag pairs")
}
cfg.Selector[k] = v
}
return cfg, nil
}
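// Illustrative: a spec such as "label:cluster-autoscaler-enabled=true,environment=dev"
// selects scale sets whose tags match both key/value pairs; per-group min/max sizes
// are then read from each scale set's own "min"/"max" tags (see listScaleSets above).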
// getSubscriptionId reads the Subscription ID from the instance metadata.
func getSubscriptionIdFromInstanceMetadata() (string, error) {
subscriptionID, present := os.LookupEnv("ARM_SUBSCRIPTION_ID")
if !present {
metadataService, err := providerazure.NewInstanceMetadataService(metadataURL)
if err != nil {
return "", err
}
metadata, err := metadataService.GetMetadata(0)
if err != nil {
return "", err
}
return metadata.Compute.SubscriptionID, nil
}
return subscriptionID, nil
}
| [
"\"ARM_CLOUD\"",
"\"LOCATION\"",
"\"ARM_RESOURCE_GROUP\"",
"\"ARM_TENANT_ID\"",
"\"ARM_CLIENT_ID\"",
"\"ARM_CLIENT_SECRET\"",
"\"ARM_VM_TYPE\"",
"\"ARM_CLIENT_CERT_PATH\"",
"\"ARM_CLIENT_CERT_PASSWORD\"",
"\"ARM_DEPLOYMENT\"",
"\"AZURE_CLUSTER_NAME\"",
"\"AZURE_NODE_RESOURCE_GROUP\"",
"\"ARM_USE_MANAGED_IDENTITY_EXTENSION\"",
"\"ARM_USER_ASSIGNED_IDENTITY_ID\"",
"\"AZURE_VMSS_CACHE_TTL\"",
"\"AZURE_VMSS_VMS_CACHE_TTL\"",
"\"AZURE_VMSS_VMS_CACHE_JITTER\"",
"\"AZURE_MAX_DEPLOYMENT_COUNT\"",
"\"ENABLE_BACKOFF\"",
"\"BACKOFF_RETRIES\"",
"\"BACKOFF_EXPONENT\"",
"\"BACKOFF_DURATION\"",
"\"BACKOFF_JITTER\"",
"\"CLOUD_PROVIDER_RATE_LIMIT\""
]
| []
| [
"ARM_TENANT_ID",
"AZURE_VMSS_CACHE_TTL",
"BACKOFF_DURATION",
"BACKOFF_EXPONENT",
"ARM_DEPLOYMENT",
"ARM_CLIENT_CERT_PASSWORD",
"AZURE_CLUSTER_NAME",
"ARM_USER_ASSIGNED_IDENTITY_ID",
"ARM_VM_TYPE",
"AZURE_VMSS_VMS_CACHE_TTL",
"ARM_CLIENT_SECRET",
"ARM_USE_MANAGED_IDENTITY_EXTENSION",
"BACKOFF_RETRIES",
"ARM_CLOUD",
"CLOUD_PROVIDER_RATE_LIMIT",
"ARM_CLIENT_CERT_PATH",
"AZURE_MAX_DEPLOYMENT_COUNT",
"LOCATION",
"ARM_CLIENT_ID",
"ARM_RESOURCE_GROUP",
"BACKOFF_JITTER",
"ENABLE_BACKOFF",
"AZURE_NODE_RESOURCE_GROUP",
"AZURE_VMSS_VMS_CACHE_JITTER"
]
| [] | ["ARM_TENANT_ID", "AZURE_VMSS_CACHE_TTL", "BACKOFF_DURATION", "BACKOFF_EXPONENT", "ARM_DEPLOYMENT", "ARM_CLIENT_CERT_PASSWORD", "AZURE_CLUSTER_NAME", "ARM_USER_ASSIGNED_IDENTITY_ID", "ARM_VM_TYPE", "AZURE_VMSS_VMS_CACHE_TTL", "ARM_CLIENT_SECRET", "ARM_USE_MANAGED_IDENTITY_EXTENSION", "BACKOFF_RETRIES", "ARM_CLOUD", "CLOUD_PROVIDER_RATE_LIMIT", "ARM_CLIENT_CERT_PATH", "AZURE_MAX_DEPLOYMENT_COUNT", "LOCATION", "ARM_CLIENT_ID", "ARM_RESOURCE_GROUP", "BACKOFF_JITTER", "ENABLE_BACKOFF", "AZURE_NODE_RESOURCE_GROUP", "AZURE_VMSS_VMS_CACHE_JITTER"] | go | 24 | 0 | |
pkg/controller/iotconfig/adapter.go | /*
* Copyright 2019, EnMasse authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package iotconfig
import (
"context"
"fmt"
"os"
"strings"
"github.com/enmasseproject/enmasse/pkg/util/cchange"
"github.com/enmasseproject/enmasse/pkg/util"
iotv1alpha1 "github.com/enmasseproject/enmasse/pkg/apis/iot/v1alpha1"
"github.com/enmasseproject/enmasse/pkg/util/install"
"github.com/enmasseproject/enmasse/pkg/util/recon"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
type adapter struct {
Name string
AdapterConfigProvider func(*iotv1alpha1.IoTConfig) *iotv1alpha1.AdapterConfig
}
var adapters = []adapter{
{
Name: "mqtt",
AdapterConfigProvider: func(config *iotv1alpha1.IoTConfig) *iotv1alpha1.AdapterConfig {
return &config.Spec.AdaptersConfig.MqttAdapterConfig.AdapterConfig
},
},
{
Name: "http",
AdapterConfigProvider: func(config *iotv1alpha1.IoTConfig) *iotv1alpha1.AdapterConfig {
return &config.Spec.AdaptersConfig.HttpAdapterConfig.AdapterConfig
},
},
{
Name: "lorawan",
AdapterConfigProvider: func(config *iotv1alpha1.IoTConfig) *iotv1alpha1.AdapterConfig {
return &config.Spec.AdaptersConfig.LoraWanAdapterConfig.AdapterConfig
},
},
{
Name: "sigfox",
AdapterConfigProvider: func(config *iotv1alpha1.IoTConfig) *iotv1alpha1.AdapterConfig {
return &config.Spec.AdaptersConfig.SigfoxAdapterConfig.AdapterConfig
},
},
}
func (a adapter) IsEnabled(config *iotv1alpha1.IoTConfig) bool {
// find adapter config
adapterConfig := a.AdapterConfigProvider(config)
if adapterConfig != nil && adapterConfig.Enabled != nil {
return *adapterConfig.Enabled
}
// return setting from env-var
return globalIsAdapterEnabled(a.Name)
}
func findAdapter(name string) adapter {
for _, a := range adapters {
if a.Name == name {
return a
}
}
panic(fmt.Errorf("failed to find adapter '%s'", name))
}
func (r *ReconcileIoTConfig) addQpidProxySetup(config *iotv1alpha1.IoTConfig, deployment *appsv1.Deployment, containers iotv1alpha1.CommonAdapterContainers) error {
err := install.ApplyContainerWithError(deployment, "qdr-cfg", func(container *corev1.Container) error {
if err := install.SetContainerImage(container, "iot-proxy-configurator", config); err != nil {
return err
}
// set default resource limits
container.Resources = corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceMemory: *resource.NewQuantity(64*1024*1024 /* 64Mi */, resource.BinarySI),
},
}
if len(container.VolumeMounts) != 1 {
container.VolumeMounts = make([]corev1.VolumeMount, 1)
}
container.VolumeMounts[0].Name = "qdr-tmp-certs"
container.VolumeMounts[0].MountPath = "/var/qdr-certs"
container.VolumeMounts[0].ReadOnly = false
// apply container options
applyContainerConfig(container, containers.ProxyConfigurator)
// return
return nil
})
if err != nil {
return err
}
err = install.ApplyContainerWithError(deployment, "qdr-proxy", func(container *corev1.Container) error {
if err := install.SetContainerImage(container, "router", config); err != nil {
return err
}
container.Args = []string{"/sbin/qdrouterd", "-c", "/etc/qdr/config/qdrouterd.conf"}
// set default resource limits
container.Resources = corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceMemory: *resource.NewQuantity(128*1024*1024 /* 128Mi */, resource.BinarySI),
},
}
if len(container.VolumeMounts) != 2 {
container.VolumeMounts = make([]corev1.VolumeMount, 2)
}
container.VolumeMounts[0].Name = "qdr-tmp-certs"
container.VolumeMounts[0].MountPath = "/var/qdr-certs"
container.VolumeMounts[0].ReadOnly = true
container.VolumeMounts[1].Name = "qdr-proxy-config"
container.VolumeMounts[1].MountPath = "/etc/qdr/config"
container.VolumeMounts[1].ReadOnly = true
// apply container options
applyContainerConfig(container, containers.Proxy)
// return
return nil
})
if err != nil {
return err
}
install.ApplyConfigMapVolume(deployment, "qdr-proxy-config", "qdr-proxy-configurator")
install.ApplyEmptyDirVolume(deployment, "qdr-tmp-certs")
return nil
}
func AppendHonoAdapterEnvs(config *iotv1alpha1.IoTConfig, container *corev1.Container, adapter adapter) error {
username := adapter.Name + "-adapter@HONO"
password := config.Status.Adapters[adapter.Name].InterServicePassword
container.Env = append(container.Env, []corev1.EnvVar{
{Name: "HONO_MESSAGING_HOST", Value: "localhost"},
{Name: "HONO_MESSAGING_PORT", Value: "5672"},
{Name: "HONO_COMMAND_HOST", Value: "localhost"},
{Name: "HONO_COMMAND_PORT", Value: "5672"},
{Name: "HONO_REGISTRATION_HOST", Value: FullHostNameForEnvVar("iot-device-registry")},
{Name: "HONO_REGISTRATION_USERNAME", Value: username},
{Name: "HONO_REGISTRATION_PASSWORD", Value: password},
{Name: "HONO_CREDENTIALS_HOST", Value: FullHostNameForEnvVar("iot-device-registry")},
{Name: "HONO_CREDENTIALS_USERNAME", Value: username},
{Name: "HONO_CREDENTIALS_PASSWORD", Value: password},
{Name: "HONO_DEVICE_CONNECTION_HOST", Value: FullHostNameForEnvVar("iot-device-registry")},
{Name: "HONO_DEVICE_CONNECTION_USERNAME", Value: username},
{Name: "HONO_DEVICE_CONNECTION_PASSWORD", Value: password},
{Name: "HONO_TENANT_HOST", Value: FullHostNameForEnvVar("iot-tenant-service")},
{Name: "HONO_TENANT_USERNAME", Value: username},
{Name: "HONO_TENANT_PASSWORD", Value: password},
}...)
if err := AppendTrustStores(config, container, []string{
"HONO_CREDENTIALS_TRUST_STORE_PATH",
"HONO_DEVICE_CONNECTION_TRUST_STORE_PATH",
"HONO_REGISTRATION_TRUST_STORE_PATH",
"HONO_TENANT_TRUST_STORE_PATH",
}); err != nil {
return err
}
return nil
}
func (r *ReconcileIoTConfig) processQdrProxyConfig(ctx context.Context, config *iotv1alpha1.IoTConfig, configCtx *cchange.ConfigChangeRecorder) (reconcile.Result, error) {
rc := &recon.ReconcileContext{}
rc.ProcessSimple(func() error {
return r.processConfigMap(ctx, "qdr-proxy-configurator", config, false, func(config *iotv1alpha1.IoTConfig, configMap *corev1.ConfigMap) error {
if configMap.Data == nil {
configMap.Data = make(map[string]string)
}
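// Minimal router configuration: standalone mode with a localhost-only listener on
// port 5672, matching the HONO_MESSAGING_HOST/PORT values injected into the
// adapters; defaultDistribution "unavailable" means addresses not explicitly set
// up by the proxy configurator are rejected.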
configMap.Data["qdrouterd.conf"] = `
router {
mode: standalone
id: Router.Proxy
defaultDistribution: unavailable
}
listener {
host: localhost
port: 5672
saslMechanisms: ANONYMOUS
}
`
configCtx.AddString(configMap.Data["qdrouterd.conf"])
return nil
})
})
return rc.Result()
}
func hasEndpointKeyAndCert(endpoint *iotv1alpha1.AdapterEndpointConfig) bool {
return endpoint != nil &&
endpoint.KeyCertificateStrategy != nil &&
endpoint.KeyCertificateStrategy.Key != nil &&
endpoint.KeyCertificateStrategy.Certificate != nil
}
func applyAdapterEndpointDeployment(endpoint *iotv1alpha1.AdapterEndpointConfig, deployment *appsv1.Deployment, endpointSecretName string) error {
if endpoint != nil && endpoint.SecretNameStrategy != nil {
// use provided secret
install.ApplySecretVolume(deployment, "tls", endpoint.SecretNameStrategy.TlsSecretName)
} else if endpoint != nil && endpoint.KeyCertificateStrategy != nil {
install.ApplySecretVolume(deployment, "tls", endpointSecretName+"-"+endpoint.KeyCertificateStrategy.HashString())
} else {
// use service CA as fallback
if !util.IsOpenshift() {
return fmt.Errorf("not running in OpenShift, unable to use service CA, you need to provide a protocol adapter endpoint key/certificate")
}
install.ApplySecretVolume(deployment, "tls", endpointSecretName+"-tls")
}
return nil
}
func applyAdapterEndpointService(endpoint *iotv1alpha1.AdapterEndpointConfig, service *corev1.Service, endpointSecretName string) error {
if service.Annotations != nil {
delete(service.Annotations, "service.alpha.openshift.io/serving-cert-secret-name")
}
if endpoint != nil && endpoint.SecretNameStrategy != nil {
// use provided secret
} else if endpoint != nil && endpoint.KeyCertificateStrategy != nil {
// use provided key/cert
} else {
if !util.IsOpenshift() {
return fmt.Errorf("not running in OpenShift, unable to use service CA, you need to provide a protocol adapter endpoint key/certificate")
}
// use service CA as fallback
if service.Annotations == nil {
service.Annotations = make(map[string]string)
}
service.Annotations["service.alpha.openshift.io/serving-cert-secret-name"] = endpointSecretName + "-tls"
}
return nil
}
func (r *ReconcileIoTConfig) reconcileEndpointKeyCertificateSecret(ctx context.Context, config *iotv1alpha1.IoTConfig, endpoint *iotv1alpha1.AdapterEndpointConfig, adapterName string, delete bool) error {
if delete || !hasEndpointKeyAndCert(endpoint) {
// cleanup previous secrets
return r.cleanupSecrets(ctx, config, adapterName)
}
kc := endpoint.KeyCertificateStrategy
name := adapterName + "-" + kc.HashString()
return r.processSecret(ctx, name, config, false, func(config *iotv1alpha1.IoTConfig, secret *corev1.Secret) error {
// cleanup previous secrets
if err := r.cleanupSecrets(ctx, config, adapterName); err != nil {
return err
}
install.ApplyDefaultLabels(&secret.ObjectMeta, "iot", adapterName+"tls")
install.ApplyTlsSecret(secret, kc.Key, kc.Certificate)
return nil
})
}
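// globalIsAdapterEnabled falls back to an environment variable of the form
// IOT_ADAPTER_<NAME>_ENABLED (for example IOT_ADAPTER_LORAWAN_ENABLED); an unset
// or "true" value counts as enabled, so adapters are enabled by default.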
func globalIsAdapterEnabled(name string) bool {
v := os.Getenv("IOT_ADAPTER_" + strings.ToUpper(name) + "_ENABLED")
return v == "" || v == "true"
}
| [
"\"IOT_ADAPTER_\" + strings.ToUpper(name"
]
| []
| [
"IOT_ADAPTER_\" + strings.ToUpper(nam"
]
| [] | ["IOT_ADAPTER_\" + strings.ToUpper(nam"] | go | 1 | 0 | |
tests/test_hub.py | import os
from unittest import TestCase
import pandas as pd
from huggingface_hub import HfFolder
from hf_benchmarks import extract_tags, get_benchmark_repos
from .testing_utils import (
BOGUS_BENCHMARK_NAME,
DUMMY_BENCHMARK_NAME,
DUMMY_EVALUATION_ID,
DUMMY_MODEL_ID,
DUMMY_PREDICTION_ID,
)
class ExtractTagsTest(TestCase):
def test_no_tags(self):
repo_info = {"modelId": "bert-base-uncased"}
tags = extract_tags(repo_info)
self.assertDictEqual(tags, {})
def test_no_keyed_tags(self):
repo_info = {"modelId": "bert-base-uncased", "tags": ["exbert"]}
tags = extract_tags(repo_info)
self.assertDictEqual(tags, {})
def test_keyed_tags(self):
repo_info = {"modelId": "bert-base-uncased", "tags": ["benchmark:glue", "dataset:wikipedia"]}
tags = extract_tags(repo_info)
self.assertDictEqual(tags, {"benchmark": "glue", "dataset": "wikipedia"})
def test_keyed_tags_with_multiple_colons(self):
repo_info = {"modelId": "bert-base-uncased", "tags": ["benchmark:glue:superglue", "dataset:wikipedia"]}
tags = extract_tags(repo_info)
self.assertDictEqual(tags, {"benchmark": "glue:superglue", "dataset": "wikipedia"})
def test_mixed_tags(self):
repo_info = {"modelId": "bert-base-uncased", "tags": ["exbert", "benchmark:glue", "dataset:wikipedia"]}
tags = extract_tags(repo_info)
self.assertDictEqual(tags, {"benchmark": "glue", "dataset": "wikipedia"})
class GetBenchmarkReposTest(TestCase):
@classmethod
def setUpClass(cls):
"""
Share this valid token in all tests below. Needed for CI
"""
token = os.getenv("HF_HUB_TOKEN")
if token:
HfFolder.save_token(token)
def test_no_datasets_repo(self):
data = get_benchmark_repos(
benchmark=BOGUS_BENCHMARK_NAME, use_auth_token=True, endpoint="datasets", repo_type="prediction"
)
self.assertEqual(len(data), 0)
def test_no_models_repo(self):
data = get_benchmark_repos(
benchmark=BOGUS_BENCHMARK_NAME, use_auth_token=True, endpoint="models", repo_type="prediction"
)
self.assertEqual(len(data), 0)
def test_prediction_repo(self):
data = get_benchmark_repos(
benchmark=DUMMY_BENCHMARK_NAME, use_auth_token=True, endpoint="datasets", repo_type="prediction"
)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]["id"], DUMMY_PREDICTION_ID)
def test_evaluation_repo(self):
data = get_benchmark_repos(
benchmark=DUMMY_BENCHMARK_NAME, use_auth_token=True, endpoint="datasets", repo_type="evaluation"
)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]["id"], DUMMY_EVALUATION_ID)
def test_model_upload_repo(self):
data = get_benchmark_repos(
benchmark=DUMMY_BENCHMARK_NAME, use_auth_token=True, endpoint="models", repo_type="model"
)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]["modelId"], DUMMY_MODEL_ID)
def test_repo_in_submission_window(self):
# Grab repo to extract timestamp
# TODO(lewtun): Use HfApi.dataset_info if we bump huggingface-hub in AutoNLP backend
repo = get_benchmark_repos(
benchmark=DUMMY_BENCHMARK_NAME, use_auth_token=True, endpoint="datasets", repo_type="prediction"
)
submission_time = pd.to_datetime(repo[0].get("lastModified"))
start_date = (submission_time - pd.Timedelta(days=1)).date()
end_date = (submission_time + pd.Timedelta(days=1)).date()
data = get_benchmark_repos(
benchmark=DUMMY_BENCHMARK_NAME,
use_auth_token=True,
endpoint="datasets",
repo_type="prediction",
start_date=start_date,
end_date=end_date,
)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]["id"], DUMMY_PREDICTION_ID)
def test_repo_outside_submission_window(self):
# Grab repo to extract timestamp
# TODO(lewtun): Use HfApi.dataset_info if we bump huggingface-hub in AutoNLP backend
repo = get_benchmark_repos(
benchmark=DUMMY_BENCHMARK_NAME, use_auth_token=True, endpoint="datasets", repo_type="prediction"
)
submission_time = pd.to_datetime(repo[0].get("lastModified"))
start_date = (submission_time + pd.Timedelta(days=1)).date()
end_date = (submission_time + pd.Timedelta(days=2)).date()
data = get_benchmark_repos(
benchmark=DUMMY_BENCHMARK_NAME,
use_auth_token=True,
endpoint="datasets",
repo_type="prediction",
start_date=start_date,
end_date=end_date,
)
self.assertEqual(len(data), 0)
| []
| []
| [
"HF_HUB_TOKEN"
]
| [] | ["HF_HUB_TOKEN"] | python | 1 | 0 | |
query_test.go | package pgx_test
import (
"bytes"
"context"
"database/sql"
"errors"
"fmt"
"os"
"reflect"
"strconv"
"strings"
"testing"
"time"
"github.com/cockroachdb/apd"
"github.com/gofrs/uuid"
"github.com/jackc/pgconn"
"github.com/jackc/pgconn/stmtcache"
"github.com/jackc/pgtype"
gofrs "github.com/jackc/pgtype/ext/gofrs-uuid"
"github.com/nappspt/schemapgx/v4"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConnQueryScan(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
var sum, rowCount int32
rows, err := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
defer rows.Close()
for rows.Next() {
var n int32
rows.Scan(&n)
sum += n
rowCount++
}
if rows.Err() != nil {
t.Fatalf("conn.Query failed: %v", err)
}
assert.Equal(t, "SELECT 10", string(rows.CommandTag()))
if rowCount != 10 {
t.Error("Select called onDataRow wrong number of times")
}
if sum != 55 {
t.Error("Wrong values returned")
}
}
func TestConnQueryRowsFieldDescriptionsBeforeNext(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
rows, err := conn.Query(context.Background(), "select 'hello' as msg")
require.NoError(t, err)
defer rows.Close()
require.Len(t, rows.FieldDescriptions(), 1)
assert.Equal(t, []byte("msg"), rows.FieldDescriptions()[0].Name)
}
func TestConnQueryWithoutResultSetCommandTag(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
rows, err := conn.Query(context.Background(), "create temporary table t (id serial);")
assert.NoError(t, err)
rows.Close()
assert.NoError(t, rows.Err())
assert.Equal(t, "CREATE TABLE", string(rows.CommandTag()))
}
func TestConnQueryScanWithManyColumns(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
columnCount := 1000
sql := "select "
for i := 0; i < columnCount; i++ {
if i > 0 {
sql += ","
}
sql += fmt.Sprintf(" %d", i)
}
sql += " from generate_series(1,5)"
dest := make([]int, columnCount)
var rowCount int
rows, err := conn.Query(context.Background(), sql)
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
defer rows.Close()
for rows.Next() {
destPtrs := make([]interface{}, columnCount)
for i := range destPtrs {
destPtrs[i] = &dest[i]
}
if err := rows.Scan(destPtrs...); err != nil {
t.Fatalf("rows.Scan failed: %v", err)
}
rowCount++
for i := range dest {
if dest[i] != i {
t.Errorf("dest[%d] => %d, want %d", i, dest[i], i)
}
}
}
if rows.Err() != nil {
t.Fatalf("conn.Query failed: %v", err)
}
if rowCount != 5 {
t.Errorf("rowCount => %d, want %d", rowCount, 5)
}
}
func TestConnQueryValues(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
var rowCount int32
rows, err := conn.Query(context.Background(), "select 'foo'::text, 'bar'::varchar, n, null, n from generate_series(1,$1) n", 10)
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
defer rows.Close()
for rows.Next() {
rowCount++
values, err := rows.Values()
require.NoError(t, err)
require.Len(t, values, 5)
assert.Equal(t, "foo", values[0])
assert.Equal(t, "bar", values[1])
assert.EqualValues(t, rowCount, values[2])
assert.Nil(t, values[3])
assert.EqualValues(t, rowCount, values[4])
}
if rows.Err() != nil {
t.Fatalf("conn.Query failed: %v", err)
}
if rowCount != 10 {
t.Error("Select called onDataRow wrong number of times")
}
}
// https://github.com/jackc/pgx/issues/666
func TestConnQueryValuesWhenUnableToDecode(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
// Note that this relies on pgtype.Record not supporting the text protocol. This seems safe as it is impossible to
// decode the text protocol because unlike the binary protocol there is no way to determine the OIDs of the elements.
rows, err := conn.Query(context.Background(), "select (array[1::oid], null)", pgx.QueryResultFormats{pgx.TextFormatCode})
require.NoError(t, err)
defer rows.Close()
require.True(t, rows.Next())
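// Since only the text format is available here, Values is expected to hand back
// the raw text representation "({1},)" rather than erroring.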
values, err := rows.Values()
require.NoError(t, err)
require.Equal(t, "({1},)", values[0])
}
func TestConnQueryValuesWithUnknownOID(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
tx, err := conn.Begin(ctx)
require.NoError(t, err)
defer tx.Rollback(ctx)
_, err = tx.Exec(ctx, "create type fruit as enum('orange', 'apple', 'pear')")
require.NoError(t, err)
rows, err := conn.Query(context.Background(), "select 'orange'::fruit")
require.NoError(t, err)
defer rows.Close()
require.True(t, rows.Next())
values, err := rows.Values()
require.NoError(t, err)
require.Equal(t, "orange", values[0])
}
// https://github.com/jackc/pgx/issues/478
func TestConnQueryReadRowMultipleTimes(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
var rowCount int32
rows, err := conn.Query(context.Background(), "select 'foo'::text, 'bar'::varchar, n, null, n from generate_series(1,$1) n", 10)
require.NoError(t, err)
defer rows.Close()
for rows.Next() {
rowCount++
for i := 0; i < 2; i++ {
values, err := rows.Values()
require.NoError(t, err)
require.Len(t, values, 5)
require.Equal(t, "foo", values[0])
require.Equal(t, "bar", values[1])
require.EqualValues(t, rowCount, values[2])
require.Nil(t, values[3])
require.EqualValues(t, rowCount, values[4])
var a, b string
var c int32
var d pgtype.Unknown
var e int32
err = rows.Scan(&a, &b, &c, &d, &e)
require.NoError(t, err)
require.Equal(t, "foo", a)
require.Equal(t, "bar", b)
require.Equal(t, rowCount, c)
require.Equal(t, pgtype.Null, d.Status)
require.Equal(t, rowCount, e)
}
}
require.NoError(t, rows.Err())
require.Equal(t, int32(10), rowCount)
}
// https://github.com/jackc/pgx/issues/386
func TestConnQueryValuesWithMultipleComplexColumnsOfSameType(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
expected0 := &pgtype.Int8Array{
Elements: []pgtype.Int8{
{Int: 1, Status: pgtype.Present},
{Int: 2, Status: pgtype.Present},
{Int: 3, Status: pgtype.Present},
},
Dimensions: []pgtype.ArrayDimension{{Length: 3, LowerBound: 1}},
Status: pgtype.Present,
}
expected1 := &pgtype.Int8Array{
Elements: []pgtype.Int8{
{Int: 4, Status: pgtype.Present},
{Int: 5, Status: pgtype.Present},
{Int: 6, Status: pgtype.Present},
},
Dimensions: []pgtype.ArrayDimension{{Length: 3, LowerBound: 1}},
Status: pgtype.Present,
}
var rowCount int32
rows, err := conn.Query(context.Background(), "select '{1,2,3}'::bigint[], '{4,5,6}'::bigint[] from generate_series(1,$1) n", 10)
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
defer rows.Close()
for rows.Next() {
rowCount++
values, err := rows.Values()
if err != nil {
t.Fatalf("rows.Values failed: %v", err)
}
if len(values) != 2 {
t.Errorf("Expected rows.Values to return 2 values, but it returned %d", len(values))
}
if !reflect.DeepEqual(values[0], *expected0) {
t.Errorf(`Expected values[0] to be %v, but it was %v`, *expected0, values[0])
}
if !reflect.DeepEqual(values[1], *expected1) {
t.Errorf(`Expected values[1] to be %v, but it was %v`, *expected1, values[1])
}
}
if rows.Err() != nil {
t.Fatalf("conn.Query failed: %v", err)
}
if rowCount != 10 {
t.Error("Select called onDataRow wrong number of times")
}
}
// https://github.com/jackc/pgx/issues/228
func TestRowsScanDoesNotAllowScanningBinaryFormatValuesIntoString(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
var s string
err := conn.QueryRow(context.Background(), "select 1").Scan(&s)
if err == nil || !(strings.Contains(err.Error(), "cannot decode binary value into string") || strings.Contains(err.Error(), "cannot assign")) {
t.Fatalf("Expected Scan to fail to encode binary value into string but: %v", err)
}
ensureConnValid(t, conn)
}
func TestConnQueryRawValues(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
var rowCount int32
rows, err := conn.Query(
context.Background(),
"select 'foo'::text, 'bar'::varchar, n, null, n from generate_series(1,$1) n",
pgx.QuerySimpleProtocol(true),
10,
)
require.NoError(t, err)
defer rows.Close()
for rows.Next() {
rowCount++
rawValues := rows.RawValues()
assert.Len(t, rawValues, 5)
assert.Equal(t, "foo", string(rawValues[0]))
assert.Equal(t, "bar", string(rawValues[1]))
assert.Equal(t, strconv.FormatInt(int64(rowCount), 10), string(rawValues[2]))
assert.Nil(t, rawValues[3])
assert.Equal(t, strconv.FormatInt(int64(rowCount), 10), string(rawValues[4]))
}
require.NoError(t, rows.Err())
assert.EqualValues(t, 10, rowCount)
}
// Test that a connection stays valid when query results are closed early
func TestConnQueryCloseEarly(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
// Immediately close query without reading any rows
rows, err := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
rows.Close()
ensureConnValid(t, conn)
// Read partial response then close
rows, err = conn.Query(context.Background(), "select generate_series(1,$1)", 10)
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
ok := rows.Next()
if !ok {
t.Fatal("rows.Next terminated early")
}
var n int32
rows.Scan(&n)
if n != 1 {
t.Fatalf("Expected 1 from first row, but got %v", n)
}
rows.Close()
ensureConnValid(t, conn)
}
func TestConnQueryCloseEarlyWithErrorOnWire(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
rows, err := conn.Query(context.Background(), "select 1/(10-n) from generate_series(1,10) n")
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
assert.False(t, pgconn.SafeToRetry(err))
rows.Close()
ensureConnValid(t, conn)
}
// Test that a connection stays valid when query results read incorrectly
func TestConnQueryReadWrongTypeError(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
// Read a single value incorrectly
rows, err := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
rowsRead := 0
for rows.Next() {
var t time.Time
rows.Scan(&t)
rowsRead++
}
if rowsRead != 1 {
t.Fatalf("Expected error to cause only 1 row to be read, but %d were read", rowsRead)
}
if rows.Err() == nil {
t.Fatal("Expected Rows to have an error after an improper read but it didn't")
}
if rows.Err().Error() != "can't scan into dest[0]: Can't convert OID 23 to time.Time" && !strings.Contains(rows.Err().Error(), "cannot assign") {
t.Fatalf("Expected different Rows.Err(): %v", rows.Err())
}
ensureConnValid(t, conn)
}
// Test that a connection stays valid when query results read incorrectly
func TestConnQueryReadTooManyValues(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
// Read too many values
rows, err := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
rowsRead := 0
for rows.Next() {
var n, m int32
rows.Scan(&n, &m)
rowsRead++
}
if rowsRead != 1 {
t.Fatalf("Expected error to cause only 1 row to be read, but %d were read", rowsRead)
}
if rows.Err() == nil {
t.Fatal("Expected Rows to have an error after an improper read but it didn't")
}
ensureConnValid(t, conn)
}
func TestConnQueryScanIgnoreColumn(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
rows, err := conn.Query(context.Background(), "select 1::int8, 2::int8, 3::int8")
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
ok := rows.Next()
if !ok {
t.Fatal("rows.Next terminated early")
}
var n, m int64
err = rows.Scan(&n, nil, &m)
if err != nil {
t.Fatalf("rows.Scan failed: %v", err)
}
rows.Close()
if n != 1 {
t.Errorf("Expected n to equal 1, but it was %d", n)
}
if m != 3 {
t.Errorf("Expected n to equal 3, but it was %d", m)
}
ensureConnValid(t, conn)
}
// https://github.com/jackc/pgx/issues/570
func TestConnQueryDeferredError(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
skipCockroachDB(t, conn, "Server does not support deferred constraint (https://github.com/cockroachdb/cockroach/issues/31632)")
mustExec(t, conn, `create temporary table t (
id text primary key,
n int not null,
unique (n) deferrable initially deferred
);
insert into t (id, n) values ('a', 1), ('b', 2), ('c', 3);`)
rows, err := conn.Query(context.Background(), `update t set n=n+1 where id='b' returning *`)
if err != nil {
t.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id string
var n int32
err = rows.Scan(&id, &n)
if err != nil {
t.Fatal(err)
}
}
if rows.Err() == nil {
t.Fatal("expected error 23505 but got none")
}
if err, ok := rows.Err().(*pgconn.PgError); !ok || err.Code != "23505" {
t.Fatalf("expected error 23505, got %v", err)
}
ensureConnValid(t, conn)
}
func TestConnQueryErrorWhileReturningRows(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
skipCockroachDB(t, conn, "Server uses numeric instead of int")
for i := 0; i < 100; i++ {
func() {
sql := `select 42 / (random() * 20)::integer from generate_series(1,100000)`
rows, err := conn.Query(context.Background(), sql)
if err != nil {
t.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var n int32
if err := rows.Scan(&n); err != nil {
t.Fatalf("Row scan failed: %v", err)
}
}
if _, ok := rows.Err().(*pgconn.PgError); !ok {
t.Fatalf("Expected pgx.PgError, got %v", rows.Err())
}
ensureConnValid(t, conn)
}()
}
}
func TestQueryEncodeError(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
rows, err := conn.Query(context.Background(), "select $1::integer", "wrong")
if err != nil {
t.Errorf("conn.Query failure: %v", err)
}
assert.False(t, pgconn.SafeToRetry(err))
defer rows.Close()
rows.Next()
if rows.Err() == nil {
t.Error("Expected rows.Err() to return error, but it didn't")
}
if conn.PgConn().ParameterStatus("crdb_version") != "" {
if !strings.Contains(rows.Err().Error(), "SQLSTATE 08P01") {
// CockroachDB returns protocol_violation instead of invalid_text_representation
t.Error("Expected rows.Err() to return different error:", rows.Err())
}
} else {
if !strings.Contains(rows.Err().Error(), "SQLSTATE 22P02") {
t.Error("Expected rows.Err() to return different error:", rows.Err())
}
}
}
func TestQueryRowCoreTypes(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
type allTypes struct {
s string
f32 float32
f64 float64
b bool
t time.Time
oid uint32
}
var actual, zero allTypes
tests := []struct {
sql string
queryArgs []interface{}
scanArgs []interface{}
expected allTypes
}{
{"select $1::text", []interface{}{"Jack"}, []interface{}{&actual.s}, allTypes{s: "Jack"}},
{"select $1::float4", []interface{}{float32(1.23)}, []interface{}{&actual.f32}, allTypes{f32: 1.23}},
{"select $1::float8", []interface{}{float64(1.23)}, []interface{}{&actual.f64}, allTypes{f64: 1.23}},
{"select $1::bool", []interface{}{true}, []interface{}{&actual.b}, allTypes{b: true}},
{"select $1::timestamptz", []interface{}{time.Unix(123, 5000)}, []interface{}{&actual.t}, allTypes{t: time.Unix(123, 5000)}},
{"select $1::timestamp", []interface{}{time.Date(2010, 1, 2, 3, 4, 5, 0, time.UTC)}, []interface{}{&actual.t}, allTypes{t: time.Date(2010, 1, 2, 3, 4, 5, 0, time.UTC)}},
{"select $1::date", []interface{}{time.Date(1987, 1, 2, 0, 0, 0, 0, time.UTC)}, []interface{}{&actual.t}, allTypes{t: time.Date(1987, 1, 2, 0, 0, 0, 0, time.UTC)}},
{"select $1::oid", []interface{}{uint32(42)}, []interface{}{&actual.oid}, allTypes{oid: 42}},
}
for i, tt := range tests {
actual = zero
err := conn.QueryRow(context.Background(), tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
if err != nil {
t.Errorf("%d. Unexpected failure: %v (sql -> %v, queryArgs -> %v)", i, err, tt.sql, tt.queryArgs)
}
if actual != tt.expected {
t.Errorf("%d. Expected %v, got %v (sql -> %v, queryArgs -> %v)", i, tt.expected, actual, tt.sql, tt.queryArgs)
}
ensureConnValid(t, conn)
// Check that Scan errors when a core type is null
err = conn.QueryRow(context.Background(), tt.sql, nil).Scan(tt.scanArgs...)
if err == nil {
t.Errorf("%d. Expected null to cause error, but it didn't (sql -> %v)", i, tt.sql)
}
ensureConnValid(t, conn)
}
}
func TestQueryRowCoreIntegerEncoding(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
type allTypes struct {
ui uint
ui8 uint8
ui16 uint16
ui32 uint32
ui64 uint64
i int
i8 int8
i16 int16
i32 int32
i64 int64
}
var actual, zero allTypes
successfulEncodeTests := []struct {
sql string
queryArg interface{}
scanArg interface{}
expected allTypes
}{
// Check any integer type where value is within int2 range can be encoded
{"select $1::int2", int(42), &actual.i16, allTypes{i16: 42}},
{"select $1::int2", int8(42), &actual.i16, allTypes{i16: 42}},
{"select $1::int2", int16(42), &actual.i16, allTypes{i16: 42}},
{"select $1::int2", int32(42), &actual.i16, allTypes{i16: 42}},
{"select $1::int2", int64(42), &actual.i16, allTypes{i16: 42}},
{"select $1::int2", uint(42), &actual.i16, allTypes{i16: 42}},
{"select $1::int2", uint8(42), &actual.i16, allTypes{i16: 42}},
{"select $1::int2", uint16(42), &actual.i16, allTypes{i16: 42}},
{"select $1::int2", uint32(42), &actual.i16, allTypes{i16: 42}},
{"select $1::int2", uint64(42), &actual.i16, allTypes{i16: 42}},
// Check any integer type where value is within int4 range can be encoded
{"select $1::int4", int(42), &actual.i32, allTypes{i32: 42}},
{"select $1::int4", int8(42), &actual.i32, allTypes{i32: 42}},
{"select $1::int4", int16(42), &actual.i32, allTypes{i32: 42}},
{"select $1::int4", int32(42), &actual.i32, allTypes{i32: 42}},
{"select $1::int4", int64(42), &actual.i32, allTypes{i32: 42}},
{"select $1::int4", uint(42), &actual.i32, allTypes{i32: 42}},
{"select $1::int4", uint8(42), &actual.i32, allTypes{i32: 42}},
{"select $1::int4", uint16(42), &actual.i32, allTypes{i32: 42}},
{"select $1::int4", uint32(42), &actual.i32, allTypes{i32: 42}},
{"select $1::int4", uint64(42), &actual.i32, allTypes{i32: 42}},
// Check any integer type where value is within int8 range can be encoded
{"select $1::int8", int(42), &actual.i64, allTypes{i64: 42}},
{"select $1::int8", int8(42), &actual.i64, allTypes{i64: 42}},
{"select $1::int8", int16(42), &actual.i64, allTypes{i64: 42}},
{"select $1::int8", int32(42), &actual.i64, allTypes{i64: 42}},
{"select $1::int8", int64(42), &actual.i64, allTypes{i64: 42}},
{"select $1::int8", uint(42), &actual.i64, allTypes{i64: 42}},
{"select $1::int8", uint8(42), &actual.i64, allTypes{i64: 42}},
{"select $1::int8", uint16(42), &actual.i64, allTypes{i64: 42}},
{"select $1::int8", uint32(42), &actual.i64, allTypes{i64: 42}},
{"select $1::int8", uint64(42), &actual.i64, allTypes{i64: 42}},
}
for i, tt := range successfulEncodeTests {
actual = zero
err := conn.QueryRow(context.Background(), tt.sql, tt.queryArg).Scan(tt.scanArg)
if err != nil {
t.Errorf("%d. Unexpected failure: %v (sql -> %v, queryArg -> %v)", i, err, tt.sql, tt.queryArg)
continue
}
if actual != tt.expected {
t.Errorf("%d. Expected %v, got %v (sql -> %v, queryArg -> %v)", i, tt.expected, actual, tt.sql, tt.queryArg)
}
ensureConnValid(t, conn)
}
failedEncodeTests := []struct {
sql string
queryArg interface{}
}{
// Check any integer type where value is outside pg:int2 range cannot be encoded
{"select $1::int2", int(32769)},
{"select $1::int2", int32(32769)},
{"select $1::int2", int32(32769)},
{"select $1::int2", int64(32769)},
{"select $1::int2", uint(32769)},
{"select $1::int2", uint16(32769)},
{"select $1::int2", uint32(32769)},
{"select $1::int2", uint64(32769)},
// Check any integer type where value is outside pg:int4 range cannot be encoded
{"select $1::int4", int64(2147483649)},
{"select $1::int4", uint32(2147483649)},
{"select $1::int4", uint64(2147483649)},
// Check any integer type where value is outside pg:int8 range cannot be encoded
{"select $1::int8", uint64(9223372036854775809)},
}
for i, tt := range failedEncodeTests {
err := conn.QueryRow(context.Background(), tt.sql, tt.queryArg).Scan(nil)
if err == nil {
t.Errorf("%d. Expected failure to encode, but unexpectedly succeeded: %v (sql -> %v, queryArg -> %v)", i, err, tt.sql, tt.queryArg)
} else if !strings.Contains(err.Error(), "is greater than") {
t.Errorf("%d. Expected failure to encode, but got: %v (sql -> %v, queryArg -> %v)", i, err, tt.sql, tt.queryArg)
}
ensureConnValid(t, conn)
}
}
func TestQueryRowCoreIntegerDecoding(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
type allTypes struct {
ui uint
ui8 uint8
ui16 uint16
ui32 uint32
ui64 uint64
i int
i8 int8
i16 int16
i32 int32
i64 int64
}
var actual, zero allTypes
successfulDecodeTests := []struct {
sql string
scanArg interface{}
expected allTypes
}{
// Check any integer type where value is within Go:int range can be decoded
{"select 42::int2", &actual.i, allTypes{i: 42}},
{"select 42::int4", &actual.i, allTypes{i: 42}},
{"select 42::int8", &actual.i, allTypes{i: 42}},
{"select -42::int2", &actual.i, allTypes{i: -42}},
{"select -42::int4", &actual.i, allTypes{i: -42}},
{"select -42::int8", &actual.i, allTypes{i: -42}},
// Check any integer type where value is within Go:int8 range can be decoded
{"select 42::int2", &actual.i8, allTypes{i8: 42}},
{"select 42::int4", &actual.i8, allTypes{i8: 42}},
{"select 42::int8", &actual.i8, allTypes{i8: 42}},
{"select -42::int2", &actual.i8, allTypes{i8: -42}},
{"select -42::int4", &actual.i8, allTypes{i8: -42}},
{"select -42::int8", &actual.i8, allTypes{i8: -42}},
// Check any integer type where value is within Go:int16 range can be decoded
{"select 42::int2", &actual.i16, allTypes{i16: 42}},
{"select 42::int4", &actual.i16, allTypes{i16: 42}},
{"select 42::int8", &actual.i16, allTypes{i16: 42}},
{"select -42::int2", &actual.i16, allTypes{i16: -42}},
{"select -42::int4", &actual.i16, allTypes{i16: -42}},
{"select -42::int8", &actual.i16, allTypes{i16: -42}},
// Check any integer type where value is within Go:int32 range can be decoded
{"select 42::int2", &actual.i32, allTypes{i32: 42}},
{"select 42::int4", &actual.i32, allTypes{i32: 42}},
{"select 42::int8", &actual.i32, allTypes{i32: 42}},
{"select -42::int2", &actual.i32, allTypes{i32: -42}},
{"select -42::int4", &actual.i32, allTypes{i32: -42}},
{"select -42::int8", &actual.i32, allTypes{i32: -42}},
// Check any integer type where value is within Go:int64 range can be decoded
{"select 42::int2", &actual.i64, allTypes{i64: 42}},
{"select 42::int4", &actual.i64, allTypes{i64: 42}},
{"select 42::int8", &actual.i64, allTypes{i64: 42}},
{"select -42::int2", &actual.i64, allTypes{i64: -42}},
{"select -42::int4", &actual.i64, allTypes{i64: -42}},
{"select -42::int8", &actual.i64, allTypes{i64: -42}},
// Check any integer type where value is within Go:uint range can be decoded
{"select 128::int2", &actual.ui, allTypes{ui: 128}},
{"select 128::int4", &actual.ui, allTypes{ui: 128}},
{"select 128::int8", &actual.ui, allTypes{ui: 128}},
// Check any integer type where value is within Go:uint8 range can be decoded
{"select 128::int2", &actual.ui8, allTypes{ui8: 128}},
{"select 128::int4", &actual.ui8, allTypes{ui8: 128}},
{"select 128::int8", &actual.ui8, allTypes{ui8: 128}},
// Check any integer type where value is within Go:uint16 range can be decoded
{"select 42::int2", &actual.ui16, allTypes{ui16: 42}},
{"select 32768::int4", &actual.ui16, allTypes{ui16: 32768}},
{"select 32768::int8", &actual.ui16, allTypes{ui16: 32768}},
// Check any integer type where value is within Go:uint32 range can be decoded
{"select 42::int2", &actual.ui32, allTypes{ui32: 42}},
{"select 42::int4", &actual.ui32, allTypes{ui32: 42}},
{"select 2147483648::int8", &actual.ui32, allTypes{ui32: 2147483648}},
// Check any integer type where value is within Go:uint64 range can be decoded
{"select 42::int2", &actual.ui64, allTypes{ui64: 42}},
{"select 42::int4", &actual.ui64, allTypes{ui64: 42}},
{"select 42::int8", &actual.ui64, allTypes{ui64: 42}},
}
for i, tt := range successfulDecodeTests {
actual = zero
err := conn.QueryRow(context.Background(), tt.sql).Scan(tt.scanArg)
if err != nil {
t.Errorf("%d. Unexpected failure: %v (sql -> %v)", i, err, tt.sql)
continue
}
if actual != tt.expected {
t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.expected, actual, tt.sql)
}
ensureConnValid(t, conn)
}
failedDecodeTests := []struct {
sql string
scanArg interface{}
expectedErr string
}{
// Check any integer type where value is outside Go:int8 range cannot be decoded
{"select 128::int2", &actual.i8, "is greater than"},
{"select 128::int4", &actual.i8, "is greater than"},
{"select 128::int8", &actual.i8, "is greater than"},
{"select -129::int2", &actual.i8, "is less than"},
{"select -129::int4", &actual.i8, "is less than"},
{"select -129::int8", &actual.i8, "is less than"},
// Check any integer type where value is outside Go:int16 range cannot be decoded
{"select 32768::int4", &actual.i16, "is greater than"},
{"select 32768::int8", &actual.i16, "is greater than"},
{"select -32769::int4", &actual.i16, "is less than"},
{"select -32769::int8", &actual.i16, "is less than"},
// Check any integer type where value is outside Go:int32 range cannot be decoded
{"select 2147483648::int8", &actual.i32, "is greater than"},
{"select -2147483649::int8", &actual.i32, "is less than"},
// Check any integer type where value is outside Go:uint range cannot be decoded
{"select -1::int2", &actual.ui, "is less than"},
{"select -1::int4", &actual.ui, "is less than"},
{"select -1::int8", &actual.ui, "is less than"},
// Check any integer type where value is outside Go:uint8 range cannot be decoded
{"select 256::int2", &actual.ui8, "is greater than"},
{"select 256::int4", &actual.ui8, "is greater than"},
{"select 256::int8", &actual.ui8, "is greater than"},
{"select -1::int2", &actual.ui8, "is less than"},
{"select -1::int4", &actual.ui8, "is less than"},
{"select -1::int8", &actual.ui8, "is less than"},
// Check any integer type where value is outside Go:uint16 cannot be decoded
{"select 65536::int4", &actual.ui16, "is greater than"},
{"select 65536::int8", &actual.ui16, "is greater than"},
{"select -1::int2", &actual.ui16, "is less than"},
{"select -1::int4", &actual.ui16, "is less than"},
{"select -1::int8", &actual.ui16, "is less than"},
// Check any integer type where value is outside Go:uint32 range cannot be decoded
{"select 4294967296::int8", &actual.ui32, "is greater than"},
{"select -1::int2", &actual.ui32, "is less than"},
{"select -1::int4", &actual.ui32, "is less than"},
{"select -1::int8", &actual.ui32, "is less than"},
// Check any integer type where value is outside Go:uint64 range cannot be decoded
{"select -1::int2", &actual.ui64, "is less than"},
{"select -1::int4", &actual.ui64, "is less than"},
{"select -1::int8", &actual.ui64, "is less than"},
}
for i, tt := range failedDecodeTests {
err := conn.QueryRow(context.Background(), tt.sql).Scan(tt.scanArg)
if err == nil {
t.Errorf("%d. Expected failure to decode, but unexpectedly succeeded: %v (sql -> %v)", i, err, tt.sql)
} else if !strings.Contains(err.Error(), tt.expectedErr) {
t.Errorf("%d. Expected failure to decode, but got: %v (sql -> %v)", i, err, tt.sql)
}
ensureConnValid(t, conn)
}
}
func TestQueryRowCoreByteSlice(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
tests := []struct {
sql string
queryArg interface{}
expected []byte
}{
{"select $1::text", "Jack", []byte("Jack")},
{"select $1::text", []byte("Jack"), []byte("Jack")},
{"select $1::varchar", []byte("Jack"), []byte("Jack")},
{"select $1::bytea", []byte{0, 15, 255, 17}, []byte{0, 15, 255, 17}},
}
for i, tt := range tests {
var actual []byte
err := conn.QueryRow(context.Background(), tt.sql, tt.queryArg).Scan(&actual)
if err != nil {
t.Errorf("%d. Unexpected failure: %v (sql -> %v)", i, err, tt.sql)
}
if !bytes.Equal(actual, tt.expected) {
t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.expected, actual, tt.sql)
}
ensureConnValid(t, conn)
}
}
func TestQueryRowErrors(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
type allTypes struct {
i16 int16
i int
s string
}
var actual, zero allTypes
tests := []struct {
sql string
queryArgs []interface{}
scanArgs []interface{}
err string
}{
// {"select $1::badtype", []interface{}{"Jack"}, []interface{}{&actual.i16}, `type "badtype" does not exist`},
// {"SYNTAX ERROR", []interface{}{}, []interface{}{&actual.i16}, "SQLSTATE 42601"},
{"select $1::text", []interface{}{"Jack"}, []interface{}{&actual.i16}, "unable to assign"},
// {"select $1::point", []interface{}{int(705)}, []interface{}{&actual.s}, "cannot convert 705 to Point"},
}
for i, tt := range tests {
actual = zero
err := conn.QueryRow(context.Background(), tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
if err == nil {
t.Errorf("%d. Unexpected success (sql -> %v, queryArgs -> %v)", i, tt.sql, tt.queryArgs)
}
if err != nil && !strings.Contains(err.Error(), tt.err) {
t.Errorf("%d. Expected error to contain %s, but got %v (sql -> %v, queryArgs -> %v)", i, tt.err, err, tt.sql, tt.queryArgs)
}
ensureConnValid(t, conn)
}
}
func TestQueryRowNoResults(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
var n int32
err := conn.QueryRow(context.Background(), "select 1 where 1=0").Scan(&n)
if err != pgx.ErrNoRows {
t.Errorf("Expected pgx.ErrNoRows, got %v", err)
}
ensureConnValid(t, conn)
}
func TestQueryRowEmptyQuery(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
var n int32
err := conn.QueryRow(ctx, "").Scan(&n)
require.Error(t, err)
require.False(t, pgconn.Timeout(err))
ensureConnValid(t, conn)
}
func TestReadingValueAfterEmptyArray(t *testing.T) {
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
var a []string
var b int32
err := conn.QueryRow(context.Background(), "select '{}'::text[], 42::integer").Scan(&a, &b)
if err != nil {
t.Fatalf("conn.QueryRow failed: %v", err)
}
if len(a) != 0 {
t.Errorf("Expected 'a' to have length 0, but it was: %d", len(a))
}
if b != 42 {
t.Errorf("Expected 'b' to 42, but it was: %d", b)
}
}
func TestReadingNullByteArray(t *testing.T) {
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
var a []byte
err := conn.QueryRow(context.Background(), "select null::text").Scan(&a)
if err != nil {
t.Fatalf("conn.QueryRow failed: %v", err)
}
if a != nil {
t.Errorf("Expected 'a' to be nil, but it was: %v", a)
}
}
func TestReadingNullByteArrays(t *testing.T) {
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
rows, err := conn.Query(context.Background(), "select null::text union all select null::text")
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
count := 0
for rows.Next() {
count++
var a []byte
if err := rows.Scan(&a); err != nil {
t.Fatalf("failed to scan row: %v", err)
}
if a != nil {
t.Errorf("Expected 'a' to be nil, but it was: %v", a)
}
}
if count != 2 {
t.Errorf("Expected to read 2 rows, read: %d", count)
}
}
// Use github.com/shopspring/decimal as real-world database/sql custom type
// to test against.
func TestConnQueryDatabaseSQLScanner(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
var num decimal.Decimal
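// decimal.Decimal can be used as a Scan destination because *decimal.Decimal
// implements the standard library's sql.Scanner interface, which pgx supports.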
err := conn.QueryRow(context.Background(), "select '1234.567'::decimal").Scan(&num)
if err != nil {
t.Fatalf("Scan failed: %v", err)
}
expected, err := decimal.NewFromString("1234.567")
if err != nil {
t.Fatal(err)
}
if !num.Equals(expected) {
t.Errorf("Expected num to be %v, but it was %v", expected, num)
}
ensureConnValid(t, conn)
}
// Use github.com/shopspring/decimal as real-world database/sql custom type
// to test against.
func TestConnQueryDatabaseSQLDriverValuer(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
expected, err := decimal.NewFromString("1234.567")
if err != nil {
t.Fatal(err)
}
var num decimal.Decimal
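// Passing &expected directly as a query argument works because decimal.Decimal
// implements database/sql/driver's Valuer interface, so pgx can encode it.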
err = conn.QueryRow(context.Background(), "select $1::decimal", &expected).Scan(&num)
if err != nil {
t.Fatalf("Scan failed: %v", err)
}
if !num.Equals(expected) {
t.Errorf("Expected num to be %v, but it was %v", expected, num)
}
ensureConnValid(t, conn)
}
// https://github.com/jackc/pgx/issues/339
func TestConnQueryDatabaseSQLDriverValuerWithAutoGeneratedPointerReceiver(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
mustExec(t, conn, "create temporary table t(n numeric)")
var d *apd.Decimal
commandTag, err := conn.Exec(context.Background(), `insert into t(n) values($1)`, d)
if err != nil {
t.Fatal(err)
}
if string(commandTag) != "INSERT 0 1" {
t.Fatalf("want %s, got %s", "INSERT 0 1", commandTag)
}
ensureConnValid(t, conn)
}
func TestConnQueryDatabaseSQLDriverValuerWithBinaryPgTypeThatAcceptsSameType(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
conn.ConnInfo().RegisterDataType(pgtype.DataType{
Value: &gofrs.UUID{},
Name: "uuid",
OID: 2950,
})
expected, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
if err != nil {
t.Fatal(err)
}
var u2 uuid.UUID
err = conn.QueryRow(context.Background(), "select $1::uuid", expected).Scan(&u2)
if err != nil {
t.Fatalf("Scan failed: %v", err)
}
if expected != u2 {
t.Errorf("Expected u2 to be %v, but it was %v", expected, u2)
}
ensureConnValid(t, conn)
}
func TestConnQueryDatabaseSQLNullX(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
type row struct {
boolValid sql.NullBool
boolNull sql.NullBool
int64Valid sql.NullInt64
int64Null sql.NullInt64
float64Valid sql.NullFloat64
float64Null sql.NullFloat64
stringValid sql.NullString
stringNull sql.NullString
}
expected := row{
boolValid: sql.NullBool{Bool: true, Valid: true},
int64Valid: sql.NullInt64{Int64: 123, Valid: true},
float64Valid: sql.NullFloat64{Float64: 3.14, Valid: true},
stringValid: sql.NullString{String: "pgx", Valid: true},
}
var actual row
err := conn.QueryRow(
context.Background(),
"select $1::bool, $2::bool, $3::int8, $4::int8, $5::float8, $6::float8, $7::text, $8::text",
expected.boolValid,
expected.boolNull,
expected.int64Valid,
expected.int64Null,
expected.float64Valid,
expected.float64Null,
expected.stringValid,
expected.stringNull,
).Scan(
&actual.boolValid,
&actual.boolNull,
&actual.int64Valid,
&actual.int64Null,
&actual.float64Valid,
&actual.float64Null,
&actual.stringValid,
&actual.stringNull,
)
if err != nil {
t.Fatalf("Scan failed: %v", err)
}
if expected != actual {
t.Errorf("Expected %v, but got %v", expected, actual)
}
ensureConnValid(t, conn)
}
func TestQueryContextSuccess(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
rows, err := conn.Query(ctx, "select 42::integer")
if err != nil {
t.Fatal(err)
}
var result, rowCount int
for rows.Next() {
err = rows.Scan(&result)
if err != nil {
t.Fatal(err)
}
rowCount++
}
if rows.Err() != nil {
t.Fatal(rows.Err())
}
if rowCount != 1 {
t.Fatalf("Expected 1 row, got %d", rowCount)
}
if result != 42 {
t.Fatalf("Expected result 42, got %d", result)
}
ensureConnValid(t, conn)
}
func TestQueryContextErrorWhileReceivingRows(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
skipCockroachDB(t, conn, "Server uses numeric instead of int")
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
rows, err := conn.Query(ctx, "select 10/(10-n) from generate_series(1, 100) n")
if err != nil {
t.Fatal(err)
}
var result, rowCount int
for rows.Next() {
err = rows.Scan(&result)
if err != nil {
t.Fatal(err)
}
rowCount++
}
if rows.Err() == nil || rows.Err().Error() != "ERROR: division by zero (SQLSTATE 22012)" {
t.Fatalf("Expected division by zero error, but got %v", rows.Err())
}
if rowCount != 9 {
t.Fatalf("Expected 9 rows, got %d", rowCount)
}
if result != 10 {
t.Fatalf("Expected result 10, got %d", result)
}
ensureConnValid(t, conn)
}
func TestQueryRowContextSuccess(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
var result int
err := conn.QueryRow(ctx, "select 42::integer").Scan(&result)
if err != nil {
t.Fatal(err)
}
if result != 42 {
t.Fatalf("Expected result 42, got %d", result)
}
ensureConnValid(t, conn)
}
func TestQueryRowContextErrorWhileReceivingRow(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
var result int
err := conn.QueryRow(ctx, "select 10/0").Scan(&result)
if err == nil || err.Error() != "ERROR: division by zero (SQLSTATE 22012)" {
t.Fatalf("Expected division by zero error, but got %v", err)
}
ensureConnValid(t, conn)
}
func TestQueryCloseBefore(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
closeConn(t, conn)
_, err := conn.Query(context.Background(), "select 1")
require.Error(t, err)
assert.True(t, pgconn.SafeToRetry(err))
}
func TestScanRow(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
resultReader := conn.PgConn().ExecParams(context.Background(), "select generate_series(1,$1)", [][]byte{[]byte("10")}, nil, nil, nil)
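// pgx.ScanRow applies the usual Scan machinery to rows produced by the lower-level
// pgconn result reader, so each row can be decoded without going through pgx.Rows.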
var sum, rowCount int32
for resultReader.NextRow() {
var n int32
err := pgx.ScanRow(conn.ConnInfo(), resultReader.FieldDescriptions(), resultReader.Values(), &n)
assert.NoError(t, err)
sum += n
rowCount++
}
_, err := resultReader.Close()
require.NoError(t, err)
assert.EqualValues(t, 10, rowCount)
assert.EqualValues(t, 55, sum)
}
func TestConnSimpleProtocol(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
// Test all supported low-level types
{
expected := int64(42)
var actual int64
err := conn.QueryRow(
context.Background(),
"select $1::int8",
pgx.QuerySimpleProtocol(true),
expected,
).Scan(&actual)
if err != nil {
t.Error(err)
}
if expected != actual {
t.Errorf("expected %v got %v", expected, actual)
}
}
{
expected := float64(1.23)
var actual float64
err := conn.QueryRow(
context.Background(),
"select $1::float8",
pgx.QuerySimpleProtocol(true),
expected,
).Scan(&actual)
if err != nil {
t.Error(err)
}
if expected != actual {
t.Errorf("expected %v got %v", expected, actual)
}
}
{
expected := true
var actual bool
err := conn.QueryRow(
context.Background(),
"select $1",
pgx.QuerySimpleProtocol(true),
expected,
).Scan(&actual)
if err != nil {
t.Error(err)
}
if expected != actual {
t.Errorf("expected %v got %v", expected, actual)
}
}
{
expected := []byte{0, 1, 20, 35, 64, 80, 120, 3, 255, 240, 128, 95}
var actual []byte
err := conn.QueryRow(
context.Background(),
"select $1::bytea",
pgx.QuerySimpleProtocol(true),
expected,
).Scan(&actual)
if err != nil {
t.Error(err)
}
if !bytes.Equal(actual, expected) {
t.Errorf("expected %v got %v", expected, actual)
}
}
{
expected := "test"
var actual string
err := conn.QueryRow(
context.Background(),
"select $1::text",
pgx.QuerySimpleProtocol(true),
expected,
).Scan(&actual)
if err != nil {
t.Error(err)
}
if expected != actual {
t.Errorf("expected %v got %v", expected, actual)
}
}
{
tests := []struct {
expected []string
}{
{[]string(nil)},
{[]string{}},
{[]string{"test", "foo", "bar"}},
{[]string{`foo'bar"\baz;quz`, `foo'bar"\baz;quz`}},
}
for i, tt := range tests {
var actual []string
err := conn.QueryRow(
context.Background(),
"select $1::text[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
{
tests := []struct {
expected []int16
}{
{[]int16(nil)},
{[]int16{}},
{[]int16{1, 2, 3}},
}
for i, tt := range tests {
var actual []int16
err := conn.QueryRow(
context.Background(),
"select $1::smallint[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
{
tests := []struct {
expected []int32
}{
{[]int32(nil)},
{[]int32{}},
{[]int32{1, 2, 3}},
}
for i, tt := range tests {
var actual []int32
err := conn.QueryRow(
context.Background(),
"select $1::int[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
{
tests := []struct {
expected []int64
}{
{[]int64(nil)},
{[]int64{}},
{[]int64{1, 2, 3}},
}
for i, tt := range tests {
var actual []int64
err := conn.QueryRow(
context.Background(),
"select $1::bigint[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
{
tests := []struct {
expected []int
}{
{[]int(nil)},
{[]int{}},
{[]int{1, 2, 3}},
}
for i, tt := range tests {
var actual []int
err := conn.QueryRow(
context.Background(),
"select $1::bigint[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
{
tests := []struct {
expected []uint16
}{
{[]uint16(nil)},
{[]uint16{}},
{[]uint16{1, 2, 3}},
}
for i, tt := range tests {
var actual []uint16
err := conn.QueryRow(
context.Background(),
"select $1::smallint[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
{
tests := []struct {
expected []uint32
}{
{[]uint32(nil)},
{[]uint32{}},
{[]uint32{1, 2, 3}},
}
for i, tt := range tests {
var actual []uint32
err := conn.QueryRow(
context.Background(),
"select $1::bigint[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
{
tests := []struct {
expected []uint64
}{
{[]uint64(nil)},
{[]uint64{}},
{[]uint64{1, 2, 3}},
}
for i, tt := range tests {
var actual []uint64
err := conn.QueryRow(
context.Background(),
"select $1::bigint[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
{
tests := []struct {
expected []uint
}{
{[]uint(nil)},
{[]uint{}},
{[]uint{1, 2, 3}},
}
for i, tt := range tests {
var actual []uint
err := conn.QueryRow(
context.Background(),
"select $1::bigint[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
{
tests := []struct {
expected []float32
}{
{[]float32(nil)},
{[]float32{}},
{[]float32{1, 2, 3}},
}
for i, tt := range tests {
var actual []float32
err := conn.QueryRow(
context.Background(),
"select $1::float4[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
{
tests := []struct {
expected []float64
}{
{[]float64(nil)},
{[]float64{}},
{[]float64{1, 2, 3}},
}
for i, tt := range tests {
var actual []float64
err := conn.QueryRow(
context.Background(),
"select $1::float8[]",
pgx.QuerySimpleProtocol(true),
tt.expected,
).Scan(&actual)
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
}
// Test high-level type
{
if conn.PgConn().ParameterStatus("crdb_version") == "" {
// CockroachDB doesn't support circle type.
expected := pgtype.Circle{P: pgtype.Vec2{1, 2}, R: 1.5, Status: pgtype.Present}
actual := expected
err := conn.QueryRow(
context.Background(),
"select $1::circle",
pgx.QuerySimpleProtocol(true),
&expected,
).Scan(&actual)
if err != nil {
t.Error(err)
}
if expected != actual {
t.Errorf("expected %v got %v", expected, actual)
}
}
}
// Test multiple args in single query
{
expectedInt64 := int64(234423)
expectedFloat64 := float64(-0.2312)
expectedBool := true
expectedBytes := []byte{255, 0, 23, 16, 87, 45, 9, 23, 45, 223}
expectedString := "test"
var actualInt64 int64
var actualFloat64 float64
var actualBool bool
var actualBytes []byte
var actualString string
err := conn.QueryRow(
context.Background(),
"select $1::int8, $2::float8, $3, $4::bytea, $5::text",
pgx.QuerySimpleProtocol(true),
expectedInt64, expectedFloat64, expectedBool, expectedBytes, expectedString,
).Scan(&actualInt64, &actualFloat64, &actualBool, &actualBytes, &actualString)
if err != nil {
t.Error(err)
}
if expectedInt64 != actualInt64 {
t.Errorf("expected %v got %v", expectedInt64, actualInt64)
}
if expectedFloat64 != actualFloat64 {
t.Errorf("expected %v got %v", expectedFloat64, actualFloat64)
}
if expectedBool != actualBool {
t.Errorf("expected %v got %v", expectedBool, actualBool)
}
if !bytes.Equal(expectedBytes, actualBytes) {
t.Errorf("expected %v got %v", expectedBytes, actualBytes)
}
if expectedString != actualString {
t.Errorf("expected %v got %v", expectedString, actualString)
}
}
// Test dangerous cases
{
expected := "foo';drop table users;"
var actual string
err := conn.QueryRow(
context.Background(),
"select $1",
pgx.QuerySimpleProtocol(true),
expected,
).Scan(&actual)
if err != nil {
t.Error(err)
}
if expected != actual {
t.Errorf("expected %v got %v", expected, actual)
}
}
ensureConnValid(t, conn)
}
func TestConnSimpleProtocolRefusesNonUTF8ClientEncoding(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
skipCockroachDB(t, conn, "Server does not support changing client_encoding (https://www.cockroachlabs.com/docs/stable/set-vars.html)")
mustExec(t, conn, "set client_encoding to 'SQL_ASCII'")
var expected string
err := conn.QueryRow(
context.Background(),
"select $1",
pgx.QuerySimpleProtocol(true),
"test",
).Scan(&expected)
if err == nil {
t.Error("expected error when client_encoding not UTF8, but no error occurred")
}
ensureConnValid(t, conn)
}
func TestConnSimpleProtocolRefusesNonStandardConformingStrings(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
skipCockroachDB(t, conn, "Server does not support standard_conforming_strings = off (https://github.com/cockroachdb/cockroach/issues/36215)")
mustExec(t, conn, "set standard_conforming_strings to off")
var expected string
err := conn.QueryRow(
context.Background(),
"select $1",
pgx.QuerySimpleProtocol(true),
`\'; drop table users; --`,
).Scan(&expected)
if err == nil {
t.Error("expected error when standard_conforming_strings is off, but no error occurred")
}
ensureConnValid(t, conn)
}
func TestQueryStatementCacheModes(t *testing.T) {
t.Parallel()
config := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
tests := []struct {
name string
buildStatementCache pgx.BuildStatementCacheFunc
}{
{
name: "disabled",
buildStatementCache: nil,
},
{
name: "prepare",
buildStatementCache: func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModePrepare, 32)
},
},
{
name: "describe",
buildStatementCache: func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModeDescribe, 32)
},
},
}
for _, tt := range tests {
func() {
config.BuildStatementCache = tt.buildStatementCache
conn := mustConnect(t, config)
defer closeConn(t, conn)
var n int
err := conn.QueryRow(context.Background(), "select 1").Scan(&n)
assert.NoError(t, err, tt.name)
assert.Equal(t, 1, n, tt.name)
err = conn.QueryRow(context.Background(), "select 2").Scan(&n)
assert.NoError(t, err, tt.name)
assert.Equal(t, 2, n, tt.name)
err = conn.QueryRow(context.Background(), "select 1").Scan(&n)
assert.NoError(t, err, tt.name)
assert.Equal(t, 1, n, tt.name)
ensureConnValid(t, conn)
}()
}
}
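// Illustrative sketch (not one of the tests above): enabling a describe-mode
// statement cache on a connection config before connecting. The field and
// constructor mirror the table-driven test above; the capacity of 512 is an
// arbitrary example value.
//
// config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
// return stmtcache.New(conn, stmtcache.ModeDescribe, 512)
// }
// conn := mustConnect(t, config)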
// https://github.com/jackc/pgx/issues/895
func TestQueryErrorWithNilStatementCacheMode(t *testing.T) {
t.Parallel()
config := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = nil
conn := mustConnect(t, config)
defer closeConn(t, conn)
_, err := conn.Exec(context.Background(), "create temporary table t_unq(id text primary key);")
require.NoError(t, err)
_, err = conn.Exec(context.Background(), "insert into t_unq (id) values ($1)", "abc")
require.NoError(t, err)
rows, err := conn.Query(context.Background(), "insert into t_unq (id) values ($1)", "abc")
require.NoError(t, err)
rows.Close()
err = rows.Err()
require.Error(t, err)
var pgErr *pgconn.PgError
if errors.As(err, &pgErr) {
assert.Equal(t, "23505", pgErr.Code)
} else {
t.Errorf("err is not a *pgconn.PgError: %T", err)
}
ensureConnValid(t, conn)
}
func TestConnQueryFunc(t *testing.T) {
t.Parallel()
testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) {
var actualResults []interface{}
var a, b int
ct, err := conn.QueryFunc(
context.Background(),
"select n, n * 2 from generate_series(1, $1) n",
[]interface{}{3},
[]interface{}{&a, &b},
func(pgx.QueryFuncRow) error {
actualResults = append(actualResults, []interface{}{a, b})
return nil
},
)
require.NoError(t, err)
expectedResults := []interface{}{
[]interface{}{1, 2},
[]interface{}{2, 4},
[]interface{}{3, 6},
}
require.Equal(t, expectedResults, actualResults)
require.EqualValues(t, 3, ct.RowsAffected())
})
}
func TestConnQueryFuncScanError(t *testing.T) {
t.Parallel()
testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) {
var actualResults []interface{}
var a, b int
ct, err := conn.QueryFunc(
context.Background(),
"select 'foo', 'bar' from generate_series(1, $1) n",
[]interface{}{3},
[]interface{}{&a, &b},
func(pgx.QueryFuncRow) error {
actualResults = append(actualResults, []interface{}{a, b})
return nil
},
)
require.EqualError(t, err, "can't scan into dest[0]: unable to assign to *int")
require.Nil(t, ct)
})
}
func TestConnQueryFuncAbort(t *testing.T) {
t.Parallel()
testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) {
var a, b int
ct, err := conn.QueryFunc(
context.Background(),
"select n, n * 2 from generate_series(1, $1) n",
[]interface{}{3},
[]interface{}{&a, &b},
func(pgx.QueryFuncRow) error {
return errors.New("abort")
},
)
require.EqualError(t, err, "abort")
require.Nil(t, ct)
})
}
func ExampleConn_QueryFunc() {
conn, err := pgx.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
if err != nil {
fmt.Printf("Unable to establish connection: %v", err)
return
}
var a, b int
_, err = conn.QueryFunc(
context.Background(),
"select n, n * 2 from generate_series(1, $1) n",
[]interface{}{3},
[]interface{}{&a, &b},
func(pgx.QueryFuncRow) error {
fmt.Printf("%v, %v\n", a, b)
return nil
},
)
if err != nil {
fmt.Printf("QueryFunc error: %v", err)
return
}
// Output:
// 1, 2
// 2, 4
// 3, 6
}
| [
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\""
]
| []
| [
"PGX_TEST_DATABASE"
]
| [] | ["PGX_TEST_DATABASE"] | go | 1 | 0 | |
charmhelpers/contrib/charmsupport/volumes.py | # Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Functions for managing volumes in juju units. One volume is supported per unit.
Subordinates may have their own storage, provided it is on its own partition.
Configuration stanzas::
volume-ephemeral:
type: boolean
default: true
description: >
If false, a volume is mounted as specified in "volume-map"
If true, ephemeral storage will be used, meaning that log data
will only exist as long as the machine. YOU HAVE BEEN WARNED.
volume-map:
type: string
default: {}
description: >
YAML map of units to device names, e.g:
"{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
Service units will raise a configure-error if volume-ephemeral
is 'true' and no volume-map value is set. Use 'juju set' to set a
value and 'juju resolved' to complete configuration.
Usage::
from charmsupport.volumes import configure_volume, VolumeConfigurationError
from charmsupport.hookenv import log, ERROR
def pre_mount_hook():
stop_service('myservice')
def post_mount_hook():
start_service('myservice')
if __name__ == '__main__':
try:
configure_volume(before_change=pre_mount_hook,
after_change=post_mount_hook)
except VolumeConfigurationError:
log('Storage could not be configured', ERROR)
'''
# XXX: Known limitations
# - fstab is neither consulted nor updated
import os
from charmhelpers.core import hookenv
from charmhelpers.core import host
import yaml
MOUNT_BASE = '/srv/juju/volumes'
class VolumeConfigurationError(Exception):
'''Volume configuration data is missing or invalid'''
pass
def get_config():
'''Gather and sanity-check volume configuration data'''
volume_config = {}
config = hookenv.config()
errors = False
if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
volume_config['ephemeral'] = True
else:
volume_config['ephemeral'] = False
volume_map = None
try:
volume_map = yaml.safe_load(config.get('volume-map', '{}'))
except yaml.YAMLError as e:
hookenv.log("Error parsing YAML volume-map: {}".format(e),
hookenv.ERROR)
errors = True
if volume_map is None:
# probably an empty string
volume_map = {}
elif not isinstance(volume_map, dict):
hookenv.log("Volume-map should be a dictionary, not {}".format(
type(volume_map)))
errors = True
volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
if volume_config['device'] and volume_config['ephemeral']:
# asked for ephemeral storage but also defined a volume ID
hookenv.log('A volume is defined for this unit, but ephemeral '
'storage was requested', hookenv.ERROR)
errors = True
elif not volume_config['device'] and not volume_config['ephemeral']:
# asked for permanent storage but did not define volume ID
hookenv.log('Permanent storage was requested, but there is no volume '
'defined for this unit.', hookenv.ERROR)
errors = True
unit_mount_name = hookenv.local_unit().replace('/', '-')
volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
if errors:
return None
return volume_config
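# Illustrative (hypothetical) return value for unit 'rsyslog/0' with
# volume-map "{ rsyslog/0: /dev/vdb }" and volume-ephemeral set to false:
# {'ephemeral': False, 'device': '/dev/vdb',
#  'mountpoint': '/srv/juju/volumes/rsyslog-0'}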
def mount_volume(config):
if os.path.exists(config['mountpoint']):
if not os.path.isdir(config['mountpoint']):
hookenv.log('Not a directory: {}'.format(config['mountpoint']))
raise VolumeConfigurationError()
else:
host.mkdir(config['mountpoint'])
if os.path.ismount(config['mountpoint']):
unmount_volume(config)
if not host.mount(config['device'], config['mountpoint'], persist=True):
raise VolumeConfigurationError()
def unmount_volume(config):
if os.path.ismount(config['mountpoint']):
if not host.umount(config['mountpoint'], persist=True):
raise VolumeConfigurationError()
def managed_mounts():
'''List of all mounted managed volumes'''
return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
def configure_volume(before_change=lambda: None, after_change=lambda: None):
'''Set up storage (or don't) according to the charm's volume configuration.
Returns the mount point or "ephemeral". before_change and after_change
are optional functions to be called if the volume configuration changes.
'''
config = get_config()
if not config:
hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
raise VolumeConfigurationError()
if config['ephemeral']:
if os.path.ismount(config['mountpoint']):
before_change()
unmount_volume(config)
after_change()
return 'ephemeral'
else:
# persistent storage
if os.path.ismount(config['mountpoint']):
mounts = dict(managed_mounts())
if mounts.get(config['mountpoint']) != config['device']:
before_change()
unmount_volume(config)
mount_volume(config)
after_change()
else:
before_change()
mount_volume(config)
after_change()
return config['mountpoint']
| []
| []
| [
"JUJU_UNIT_NAME"
]
| [] | ["JUJU_UNIT_NAME"] | python | 1 | 0 | |
metricbeat/module/kibana/mtest/testing.go | package mtest
import (
"net"
"os"
)
// GetEnvHost returns host for Kibana
func GetEnvHost() string {
host := os.Getenv("KIBANA_HOST")
if len(host) == 0 {
host = "127.0.0.1"
}
return host
}
// GetEnvPort returns port for Kibana
func GetEnvPort() string {
port := os.Getenv("KIBANA_PORT")
if len(port) == 0 {
port = "5601"
}
return port
}
// GetConfig returns config for kibana module
func GetConfig(metricset string) map[string]interface{} {
return map[string]interface{}{
"module": "kibana",
"metricsets": []string{metricset},
"hosts": []string{net.JoinHostPort(GetEnvHost(), GetEnvPort())},
}
}
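// Illustrative sketch of how a metricset test might consume GetConfig; the
// mbtest helper names and the "status" metricset are assumptions, not part of
// this file:
//
// f := mbtest.NewEventFetcher(t, GetConfig("status"))
// event, err := f.Fetch()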
| [
"\"KIBANA_HOST\"",
"\"KIBANA_PORT\""
]
| []
| [
"KIBANA_PORT",
"KIBANA_HOST"
]
| [] | ["KIBANA_PORT", "KIBANA_HOST"] | go | 2 | 0 | |
api/app/app.go | package app
import (
"fmt"
"log"
"net/http"
"os"
"github.com/drklee3/polls-api/api/app/handler"
"github.com/drklee3/polls-api/api/app/model"
"github.com/drklee3/polls-api/api/config"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/jinzhu/gorm"
"github.com/justinas/alice"
)
// App has router and db instances
type App struct {
Router *mux.Router
DB *gorm.DB
}
// Initialize initializes the app with predefined configuration
func (a *App) Initialize(config *config.Config) {
dbURI := fmt.Sprintf("postgres://%s:%s@%s/%s",
config.DB.Username,
config.DB.Password,
config.DB.Host,
config.DB.Dbname)
db, err := gorm.Open("postgres", dbURI)
if err != nil {
log.Fatal("Could not connect to database: ", err.Error())
}
db.LogMode(config.DB.LogMode)
// test connection
_, err = db.Raw("SELECT 1 + 1 AS result").Rows()
if err != nil {
log.Fatalf("Failed to ping DB: %s", err)
}
// run migrations
a.DB = model.DBMigrate(db)
a.Router = mux.NewRouter()
a.setRouters()
}
// setRouters sets the all required routers
func (a *App) setRouters() {
// Routing for handling the polls
a.Get("/polls", a.GetAllPolls)
a.Post("/polls", a.CreatePoll)
a.Get("/polls/{id:[0-9]+}", a.GetPoll)
a.Put("/polls/{id:[0-9]+}", a.UpdatePoll)
a.Post("/polls/{id:[0-9]+}/vote", a.VotePoll)
a.Delete("/polls/{id:[0-9]+}", a.DeletePoll)
a.Put("/polls/{id:[0-9]+}/archive", a.ArchivePoll)
a.Delete("/polls/{id:[0-9]+}/archive", a.RestorePoll)
}
// Get wraps the router for GET method
func (a *App) Get(path string, f func(w http.ResponseWriter, r *http.Request)) {
a.Router.HandleFunc(path, f).Methods("GET")
}
// Post wraps the router for POST method
func (a *App) Post(path string, f func(w http.ResponseWriter, r *http.Request)) {
a.Router.HandleFunc(path, f).Methods("POST")
}
// Put wraps the router for PUT method
func (a *App) Put(path string, f func(w http.ResponseWriter, r *http.Request)) {
a.Router.HandleFunc(path, f).Methods("PUT")
}
// Delete wraps the router for DELETE method
func (a *App) Delete(path string, f func(w http.ResponseWriter, r *http.Request)) {
a.Router.HandleFunc(path, f).Methods("DELETE")
}
/*
** Polls Handlers
*/
// GetAllPolls gets all the polls
func (a *App) GetAllPolls(w http.ResponseWriter, r *http.Request) {
handler.GetAllPolls(a.DB, w, r)
}
// CreatePoll creates a new poll
func (a *App) CreatePoll(w http.ResponseWriter, r *http.Request) {
handler.CreatePoll(a.DB, w, r)
}
// GetPoll gets a single poll
func (a *App) GetPoll(w http.ResponseWriter, r *http.Request) {
handler.GetPoll(a.DB, w, r)
}
// VotePoll creates a vote on a poll
func (a *App) VotePoll(w http.ResponseWriter, r *http.Request) {
handler.VotePoll(a.DB, w, r)
}
// UpdatePoll updates a single poll
func (a *App) UpdatePoll(w http.ResponseWriter, r *http.Request) {
handler.UpdatePoll(a.DB, w, r)
}
// DeletePoll deletes a single poll
func (a *App) DeletePoll(w http.ResponseWriter, r *http.Request) {
handler.DeletePoll(a.DB, w, r)
}
// ArchivePoll disables a single poll submissions
func (a *App) ArchivePoll(w http.ResponseWriter, r *http.Request) {
handler.ArchivePoll(a.DB, w, r)
}
// RestorePoll re-enables a single poll submissions
func (a *App) RestorePoll(w http.ResponseWriter, r *http.Request) {
handler.RestorePoll(a.DB, w, r)
}
// Run the app on it's router
func (a *App) Run(host string) {
log.Printf("Listening on %s", host)
originsOk := handlers.AllowedOrigins([]string{os.Getenv("ORIGIN_ALLOWED")})
chain := alice.New(handlers.CORS(originsOk)).Then(handlers.CombinedLoggingHandler(os.Stdout, a.Router))
log.Fatal(http.ListenAndServe(host, chain))
}
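// Illustrative wiring from a main package (sketch; config.GetConfig is a
// hypothetical loader, any *config.Config value works):
//
// cfg := config.GetConfig()
// a := &app.App{}
// a.Initialize(cfg)
// a.Run(":3000")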
| [
"\"ORIGIN_ALLOWED\""
]
| []
| [
"ORIGIN_ALLOWED"
]
| [] | ["ORIGIN_ALLOWED"] | go | 1 | 0 | |
python/catkin/environment_cache.py | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import ast
import os
import platform
import subprocess
import sys
def generate_environment_script(env_script):
"""
Generate script code to cache environment changes of a script.
This code assumes that the script does nothing other than change
variables that contain colon-separated lists of PATHs, by
replacing or prepending.
:param env_script: str The path to the script which changes the environment
:returns: list script lines
"""
code = []
_append_header(code)
_append_comment(code, 'based on a snapshot of the environment before and after calling the setup script')
_append_comment(code, 'it emulates the modifications of the setup script without recurring computations')
# fetch current environment
env = os.environ
# fetch environment after calling setup
python_code = 'import os; print(dict(os.environ))'
output = subprocess.check_output([env_script, sys.executable, '-c', python_code])
env_after = ast.literal_eval(output.decode('utf8'))
# calculate added and modified environment variables
added = {}
modified = {}
for key, value in env_after.items():
if key not in env:
added[key] = value
elif env[key] != value:
modified[key] = [env[key], value]
code.append('')
_append_comment(code, 'new environment variables')
for key in sorted(added.keys()):
_set_variable(code, key, added[key])
code.append('')
_append_comment(code, 'modified environment variables')
for key in sorted(modified.keys()):
(old_value, new_value) = modified[key]
if new_value.endswith(os.pathsep + old_value):
variable = ('$%s' if _is_not_windows() else '%%%s%%') % key
new_value = new_value[:-len(old_value)] + variable
if _is_not_windows():
new_value = '"%s"' % new_value
_set_variable(code, key, new_value)
else:
_set_variable(code, key, new_value)
return code
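# Illustrative usage (sketch; the setup script path and output file name are
# hypothetical):
#
#   lines = generate_environment_script('/path/to/setup.sh')
#   with open('setup_cached.sh', 'w') as f:
#       f.write('\n'.join(lines) + '\n')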
def _is_not_windows():
return platform.system() != 'Windows'
def _append_header(code):
if _is_not_windows():
code.append('#!/usr/bin/env sh')
else:
code.append('@echo off')
_append_comment(code, 'generated from catkin/python/catkin/environment_cache.py')
code.append('')
def _append_comment(code, value):
if _is_not_windows():
comment_prefix = '#'
else:
comment_prefix = 'REM'
code.append('%s %s' % (comment_prefix, value))
def _set_variable(code, key, value):
if _is_not_windows():
if not value.startswith('"') or not value.endswith('"'):
value = "'%s'" % value
code.append('export %s=%s' % (key, value))
else:
code.append('set %s=%s' % (key, value))
| []
| []
| []
| [] | [] | python | 0 | 0 | |
day05/go/razziel89/grid.go | package main
import (
"fmt"
"os"
"strings"
)
const (
vecSep = ","
tokensPerPoint = 2
lineSep = "->"
tokensPerLine = 2
)
var (
// Set PART to "1" to select only part 1.
partSelect = os.Getenv("PART")
)
// tag::grid[]
// Vec is a 2D vector. Most of it has been taken from a previous solution.
type Vec struct {
x, y int
}
// VecFromStr converts a sring into a vector.
func VecFromStr(str string) (Vec, error) {
fields := trimStrings(strings.Split(str, vecSep))
if len(fields) != tokensPerPoint {
return Vec{}, fmt.Errorf("cannot parse %v as vector, wrong number of fields", str)
}
ints, err := strSliceToIntSlice(fields)
if err != nil {
return Vec{}, fmt.Errorf("cannot parse %s as vector, %s", str, err.Error())
}
result := Vec{
x: ints[0],
y: ints[1],
}
return result, nil
}
// Add adds one vector to another one.
func (v Vec) Add(delta Vec) Vec {
result := Vec{
x: v.x + delta.x,
y: v.y + delta.y,
}
return result
}
// Mul multiplies each component of a vector with a number.
func (v Vec) Mul(factor int) Vec {
result := Vec{
x: v.x * factor,
y: v.y * factor,
}
return result
}
// Inv inverts a vector.
func (v Vec) Inv() Vec {
return v.Mul(-1)
}
// Sub subtracts one vector's data from another's.
func (v Vec) Sub(delta Vec) Vec {
return v.Add(delta.Inv())
}
func abs(num int) int {
if num < 0 {
return -num
}
return num
}
func max(i1, i2 int) int {
if i1 > i2 {
return i1
}
return i2
}
// Normalize returns a unit-step vector with the same direction as the original vector. Part 1
// only supports axis-parallel directions; part 2 also accepts 45-degree diagonals.
func (v Vec) Normalize() (Vec, error) {
if partSelect == "1" {
if v.x != 0 && v.y != 0 {
return Vec{}, fmt.Errorf("cannot normalize %v", v)
}
} else {
// Default to part 2.
if v.x != 0 && v.y != 0 && abs(v.x) != abs(v.y) {
return Vec{}, fmt.Errorf("cannot normalize %v", v)
}
}
length := max(abs(v.x), abs(v.y))
norm := Vec{
x: v.x / length,
y: v.y / length,
}
return norm, nil
}
// Line is a line in 2D with a start and an end.
type Line struct {
start, end Vec
}
// LineFromStr converts a sring into a line.
func LineFromStr(str string) (Line, error) {
fields := trimStrings(strings.Split(str, lineSep))
if len(fields) != tokensPerLine {
return Line{}, fmt.Errorf("cannot parse %v as line, wrong number of fields", str)
}
start, err := VecFromStr(fields[0])
if err != nil {
return Line{}, fmt.Errorf("cannot parse %v as line, %v", str, err.Error())
}
end, err := VecFromStr(fields[1])
if err != nil {
return Line{}, fmt.Errorf("cannot parse %v as line, %v", str, err.Error())
}
result := Line{
start: start,
end: end,
}
return result, nil
}
// Points determines all points on this line.
func (l Line) Points() ([]Vec, error) {
result := []Vec{}
direction, err := l.end.Sub(l.start).Normalize()
if err != nil {
// We ignore lines whose direction we cannot determine.
return []Vec{}, nil
}
pos := l.start
for pos != l.end {
result = append(result, pos)
pos = pos.Add(direction)
}
result = append(result, pos)
return result, nil
}
// Grid is a lazily evaluated grid that supports marking points on it. Most of it has been taken
// from a previous solution.
type Grid map[Vec]int
// Mark marks a point on the grid once.
func (g *Grid) Mark(entry Vec) {
// We don't have to handle non-existing values here since Go returns the zero value (0 for
// integers) for such entries.
(*g)[entry] = (*g)[entry] + 1
}
// Count determines how often a point has been marked.
func (g *Grid) Count(entry Vec) int {
return (*g)[entry]
}
// RemoveAll removes all markings for a specific point.
func (g *Grid) RemoveAll(entry Vec) {
delete(*g, entry)
}
// FilterFn is a type that can be used for FilterCounts to filter counts that fulfil a predicate.
type FilterFn = func(int) bool
// FilterCounts allow to filter points based counts using a FilterFn.
func (g *Grid) FilterCounts(filterFn FilterFn) []Vec {
result := []Vec{}
for point, count := range *g {
if filterFn(count) {
result = append(result, point)
}
}
return result
}
// end::grid[]
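// exampleOverlapCount is an illustrative sketch, not part of the puzzle solution
// proper: it parses line specifications such as "0,9 -> 5,9", marks every
// covered point on a grid and counts the points covered by at least two lines,
// which is what day 5 asks for.
func exampleOverlapCount(specs []string) (int, error) {
grid := Grid{}
for _, spec := range specs {
line, err := LineFromStr(spec)
if err != nil {
return 0, err
}
points, err := line.Points()
if err != nil {
return 0, err
}
for _, point := range points {
grid.Mark(point)
}
}
overlaps := grid.FilterCounts(func(count int) bool { return count >= 2 })
return len(overlaps), nil
}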
| [
"\"PART\""
]
| []
| [
"PART"
]
| [] | ["PART"] | go | 1 | 0 | |
GPy/util/datasets.py | from __future__ import print_function
import csv
import os
import copy
import numpy as np
import GPy
import scipy.io
import zipfile
import tarfile
import datetime
import json
import re
import sys
from io import open
from .config import *
ipython_available=True
try:
import IPython
except ImportError:
ipython_available=False
try:
#In Python 2, cPickle is faster. It does not exist in Python 3 but the underlying code is always used
#if available
import cPickle as pickle
except ImportError:
import pickle
#A Python2/3 import handler - urllib2 changed its name in Py3 and was also reorganised
try:
from urllib2 import urlopen
from urllib2 import URLError
from urllib import quote
except ImportError:
from urllib.request import urlopen
from urllib.error import URLError
from urllib.parse import quote
def reporthook(a,b,c):
# ',' at the end of the line is important!
#print "% 3.1f%% of %d bytes\r" % (min(100, float(a * b) / c * 100), c),
#you can also use sys.stdout.write
sys.stdout.write("\r% 3.1f%% of %d bytes" % (min(100, float(a * b) / c * 100), c))
sys.stdout.flush()
# Global variables
data_path = os.path.expandvars(config.get('datasets', 'dir'))
#data_path = os.path.join(os.path.dirname(__file__), 'datasets')
default_seed = 10000
overide_manual_authorize=False
neil_url = 'http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/dataset_mirror/'
# Read data resources from json file.
# Don't do this when ReadTheDocs is scanning as it breaks things
on_rtd = os.environ.get('READTHEDOCS', None) == 'True' #Checks if RTD is scanning
if not (on_rtd):
path = os.path.join(os.path.dirname(__file__), 'data_resources.json')
json_data = open(path, encoding='utf-8').read()
data_resources = json.loads(json_data)
if not (on_rtd):
path = os.path.join(os.path.dirname(__file__), 'football_teams.json')
json_data = open(path, encoding='utf-8').read()
football_dict = json.loads(json_data)
def prompt_user(prompt):
"""Ask user for agreeing to data set licenses."""
# raw_input returns the empty string for "enter"
yes = set(['yes', 'y'])
no = set(['no','n'])
try:
print(prompt)
choice = input().lower()
# would like to test for exception here, but not sure if we can do that without importing IPython
except:
print('Stdin is not implemented.')
print('You need to set')
print('overide_manual_authorize=True')
print('to proceed with the download. Please set that variable and continue.')
raise
if choice in yes:
return True
elif choice in no:
return False
else:
print(("Your response was a " + choice))
print("Please respond with 'yes', 'y' or 'no', 'n'")
#return prompt_user()
def data_available(dataset_name=None):
"""Check if the data set is available on the local machine already."""
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
dr = data_resources[dataset_name]
zip_urls = (dr['files'], )
if 'save_names' in dr: zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
for file_list, save_list in zip_longest(*zip_urls, fillvalue=[]):
for f, s in zip_longest(file_list, save_list, fillvalue=None):
if s is not None: f=s # If there is a save_name given, use that one
if not os.path.exists(os.path.join(data_path, dataset_name, f)):
return False
return True
def download_url(url, store_directory, save_name=None, messages=True, suffix=''):
"""Download a file from a url and save it to disk."""
i = url.rfind('/')
file = url[i+1:]
print(file)
dir_name = os.path.join(data_path, store_directory)
if save_name is None: save_name = os.path.join(dir_name, file)
else: save_name = os.path.join(dir_name, save_name)
if suffix is None: suffix=''
print("Downloading ", url, "->", save_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
try:
response = urlopen(url+suffix)
except URLError as e:
if not hasattr(e, "code"):
raise
response = e
if response.code > 399 and response.code<500:
raise ValueError('Tried url ' + url + suffix + ' and received client error ' + str(response.code))
elif response.code > 499:
raise ValueError('Tried url ' + url + suffix + ' and received server error ' + str(response.code))
with open(save_name, 'wb') as f:
meta = response.info()
content_length_str = meta.get("Content-Length")
if content_length_str:
file_size = int(content_length_str)
else:
file_size = None
status = ""
file_size_dl = 0
block_sz = 8192
line_length=30
while True:
buff = response.read(block_sz)
if not buff:
break
file_size_dl += len(buff)
f.write(buff)
sys.stdout.write(" "*(len(status)) + "\r")
if file_size:
status = r"[{perc: <{ll}}] {dl:7.3f}/{full:.3f}MB".format(dl=file_size_dl/(1048576.),
full=file_size/(1048576.), ll=line_length,
perc="="*int(line_length*float(file_size_dl)/file_size))
else:
status = r"[{perc: <{ll}}] {dl:7.3f}MB".format(dl=file_size_dl/(1048576.),
ll=line_length,
perc="."*int(line_length*float(file_size_dl/(10*1048576.))))
sys.stdout.write(status)
sys.stdout.flush()
sys.stdout.write(" "*(len(status)) + "\r")
print(status)
# if we wanted to get more sophisticated maybe we should check the response code here again even for successes.
#with open(save_name, 'wb') as f:
# f.write(response.read())
#urllib.urlretrieve(url+suffix, save_name, reporthook)
def authorize_download(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set."""
print(('Acquiring resource: ' + dataset_name))
# TODO, check resource is in dictionary!
print('')
dr = data_resources[dataset_name]
print('Details of data: ')
print((dr['details']))
print('')
if dr['citation']:
print('Please cite:')
print((dr['citation']))
print('')
if dr['size']:
print(('After downloading the data will take up ' + str(dr['size']) + ' bytes of space.'))
print('')
print(('Data will be stored in ' + os.path.join(data_path, dataset_name) + '.'))
print('')
if overide_manual_authorize:
if dr['license']:
print('You have agreed to the following license:')
print((dr['license']))
print('')
return True
else:
if dr['license']:
print('You must also agree to the following license:')
print((dr['license']))
print('')
return prompt_user('Do you wish to proceed with the download? [yes/no]')
def download_data(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set, then download it."""
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
dr = data_resources[dataset_name]
if not authorize_download(dataset_name):
raise Exception("Permission to download data set denied.")
zip_urls = (dr['urls'], dr['files'])
if 'save_names' in dr: zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
if 'suffices' in dr: zip_urls += (dr['suffices'], )
else: zip_urls += ([],)
for url, files, save_names, suffices in zip_longest(*zip_urls, fillvalue=[]):
for f, save_name, suffix in zip_longest(files, save_names, suffices, fillvalue=None):
download_url(os.path.join(url,f), dataset_name, save_name, suffix=suffix)
return True
def data_details_return(data, data_set):
"""Update the data component of the data dictionary with details drawn from the data_resources."""
data.update(data_resources[data_set])
return data
def cmu_urls_files(subj_motions, messages = True):
'''
Find which resources are missing on the local disk for the requested CMU motion capture motions.
'''
dr = data_resources['cmu_mocap_full']
cmu_url = dr['urls'][0]
subjects_num = subj_motions[0]
motions_num = subj_motions[1]
resource = {'urls' : [], 'files' : []}
# Convert numbers to strings
subjects = []
motions = [list() for _ in range(len(subjects_num))]
for i in range(len(subjects_num)):
curSubj = str(int(subjects_num[i]))
if int(subjects_num[i]) < 10:
curSubj = '0' + curSubj
subjects.append(curSubj)
for j in range(len(motions_num[i])):
curMot = str(int(motions_num[i][j]))
if int(motions_num[i][j]) < 10:
curMot = '0' + curMot
motions[i].append(curMot)
all_skels = []
assert len(subjects) == len(motions)
all_motions = []
for i in range(len(subjects)):
skel_dir = os.path.join(data_path, 'cmu_mocap')
cur_skel_file = os.path.join(skel_dir, subjects[i] + '.asf')
url_required = False
file_download = []
if not os.path.exists(cur_skel_file):
# Current skel file doesn't exist.
if not os.path.isdir(skel_dir):
os.makedirs(skel_dir)
# Add skel file to list.
url_required = True
file_download.append(subjects[i] + '.asf')
for j in range(len(motions[i])):
file_name = subjects[i] + '_' + motions[i][j] + '.amc'
cur_motion_file = os.path.join(skel_dir, file_name)
if not os.path.exists(cur_motion_file):
url_required = True
file_download.append(subjects[i] + '_' + motions[i][j] + '.amc')
if url_required:
resource['urls'].append(cmu_url + '/' + subjects[i] + '/')
resource['files'].append(file_download)
return resource
try:
import gpxpy
import gpxpy.gpx
gpxpy_available = True
except ImportError:
gpxpy_available = False
if gpxpy_available:
def epomeo_gpx(data_set='epomeo_gpx', sample_every=4):
if not data_available(data_set):
download_data(data_set)
files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo','viewranger_phone', 'viewranger_tablet']
X = []
for file in files:
gpx_file = open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r')
gpx = gpxpy.parse(gpx_file)
segment = gpx.tracks[0].segments[0]
points = [point for track in gpx.tracks for segment in track.segments for point in segment.points]
data = [[(point.time-datetime.datetime(2013,8,21)).total_seconds(), point.latitude, point.longitude, point.elevation] for point in points]
X.append(np.asarray(data)[::sample_every, :])
gpx_file.close()
return data_details_return({'X' : X, 'info' : 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'}, data_set)
#del gpxpy_available
# Some general utilities.
def sample_class(f):
p = 1. / (1. + np.exp(-f))
c = np.random.binomial(1, p)
c = np.where(c, 1, -1)
return c
def boston_housing(data_set='boston_housing'):
if not data_available(data_set):
download_data(data_set)
all_data = np.genfromtxt(os.path.join(data_path, data_set, 'housing.data'))
X = all_data[:, 0:13]
Y = all_data[:, 13:14]
return data_details_return({'X' : X, 'Y': Y}, data_set)
def brendan_faces(data_set='brendan_faces'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'frey_rawface.mat'))
Y = mat_data['ff'].T
return data_details_return({'Y': Y}, data_set)
def della_gatta_TRP63_gene_expression(data_set='della_gatta', gene_number=None):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'DellaGattadata.mat'))
X = np.double(mat_data['timepoints'])
if gene_number is None:
Y = mat_data['exprs_tp53_RMA']
else:
Y = mat_data['exprs_tp53_RMA'][:, gene_number]
if len(Y.shape) == 1:
Y = Y[:, None]
return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def football_data(season='1314', data_set='football_data'):
"""Football data from English games since 1993. This downloads data from football-data.co.uk for the given season. """
def league2num(string):
league_dict = {'E0':0, 'E1':1, 'E2': 2, 'E3': 3, 'EC':4}
return league_dict[string]
def football2num(string):
if string in football_dict:
return football_dict[string]
else:
football_dict[string] = len(football_dict)+1
return football_dict[string]
data_set_season = data_set + '_' + season
data_resources[data_set_season] = copy.deepcopy(data_resources[data_set])
data_resources[data_set_season]['urls'][0]+=season + '/'
start_year = int(season[0:2])
end_year = int(season[2:4])
files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv']
if start_year>4 and start_year < 93:
files += ['EC.csv']
data_resources[data_set_season]['files'] = [files]
if not data_available(data_set_season):
download_data(data_set_season)
from matplotlib import pyplot as pb
for file in reversed(files):
filename = os.path.join(data_path, data_set_season, file)
# rewrite files removing blank rows.
writename = os.path.join(data_path, data_set_season, 'temp.csv')
input = open(filename, 'rb')
output = open(writename, 'wb')
writer = csv.writer(output)
for row in csv.reader(input):
if any(field.strip() for field in row):
writer.writerow(row)
input.close()
output.close()
table = np.loadtxt(writename,skiprows=1, usecols=(0, 1, 2, 3, 4, 5), converters = {0: league2num, 1: pb.datestr2num, 2:football2num, 3:football2num}, delimiter=',')
X = table[:, :4]
Y = table[:, 4:]
return data_details_return({'X': X, 'Y': Y}, data_set)
def sod1_mouse(data_set='sod1_mouse'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'sod1_C57_129_exprs.csv')
Y = read_csv(filename, header=0, index_col=0)
num_repeats=4
num_time=4
num_cond=4
X = 1
return data_details_return({'X': X, 'Y': Y}, data_set)
def spellman_yeast(data_set='spellman_yeast'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'combined.txt')
Y = read_csv(filename, header=0, index_col=0, sep='\t')
return data_details_return({'Y': Y}, data_set)
def spellman_yeast_cdc15(data_set='spellman_yeast'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'combined.txt')
Y = read_csv(filename, header=0, index_col=0, sep='\t')
t = np.asarray([10, 30, 50, 70, 80, 90, 100, 110, 120, 130, 140, 150, 170, 180, 190, 200, 210, 220, 230, 240, 250, 270, 290])
times = ['cdc15_'+str(time) for time in t]
Y = Y[times].T
t = t[:, None]
return data_details_return({'Y' : Y, 't': t, 'info': 'Time series of synchronized yeast cells from the CDC-15 experiment of Spellman et al (1998).'}, data_set)
def lee_yeast_ChIP(data_set='lee_yeast_ChIP'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
import zipfile
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'binding_by_gene.tsv')
S = read_csv(filename, header=1, index_col=0, sep='\t')
transcription_factors = [col for col in S.columns if col[:7] != 'Unnamed']
annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']]
S = S[transcription_factors]
return data_details_return({'annotations' : annotations, 'Y' : S, 'transcription_factors': transcription_factors}, data_set)
def fruitfly_tomancak(data_set='fruitfly_tomancak', gene_number=None):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'tomancak_exprs.csv')
Y = read_csv(filename, header=0, index_col=0).T
num_repeats = 3
num_time = 12
xt = np.linspace(0, num_time-1, num_time)
xr = np.linspace(0, num_repeats-1, num_repeats)
xtime, xrepeat = np.meshgrid(xt, xr)
X = np.vstack((xtime.flatten(), xrepeat.flatten())).T
return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def drosophila_protein(data_set='drosophila_protein'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'becker_et_al.csv')
Y = read_csv(filename, header=0)
return data_details_return({'Y': Y}, data_set)
def drosophila_knirps(data_set='drosophila_protein'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'becker_et_al.csv')
# in the csv file we have facts_kni and ext_kni. We treat facts_kni as protein and ext_kni as mRNA
df = read_csv(filename, header=0)
t = df['t'][:,None]
x = df['x'][:,None]
g = df['expression1'][:,None]
p = df['expression2'][:,None]
leng = x.shape[0]
T = np.vstack([t,t])
S = np.vstack([x,x])
inx = np.zeros(leng*2)[:,None]
inx[leng:leng*2] = 1
X = np.hstack([T,S,inx])
Y = np.vstack([g,p])
return data_details_return({'Y': Y, 'X': X}, data_set)
# This will be for downloading google trends data.
def google_trends(query_terms=['big data', 'machine learning', 'data science'], data_set='google_trends', refresh_data=False):
"""Data downloaded from Google trends for given query terms. Warning, if you use this function multiple times in a row you get blocked due to terms of service violations. The function will cache the result of your query, if you wish to refresh an old query set refresh_data to True. The function is inspired by this notebook: http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb"""
query_terms.sort()
import pandas
# Create directory name for data
dir_path = os.path.join(data_path,'google_trends')
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
dir_name = '-'.join(query_terms)
dir_name = dir_name.replace(' ', '_')
dir_path = os.path.join(dir_path,dir_name)
file = 'data.csv'
file_name = os.path.join(dir_path,file)
if not os.path.exists(file_name) or refresh_data:
print("Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. Failure at this point may be due to such blocks.")
# quote the query terms.
quoted_terms = []
for term in query_terms:
quoted_terms.append(quote(term))
print("Query terms: ", ', '.join(query_terms))
print("Fetching query:")
query = 'http://www.google.com/trends/fetchComponent?q=%s&cid=TIMESERIES_GRAPH_0&export=3' % ",".join(quoted_terms)
data = urlopen(query).read()
print("Done.")
# In the notebook they did some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD.
header = """// Data table response\ngoogle.visualization.Query.setResponse("""
data = data[len(header):-2]
data = re.sub('new Date\((\d+),(\d+),(\d+)\)', (lambda m: '"%s-%02d-%02d"' % (m.group(1).strip(), 1+int(m.group(2)), int(m.group(3)))), data)
timeseries = json.loads(data)
columns = [k['label'] for k in timeseries['table']['cols']]
rows = map(lambda x: [k['v'] for k in x['c']], timeseries['table']['rows'])
df = pandas.DataFrame(rows, columns=columns)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
df.to_csv(file_name)
else:
print("Reading cached data for google trends. To refresh the cache set 'refresh_data=True' when calling this function.")
print("Query terms: ", ', '.join(query_terms))
df = pandas.read_csv(file_name, parse_dates=[0])
columns = df.columns
terms = len(query_terms)
import datetime
X = np.asarray([(row, i) for i in range(terms) for row in df.index])
Y = np.asarray([[df.ix[row][query_terms[i]]] for i in range(terms) for row in df.index ])
output_info = columns[1:]
return data_details_return({'data frame' : df, 'X': X, 'Y': Y, 'query_terms': output_info, 'info': "Data downloaded from google trends with query terms: " + ', '.join(output_info) + '.'}, data_set)
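# Illustrative call (sketch): the query terms below are examples only; results
# are cached under the datasets directory, so repeated calls do not re-query
# Google unless refresh_data=True.
#
#   data = google_trends(query_terms=['gaussian process'], refresh_data=False)
#   X, Y = data['X'], data['Y']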
# The data sets
def oil(data_set='three_phase_oil_flow'):
"""The three phase oil data from Bishop and James (1993)."""
if not data_available(data_set):
download_data(data_set)
oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')
oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')
oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')
oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')
oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')
oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')
fid = open(oil_train_file)
X = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set)
#else:
# throw an error
def oil_100(seed=default_seed, data_set = 'three_phase_oil_flow'):
np.random.seed(seed=seed)
data = oil()
indices = np.random.permutation(1000)
indices = indices[0:100]
X = data['X'][indices, :]
Y = data['Y'][indices, :]
return data_details_return({'X': X, 'Y': Y, 'info': "Subsample of the full oil data extracting 100 values randomly without replacement, here seed was " + str(seed)}, data_set)
def pumadyn(seed=default_seed, data_set='pumadyn-32nm'):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar = tarfile.open(os.path.join(path, 'pumadyn-32nm.tar.gz'))
print('Extracting file.')
tar.extractall(path=path)
tar.close()
# Data is variance 1, no need to normalize.
data = np.loadtxt(os.path.join(data_path, data_set, 'pumadyn-32nm', 'Dataset.data.gz'))
indices = np.random.permutation(data.shape[0])
indicesTrain = indices[0:7168]
indicesTest = indices[7168:-1]
indicesTrain.sort(axis=0)
indicesTest.sort(axis=0)
X = data[indicesTrain, 0:-2]
Y = data[indicesTrain, -1][:, None]
Xtest = data[indicesTest, 0:-2]
Ytest = data[indicesTest, -1][:, None]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed': seed}, data_set)
def robot_wireless(data_set='robot_wireless'):
# WiFi access point strengths on a tour around UW Paul Allen building.
if not data_available(data_set):
download_data(data_set)
file_name = os.path.join(data_path, data_set, 'uw-floor.txt')
all_time = np.genfromtxt(file_name, usecols=(0))
macaddress = np.genfromtxt(file_name, usecols=(1), dtype=str)
x = np.genfromtxt(file_name, usecols=(2))
y = np.genfromtxt(file_name, usecols=(3))
strength = np.genfromtxt(file_name, usecols=(4))
addresses = np.unique(macaddress)
times = np.unique(all_time)
addresses.sort()
times.sort()
allY = np.zeros((len(times), len(addresses)))
allX = np.zeros((len(times), 2))
allY[:]=-92.
strengths={}
for address, j in zip(addresses, range(len(addresses))):
ind = np.nonzero(address==macaddress)
temp_strengths=strength[ind]
temp_x=x[ind]
temp_y=y[ind]
temp_times = all_time[ind]
for time in temp_times:
vals = time==temp_times
if any(vals):
ind2 = np.nonzero(vals)
i = np.nonzero(time==times)
allY[i, j] = temp_strengths[ind2]
allX[i, 0] = temp_x[ind2]
allX[i, 1] = temp_y[ind2]
allY = (allY + 85.)/15.
X = allX[0:215, :]
Y = allY[0:215, :]
Xtest = allX[215:, :]
Ytest = allY[215:, :]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'addresses' : addresses, 'times' : times}, data_set)
def silhouette(data_set='ankur_pose_data'):
# Ankur Agarwal and Bill Trigg's silhoutte data.
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'ankurDataPoseSilhouette.mat'))
inMean = np.mean(mat_data['Y'])
inScales = np.sqrt(np.var(mat_data['Y']))
X = mat_data['Y'] - inMean
X = X / inScales
Xtest = mat_data['Y_test'] - inMean
Xtest = Xtest / inScales
Y = mat_data['Z']
Ytest = mat_data['Z_test']
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest}, data_set)
def decampos_digits(data_set='decampos_characters', which_digits=[0,1,2,3,4,5,6,7,8,9]):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
digits = np.load(os.path.join(path, 'digits.npy'))
digits = digits[which_digits,:,:,:]
num_classes, num_samples, height, width = digits.shape
Y = digits.reshape((digits.shape[0]*digits.shape[1],digits.shape[2]*digits.shape[3]))
lbls = np.array([[l]*num_samples for l in which_digits]).reshape(Y.shape[0], 1)
str_lbls = np.array([[str(l)]*num_samples for l in which_digits])
return data_details_return({'Y': Y, 'lbls': lbls, 'str_lbls' : str_lbls, 'info': 'Digits data set from the de Campos characters data'}, data_set)
def ripley_synth(data_set='ripley_prnn_data'):
if not data_available(data_set):
download_data(data_set)
train = np.genfromtxt(os.path.join(data_path, data_set, 'synth.tr'), skip_header=1)
X = train[:, 0:2]
y = train[:, 2:3]
test = np.genfromtxt(os.path.join(data_path, data_set, 'synth.te'), skip_header=1)
Xtest = test[:, 0:2]
ytest = test[:, 2:3]
return data_details_return({'X': X, 'Y': y, 'Xtest': Xtest, 'Ytest': ytest, 'info': 'Synthetic data generated by Ripley for a two class classification problem.'}, data_set)
def global_average_temperature(data_set='global_temperature', num_train=1000, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
print('Using cached version of the data set, to use latest version set refresh_data to True')
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'GLBTS.long.data'))
print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0])
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def mauna_loa(data_set='mauna_loa', num_train=545, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
print('Using cached version of the data set, to use latest version set refresh_data to True')
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'co2_mm_mlo.txt'))
print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0])
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def boxjenkins_airline(data_set='boxjenkins_airline', num_train=96):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'boxjenkins_airline.csv'), delimiter=',')
Y = data[:num_train, 1:2]
X = data[:num_train, 0:1]
Xtest = data[num_train:, 0:1]
Ytest = data[num_train:, 1:2]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Montly airline passenger data from Box & Jenkins 1976."}, data_set)
def osu_run1(data_set='osu_run1', sample_every=4):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
for name in zip.namelist():
zip.extract(name, path)
Y, connect = GPy.util.mocap.load_text_data('Aug210106', path)
Y = Y[0:-1:sample_every, :]
return data_details_return({'Y': Y, 'connect' : connect}, data_set)
def swiss_roll_generated(num_samples=1000, sigma=0.0):
with open(os.path.join(os.path.dirname(__file__), 'datasets', 'swiss_roll.pickle'), 'rb') as f:
data = pickle.load(f)
Na = data['Y'].shape[0]
perm = np.random.permutation(np.r_[:Na])[:num_samples]
Y = data['Y'][perm, :]
t = data['t'][perm]
c = data['colors'][perm, :]
so = np.argsort(t)
Y = Y[so, :]
t = t[so]
c = c[so, :]
return {'Y':Y, 't':t, 'colors':c}
def hapmap3(data_set='hapmap3'):
"""
The HapMap phase three SNP dataset - 1184 samples out of 11 populations.
SNP_matrix (A) encoding [see Paschou et all. 2007 (PCA-Correlated SNPs...)]:
Let (B1,B2) be the alphabetically sorted bases, which occur in the j-th SNP, then
/ 1, iff SNPij==(B1,B1)
Aij = | 0, iff SNPij==(B1,B2)
\ -1, iff SNPij==(B2,B2)
The SNP data and the meta information (such as iid, sex and phenotype) are
stored in the dataframe datadf, index is the Individual ID,
with following columns for metainfo:
* family_id -> Family ID
* paternal_id -> Paternal ID
* maternal_id -> Maternal ID
* sex -> Sex (1=male; 2=female; other=unknown)
* phenotype -> Phenotype (-9, or 0 for unknown)
* population -> Population string (e.g. 'ASW' - 'YRI')
* rest are SNP rs (ids)
More information is given in infodf:
* Chromosome:
- autosomal chromosemes -> 1-22
- X X chromosome -> 23
- Y Y chromosome -> 24
- XY Pseudo-autosomal region of X -> 25
- MT Mitochondrial -> 26
* Relative Position (to Chromosome) [base pairs]
"""
try:
from pandas import read_pickle, DataFrame
from sys import stdout
import bz2
except ImportError as i:
raise ImportError("Need pandas for hapmap dataset, make sure to install pandas (http://pandas.pydata.org/) before loading the hapmap dataset")
dir_path = os.path.join(data_path,'hapmap3')
hapmap_file_name = 'hapmap3_r2_b36_fwd.consensus.qc.poly'
unpacked_files = [os.path.join(dir_path, hapmap_file_name+ending) for ending in ['.ped', '.map']]
unpacked_files_exist = all(map(os.path.exists, unpacked_files))
if not unpacked_files_exist and not data_available(data_set):
download_data(data_set)
preprocessed_data_paths = [os.path.join(dir_path,hapmap_file_name + file_name) for file_name in \
['.snps.pickle',
'.info.pickle',
'.nan.pickle']]
if not all(map(os.path.exists, preprocessed_data_paths)):
if not overide_manual_authorize and not prompt_user("Preprocessing requires ~25GB "
"of memory and can take a (very) long time, continue? [Y/n]"):
print("Preprocessing required for further usage.")
return
status = "Preprocessing data, please be patient..."
print(status)
def write_status(message, progress, status):
stdout.write(" "*len(status)); stdout.write("\r"); stdout.flush()
status = r"[{perc: <{ll}}] {message: <13s}".format(message=message, ll=20,
perc="="*int(20.*progress/100.))
stdout.write(status); stdout.flush()
return status
if not unpacked_files_exist:
status=write_status('unpacking...', 0, '')
curr = 0
for newfilepath in unpacked_files:
if not os.path.exists(newfilepath):
filepath = newfilepath + '.bz2'
file_size = os.path.getsize(filepath)
with open(newfilepath, 'wb') as new_file, open(filepath, 'rb') as f:
decomp = bz2.BZ2Decompressor()
file_processed = 0
buffsize = 100 * 1024
for data in iter(lambda : f.read(buffsize), b''):
new_file.write(decomp.decompress(data))
file_processed += len(data)
status=write_status('unpacking...', curr+12.*file_processed/(file_size), status)
curr += 12
status=write_status('unpacking...', curr, status)
os.remove(filepath)
status=write_status('reading .ped...', 25, status)
# Preprocess data:
snpstrnp = np.loadtxt(unpacked_files[0], dtype=str)
status=write_status('reading .map...', 33, status)
mapnp = np.loadtxt(unpacked_files[1], dtype=str)
status=write_status('reading relationships.txt...', 42, status)
# and metainfo:
infodf = DataFrame.from_csv(os.path.join(dir_path,'./relationships_w_pops_121708.txt'), header=0, sep='\t')
infodf.set_index('IID', inplace=1)
status=write_status('filtering nan...', 45, status)
snpstr = snpstrnp[:,6:].astype('S1').reshape(snpstrnp.shape[0], -1, 2)
inan = snpstr[:,:,0] == '0'
status=write_status('filtering reference alleles...', 55, status)
ref = np.array([np.unique(x)[-2:] for x in snpstr.swapaxes(0, 1)])
status=write_status('encoding snps...', 70, status)
# Encode the information for each gene in {-1,0,1}:
status=write_status('encoding snps...', 73, status)
snps = (snpstr==ref[None,:,:])
status=write_status('encoding snps...', 76, status)
snps = (snps*np.array([1,-1])[None,None,:])
status=write_status('encoding snps...', 78, status)
snps = snps.sum(-1)
status=write_status('encoding snps...', 81, status)
snps = snps.astype('i8')
status=write_status('marking nan values...', 88, status)
# put in nan values (masked as -128):
snps[inan] = -128
status=write_status('setting up meta...', 94, status)
# get meta information:
metaheader = np.r_[['family_id', 'iid', 'paternal_id', 'maternal_id', 'sex', 'phenotype']]
metadf = DataFrame(columns=metaheader, data=snpstrnp[:,:6])
metadf.set_index('iid', inplace=1)
metadf = metadf.join(infodf.population)
metadf.to_pickle(preprocessed_data_paths[1])
# put everything together:
status=write_status('setting up snps...', 96, status)
snpsdf = DataFrame(index=metadf.index, data=snps, columns=mapnp[:,1])
with open(preprocessed_data_paths[0], 'wb') as f:
pickle.dump(snpsdf, f, protocol=-1)
status=write_status('setting up snps...', 98, status)
inandf = DataFrame(index=metadf.index, data=inan, columns=mapnp[:,1])
inandf.to_pickle(preprocessed_data_paths[2])
status=write_status('done :)', 100, status)
print('')
else:
print("loading snps...")
snpsdf = read_pickle(preprocessed_data_paths[0])
print("loading metainfo...")
metadf = read_pickle(preprocessed_data_paths[1])
print("loading nan entries...")
inandf = read_pickle(preprocessed_data_paths[2])
snps = snpsdf.values
populations = metadf.population.values.astype('S3')
hapmap = dict(name=data_set,
description='The HapMap phase three SNP dataset - '
'1184 samples out of 11 populations. inan is a '
'boolean array, containing wheather or not the '
'given entry is nan (nans are masked as '
'-128 in snps).',
snpsdf=snpsdf,
metadf=metadf,
snps=snps,
inan=inandf.values,
inandf=inandf,
populations=populations)
return hapmap
def singlecell(data_set='singlecell'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'singlecell.csv')
Y = read_csv(filename, header=0, index_col=0)
genes = Y.columns
labels = Y.index
# data = np.loadtxt(os.path.join(dir_path, 'singlecell.csv'), delimiter=",", dtype=str)
return data_details_return({'Y': Y, 'info' : "qPCR singlecell experiment in Mouse, measuring 48 gene expressions in 1-64 cell states. The labels have been created as in Guo et al. [2010]",
'genes': genes, 'labels':labels,
}, data_set)
def singlecell_rna_seq_islam(dataset='singlecell_islam'):
if not data_available(dataset):
download_data(dataset)
from pandas import read_csv, DataFrame, concat
dir_path = os.path.join(data_path, dataset)
filename = os.path.join(dir_path, 'GSE29087_L139_expression_tab.txt.gz')
data = read_csv(filename, sep='\t', skiprows=6, compression='gzip', header=None)
header1 = read_csv(filename, sep='\t', header=None, skiprows=5, nrows=1, compression='gzip')
header2 = read_csv(filename, sep='\t', header=None, skiprows=3, nrows=1, compression='gzip')
data.columns = np.concatenate((header1.ix[0, :], header2.ix[0, 7:]))
Y = data.set_index("Feature").ix[8:, 6:-4].T.astype(float)
# read the info .soft
filename = os.path.join(dir_path, 'GSE29087_family.soft.gz')
info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None)
# split at ' = '
info = DataFrame(info.ix[:,0].str.split(' = ').tolist())
# only take samples:
info = info[info[0].str.contains("!Sample")]
info[0] = info[0].apply(lambda row: row[len("!Sample_"):])
groups = info.groupby(0).groups
# remove 'GGG' from barcodes
barcode = info[1][groups['barcode']].apply(lambda row: row[:-3])
title = info[1][groups['title']]
title.index = barcode
title.name = 'title'
geo_accession = info[1][groups['geo_accession']]
geo_accession.index = barcode
geo_accession.name = 'geo_accession'
case_id = info[1][groups['source_name_ch1']]
case_id.index = barcode
case_id.name = 'source_name_ch1'
info = concat([title, geo_accession, case_id], axis=1)
labels = info.join(Y).source_name_ch1[:-4]
labels[labels=='Embryonic stem cell'] = "ES"
labels[labels=='Embryonic fibroblast'] = "MEF"
return data_details_return({'Y': Y,
'info': '92 single cells (48 mouse ES cells, 44 mouse embryonic fibroblasts and 4 negative controls) were analyzed by single-cell tagged reverse transcription (STRT)',
'genes': Y.columns,
'labels': labels,
'datadf': data,
'infodf': info}, dataset)
def singlecell_rna_seq_deng(dataset='singlecell_deng'):
if not data_available(dataset):
download_data(dataset)
from pandas import read_csv, isnull
dir_path = os.path.join(data_path, dataset)
# read the info .soft
filename = os.path.join(dir_path, 'GSE45719_series_matrix.txt.gz')
info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None, nrows=29, index_col=0)
summary = info.loc['!Series_summary'][1]
design = info.loc['!Series_overall_design']
# only take samples:
sample_info = read_csv(filename, sep='\t', skiprows=30, compression='gzip', header=0, index_col=0).T
sample_info.columns = sample_info.columns.to_series().apply(lambda row: row[len("!Sample_"):])
sample_info.columns.name = sample_info.columns.name[len("!Sample_"):]
sample_info = sample_info[['geo_accession', 'characteristics_ch1', 'description']]
sample_info = sample_info.iloc[:, np.r_[0:4, 5:sample_info.shape[1]]]
c = sample_info.columns.to_series()
c[1:4] = ['strain', 'cross', 'developmental_stage']
sample_info.columns = c
# get the labels right:
rep = re.compile('\(.*\)')
def filter_dev_stage(row):
if isnull(row):
row = "2-cell stage embryo"
if row.startswith("developmental stage: "):
row = row[len("developmental stage: "):]
if row == 'adult':
row += " liver"
row = row.replace(' stage ', ' ')
row = rep.sub(' ', row)
row = row.strip(' ')
return row
labels = sample_info.developmental_stage.apply(filter_dev_stage)
# Extract the tar file
filename = os.path.join(dir_path, 'GSE45719_Raw.tar')
with tarfile.open(filename, 'r') as files:
print("Extracting Archive {}...".format(files.name))
data = None
gene_info = None
message = ''
members = files.getmembers()
overall = len(members)
for i, file_info in enumerate(members):
f = files.extractfile(file_info)
inner = read_csv(f, sep='\t', header=0, compression='gzip', index_col=0)
print(' '*(len(message)+1) + '\r', end=' ')
message = "{: >7.2%}: Extracting: {}".format(float(i+1)/overall, file_info.name[:20]+"...txt.gz")
print(message, end=' ')
if data is None:
data = inner.RPKM.to_frame()
data.columns = [file_info.name[:-18]]
gene_info = inner.Refseq_IDs.to_frame()
gene_info.columns = ['NCBI Reference Sequence']
else:
data[file_info.name[:-18]] = inner.RPKM
#gene_info[file_info.name[:-18]] = inner.Refseq_IDs
# Strip GSM number off data index
rep = re.compile('GSM\d+_')
from pandas import MultiIndex
columns = MultiIndex.from_tuples([row.split('_', 1) for row in data.columns])
columns.names = ['GEO Accession', 'index']
data.columns = columns
data = data.T
# make sure the same index gets used
sample_info.index = data.index
# get the labels from the description
#rep = re.compile('fibroblast|\d+-cell|embryo|liver|early blastocyst|mid blastocyst|late blastocyst|blastomere|zygote', re.IGNORECASE)
sys.stdout.write(' '*len(message) + '\r')
sys.stdout.flush()
print()
print("Read Archive {}".format(files.name))
return data_details_return({'Y': data,
'series_info': info,
'sample_info': sample_info,
'gene_info': gene_info,
'summary': summary,
'design': design,
'genes': data.columns,
'labels': labels,
}, dataset)
def swiss_roll_1000():
return swiss_roll(num_samples=1000)
def swiss_roll(num_samples=3000, data_set='swiss_roll'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'swiss_roll_data.mat'))
Y = mat_data['X_data'][:, 0:num_samples].transpose()
return data_details_return({'Y': Y, 'X': mat_data['X_data'], 'info': "The first " + str(num_samples) + " points from the swiss roll data of Tenenbaum, de Silva and Langford (2001)."}, data_set)
def isomap_faces(num_samples=698, data_set='isomap_face_data'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'face_data.mat'))
Y = mat_data['images'][:, 0:num_samples].transpose()
return data_details_return({'Y': Y, 'poses' : mat_data['poses'], 'lights': mat_data['lights'], 'info': "The first " + str(num_samples) + " points from the face data of Tenenbaum, de Silva and Langford (2001)."}, data_set)
def simulation_BGPLVM():
mat_data = scipy.io.loadmat(os.path.join(data_path, 'BGPLVMSimulation.mat'))
Y = np.array(mat_data['Y'], dtype=float)
S = np.array(mat_data['initS'], dtype=float)
mu = np.array(mat_data['initMu'], dtype=float)
#return data_details_return({'S': S, 'Y': Y, 'mu': mu}, data_set)
return {'Y': Y, 'S': S,
'mu' : mu,
'info': "Simulated test dataset generated in MATLAB to compare BGPLVM between python and MATLAB"}
def toy_rbf_1d(seed=default_seed, num_samples=500):
"""
Samples values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1.
:param seed: seed to use for random sampling.
:type seed: int
:param num_samples: number of samples to sample in the function (default 500).
:type num_samples: int
"""
np.random.seed(seed=seed)
num_in = 1
X = np.random.uniform(low= -1.0, high=1.0, size=(num_samples, num_in))
X.sort(axis=0)
rbf = GPy.kern.RBF(num_in, variance=1., lengthscale=np.array((0.25,)))
white = GPy.kern.White(num_in, variance=1e-2)
kernel = rbf + white
K = kernel.K(X)
y = np.reshape(np.random.multivariate_normal(np.zeros(num_samples), K), (num_samples, 1))
return {'X':X, 'Y':y, 'info': "Sampled " + str(num_samples) + " values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1."}
def toy_rbf_1d_50(seed=default_seed):
np.random.seed(seed=seed)
data = toy_rbf_1d()
indices = np.random.permutation(data['X'].shape[0])
indices = indices[0:50]
indices.sort(axis=0)
X = data['X'][indices, :]
Y = data['Y'][indices, :]
return {'X': X, 'Y': Y, 'info': "Subsamples the toy_rbf_sample with 50 values randomly taken from the original sample.", 'seed' : seed}
def toy_linear_1d_classification(seed=default_seed):
np.random.seed(seed=seed)
x1 = np.random.normal(-3, 5, 20)
x2 = np.random.normal(3, 5, 20)
X = (np.r_[x1, x2])[:, None]
return {'X': X, 'Y': sample_class(2.*X), 'F': 2.*X, 'seed' : seed}
def olivetti_glasses(data_set='olivetti_glasses', num_training=200, seed=default_seed):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
y = np.load(os.path.join(path, 'has_glasses.np'))
y = np.where(y=='y',1,0).reshape(-1,1)
faces = scipy.io.loadmat(os.path.join(path, 'olivettifaces.mat'))['faces'].T
np.random.seed(seed=seed)
index = np.random.permutation(faces.shape[0])
X = faces[index[:num_training],:]
Xtest = faces[index[num_training:],:]
Y = y[index[:num_training],:]
Ytest = y[index[num_training:]]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed' : seed, 'info': "ORL Faces with labels identifying who is wearing glasses and who isn't. Data is randomly partitioned according to given seed. Presence or absence of glasses was labelled by James Hensman."}, 'olivetti_faces')
def olivetti_faces(data_set='olivetti_faces'):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(path, 'att_faces.zip'), 'r')
for name in zip.namelist():
zip.extract(name, path)
Y = []
lbls = []
for subject in range(40):
for image in range(10):
image_path = os.path.join(path, 'orl_faces', 's'+str(subject+1), str(image+1) + '.pgm')
from GPy.util import netpbmfile
Y.append(netpbmfile.imread(image_path).flatten())
lbls.append(subject)
Y = np.asarray(Y)
lbls = np.asarray(lbls)[:, None]
return data_details_return({'Y': Y, 'lbls' : lbls, 'info': "ORL Faces processed to 64x64 images."}, data_set)
def xw_pen(data_set='xw_pen'):
if not data_available(data_set):
download_data(data_set)
Y = np.loadtxt(os.path.join(data_path, data_set, 'xw_pen_15.csv'), delimiter=',')
X = np.arange(485)[:, None]
return data_details_return({'Y': Y, 'X': X, 'info': "Tilt data from a personalized digital assistant pen. Plot in original paper showed regression between time steps 175 and 275."}, data_set)
def download_rogers_girolami_data(data_set='rogers_girolami_data'):
if not data_available('rogers_girolami_data'):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar_file = os.path.join(path, 'firstcoursemldata.tar.gz')
tar = tarfile.open(tar_file)
print('Extracting file.')
tar.extractall(path=path)
tar.close()
def olympic_100m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male100']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic sprint times for 100 m men from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_100m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female100']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic sprint times for 100 m women from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_200m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female200']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_200m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male200']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Male 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_400m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female400']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_400m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male400']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Male 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_marathon_men(data_set='olympic_marathon_men'):
if not data_available(data_set):
download_data(data_set)
olympics = np.genfromtxt(os.path.join(data_path, data_set, 'olympicMarathonTimes.csv'), delimiter=',')
X = olympics[:, 0:1]
Y = olympics[:, 1:2]
return data_details_return({'X': X, 'Y': Y}, data_set)
def olympic_sprints(data_set='rogers_girolami_data'):
"""All olympics sprint winning times for multiple output prediction."""
X = np.zeros((0, 2))
Y = np.zeros((0, 1))
for i, dataset in enumerate([olympic_100m_men,
olympic_100m_women,
olympic_200m_men,
olympic_200m_women,
olympic_400m_men,
olympic_400m_women]):
data = dataset()
year = data['X']
time = data['Y']
X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
Y = np.vstack((Y, time))
data['X'] = X
data['Y'] = Y
data['info'] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
return data_details_return({
'X': X,
'Y': Y,
'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
'output_info': {
0:'100m Men',
1:'100m Women',
2:'200m Men',
3:'200m Women',
4:'400m Men',
5:'400m Women'}
}, data_set)
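# Illustrative read of the stacked output (assumed row; actual values depend on the data):
# a row X[i] == [1980., 3.] means Y[i] is the 1980 winning time for output 3, i.e. '200m Women'.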
# def movielens_small(partNo=1,seed=default_seed):
# np.random.seed(seed=seed)
# fileName = os.path.join(data_path, 'movielens', 'small', 'u' + str(partNo) + '.base')
# fid = open(fileName)
# uTrain = np.fromfile(fid, sep='\t', dtype=np.int16).reshape((-1, 4))
# fid.close()
# maxVals = np.amax(uTrain, axis=0)
# numUsers = maxVals[0]
# numFilms = maxVals[1]
# numRatings = uTrain.shape[0]
# Y = scipy.sparse.lil_matrix((numFilms, numUsers), dtype=np.int8)
# for i in range(numUsers):
# ind = pb.mlab.find(uTrain[:, 0]==i+1)
# Y[uTrain[ind, 1]-1, i] = uTrain[ind, 2]
# fileName = os.path.join(data_path, 'movielens', 'small', 'u' + str(partNo) + '.test')
# fid = open(fileName)
# uTest = np.fromfile(fid, sep='\t', dtype=np.int16).reshape((-1, 4))
# fid.close()
# numTestRatings = uTest.shape[0]
# Ytest = scipy.sparse.lil_matrix((numFilms, numUsers), dtype=np.int8)
# for i in range(numUsers):
# ind = pb.mlab.find(uTest[:, 0]==i+1)
# Ytest[uTest[ind, 1]-1, i] = uTest[ind, 2]
# lbls = np.empty((1,1))
# lblstest = np.empty((1,1))
# return {'Y':Y, 'lbls':lbls, 'Ytest':Ytest, 'lblstest':lblstest}
def crescent_data(num_data=200, seed=default_seed):
"""
Data set formed from a mixture of four Gaussians. In each class two of the Gaussians are elongated at right angles to each other and offset to form an approximation to the crescent data that is popular in semi-supervised learning as a toy problem.
:param num_data: number of data points to be sampled (default is 200).
:type num_data: int
:param seed: random seed to be used for data generation.
:type seed: int
"""
np.random.seed(seed=seed)
sqrt2 = np.sqrt(2)
# Rotation matrix
R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])
# Scaling matrices
scales = []
scales.append(np.array([[3, 0], [0, 1]]))
scales.append(np.array([[3, 0], [0, 1]]))
scales.append([[1, 0], [0, 3]])
scales.append([[1, 0], [0, 3]])
means = []
means.append(np.array([4, 4]))
means.append(np.array([0, 4]))
means.append(np.array([-4, -4]))
means.append(np.array([0, -4]))
Xparts = []
num_data_part = []
num_data_total = 0
for i in range(0, 4):
num_data_part.append(round(((i + 1) * num_data) / 4.))
num_data_part[i] -= num_data_total
part = np.random.normal(size=(num_data_part[i], 2))
part = np.dot(np.dot(part, scales[i]), R) + means[i]
Xparts.append(part)
num_data_total += num_data_part[i]
X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))
Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)), -np.ones((num_data_part[2] + num_data_part[3], 1))))
return {'X':X, 'Y':Y, 'info': "Two separate classes of data formed approximately in the shape of two crescents."}
def creep_data(data_set='creep_rupture'):
"""Brun and Yoshida's metal creep rupture data."""
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar_file = os.path.join(path, 'creeprupt.tar')
tar = tarfile.open(tar_file)
print('Extracting file.')
tar.extractall(path=path)
tar.close()
all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka'))
y = all_data[:, 1:2].copy()
features = [0]
features.extend(range(2, 31))
X = all_data[:, features].copy()
return data_details_return({'X': X, 'y': y}, data_set)
def cifar10_patches(data_set='cifar-10'):
"""The Candian Institute for Advanced Research 10 image data set. Code for loading in this data is taken from this Boris Babenko's blog post, original code available here: http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code"""
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'cifar-10-python.tar.gz')
if not data_available(data_set):
download_data(data_set)
import tarfile
# This code is from Boris Babenko's blog post.
# http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code
tfile = tarfile.open(filename, 'r:gz')
tfile.extractall(dir_path)
with open(os.path.join(dir_path, 'cifar-10-batches-py','data_batch_1'),'rb') as f:
data = pickle.load(f)
images = data['data'].reshape((-1,3,32,32)).astype('float32')/255
images = np.rollaxis(images, 1, 4)
patches = np.zeros((0,5,5,3))
for x in range(0,32-5,5):
for y in range(0,32-5,5):
patches = np.concatenate((patches, images[:,x:x+5,y:y+5,:]), axis=0)
patches = patches.reshape((patches.shape[0],-1))
return data_details_return({'Y': patches, "info" : "5x5 pixel patches extracted from the 32x32 pixel CIFAR-10 images, following Boris Babenko's k-means features demonstration."}, data_set)
def cmu_mocap_49_balance(data_set='cmu_mocap'):
"""Load CMU subject 49's one legged balancing motion that was used by Alvarez, Luengo and Lawrence at AISTATS 2009."""
train_motions = ['18', '19']
test_motions = ['20']
data = cmu_mocap('49', train_motions, test_motions, sample_every=4, data_set=data_set)
data['info'] = "One legged balancing motions from CMU data base subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of " + data['info']
return data
def cmu_mocap_35_walk_jog(data_set='cmu_mocap'):
"""Load CMU subject 35's walking and jogging motions, the same data that was used by Taylor, Roweis and Hinton at NIPS 2007. but without their preprocessing. Also used by Lawrence at AISTATS 2007."""
train_motions = ['01', '02', '03', '04', '05', '06',
'07', '08', '09', '10', '11', '12',
'13', '14', '15', '16', '17', '19',
'20', '21', '22', '23', '24', '25',
'26', '28', '30', '31', '32', '33', '34']
test_motions = ['18', '29']
data = cmu_mocap('35', train_motions, test_motions, sample_every=4, data_set=data_set)
data['info'] = "Walk and jog data from CMU data base subject 35. As used in Tayor, Roweis and Hinton at NIPS 2007, but without their pre-processing (i.e. as used by Lawrence at AISTATS 2007). It consists of " + data['info']
return data
def cmu_mocap(subject, train_motions, test_motions=[], sample_every=4, data_set='cmu_mocap'):
"""Load a given subject's training and test motions from the CMU motion capture data."""
# Load in subject skeleton.
subject_dir = os.path.join(data_path, data_set)
# Make sure the data is downloaded.
all_motions = train_motions + test_motions
resource = cmu_urls_files(([subject], [all_motions]))
data_resources[data_set] = data_resources['cmu_mocap_full'].copy()
data_resources[data_set]['files'] = resource['files']
data_resources[data_set]['urls'] = resource['urls']
if resource['urls']:
download_data(data_set)
skel = GPy.util.mocap.acclaim_skeleton(os.path.join(subject_dir, subject + '.asf'))
# Set up labels for each sequence
exlbls = np.eye(len(train_motions))
# Load sequences
tot_length = 0
temp_Y = []
temp_lbls = []
for i in range(len(train_motions)):
temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + train_motions[i] + '.amc'))
temp_Y.append(temp_chan[::sample_every, :])
temp_lbls.append(np.tile(exlbls[i, :], (temp_Y[i].shape[0], 1)))
tot_length += temp_Y[i].shape[0]
Y = np.zeros((tot_length, temp_Y[0].shape[1]))
lbls = np.zeros((tot_length, temp_lbls[0].shape[1]))
end_ind = 0
for i in range(len(temp_Y)):
start_ind = end_ind
end_ind += temp_Y[i].shape[0]
Y[start_ind:end_ind, :] = temp_Y[i]
lbls[start_ind:end_ind, :] = temp_lbls[i]
if len(test_motions) > 0:
temp_Ytest = []
temp_lblstest = []
testexlbls = np.eye(len(test_motions))
tot_test_length = 0
for i in range(len(test_motions)):
temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + test_motions[i] + '.amc'))
temp_Ytest.append(temp_chan[::sample_every, :])
temp_lblstest.append(np.tile(testexlbls[i, :], (temp_Ytest[i].shape[0], 1)))
tot_test_length += temp_Ytest[i].shape[0]
# Load test data
Ytest = np.zeros((tot_test_length, temp_Ytest[0].shape[1]))
lblstest = np.zeros((tot_test_length, temp_lblstest[0].shape[1]))
end_ind = 0
for i in range(len(temp_Ytest)):
start_ind = end_ind
end_ind += temp_Ytest[i].shape[0]
Ytest[start_ind:end_ind, :] = temp_Ytest[i]
lblstest[start_ind:end_ind, :] = temp_lblstest[i]
else:
Ytest = None
lblstest = None
info = 'Subject: ' + subject + '. Training motions: '
for motion in train_motions:
info += motion + ', '
info = info[:-2]
if len(test_motions) > 0:
info += '. Test motions: '
for motion in test_motions:
info += motion + ', '
info = info[:-2] + '.'
else:
info += '.'
if sample_every != 1:
info += ' Data is sub-sampled to every ' + str(sample_every) + ' frames.'
return data_details_return({'Y': Y, 'lbls' : lbls, 'Ytest': Ytest, 'lblstest' : lblstest, 'info': info, 'skel': skel}, data_set)
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
files/lib/go/src/helper/lib.go | package helper
import "C"
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"reflect"
"github.com/protolambda/zrnt/eth2/beacon/attestations"
"github.com/protolambda/zrnt/eth2/beacon/deposits"
"github.com/protolambda/zrnt/eth2/beacon/exits"
"github.com/protolambda/zrnt/eth2/beacon/header"
"github.com/protolambda/zrnt/eth2/beacon/slashings/attslash"
"github.com/protolambda/zrnt/eth2/beacon/slashings/propslash"
"github.com/protolambda/zrnt/eth2/beacon/validator"
"github.com/protolambda/zrnt/eth2/core"
"github.com/protolambda/zrnt/eth2/phase0"
zrnt_ssz "github.com/protolambda/zrnt/eth2/util/ssz"
"github.com/protolambda/zssz"
"github.com/protolambda/zssz/types"
)
type inputType uint64
const (
INPUT_TYPE_INVALID inputType = iota
INPUT_TYPE_ATTESTATION
INPUT_TYPE_ATTESTER_SLASHING
INPUT_TYPE_BLOCK_HEADER
INPUT_TYPE_DEPOSIT
INPUT_TYPE_VOLUNTARY_EXIT
INPUT_TYPE_PROPOSER_SLASHING
INPUT_TYPE_BLOCK
)
var curInputType inputType = INPUT_TYPE_INVALID
// TODO I hate having to copy paste all this, but no generic functions/types
// is there 1 function I can do that will convert from these types to
// types with states?
// I think not, would have to return a more deeply embedded struct with similar members
// which might not serialize in the same way?
// or can I have them both serialize similarly?
//type InputWrapper struct {
// StateID uint16
// Other interface{}
//}
// TODO move types to separate file
// Input passed to implementations after preprocessing
type InputAttestation struct {
Pre phase0.BeaconState
Attestation attestations.Attestation
}
type InputAttesterSlashing struct {
Pre phase0.BeaconState
AttesterSlashing attslash.AttesterSlashing
}
type InputDeposit struct {
Pre phase0.BeaconState
Deposit deposits.Deposit
}
type InputVoluntaryExit struct {
Pre phase0.BeaconState
VoluntaryExit exits.VoluntaryExit
}
type InputProposerSlashing struct {
Pre phase0.BeaconState
ProposerSlashing propslash.ProposerSlashing
}
type InputBlockHeader struct {
Pre phase0.BeaconState
Block phase0.BeaconBlock
}
type InputBlock struct {
Pre phase0.BeaconState
Block phase0.BeaconBlock
}
// Types to be read from fuzzer
type InputBlockWrapper struct {
StateID uint16
Block phase0.BeaconBlock
}
// Same as for Block
type InputBlockHeaderWrapper InputBlockWrapper
type InputAttestationWrapper struct {
StateID uint16
Attestation attestations.Attestation
}
type InputAttesterSlashingWrapper struct {
StateID uint16
AttesterSlashing attslash.AttesterSlashing
}
type InputDepositWrapper struct {
StateID uint16
Deposit deposits.Deposit
}
type InputVoluntaryExitWrapper struct {
StateID uint16
VoluntaryExit exits.VoluntaryExit
}
type InputProposerSlashingWrapper struct {
StateID uint16
ProposerSlashing propslash.ProposerSlashing
}
// NOTE I think we want to avoid embedding here to ensure consistent serialization,
// so have all these functions
// TODO change to pointers to avoid copying? e.g. InputBlock struct { ... *phase0.BeaconBlock }
// I think that might screw with current serialization etc
func (w *InputBlockWrapper) unwrap() (*InputBlock, error) {
state, err := GetStateByID(w.StateID)
if err != nil {
return nil, err
}
return &InputBlock{Pre: state, Block: w.Block}, nil
}
func (w *InputBlockHeaderWrapper) unwrap() (*InputBlockHeader, error) {
state, err := GetStateByID(w.StateID)
if err != nil {
return nil, err
}
return &InputBlockHeader{Pre: state, Block: w.Block}, nil
}
func (w *InputAttestationWrapper) unwrap() (*InputAttestation, error) {
state, err := GetStateByID(w.StateID)
if err != nil {
return nil, err
}
return &InputAttestation{Pre: state, Attestation: w.Attestation}, nil
}
func (w *InputAttesterSlashingWrapper) unwrap() (*InputAttesterSlashing, error) {
state, err := GetStateByID(w.StateID)
if err != nil {
return nil, err
}
return &InputAttesterSlashing{Pre: state, AttesterSlashing: w.AttesterSlashing}, nil
}
func (w *InputDepositWrapper) unwrap() (*InputDeposit, error) {
state, err := GetStateByID(w.StateID)
if err != nil {
return nil, err
}
return &InputDeposit{Pre: state, Deposit: w.Deposit}, nil
}
func (w *InputVoluntaryExitWrapper) unwrap() (*InputVoluntaryExit, error) {
state, err := GetStateByID(w.StateID)
if err != nil {
return nil, err
}
return &InputVoluntaryExit{Pre: state, VoluntaryExit: w.VoluntaryExit}, nil
}
func (w *InputProposerSlashingWrapper) unwrap() (*InputProposerSlashing, error) {
state, err := GetStateByID(w.StateID)
if err != nil {
return nil, err
}
return &InputProposerSlashing{Pre: state, ProposerSlashing: w.ProposerSlashing}, nil
}
var PreloadedStates = make([]phase0.BeaconState, 0)
// used internally by getSSZType
var sszTypeCache = make(map[reflect.Type]types.SSZ)
func loadPrestates() {
stateCorpusPath := os.Getenv("ETH2_FUZZER_STATE_CORPUS_PATH")
if len(stateCorpusPath) == 0 {
panic("Environment variable \"ETH2_FUZZER_STATE_CORPUS_PATH\" not set or empty")
}
stateID := 0
for {
var state phase0.BeaconState
filename := path.Join(stateCorpusPath, fmt.Sprintf("%v", stateID))
data, err := ioutil.ReadFile(filename)
if err != nil {
break
}
reader := bytes.NewReader(data)
if err := zssz.Decode(reader, uint64(len(data)), &state, phase0.BeaconStateSSZ); err != nil {
panic(fmt.Sprintf("Cannot decode prestate %v: %v", filename, err))
}
PreloadedStates = append(PreloadedStates, state)
fmt.Printf("Loaded and decoded prestate %v\n", filename)
stateID++
}
if stateID == 0 {
panic("No prestates found")
}
}
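// Example corpus layout (an assumption for illustration): ETH2_FUZZER_STATE_CORPUS_PATH=/corpus
// containing SSZ-encoded BeaconState files named 0, 1, 2, ... lets wrapper inputs reference a
// prestate via StateID, which indexes directly into PreloadedStates.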
func init() {
loadPrestates()
}
func SetInputType(inputType_ inputType) {
curInputType = inputType_
}
// NOTE: as input types do not necessarily have a unique `String()` representation,
// generally not an issue
// TODO add checks to avoid corruption
// thanks to https://stackoverflow.com/a/55321744
// dest should be a pointer to a value we want the associated SSZ type for
// NOTE: will panic if argument is not a pointer type
func getSSZType(dest interface{}) types.SSZ {
t := reflect.TypeOf(dest).Elem()
r, set := sszTypeCache[t]
if set == true {
return r
}
ssztyp := zssz.GetSSZ(dest)
sszTypeCache[t] = ssztyp
return ssztyp
}
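// Usage sketch (illustrative): ssz := getSSZType(&phase0.BeaconState{}) returns the zssz
// descriptor for BeaconState; later calls for the same pointer type are served from
// sszTypeCache instead of repeating the zssz.GetSSZ reflection work.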
// NOTE we couldn't actually correct/modify any changes if passing a copy of the struct
func CheckInvariants(state *phase0.BeaconState, correct bool) error {
if correct == true {
// need to have at least as many validators as slots per epoch
// TODO initState requires this (why?)
for core.Slot(len(state.Validators)) < core.SLOTS_PER_EPOCH {
var tmp validator.Validator
state.RegistryState.Validators = append(state.RegistryState.Validators, &tmp)
}
}
/* Balances and ValidatorRegistry must be the same length */
if len(state.RegistryState.Balances) != len(state.RegistryState.Validators) {
if correct == false {
return fmt.Errorf("Balances/ValidatorRegistry length mismatch (%v and %v)", len(state.RegistryState.Balances), len(state.RegistryState.Validators))
}
for len(state.RegistryState.Balances) < len(state.RegistryState.Validators) {
state.RegistryState.Balances = append(state.RegistryState.Balances, 0)
}
for len(state.RegistryState.Validators) < len(state.RegistryState.Balances) {
var tmp validator.Validator
state.RegistryState.Validators = append(state.RegistryState.Validators, &tmp)
}
}
// TODO
// ensure committeeCount <= uint64(SHARD_COUNT)
// TODO ensure number of active validators > committeeCount for current, prev and next epoch
// NOTE: because committeeCount is calculated based on num active validators,
// we just need to ensure that some validators are active?
// based on zrnt validator.go CommitteeCount, we need to ensure number of active validators
// is greater than SLOTS_PER_EPOCH
/*
// NOTE: Not currently used
ffstate := phase0.NewFullFeaturedState(state)
ffstate.LoadPrecomputedData()
*/
/*
// TODO(gnattishness) check whether any of this is worth using
// not useful while we use trusted states as input
// relied on GetCrosslinkCommitee (not present in 0.9.x), but can't
// see any division by 0 that this would resolve
// I think unnecessary:
// get_beacon_proposer_index used to call get_crosslink_committee and `%` by its length
// resulting in div by 0, where now (0.9.1) compute_proposer_index checks the length
// Avoid division by zero in ProcessBlockHeader
{
epoch := ffstate.VersioningState.CurrentEpoch()
committeesPerSlot := ffstate.GetCommitteeCount(epoch) / uint64(core.SLOTS_PER_EPOCH)
offset := core.Shard(committeesPerSlot) * core.Shard(ffstate.Slot%core.SLOTS_PER_EPOCH)
// TODO this typechecks but may not be correct/intended operation?
shard := (ffstate.GetStartShard(epoch) + offset) % core.SHARD_COUNT
// TODO now takes in a slot and index
firstCommittee := ffstate.ShufflingStatus.GetBeaconCommitee(epoch, shard)
if len(firstCommittee) == 0 {
if correct == false {
return errors.New("Empty firstCommittee")
} else {
// TODO correct
}
}
}
*/
return nil
}
func CorrectInvariants(state *phase0.BeaconState) {
if err := CheckInvariants(state, true); err != nil {
panic(fmt.Sprintf("CorrectInvariants failed: %v", err))
}
}
func AssertInvariants(state *phase0.BeaconState) {
if err := CheckInvariants(state, false); err != nil {
panic(fmt.Sprintf("Invariant check failed: %v", err))
}
}
func decodeOfType(data []byte, dest interface{}, fuzzer bool, sszType types.SSZ) error {
reader := bytes.NewReader(data)
if fuzzer == true {
if _, err := zssz.DecodeFuzzBytes(reader, uint64(len(data)), dest, sszType); err != nil {
return errors.New("Cannot decode")
}
} else {
if err := zssz.Decode(reader, uint64(len(data)), dest, sszType); err != nil {
panic(fmt.Sprintf("Decoding that should always succeed failed: %v", err))
}
}
return nil
}
func Decode(data []byte, destPtr interface{}, fuzzer bool) error {
return decodeOfType(data, destPtr, fuzzer, getSSZType(destPtr))
}
func DecodeAttestation(data []byte, fuzzer bool) (*InputAttestation, error) {
var input InputAttestation
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeAttesterSlashing(data []byte, fuzzer bool) (*InputAttesterSlashing, error) {
var input InputAttesterSlashing
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeBlockHeader(data []byte, fuzzer bool) (*InputBlockHeader, error) {
var input InputBlockHeader
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeDeposit(data []byte, fuzzer bool) (*InputDeposit, error) {
var input InputDeposit
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeVoluntaryExit(data []byte, fuzzer bool) (*InputVoluntaryExit, error) {
var input InputVoluntaryExit
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeProposerSlashing(data []byte, fuzzer bool) (*InputProposerSlashing, error) {
var input InputProposerSlashing
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeBlock(data []byte, fuzzer bool) (*InputBlock, error) {
var input InputBlock
err := Decode(data, &input, fuzzer)
return &input, err
}
// Wrapper Decoding
func DecodeBlockWrapper(data []byte, fuzzer bool) (*InputBlockWrapper, error) {
var input InputBlockWrapper
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeBlockHeaderWrapper(data []byte, fuzzer bool) (*InputBlockHeaderWrapper, error) {
var input InputBlockHeaderWrapper
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeAttestationWrapper(data []byte, fuzzer bool) (*InputAttestationWrapper, error) {
var input InputAttestationWrapper
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeAttesterSlashingWrapper(data []byte, fuzzer bool) (*InputAttesterSlashingWrapper, error) {
var input InputAttesterSlashingWrapper
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeDepositWrapper(data []byte, fuzzer bool) (*InputDepositWrapper, error) {
var input InputDepositWrapper
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeVoluntaryExitWrapper(data []byte, fuzzer bool) (*InputVoluntaryExitWrapper, error) {
var input InputVoluntaryExitWrapper
err := Decode(data, &input, fuzzer)
return &input, err
}
func DecodeProposerSlashingWrapper(data []byte, fuzzer bool) (*InputProposerSlashingWrapper, error) {
var input InputProposerSlashingWrapper
err := Decode(data, &input, fuzzer)
return &input, err
}
func encodeOfType(src interface{}, sszType types.SSZ) []byte {
var ret bytes.Buffer
writer := bufio.NewWriter(&ret)
// TODO can handle the number of bytes written if an error occurs?
if _, err := zssz.Encode(writer, src, sszType); err != nil {
panic("Cannot encode")
}
if err := writer.Flush(); err != nil {
panic("Cannot flush encoded output")
}
return ret.Bytes()
}
func Encode(srcPtr interface{}) []byte {
return encodeOfType(srcPtr, getSSZType(srcPtr))
}
func EncodePoststate(state *phase0.BeaconState) []byte {
AssertInvariants(state)
return Encode(state)
}
// TODO should this return a pointer to the state, or are we wanting a new copy
// created?
func GetStateByID(stateID uint16) (phase0.BeaconState, error) {
var state phase0.BeaconState
if stateID >= uint16(len(PreloadedStates)) {
return state, fmt.Errorf("Invalid prestate ID: %v", stateID)
}
return PreloadedStates[stateID], nil
}
func randomlyValid(valid []byte, random []byte, chance float32) {
chanceRNG := binary.LittleEndian.Uint32(random[:4])
bit := random[4]
// make random all valid
copy(random, valid)
v := float32(float64(chanceRNG) / float64(^uint32(0)))
// now mutate random bit based on chance
if v > chance || chance == 0 {
random[bit>>3] ^= 1 << (bit & 0x7)
}
}
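// Behaviour sketch (reading of the code above; numbers are illustrative): with chance=0.9
// the fuzzer-supplied bytes are first overwritten with the valid value, and then in roughly
// 10% of cases (v > chance) a single bit selected by `bit` is flipped, yielding a nearly-valid value.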
func correctBlock(state *phase0.BeaconState, block *phase0.BeaconBlock) {
{
block.Slot = state.Slot + (block.Slot % 10)
}
{
latestHeaderCopy := state.LatestBlockHeader
latestHeaderCopy.StateRoot = zrnt_ssz.HashTreeRoot(state, phase0.BeaconStateSSZ)
prevRoot := zrnt_ssz.SigningRoot(latestHeaderCopy, header.BeaconBlockHeaderSSZ)
randomlyValid(prevRoot[:], block.ParentRoot[:], 0.9)
}
// TODO eth1data??
}
var g_return_data = make([]byte, 0)
// TODO move external/"exported" functions to their own file
//export SSZPreprocessGetReturnData
func SSZPreprocessGetReturnData(return_data []byte) {
copy(return_data, g_return_data)
}
//export SSZPreprocess
func SSZPreprocess(data []byte) int {
// returns relevant "unwrapped" type
switch curInputType {
case INPUT_TYPE_ATTESTATION:
wrapped, err := DecodeAttestationWrapper(data, true)
if err != nil {
return 0
}
input, err := wrapped.unwrap()
if err != nil {
return 0
}
CorrectInvariants(&input.Pre)
if err := CheckInvariants(&input.Pre, false); err != nil {
// TODO is this checking necessary if we have trusted state inputs?
return 0
}
g_return_data = Encode(input)
return len(g_return_data)
case INPUT_TYPE_ATTESTER_SLASHING:
wrapped, err := DecodeAttesterSlashingWrapper(data, true)
if err != nil {
return 0
}
input, err := wrapped.unwrap()
if err != nil {
return 0
}
CorrectInvariants(&input.Pre)
if err := CheckInvariants(&input.Pre, false); err != nil {
return 0
}
g_return_data = Encode(input)
return len(g_return_data)
case INPUT_TYPE_BLOCK_HEADER:
wrapped, err := DecodeBlockHeaderWrapper(data, true)
if err != nil {
return 0
}
input, err := wrapped.unwrap()
if err != nil {
return 0
}
CorrectInvariants(&input.Pre)
if err := CheckInvariants(&input.Pre, false); err != nil {
return 0
}
/* BlockHeader-specific invariants */
{
input.Block.ParentRoot = zrnt_ssz.SigningRoot(input.Pre.LatestBlockHeader, header.BeaconBlockHeaderSSZ)
}
g_return_data = Encode(input)
return len(g_return_data)
case INPUT_TYPE_DEPOSIT:
wrapped, err := DecodeDepositWrapper(data, true)
if err != nil {
return 0
}
input, err := wrapped.unwrap()
if err != nil {
return 0
}
CorrectInvariants(&input.Pre)
if err := CheckInvariants(&input.Pre, false); err != nil {
// TODO log error here? if we've corrected invariants, they should be correct
return 0
}
// TODO update state.eth1data to at least have length > 0, and index < length?
// also merkle root or not?
// TODO discuss
g_return_data = Encode(input)
return len(g_return_data)
case INPUT_TYPE_VOLUNTARY_EXIT:
wrapped, err := DecodeVoluntaryExitWrapper(data, true)
if err != nil {
return 0
}
input, err := wrapped.unwrap()
if err != nil {
return 0
}
CorrectInvariants(&input.Pre)
if err := CheckInvariants(&input.Pre, false); err != nil {
return 0
}
g_return_data = Encode(input)
return len(g_return_data)
case INPUT_TYPE_PROPOSER_SLASHING:
wrapped, err := DecodeProposerSlashingWrapper(data, true)
if err != nil {
return 0
}
input, err := wrapped.unwrap()
if err != nil {
return 0
}
CorrectInvariants(&input.Pre)
if err := CheckInvariants(&input.Pre, false); err != nil {
return 0
}
g_return_data = Encode(input)
return len(g_return_data)
case INPUT_TYPE_BLOCK:
wrapped, err := DecodeBlockWrapper(data, true)
if err != nil {
return 0
}
input, err := wrapped.unwrap()
if err != nil {
return 0
}
CorrectInvariants(&input.Pre)
if err := CheckInvariants(&input.Pre, false); err != nil {
return 0
}
// TODO update eth1data to match deposits?
correctBlock(&input.Pre, &input.Block)
g_return_data = Encode(input)
return len(g_return_data)
default:
panic("Invalid type configured")
}
}
| [
"\"ETH2_FUZZER_STATE_CORPUS_PATH\""
]
| []
| [
"ETH2_FUZZER_STATE_CORPUS_PATH"
]
| [] | ["ETH2_FUZZER_STATE_CORPUS_PATH"] | go | 1 | 0 | |
pkg/util/system/config.go | /*
Copyright 2022 The Koordinator Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package system
import (
"flag"
"os"
)
const (
DS_MODE = "dsMode"
HOST_MODE = "hostMode"
)
var Conf = NewDsModeConfig()
var AgentMode = DS_MODE
type Config struct {
CgroupRootDir string
CgroupKubePath string
SysRootDir string
SysFSRootDir string
ProcRootDir string
VarRunRootDir string
NodeNameOverride string
ContainerdEndPoint string
DockerEndPoint string
}
func NewHostModeConfig() *Config {
return &Config{
CgroupKubePath: "kubepods/",
CgroupRootDir: "/sys/fs/cgroup/",
ProcRootDir: "/proc/",
SysRootDir: "/sys/",
SysFSRootDir: "/sys/fs/",
VarRunRootDir: "/var/run/",
}
}
func NewDsModeConfig() *Config {
return &Config{
CgroupKubePath: "kubepods/",
CgroupRootDir: "/host-cgroup/",
// some dirs are not covered by the container's namespaces, or are unused when `hostPID` is on
ProcRootDir: "/proc/",
SysRootDir: "/host-sys/",
SysFSRootDir: "/host-sys-fs/",
VarRunRootDir: "/host-var-run/",
}
}
func init() {
Conf = NewDsModeConfig()
agentMode := os.Getenv("agent_mode")
if agentMode == HOST_MODE {
Conf = NewHostModeConfig()
AgentMode = agentMode
}
initFilePath()
}
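// Illustrative deployments (assumptions): the default DaemonSet mode expects the host's
// cgroup, /sys and /var/run trees to be mounted at /host-cgroup, /host-sys and /host-var-run,
// while exporting agent_mode=hostMode (e.g. when running directly on the node) switches
// every path to the native /sys/fs/cgroup, /sys and /var/run locations.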
func SetConf(config Config) {
Conf = &config
HostSystemInfo = collectVersionInfo()
initFilePath()
}
func (c *Config) InitFlags(fs *flag.FlagSet) {
fs.StringVar(&c.CgroupRootDir, "CgroupRootDir", c.CgroupRootDir, "Cgroup root dir")
fs.StringVar(&c.SysFSRootDir, "SysRootDir", c.SysFSRootDir, "host /sys dir in container")
fs.StringVar(&c.SysFSRootDir, "SysFSRootDir", c.SysFSRootDir, "host /sys/fs dir in container, used by resctrl fs")
fs.StringVar(&c.ProcRootDir, "ProcRootDir", c.ProcRootDir, "host /proc dir in container")
fs.StringVar(&c.VarRunRootDir, "VarRunRootDir", c.VarRunRootDir, "host /var/run dir in container")
fs.StringVar(&c.CgroupKubePath, "CgroupKubeDir", c.CgroupKubePath, "Cgroup kube dir")
fs.StringVar(&c.NodeNameOverride, "node-name-override", c.NodeNameOverride, "If non-empty, will use this string as identification instead of the actual machine name. ")
fs.StringVar(&c.ContainerdEndPoint, "containerdEndPoint", c.ContainerdEndPoint, "containerd endPoint")
fs.StringVar(&c.DockerEndPoint, "dockerEndPoint", c.DockerEndPoint, "docker endPoint")
HostSystemInfo = collectVersionInfo()
initFilePath()
}
| [
"\"agent_mode\""
]
| []
| [
"agent_mode"
]
| [] | ["agent_mode"] | go | 1 | 0 | |
config/nvcc.py | #
# Copyright 2012 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""SCons.Tool.nvcc
Tool-specific initialization for NVIDIA CUDA Compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
import SCons.Tool
import SCons.Scanner.C
import SCons.Defaults
import os
import platform
def get_cuda_paths():
"""Determines CUDA {bin,lib,include} paths
returns (bin_path,lib_path,inc_path)
"""
# determine defaults
if os.name == 'nt':
bin_path = 'C:/CUDA/bin'
lib_path = 'C:/CUDA/lib'
inc_path = 'C:/CUDA/include'
elif os.name == 'posix':
bin_path = '/usr/local/cuda/bin'
lib_path = '/usr/local/cuda/lib'
inc_path = '/usr/local/cuda/include'
else:
raise ValueError('Error: unknown OS. Where is nvcc installed?')
if platform.machine()[-2:] == '64':
lib_path += '64'
# override with environment variables
if 'CUDA_BIN_PATH' in os.environ:
bin_path = os.path.abspath(os.environ['CUDA_BIN_PATH'])
if 'CUDA_LIB_PATH' in os.environ:
lib_path = os.path.abspath(os.environ['CUDA_LIB_PATH'])
if 'CUDA_INC_PATH' in os.environ:
inc_path = os.path.abspath(os.environ['CUDA_INC_PATH'])
return (bin_path,lib_path,inc_path)
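# Illustrative override (hypothetical paths): exporting CUDA_BIN_PATH=/opt/cuda-5.0/bin,
# CUDA_LIB_PATH and CUDA_INC_PATH before invoking scons makes get_cuda_paths() return those
# directories instead of the platform defaults above.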
CUDASuffixes = ['.cu']
# make a CUDAScanner for finding #includes
# cuda uses the c preprocessor, so we can use the CScanner
CUDAScanner = SCons.Scanner.C.CScanner()
def add_common_nvcc_variables(env):
"""
Add underlying common "NVIDIA CUDA compiler" variables that
are used by multiple builders.
"""
# "NVCC common command line"
if not env.has_key('_NVCCCOMCOM'):
# nvcc needs '-I' prepended before each include path, regardless of platform
env['_NVCCWRAPCPPPATH'] = '${_concat("-I ", CPPPATH, "", __env__)}'
# prepend -Xcompiler before each flag
env['_NVCCWRAPCFLAGS'] = '${_concat("-Xcompiler ", CFLAGS, "", __env__)}'
env['_NVCCWRAPSHCFLAGS'] = '${_concat("-Xcompiler ", SHCFLAGS, "", __env__)}'
env['_NVCCWRAPCCFLAGS'] = '${_concat("-Xcompiler ", CCFLAGS, "", __env__)}'
env['_NVCCWRAPSHCCFLAGS'] = '${_concat("-Xcompiler ", SHCCFLAGS, "", __env__)}'
# assemble the common command line
env['_NVCCCOMCOM'] = '${_concat("-Xcompiler ", CPPFLAGS, "", __env__)} $_CPPDEFFLAGS $_NVCCWRAPCPPPATH'
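# Illustrative expansion (assuming CFLAGS=['-O2', '-fPIC']): $_NVCCWRAPCFLAGS becomes
# "-Xcompiler -O2 -Xcompiler -fPIC", so host-compiler flags are forwarded to the backend
# compiler through nvcc rather than being interpreted by nvcc itself.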
def generate(env):
"""
Add Builders and construction variables for CUDA compilers to an Environment.
"""
# create a builder that makes PTX files from .cu files
ptx_builder = SCons.Builder.Builder(action = '$NVCC -ptx $NVCCFLAGS $_NVCCWRAPCFLAGS $_NVCCWRAPCCFLAGS $_NVCCCOMCOM $SOURCES -o $TARGET',
emitter = {},
suffix = '.ptx',
src_suffix = CUDASuffixes)
env['BUILDERS']['PTXFile'] = ptx_builder
# create builders that make static & shared objects from .cu files
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CUDASuffixes:
# Add this suffix to the list of things buildable by Object
static_obj.add_action('$CUDAFILESUFFIX', '$NVCCCOM')
shared_obj.add_action('$CUDAFILESUFFIX', '$SHNVCCCOM')
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
# Add this suffix to the list of things scannable
SCons.Tool.SourceFileScanner.add_scanner(suffix, CUDAScanner)
add_common_nvcc_variables(env)
# set the "CUDA Compiler Command" environment variable
# windows is picky about getting the full filename of the executable
if os.name == 'nt':
env['NVCC'] = 'nvcc.exe'
env['SHNVCC'] = 'nvcc.exe'
else:
env['NVCC'] = 'nvcc'
env['SHNVCC'] = 'nvcc'
# set the include path, and pass both c compiler flags and c++ compiler flags
env['NVCCFLAGS'] = SCons.Util.CLVar('')
env['SHNVCCFLAGS'] = SCons.Util.CLVar('') + ' -shared'
# 'NVCC Command'
env['NVCCCOM'] = '$NVCC -o $TARGET -c $NVCCFLAGS $_NVCCWRAPCFLAGS $_NVCCWRAPCCFLAGS $_NVCCCOMCOM $SOURCES'
env['SHNVCCCOM'] = '$SHNVCC -o $TARGET -c $SHNVCCFLAGS $_NVCCWRAPSHCFLAGS $_NVCCWRAPSHCCFLAGS $_NVCCCOMCOM $SOURCES'
# the suffix of CUDA source files is '.cu'
env['CUDAFILESUFFIX'] = '.cu'
# XXX add code to generate builders for other miscellaneous
# CUDA files here, such as .gpu, etc.
# XXX intelligently detect location of nvcc and cuda libraries here
(bin_path,lib_path,inc_path) = get_cuda_paths()
env.PrependENVPath('PATH', bin_path)
def exists(env):
return env.Detect('nvcc')
| []
| []
| [
"CUDA_BIN_PATH",
"CUDA_INC_PATH",
"CUDA_LIB_PATH"
]
| [] | ["CUDA_BIN_PATH", "CUDA_INC_PATH", "CUDA_LIB_PATH"] | python | 3 | 0 | |
pkg/vsphere/integration/integration_test.go | /*
* Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package integration
import (
"context"
"io/ioutil"
"os"
"testing"
api "github.com/gardener/machine-controller-manager-provider-vsphere/pkg/vsphere/apis"
"github.com/gardener/machine-controller-manager-provider-vsphere/pkg/vsphere/errors"
"github.com/gardener/machine-controller-manager-provider-vsphere/pkg/vsphere/internal"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/yaml"
)
// TODO: Update secret field from api.Secrets to corev1.Secret in integration tests
type integrationConfig struct {
MachineName string `json:"machineName"`
ProviderSpec *api.VsphereProviderSpec `json:"providerSpec"`
Secrets *corev1.Secret `json:"secrets"`
}
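// Illustrative shape of the YAML file referenced by MCM_PROVIDER_VSPHERE_CONFIG (field names
// follow the json tags above; the concrete values are assumptions):
//   machineName: integration-test-vm
//   providerSpec: <api.VsphereProviderSpec fields>
//   secrets: <corev1.Secret carrying the vSphere credentials and user data>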
// TestPluginSPIImpl tests creating and deleting a VM via the vSphere API.
// The path to the configuration file must be specified via the environment variable MCM_PROVIDER_VSPHERE_CONFIG.
func TestPluginSPIImpl(t *testing.T) {
configPath := os.Getenv("MCM_PROVIDER_VSPHERE_CONFIG")
if configPath == "" {
t.Skipf("No path to integrationConfig specified by environmental variable MCM_PROVIDER_VSPHERE_CONFIG")
return
}
content, err := ioutil.ReadFile(configPath)
if err != nil {
t.Errorf("reading integrationConfig from %s failed with %s", configPath, err)
return
}
cfg := integrationConfig{}
err = yaml.Unmarshal([]byte(content), &cfg)
if err != nil {
t.Errorf("Unmarshalling integrationConfig failed with %s", err)
return
}
spi := &internal.PluginSPIImpl{}
ctx := context.TODO()
providerID, err := spi.GetMachineStatus(ctx, cfg.MachineName, "", cfg.ProviderSpec, cfg.Secrets)
if err == nil {
t.Errorf("Machine name %s already existing", cfg.MachineName)
return
}
switch err.(type) {
case *errors.MachineNotFoundError:
// expected
default:
t.Errorf("Unexpected error on GetMachineStatus %v", err)
return
}
providerID, err = spi.DeleteMachine(ctx, cfg.MachineName, providerID, cfg.ProviderSpec, cfg.Secrets)
switch err.(type) {
case *errors.MachineNotFoundError:
// expected
default:
t.Errorf("Unexpected error on DeleteMachine")
return
}
providerID, err = spi.CreateMachine(ctx, cfg.MachineName, cfg.ProviderSpec, cfg.Secrets)
if err != nil {
t.Errorf("CreateMachine failed with %s", err)
return
}
providerID2, err := spi.GetMachineStatus(ctx, cfg.MachineName, "", cfg.ProviderSpec, cfg.Secrets)
if err != nil {
t.Errorf("GetMachineStatus by machine name failed with %s", err)
return
}
if providerID != providerID2 {
t.Errorf("ProviderID mismatch %s != %s", providerID, providerID2)
}
providerID2, err = spi.GetMachineStatus(ctx, cfg.MachineName, providerID, cfg.ProviderSpec, cfg.Secrets)
if err != nil {
t.Errorf("GetMachineStatus by providerID failed with %s", err)
return
}
if providerID != providerID2 {
t.Errorf("ProviderID mismatch %s != %s", providerID, providerID2)
}
providerIDList, err := spi.ListMachines(ctx, cfg.ProviderSpec, cfg.Secrets)
if err != nil {
t.Errorf("ListMachines failed with %s", err)
}
found := false
for id, name := range providerIDList {
if id == providerID {
if name != cfg.MachineName {
t.Errorf("MachineName mismatch %s != %s", providerID, id)
}
found = true
}
}
if !found {
t.Errorf("Created machine with ID %s not found", providerID)
}
providerID2, err = spi.ShutDownMachine(ctx, cfg.MachineName, providerID, cfg.ProviderSpec, cfg.Secrets)
if err != nil {
t.Errorf("ShutDownMachine failed with %s", err)
}
if providerID != providerID2 {
t.Errorf("ProviderID mismatch %s != %s", providerID, providerID2)
}
providerID2, err = spi.DeleteMachine(ctx, cfg.MachineName, providerID, cfg.ProviderSpec, cfg.Secrets)
if err != nil {
t.Errorf("DeleteMachine failed with %s", err)
}
if providerID != providerID2 {
t.Errorf("ProviderID mismatch %s != %s", providerID, providerID2)
}
}
| [
"\"MCM_PROVIDER_VSPHERE_CONFIG\""
]
| []
| [
"MCM_PROVIDER_VSPHERE_CONFIG"
]
| [] | ["MCM_PROVIDER_VSPHERE_CONFIG"] | go | 1 | 0 | |
cmd/main.go | package main
import (
"fmt"
"os"
"path/filepath"
"github.com/argoproj-labs/argocd-notifications/cmd/tools"
argocert "github.com/argoproj/argo-cd/v2/util/cert"
"github.com/argoproj/notifications-engine/pkg/util/http"
"github.com/spf13/cobra"
)
func init() {
// resolve certificates using injected "argocd-tls-certs-cm" ConfigMap
http.SetCertResolver(argocert.GetCertificateForConnect)
}
func main() {
binaryName := filepath.Base(os.Args[0])
if val := os.Getenv("ARGOCD_NOTIFICATIONS_BINARY"); val != "" {
binaryName = val
}
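// Illustrative dispatch (assumed packaging): an image that installs this binary as
// "argocd-notifications-backend" gets the controller and bot subcommands below, while any
// other name, or an ARGOCD_NOTIFICATIONS_BINARY override, falls through to the CLI tools command.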
var command *cobra.Command
switch binaryName {
case "argocd-notifications-backend":
command = &cobra.Command{
Use: "argocd-notifications-backend",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(newControllerCommand())
command.AddCommand(newBotCommand())
default:
command = tools.NewToolsCommand()
}
if err := command.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
| [
"\"ARGOCD_NOTIFICATIONS_BINARY\""
]
| []
| [
"ARGOCD_NOTIFICATIONS_BINARY"
]
| [] | ["ARGOCD_NOTIFICATIONS_BINARY"] | go | 1 | 0 | |
ipdbx/__main__.py | # Copyright (c) 2011-2016 Godefroid Chapelle and ipdb development team
#
# This file is part of ipdb-extended.
# GNU package is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# GNU package is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
import os
import sys
from contextlib import contextmanager
__version__ = '1.0.5'
from IPython import get_ipython
from IPython.core.debugger import BdbQuit_excepthook
from IPython.terminal.ipapp import TerminalIPythonApp
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.terminal.debugger import Pdb
import configparser
shell = get_ipython()
if shell is None:
# Not inside IPython
# Build a terminal app in order to force ipython to load the
# configuration
ipapp = TerminalIPythonApp()
# Avoid output (banner, prints)
ipapp.interact = False
ipapp.initialize(['--no-term-title'])
shell = ipapp.shell
else:
# Running inside IPython
# Detect if embed shell or not and display a message
if isinstance(shell, InteractiveShellEmbed):
sys.stderr.write(
"\nYou are currently into an embedded ipython shell,\n"
"the configuration will not be loaded.\n\n"
)
# Let IPython decide about which debugger class to use
# This is especially important for tools that fiddle with stdout
debugger_cls = shell.debugger_cls
def _init_pdb(context=None, prebreak=None, commands=[]) -> Pdb:
if context is None:
context = os.getenv("IPDBX_CONTEXT_SIZE", get_context_from_config())
try:
p = debugger_cls(context=context)
except TypeError:
p = debugger_cls()
p: Pdb # probably TerminalPdb
# Interesting:
# p.postcmd(stop, line) # Hook method executed just after a command dispatch is finished.
# p.preloop(): Hook method executed once when the cmdloop() method is called.
# commands += [f"from rich.console import Console; con = Console(); con.print_exception(show_locals=True)"]
p.rcLines.extend(commands)
# TODO: use p.run() | p.runcall() | p.runeval().
# also checkout pdb.preloop, pdb._runscript
# support passing e.g. `function, arg0, arg1, kwarg='foo'` ?
_exec_prebreak(prebreak)
return p
def wrap_sys_excepthook():
# make sure we wrap it only once or we would end up with a cycle
# BdbQuit_excepthook.excepthook_ori == BdbQuit_excepthook
if sys.excepthook != BdbQuit_excepthook:
BdbQuit_excepthook.excepthook_ori = sys.excepthook
sys.excepthook = BdbQuit_excepthook
def wrap_sys_breakpointhook(*set_trace_args, **set_trace_kwargs):
if sys.breakpointhook.__module__ == 'sys':
if set_trace_args or set_trace_kwargs:
from functools import partial
set_trace_fn = partial(set_trace, *set_trace_args, **set_trace_kwargs)
else:
set_trace_fn = set_trace
sys.breakpointhook = set_trace_fn
print('wrapped sys.breakpointhook')
else:
print(f'sys.breakpointhook already patched: {sys.breakpointhook}')
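# Usage sketch (assumed call sites): after wrap_sys_breakpointhook(context=30), a plain
# breakpoint() anywhere in the program enters ipdbx's set_trace with context=30;
# unwrap_sys_breakpointhook() restores Python's default hook from sys.__breakpointhook__.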
def unwrap_sys_breakpointhook():
if sys.breakpointhook.__module__ == 'sys':
print(f'sys.breakpointhook already reset: {sys.breakpointhook}')
return
if sys.__breakpointhook__.__module__ != 'sys':
print('ERROR | ipdbx.unwrap_sys_breakpointhook() | "backup" sys.__breakpointhook__ is itself patched. Cannot unwrap.')
return
sys.breakpointhook = sys.__breakpointhook__
print('reset sys.breakpointhook')
def set_trace(frame=None, context=None, cond=True, prebreak=None):
if not cond:
return
wrap_sys_excepthook()
if frame is None:
frame = sys._getframe().f_back
p = _init_pdb(context, prebreak).set_trace(frame)
if p and hasattr(p, 'shell'):
p.shell.restore_sys_module_state()
def _exec_prebreak(prebreak=None):
"""Can handle a python file path, string representing a python statement, or a code object"""
# todo: support executing .ipy files
print('ipdbx _exec_prebreak(%s)' % repr(prebreak))
if prebreak is False:
# prebreak=False means explicitly not to run prebreak
return
prebreak = prebreak or os.getenv("IPDBX_PREBREAK", get_prebreak_from_config())
if prebreak is None:
return
try:
with open(prebreak, 'rb') as f:
exec(compile(f.read(), prebreak, 'exec'))
except FileNotFoundError:
try:
# either a string or a code object
exec(prebreak)
except TypeError:
print('ipdbx _exec_prebreak(): prebreak is not None but failed compilation and execution: ', repr(prebreak))
def get_prebreak_from_config():
"""`prebreak` field can be a python file path, or string representing a python statement"""
# todo: support multiple statements (list of strings?)
parser = get_config()
try:
prebreak = parser.get('ipdbx', 'prebreak')
print(f"ipdbx get_prebreak_from_config(): prebreak from {getattr(parser, 'filepath', parser)}: ", prebreak)
return prebreak
except (configparser.NoSectionError, configparser.NoOptionError) as e:
print('ipdbx get_prebreak_from_config(): NO prebreak from ', getattr(parser, 'filepath', parser))
return None
def get_context_from_config():
parser = get_config()
try:
return parser.getint("tool.ipdbx", "context")
except (configparser.NoSectionError, configparser.NoOptionError):
return 10
except ValueError:
value = parser.get("tool.ipdbx", "context")
raise ValueError(f"In {getattr(parser,'filepath',parser)}, context value [{value}] cannot be converted into an integer.")
class ConfigFile(object):
"""
Filehandle wrapper that adds a "[ipdbx]" section to the start of a config
file so that users don't actually have to manually add a [ipdbx] section.
Works with configparser versions from both Python 2 and 3
"""
def __init__(self, filepath):
self.first = True
with open(filepath) as f:
self.lines = f.readlines()
def __iter__(self):
return self
def __next__(self):
if self.first:
self.first = False
return "[ipdbx]\n"
if self.lines:
return self.lines.pop(0)
raise StopIteration
def get_config() -> configparser.ConfigParser:
"""
Get ipdbx config file settings.
All available config files are read. If settings are in multiple configs,
the last value encountered wins. Values specified on the command-line take
precedence over all config file settings.
Returns: A ConfigParser object.
"""
parser = configparser.ConfigParser()
filepaths = []
# Low priority goes first in the list
for cfg_file in ("setup.cfg", ".ipdbx", "pyproject.toml"):
cwd_filepath = os.path.join(os.getcwd(), cfg_file)
if os.path.isfile(cwd_filepath):
filepaths.append(cwd_filepath)
    # Medium priority: the default config file in the user's home directory
home = os.getenv("HOME")
if home:
default_filepath = os.path.join(home, ".ipdbx")
if os.path.isfile(default_filepath):
filepaths.append(default_filepath)
    # High priority: an explicit config path given via the IPDBX_CONFIG environment variable
env_filepath = os.getenv("IPDBX_CONFIG")
if env_filepath and os.path.isfile(env_filepath):
filepaths.append(env_filepath)
if filepaths:
for filepath in filepaths:
parser.filepath = filepath
            # Users are expected to put an [ipdbx] (or [tool.ipdbx]) section
            # themselves only when using setup.cfg or pyproject.toml
if filepath.endswith('setup.cfg') or filepath.endswith('pyproject.toml'):
with open(filepath) as f:
parser.remove_section("ipdbx")
parser.read_file(f)
else:
parser.remove_section("tool.ipdbx")
parser.read_file(ConfigFile(filepath))
return parser
def post_mortem(tb=None):
wrap_sys_excepthook()
p = _init_pdb()
p.reset()
if tb is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns None
tb = sys.exc_info()[2]
if tb:
p.interaction(None, tb)
def pm():
post_mortem(sys.last_traceback)
def run(statement, globals=None, locals=None):
_init_pdb().run(statement, globals, locals)
def runcall(*args, **kwargs):
return _init_pdb().runcall(*args, **kwargs)
def runeval(expression, globals=None, locals=None):
return _init_pdb().runeval(expression, globals, locals)
@contextmanager
def launch_ipdb_on_exception():
try:
yield
except Exception:
e, m, tb = sys.exc_info()
print(m.__repr__(), file=sys.stderr)
post_mortem(tb)
finally:
pass
_usage = """\
usage: python -m ipdbx [-m] [-c COMMAND] [-h, --help] [-V, --version] [-p, --prebreak PREBREAK] pyfile [arg] ...
Debug the Python program given by pyfile.
Initial commands are read from .pdbrc files in your home directory
and in the current directory, if they exist. Commands supplied with
-c are executed after commands from .pdbrc files.
Looks for config files in the following order (last overrides first):
- cwd: 'setup.cfg', '.ipdbx'
- $HOME: '.ipdbx'
- $IPDBX_CONFIG
Config files support the following fields:
- context (number)
- prebreak
Supported env vars:
- IPDBX_CONFIG
- IPDBX_CONTEXT_SIZE
- IPDBX_PREBREAK
To let the script run until an exception occurs, use "-c continue".
To let the script run up to a given line X in the debugged file, use
"-c 'until X'"
Option -m is available only in Python 3.7 and later.
ipdbx version %s.""" % __version__
def main():
import traceback
import sys
import getopt
import os
import logging
logger = logging.Logger("root", level=logging.DEBUG)
logger.debug(f"ipdbx | main({', '.join(sys.argv[1:])})")
try:
from pdb import Restart
except ImportError:
class Restart(Exception):
pass
if sys.version_info >= (3, 7):
opts, args = getopt.getopt(sys.argv[1:], 'mhVp:c:', ['help', 'version', 'prebreak=', 'command='])
else:
opts, args = getopt.getopt(sys.argv[1:], 'hVp:c:', ['help', 'version', 'prebreak=', 'command='])
commands = []
prebreak = None
run_as_module = False
for opt, optarg in opts:
if opt in ['-h', '--help']:
print(_usage)
sys.exit()
elif opt in ['-c', '--command']:
commands.append(optarg)
elif opt in ['-p', '--prebreak']:
prebreak = optarg
elif opt in ['-V', '--version']:
print(f"ipdbx version: {__version__}")
sys.exit()
elif opt in ['-m']:
run_as_module = True
if not args:
print(_usage)
sys.exit(2)
mainpyfile = args[0] # Get script filename
if not run_as_module and not os.path.exists(mainpyfile):
print('Error:', mainpyfile, 'does not exist')
sys.exit(1)
sys.argv = args # Hide "pdb.py" from argument list
# Replace pdb's dir with script's dir in front of module search path.
if not run_as_module:
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command
# which allows explicit specification of command line arguments.
pdb = _init_pdb(prebreak=prebreak, commands=commands)
while 1:
try:
if run_as_module:
pdb._runmodule(mainpyfile)
else:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print("The program finished and will be restarted")
except Restart:
print("Restarting", mainpyfile, "with arguments:")
print("\t" + " ".join(sys.argv[1:]))
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print("The program exited via sys.exit(). Exit status: ", end='')
print(sys.exc_info()[1])
except:
traceback.print_exc()
print("Uncaught exception. Entering post mortem debugging")
print("Running 'cont' or 'step' will restart the program")
t = sys.exc_info()[2]
pdb.interaction(None, t)
print("Post mortem debugger finished. The " + mainpyfile +
" will be restarted")
if __name__ == '__main__':
main()
| []
| []
| [
"IPDBX_CONFIG",
"IPDBX_CONTEXT_SIZE",
"HOME",
"IPDBX_PREBREAK"
]
| [] | ["IPDBX_CONFIG", "IPDBX_CONTEXT_SIZE", "HOME", "IPDBX_PREBREAK"] | python | 4 | 0 | |
integration_tests/test_suites/celery-k8s-integration-test-suite/conftest.py | # pylint: disable=unused-import
import os
import docker
import pytest
from dagster_celery_k8s.launcher import CeleryK8sRunLauncher
from dagster_k8s_test_infra.helm import TEST_AWS_CONFIGMAP_NAME
from dagster_k8s_test_infra.integration_utils import image_pull_policy
from dagster_test.test_project import build_and_tag_test_image, get_test_project_docker_image
from dagster_k8s_test_infra.cluster import ( # isort:skip
dagster_instance,
dagster_instance_for_user_deployments_subchart_disabled,
dagster_instance_for_daemon,
define_cluster_provider_fixture,
helm_postgres_url,
helm_postgres_url_for_user_deployments_subchart_disabled,
helm_postgres_url_for_daemon,
)
pytest_plugins = ["dagster_k8s_test_infra.helm"]
cluster_provider = define_cluster_provider_fixture()
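# BUILDKITE is set on Buildkite CI; when present, the fixture below assumes the
# test image already exists and skips the local docker lookup/build.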
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
@pytest.fixture(scope="session")
def dagster_docker_image():
docker_image = get_test_project_docker_image()
if not IS_BUILDKITE:
try:
client = docker.from_env()
client.images.get(docker_image)
print( # pylint: disable=print-call
"Found existing image tagged {image}, skipping image build. To rebuild, first run: "
"docker rmi {image}".format(image=docker_image)
)
except docker.errors.ImageNotFound:
build_and_tag_test_image(docker_image)
return docker_image
# See: https://stackoverflow.com/a/31526934/324449
def pytest_addoption(parser):
# We catch the ValueError to support cases where we are loading multiple test suites, e.g., in
# the VSCode test explorer. When pytest tries to add an option twice, we get, e.g.
#
# ValueError: option names {'--cluster-provider'} already added
# Use kind or some other cluster provider?
try:
parser.addoption("--cluster-provider", action="store", default="kind")
except ValueError:
pass
# Specify an existing kind cluster name to use
try:
parser.addoption("--kind-cluster", action="store")
except ValueError:
pass
# Keep resources around after tests are done
try:
parser.addoption("--no-cleanup", action="store_true", default=False)
except ValueError:
pass
# Use existing Helm chart/namespace
try:
parser.addoption("--existing-helm-namespace", action="store")
except ValueError:
pass
| []
| []
| [
"BUILDKITE"
]
| [] | ["BUILDKITE"] | python | 1 | 0 | |
Web Application Technologies and Django/Week 2/coursera/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'coursera.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
internal/db/dbutil/dbutil.go | package dbutil
import (
"context"
"database/sql"
"database/sql/driver"
"encoding/json"
"fmt"
"net/url"
"os"
"strconv"
"time"
// Register driver
_ "github.com/lib/pq"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/postgres"
bindata "github.com/golang-migrate/migrate/v4/source/go_bindata"
multierror "github.com/hashicorp/go-multierror"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/migrations"
log15 "gopkg.in/inconshreveable/log15.v2"
)
// Transaction calls f within a transaction, rolling back if any error is
// returned by the function.
func Transaction(ctx context.Context, db *sql.DB, f func(tx *sql.Tx) error) (err error) {
finish := func(tx *sql.Tx) {
if err != nil {
if err2 := tx.Rollback(); err2 != nil {
err = multierror.Append(err, err2)
}
return
}
err = tx.Commit()
}
span, ctx := opentracing.StartSpanFromContext(ctx, "Transaction")
defer func() {
if err != nil {
ext.Error.Set(span, true)
span.SetTag("err", err.Error())
}
span.Finish()
}()
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer finish(tx)
return f(tx)
}
// A DB captures the essential method of a sql.DB: QueryContext.
type DB interface {
QueryContext(ctx context.Context, q string, args ...interface{}) (*sql.Rows, error)
}
// A Tx captures the essential methods of a sql.Tx.
type Tx interface {
Rollback() error
Commit() error
}
// A TxBeginner captures BeginTx method of a sql.DB
type TxBeginner interface {
BeginTx(context.Context, *sql.TxOptions) (*sql.Tx, error)
}
// NewDB returns a new *sql.DB from the given dsn (data source name).
func NewDB(dsn, app string) (*sql.DB, error) {
cfg, err := url.Parse(dsn)
if err != nil {
return nil, errors.Wrap(err, "failed to parse dsn")
}
qry := cfg.Query()
// Force PostgreSQL session timezone to UTC.
qry.Set("timezone", "UTC")
// Force application name.
qry.Set("application_name", app)
// Set max open and idle connections
maxOpen, _ := strconv.Atoi(qry.Get("max_conns"))
if maxOpen == 0 {
maxOpen = 30
}
qry.Del("max_conns")
cfg.RawQuery = qry.Encode()
db, err := sql.Open("postgres", cfg.String())
if err != nil {
return nil, errors.Wrap(err, "failed to connect to database")
}
if err := db.Ping(); err != nil {
return nil, errors.Wrap(err, "failed to ping database")
}
db.SetMaxOpenConns(maxOpen)
db.SetMaxIdleConns(maxOpen)
db.SetConnMaxLifetime(time.Minute)
return db, nil
}
func NewMigrationSourceLoader(dataSource string) *bindata.AssetSource {
return bindata.Resource(migrations.AssetNames(), migrations.Asset)
}
func NewMigrate(db *sql.DB, dataSource string) (*migrate.Migrate, error) {
var cfg postgres.Config
driver, err := postgres.WithInstance(db, &cfg)
if err != nil {
return nil, err
}
d, err := bindata.WithInstance(NewMigrationSourceLoader(dataSource))
if err != nil {
return nil, err
}
m, err := migrate.NewWithInstance("go-bindata", d, "postgres", driver)
if err != nil {
return nil, err
}
// In case another process was faster and runs migrations, we will wait
// this long
m.LockTimeout = 5 * time.Minute
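	// Setting LOG_MIGRATE_TO_STDOUT to any non-empty value echoes migration
	// progress to stdout via the logger below.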
if os.Getenv("LOG_MIGRATE_TO_STDOUT") != "" {
m.Log = stdoutLogger{}
}
return m, nil
}
// DoMigrate runs all up migrations.
func DoMigrate(m *migrate.Migrate) (err error) {
err = m.Up()
if err == nil || err == migrate.ErrNoChange {
return nil
}
if os.IsNotExist(err) {
// This should only happen if the DB is ahead of the migrations available
version, dirty, verr := m.Version()
if verr != nil {
return verr
}
if dirty { // this shouldn't happen, but checking anyways
return err
}
log15.Warn("WARNING: Detected an old version of Sourcegraph. The database has migrated to a newer version. If you have applied a rollback, this is expected and you can ignore this warning. If not, please contact [email protected] for further assistance.", "db_version", version)
return nil
}
return err
}
type stdoutLogger struct{}
func (stdoutLogger) Printf(format string, v ...interface{}) {
fmt.Printf(format, v...)
}
func (logger stdoutLogger) Verbose() bool {
return true
}
// NullTime represents a time.Time that may be null. nullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString. When the scanned value is null, Time is set to the zero value.
type NullTime struct{ *time.Time }
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
*nt.Time, _ = value.(time.Time)
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if nt.Time == nil {
return nil, nil
}
return *nt.Time, nil
}
// NullString represents a string that may be null. NullString implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString. When the scanned value is null, String is set to the zero value.
type NullString struct{ S *string }
// Scan implements the Scanner interface.
func (nt *NullString) Scan(value interface{}) error {
switch v := value.(type) {
case []byte:
*nt.S = string(v)
case string:
*nt.S = v
}
return nil
}
// Value implements the driver Valuer interface.
func (nt NullString) Value() (driver.Value, error) {
if nt.S == nil {
return nil, nil
}
return *nt.S, nil
}
// NullInt32 represents an int32 that may be null. NullInt32 implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString. When the scanned value is null, int32 is set to the zero value.
type NullInt32 struct{ N *int32 }
// Scan implements the Scanner interface.
func (n *NullInt32) Scan(value interface{}) error {
switch value := value.(type) {
case int64:
*n.N = int32(value)
case int32:
*n.N = value
case nil:
return nil
default:
return fmt.Errorf("value is not int64: %T", value)
}
return nil
}
// Value implements the driver Valuer interface.
func (n NullInt32) Value() (driver.Value, error) {
if n.N == nil {
return nil, nil
}
return *n.N, nil
}
// NullInt64 represents an int64 that may be null. NullInt64 implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString. When the scanned value is null, int64 is set to the zero value.
type NullInt64 struct{ N *int64 }
// Scan implements the Scanner interface.
func (n *NullInt64) Scan(value interface{}) error {
switch value := value.(type) {
case int64:
*n.N = value
case int32:
*n.N = int64(value)
case nil:
return nil
default:
return fmt.Errorf("value is not int64: %T", value)
}
return nil
}
// Value implements the driver Valuer interface.
func (n NullInt64) Value() (driver.Value, error) {
if n.N == nil {
return nil, nil
}
return *n.N, nil
}
// JSONInt64Set represents an int64 set as a JSONB object where the keys are
// the ids and the values are null. It implements the sql.Scanner interface so
// it can be used as a scan destination, similar to
// sql.NullString.
type JSONInt64Set struct{ Set *[]int64 }
// Scan implements the Scanner interface.
func (n *JSONInt64Set) Scan(value interface{}) error {
set := make(map[int64]*struct{})
switch value := value.(type) {
case nil:
case []byte:
if err := json.Unmarshal(value, &set); err != nil {
return err
}
default:
return fmt.Errorf("value is not []byte: %T", value)
}
if *n.Set == nil {
*n.Set = make([]int64, 0, len(set))
} else {
*n.Set = (*n.Set)[:0]
}
for id := range set {
*n.Set = append(*n.Set, id)
}
return nil
}
// Value implements the driver Valuer interface.
func (n JSONInt64Set) Value() (driver.Value, error) {
if n.Set == nil {
return nil, nil
}
return *n.Set, nil
}
func PostgresDSN(currentUser string, getenv func(string) string) string {
// PGDATASOURCE is a sourcegraph specific variable for just setting the DSN
if dsn := getenv("PGDATASOURCE"); dsn != "" {
return dsn
}
// TODO match logic in lib/pq
// https://sourcegraph.com/github.com/lib/pq@d6156e141ac6c06345c7c73f450987a9ed4b751f/-/blob/connector.go#L42
dsn := &url.URL{
Scheme: "postgres",
Host: "127.0.0.1:5432",
}
	// Username preference: PGUSER, then the supplied currentUser (typically $USER), then "postgres"
username := "postgres"
if currentUser != "" {
username = currentUser
}
if user := getenv("PGUSER"); user != "" {
username = user
}
if password := getenv("PGPASSWORD"); password != "" {
dsn.User = url.UserPassword(username, password)
} else {
dsn.User = url.User(username)
}
if host := getenv("PGHOST"); host != "" {
dsn.Host = host
}
if port := getenv("PGPORT"); port != "" {
dsn.Host += ":" + port
}
if db := getenv("PGDATABASE"); db != "" {
dsn.Path = db
}
if sslmode := getenv("PGSSLMODE"); sslmode != "" {
qry := dsn.Query()
qry.Set("sslmode", sslmode)
dsn.RawQuery = qry.Encode()
}
return dsn.String()
}
| [
"\"LOG_MIGRATE_TO_STDOUT\""
]
| []
| [
"LOG_MIGRATE_TO_STDOUT"
]
| [] | ["LOG_MIGRATE_TO_STDOUT"] | go | 1 | 0 | |
backend/config.py | import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
""" Retain all config variables """
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
JWT_SECRET_KEY = os.environ.get("JWT_SECRET_KEY") or "you-will-never-guess-the-jwt-key"
JWT_BLACKLIST_ENABLED = True
JWT_BLACKLIST_TOKEN_CHECKS = ["access", "refresh"]
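    # MongoDB connection settings are taken from the MONGODB_HOST, MONGODB_USER
    # and MONGODB_PASSWORD environment variables (typically via the .env file
    # loaded above).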
MONGODB_SETTINGS = {
'db': 'phonebook',
'host': os.environ.get('MONGODB_HOST'),
'username': os.environ.get('MONGODB_USER'),
'password': os.environ.get('MONGODB_PASSWORD')
} | []
| []
| [
"MONGODB_PASSWORD",
"MONGODB_USER",
"MONGODB_HOST",
"SECRET_KEY",
"JWT_SECRET_KEY"
]
| [] | ["MONGODB_PASSWORD", "MONGODB_USER", "MONGODB_HOST", "SECRET_KEY", "JWT_SECRET_KEY"] | python | 5 | 0 | |
example/server.py | from flask import Flask
app = Flask(__name__)
@app.route('/')
def main():
return "Hello world!"
if __name__ == "__main__":
app.run()
| []
| []
| []
| [] | [] | python | null | null | null |
flog.go | package main
import (
"compress/gzip"
"io"
"os"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
"math/rand"
)
func Worker(option *Option, writer io.WriteCloser) {
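	// MAX_SLEEP (seconds, default 30) caps the random pause between emitted log lines.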
x := os.Getenv("MAX_SLEEP")
maxSleep := 30
if x != "" {
var err error
if maxSleep, err = strconv.Atoi(x); err != nil {
fmt.Println("MAX_SLEEP: " + x + " is not valid")
os.Exit(1)
}
}
fmt.Println("using max sleep: " + strconv.Itoa(maxSleep))
var loc *time.Location
var err error
loc, err = time.LoadLocation("America/New_York")
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
for {
created := time.Now().In(loc)
log := NewLog(option.Format, created)
_, _ = writer.Write([]byte(log + "\n"))
rand.Seed(time.Now().UnixNano())
		n := rand.Intn(maxSleep) // n will be between 0 and maxSleep-1
time.Sleep(time.Duration(n)*time.Second)
}
}
// Generate generates the logs with given options
func Generate(option *Option) error {
logFileName := option.Output
writer, err := NewWriter(option.Type, logFileName)
if err != nil {
return err
}
go Worker(option, writer)
go Worker(option, writer)
go Worker(option, writer)
go Worker(option, writer)
for {
time.Sleep(time.Hour * 1)
}
return nil
}
// NewWriter returns a closeable writer corresponding to given log type
func NewWriter(logType string, logFileName string) (io.WriteCloser, error) {
switch logType {
case "stdout":
return os.Stdout, nil
case "log":
logFile, err := os.Create(logFileName)
if err != nil {
return nil, err
}
return logFile, nil
case "gz":
logFile, err := os.Create(logFileName)
if err != nil {
return nil, err
}
return gzip.NewWriter(logFile), nil
default:
return nil, nil
}
}
// NewLog creates a log for given format
func NewLog(format string, t time.Time) string {
switch format {
case "apache_common":
return NewApacheCommonLog(t)
case "apache_combined":
return NewApacheCombinedLog(t)
case "apache_error":
return NewApacheErrorLog(t)
case "rfc3164":
return NewRFC3164Log(t)
case "rfc5424":
return NewRFC5424Log(t)
case "common_log":
return NewCommonLogFormat(t)
case "json":
return NewJSONLogFormat(t)
default:
return ""
}
}
// NewSplitFileName creates a new file path with split count
func NewSplitFileName(path string, count int) string {
logFileNameExt := filepath.Ext(path)
pathWithoutExt := strings.TrimSuffix(path, logFileNameExt)
return pathWithoutExt + strconv.Itoa(count) + logFileNameExt
}
| [
"\"MAX_SLEEP\""
]
| []
| [
"MAX_SLEEP"
]
| [] | ["MAX_SLEEP"] | go | 1 | 0 | |
pkg/terraform/exec/vendor/github.com/hashicorp/terraform/communicator/ssh/provisioner.go | package ssh
import (
"bytes"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"path/filepath"
"strings"
"time"
"github.com/hashicorp/terraform/communicator/shared"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/mapstructure"
"github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/crypto/ssh/knownhosts"
)
const (
// DefaultUser is used if there is no user given
DefaultUser = "root"
// DefaultPort is used if there is no port given
DefaultPort = 22
// DefaultScriptPath is used as the path to copy the file to
// for remote execution if not provided otherwise.
DefaultScriptPath = "/tmp/terraform_%RAND%.sh"
// DefaultTimeout is used if there is no timeout given
DefaultTimeout = 5 * time.Minute
)
// connectionInfo is decoded from the ConnInfo of the resource. These are the
// only keys we look at. If a PrivateKey is given, that is used instead
// of a password.
type connectionInfo struct {
User string
Password string
PrivateKey string `mapstructure:"private_key"`
Certificate string `mapstructure:"certificate"`
Host string
HostKey string `mapstructure:"host_key"`
Port int
Agent bool
Timeout string
ScriptPath string `mapstructure:"script_path"`
TimeoutVal time.Duration `mapstructure:"-"`
BastionUser string `mapstructure:"bastion_user"`
BastionPassword string `mapstructure:"bastion_password"`
BastionPrivateKey string `mapstructure:"bastion_private_key"`
BastionHost string `mapstructure:"bastion_host"`
BastionHostKey string `mapstructure:"bastion_host_key"`
BastionPort int `mapstructure:"bastion_port"`
AgentIdentity string `mapstructure:"agent_identity"`
}
// parseConnectionInfo is used to convert the ConnInfo of the InstanceState into
// a ConnectionInfo struct
func parseConnectionInfo(s *terraform.InstanceState) (*connectionInfo, error) {
connInfo := &connectionInfo{}
decConf := &mapstructure.DecoderConfig{
WeaklyTypedInput: true,
Result: connInfo,
}
dec, err := mapstructure.NewDecoder(decConf)
if err != nil {
return nil, err
}
if err := dec.Decode(s.Ephemeral.ConnInfo); err != nil {
return nil, err
}
// To default Agent to true, we need to check the raw string, since the
// decoded boolean can't represent "absence of config".
//
// And if SSH_AUTH_SOCK is not set, there's no agent to connect to, so we
// shouldn't try.
if s.Ephemeral.ConnInfo["agent"] == "" && os.Getenv("SSH_AUTH_SOCK") != "" {
connInfo.Agent = true
}
if connInfo.User == "" {
connInfo.User = DefaultUser
}
// Format the host if needed.
// Needed for IPv6 support.
connInfo.Host = shared.IpFormat(connInfo.Host)
if connInfo.Port == 0 {
connInfo.Port = DefaultPort
}
if connInfo.ScriptPath == "" {
connInfo.ScriptPath = DefaultScriptPath
}
if connInfo.Timeout != "" {
connInfo.TimeoutVal = safeDuration(connInfo.Timeout, DefaultTimeout)
} else {
connInfo.TimeoutVal = DefaultTimeout
}
// Default all bastion config attrs to their non-bastion counterparts
if connInfo.BastionHost != "" {
// Format the bastion host if needed.
// Needed for IPv6 support.
connInfo.BastionHost = shared.IpFormat(connInfo.BastionHost)
if connInfo.BastionUser == "" {
connInfo.BastionUser = connInfo.User
}
if connInfo.BastionPassword == "" {
connInfo.BastionPassword = connInfo.Password
}
if connInfo.BastionPrivateKey == "" {
connInfo.BastionPrivateKey = connInfo.PrivateKey
}
if connInfo.BastionPort == 0 {
connInfo.BastionPort = connInfo.Port
}
}
return connInfo, nil
}
// safeDuration returns either the parsed duration or a default value
func safeDuration(dur string, defaultDur time.Duration) time.Duration {
d, err := time.ParseDuration(dur)
if err != nil {
log.Printf("Invalid duration '%s', using default of %s", dur, defaultDur)
return defaultDur
}
return d
}
// prepareSSHConfig is used to turn the *ConnectionInfo provided into a
// usable *SSHConfig for client initialization.
func prepareSSHConfig(connInfo *connectionInfo) (*sshConfig, error) {
sshAgent, err := connectToAgent(connInfo)
if err != nil {
return nil, err
}
host := fmt.Sprintf("%s:%d", connInfo.Host, connInfo.Port)
sshConf, err := buildSSHClientConfig(sshClientConfigOpts{
user: connInfo.User,
host: host,
privateKey: connInfo.PrivateKey,
password: connInfo.Password,
hostKey: connInfo.HostKey,
certificate: connInfo.Certificate,
sshAgent: sshAgent,
})
if err != nil {
return nil, err
}
connectFunc := ConnectFunc("tcp", host)
var bastionConf *ssh.ClientConfig
if connInfo.BastionHost != "" {
bastionHost := fmt.Sprintf("%s:%d", connInfo.BastionHost, connInfo.BastionPort)
bastionConf, err = buildSSHClientConfig(sshClientConfigOpts{
user: connInfo.BastionUser,
host: bastionHost,
privateKey: connInfo.BastionPrivateKey,
password: connInfo.BastionPassword,
hostKey: connInfo.HostKey,
sshAgent: sshAgent,
})
if err != nil {
return nil, err
}
connectFunc = BastionConnectFunc("tcp", bastionHost, bastionConf, "tcp", host)
}
config := &sshConfig{
config: sshConf,
connection: connectFunc,
sshAgent: sshAgent,
}
return config, nil
}
type sshClientConfigOpts struct {
privateKey string
password string
sshAgent *sshAgent
certificate string
user string
host string
hostKey string
}
func buildSSHClientConfig(opts sshClientConfigOpts) (*ssh.ClientConfig, error) {
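	// By default the remote host key is not verified; when host_key is supplied,
	// a temporary known_hosts file is used to check it instead.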
hkCallback := ssh.InsecureIgnoreHostKey()
if opts.hostKey != "" {
// The knownhosts package only takes paths to files, but terraform
// generally wants to handle config data in-memory. Rather than making
// the known_hosts file an exception, write out the data to a temporary
// file to create the HostKeyCallback.
tf, err := ioutil.TempFile("", "tf-known_hosts")
if err != nil {
return nil, fmt.Errorf("failed to create temp known_hosts file: %s", err)
}
defer tf.Close()
defer os.RemoveAll(tf.Name())
// we mark this as a CA as well, but the host key fallback will still
// use it as a direct match if the remote host doesn't return a
// certificate.
if _, err := tf.WriteString(fmt.Sprintf("@cert-authority %s %s\n", opts.host, opts.hostKey)); err != nil {
return nil, fmt.Errorf("failed to write temp known_hosts file: %s", err)
}
tf.Sync()
hkCallback, err = knownhosts.New(tf.Name())
if err != nil {
return nil, err
}
}
conf := &ssh.ClientConfig{
HostKeyCallback: hkCallback,
User: opts.user,
}
if opts.privateKey != "" {
if opts.certificate != "" {
log.Println("using client certificate for authentication")
certSigner, err := signCertWithPrivateKey(opts.privateKey, opts.certificate)
if err != nil {
return nil, err
}
conf.Auth = append(conf.Auth, certSigner)
} else {
log.Println("using private key for authentication")
pubKeyAuth, err := readPrivateKey(opts.privateKey)
if err != nil {
return nil, err
}
conf.Auth = append(conf.Auth, pubKeyAuth)
}
}
if opts.password != "" {
conf.Auth = append(conf.Auth, ssh.Password(opts.password))
conf.Auth = append(conf.Auth, ssh.KeyboardInteractive(
PasswordKeyboardInteractive(opts.password)))
}
if opts.sshAgent != nil {
conf.Auth = append(conf.Auth, opts.sshAgent.Auth())
}
return conf, nil
}
// Create a Cert Signer and return ssh.AuthMethod
func signCertWithPrivateKey(pk string, certificate string) (ssh.AuthMethod, error) {
rawPk, err := ssh.ParseRawPrivateKey([]byte(pk))
if err != nil {
return nil, fmt.Errorf("failed to parse private key %q: %s", pk, err)
}
pcert, _, _, _, err := ssh.ParseAuthorizedKey([]byte(certificate))
if err != nil {
return nil, fmt.Errorf("failed to parse certificate %q: %s", certificate, err)
}
usigner, err := ssh.NewSignerFromKey(rawPk)
if err != nil {
return nil, fmt.Errorf("failed to create signer from raw private key %q: %s", rawPk, err)
}
ucertSigner, err := ssh.NewCertSigner(pcert.(*ssh.Certificate), usigner)
if err != nil {
return nil, fmt.Errorf("failed to create cert signer %q: %s", usigner, err)
}
return ssh.PublicKeys(ucertSigner), nil
}
func readPrivateKey(pk string) (ssh.AuthMethod, error) {
// We parse the private key on our own first so that we can
// show a nicer error if the private key has a password.
block, _ := pem.Decode([]byte(pk))
if block == nil {
return nil, errors.New("Failed to read ssh private key: no key found")
}
if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
return nil, errors.New(
"Failed to read ssh private key: password protected keys are\n" +
"not supported. Please decrypt the key prior to use.")
}
signer, err := ssh.ParsePrivateKey([]byte(pk))
if err != nil {
return nil, fmt.Errorf("Failed to parse ssh private key: %s", err)
}
return ssh.PublicKeys(signer), nil
}
func connectToAgent(connInfo *connectionInfo) (*sshAgent, error) {
if connInfo.Agent != true {
// No agent configured
return nil, nil
}
agent, conn, err := sshagent.New()
if err != nil {
return nil, err
}
// connection close is handled over in Communicator
return &sshAgent{
agent: agent,
conn: conn,
id: connInfo.AgentIdentity,
}, nil
}
// A tiny wrapper around an agent.Agent to expose the ability to close its
// associated connection on request.
type sshAgent struct {
agent agent.Agent
conn net.Conn
id string
}
func (a *sshAgent) Close() error {
if a.conn == nil {
return nil
}
return a.conn.Close()
}
// make an attempt to either read the identity file or find a corresponding
// public key file using the typical openssh naming convention.
// This returns the public key in wire format, or nil when a key is not found.
func findIDPublicKey(id string) []byte {
for _, d := range idKeyData(id) {
signer, err := ssh.ParsePrivateKey(d)
if err == nil {
log.Println("[DEBUG] parsed id private key")
pk := signer.PublicKey()
return pk.Marshal()
}
// try it as a publicKey
pk, err := ssh.ParsePublicKey(d)
if err == nil {
log.Println("[DEBUG] parsed id public key")
return pk.Marshal()
}
// finally try it as an authorized key
pk, _, _, _, err = ssh.ParseAuthorizedKey(d)
if err == nil {
log.Println("[DEBUG] parsed id authorized key")
return pk.Marshal()
}
}
return nil
}
// Try to read an id file using the id as the file path. Also read the .pub
// file if it exists, as the id file may be encrypted. Return only the file
// data read. We don't need to know what data came from which path, as we will
// try parsing each as a private key, a public key and an authorized key
// regardless.
func idKeyData(id string) [][]byte {
idPath, err := filepath.Abs(id)
if err != nil {
return nil
}
var fileData [][]byte
paths := []string{idPath}
if !strings.HasSuffix(idPath, ".pub") {
paths = append(paths, idPath+".pub")
}
for _, p := range paths {
d, err := ioutil.ReadFile(p)
if err != nil {
log.Printf("[DEBUG] error reading %q: %s", p, err)
continue
}
log.Printf("[DEBUG] found identity data at %q", p)
fileData = append(fileData, d)
}
return fileData
}
// sortSigners moves a signer with an agent comment field matching the
// agent_identity to the head of the list when attempting authentication. This
// helps when there are more keys loaded in an agent than the host will allow
// attempts.
func (s *sshAgent) sortSigners(signers []ssh.Signer) {
if s.id == "" || len(signers) < 2 {
return
}
// if we can locate the public key, either by extracting it from the id or
// locating the .pub file, then we can more easily determine an exact match
idPk := findIDPublicKey(s.id)
// if we have a signer with a connect field that matches the id, send that
// first, otherwise put close matches at the front of the list.
head := 0
for i := range signers {
pk := signers[i].PublicKey()
k, ok := pk.(*agent.Key)
if !ok {
continue
}
// check for an exact match first
if bytes.Equal(pk.Marshal(), idPk) || s.id == k.Comment {
signers[0], signers[i] = signers[i], signers[0]
break
}
// no exact match yet, move it to the front if it's close. The agent
// may have loaded as a full filepath, while the config refers to it by
// filename only.
if strings.HasSuffix(k.Comment, s.id) {
signers[head], signers[i] = signers[i], signers[head]
head++
continue
}
}
ss := []string{}
for _, signer := range signers {
pk := signer.PublicKey()
k := pk.(*agent.Key)
ss = append(ss, k.Comment)
}
}
func (s *sshAgent) Signers() ([]ssh.Signer, error) {
signers, err := s.agent.Signers()
if err != nil {
return nil, err
}
s.sortSigners(signers)
return signers, nil
}
func (a *sshAgent) Auth() ssh.AuthMethod {
return ssh.PublicKeysCallback(a.Signers)
}
func (a *sshAgent) ForwardToAgent(client *ssh.Client) error {
return agent.ForwardToAgent(client, a.agent)
}
| [
"\"SSH_AUTH_SOCK\""
]
| []
| [
"SSH_AUTH_SOCK"
]
| [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 | |
third_party/github.com/syndtr/goleveldb/leveldb/storage_test.go | // Copyright (c) 2012, Suryandaru Triandana <[email protected]>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"testing"
"github.com/borgenk/qdo/third_party/github.com/syndtr/goleveldb/leveldb/storage"
"github.com/borgenk/qdo/third_party/github.com/syndtr/goleveldb/leveldb/util"
)
const typeShift = 3
var (
tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument")
tsErrFileOpen = errors.New("leveldb.testStorage: file still open")
)
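// GOLEVELDB_USEFS selects the storage backend used by these tests:
// "" or "1" use file-system storage (removed afterwards), "2" uses
// file-system storage that is kept, anything else uses in-memory storage.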
var (
tsFSEnv = os.Getenv("GOLEVELDB_USEFS")
tsKeepFS = tsFSEnv == "2"
tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
tsMU = &sync.Mutex{}
tsNum = 0
)
type tsLock struct {
ts *testStorage
r util.Releaser
}
func (l tsLock) Release() {
l.r.Release()
l.ts.t.Log("I: storage lock released")
}
type tsReader struct {
tf tsFile
storage.Reader
}
func (tr tsReader) Read(b []byte) (n int, err error) {
ts := tr.tf.ts
ts.countRead(tr.tf.Type())
n, err = tr.Reader.Read(b)
if err != nil && err != io.EOF {
ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err)
}
return
}
func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) {
ts := tr.tf.ts
ts.countRead(tr.tf.Type())
n, err = tr.Reader.ReadAt(b, off)
if err != nil && err != io.EOF {
ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err)
}
return
}
func (tr tsReader) Close() (err error) {
err = tr.Reader.Close()
tr.tf.close("reader", err)
return
}
type tsWriter struct {
tf tsFile
storage.Writer
}
func (tw tsWriter) Write(b []byte) (n int, err error) {
ts := tw.tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
if ts.emuWriteErr&tw.tf.Type() != 0 {
return 0, errors.New("leveldb.testStorage: emulated write error")
}
n, err = tw.Writer.Write(b)
if err != nil {
ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err)
}
return
}
func (tw tsWriter) Sync() (err error) {
ts := tw.tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
for ts.emuDelaySync&tw.tf.Type() != 0 {
ts.cond.Wait()
}
if ts.emuSyncErr&tw.tf.Type() != 0 {
return errors.New("leveldb.testStorage: emulated sync error")
}
err = tw.Writer.Sync()
if err != nil {
ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err)
}
return
}
func (tw tsWriter) Close() (err error) {
err = tw.Writer.Close()
tw.tf.close("reader", err)
return
}
type tsFile struct {
ts *testStorage
storage.File
}
func (tf tsFile) x() uint64 {
return tf.Num()<<typeShift | uint64(tf.Type())
}
func (tf tsFile) checkOpen(m string) error {
ts := tf.ts
if writer, ok := ts.opens[tf.x()]; ok {
if writer {
ts.t.Errorf("E: cannot %s file, num=%d type=%v: a writer still open", m, tf.Num(), tf.Type())
} else {
ts.t.Errorf("E: cannot %s file, num=%d type=%v: a reader still open", m, tf.Num(), tf.Type())
}
return tsErrFileOpen
}
return nil
}
func (tf tsFile) close(m string, err error) {
ts := tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
if _, ok := ts.opens[tf.x()]; !ok {
ts.t.Errorf("E: %s: redudant file closing, num=%d type=%v", m, tf.Num(), tf.Type())
} else if err == nil {
ts.t.Logf("I: %s: file closed, num=%d type=%v", m, tf.Num(), tf.Type())
}
delete(ts.opens, tf.x())
if err != nil {
ts.t.Errorf("E: %s: cannot close file, num=%d type=%v: %v", m, tf.Num(), tf.Type(), err)
}
}
func (tf tsFile) Open() (r storage.Reader, err error) {
ts := tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
err = tf.checkOpen("open")
if err != nil {
return
}
r, err = tf.File.Open()
if err != nil {
ts.t.Errorf("E: cannot open file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
} else {
ts.t.Logf("I: file opened, num=%d type=%v", tf.Num(), tf.Type())
ts.opens[tf.x()] = false
r = tsReader{tf, r}
}
return
}
func (tf tsFile) Create() (w storage.Writer, err error) {
ts := tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
err = tf.checkOpen("create")
if err != nil {
return
}
w, err = tf.File.Create()
if err != nil {
ts.t.Errorf("E: cannot create file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
} else {
ts.t.Logf("I: file created, num=%d type=%v", tf.Num(), tf.Type())
ts.opens[tf.x()] = true
w = tsWriter{tf, w}
}
return
}
func (tf tsFile) Remove() (err error) {
ts := tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
err = tf.checkOpen("remove")
if err != nil {
return
}
err = tf.File.Remove()
if err != nil {
ts.t.Errorf("E: cannot remove file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
} else {
ts.t.Logf("I: file removed, num=%d type=%v", tf.Num(), tf.Type())
}
return
}
type testStorage struct {
t *testing.T
storage.Storage
closeFn func() error
mu sync.Mutex
cond sync.Cond
// Open files, true=writer, false=reader
opens map[uint64]bool
emuDelaySync storage.FileType
emuWriteErr storage.FileType
emuSyncErr storage.FileType
readCnt uint64
readCntEn storage.FileType
}
func (ts *testStorage) DelaySync(t storage.FileType) {
ts.mu.Lock()
ts.emuDelaySync |= t
ts.cond.Broadcast()
ts.mu.Unlock()
}
func (ts *testStorage) ReleaseSync(t storage.FileType) {
ts.mu.Lock()
ts.emuDelaySync &= ^t
ts.cond.Broadcast()
ts.mu.Unlock()
}
func (ts *testStorage) SetWriteErr(t storage.FileType) {
ts.mu.Lock()
ts.emuWriteErr = t
ts.mu.Unlock()
}
func (ts *testStorage) SetSyncErr(t storage.FileType) {
ts.mu.Lock()
ts.emuSyncErr = t
ts.mu.Unlock()
}
func (ts *testStorage) ReadCounter() uint64 {
ts.mu.Lock()
defer ts.mu.Unlock()
return ts.readCnt
}
func (ts *testStorage) ResetReadCounter() {
ts.mu.Lock()
ts.readCnt = 0
ts.mu.Unlock()
}
func (ts *testStorage) SetReadCounter(t storage.FileType) {
ts.mu.Lock()
ts.readCntEn = t
ts.mu.Unlock()
}
func (ts *testStorage) countRead(t storage.FileType) {
ts.mu.Lock()
if ts.readCntEn&t != 0 {
ts.readCnt++
}
ts.mu.Unlock()
}
func (ts *testStorage) Lock() (r util.Releaser, err error) {
r, err = ts.Storage.Lock()
if err != nil {
ts.t.Logf("W: storage locking failed: %v", err)
} else {
ts.t.Log("I: storage locked")
r = tsLock{ts, r}
}
return
}
func (ts *testStorage) Log(str string) {
ts.t.Log("L: " + str)
ts.Storage.Log(str)
}
func (ts *testStorage) GetFile(num uint64, t storage.FileType) storage.File {
return tsFile{ts, ts.Storage.GetFile(num, t)}
}
func (ts *testStorage) GetFiles(t storage.FileType) (ff []storage.File, err error) {
ff0, err := ts.Storage.GetFiles(t)
if err != nil {
ts.t.Errorf("E: get files failed: %v", err)
return
}
ff = make([]storage.File, len(ff0))
for i, f := range ff0 {
ff[i] = tsFile{ts, f}
}
ts.t.Logf("I: get files, type=0x%x count=%d", int(t), len(ff))
return
}
func (ts *testStorage) GetManifest() (f storage.File, err error) {
f0, err := ts.Storage.GetManifest()
if err != nil {
if !os.IsNotExist(err) {
ts.t.Errorf("E: get manifest failed: %v", err)
}
return
}
f = tsFile{ts, f0}
ts.t.Logf("I: get manifest, num=%d", f.Num())
return
}
func (ts *testStorage) SetManifest(f storage.File) error {
tf, ok := f.(tsFile)
if !ok {
ts.t.Error("E: set manifest failed: type assertion failed")
return tsErrInvalidFile
} else if tf.Type() != storage.TypeManifest {
ts.t.Errorf("E: set manifest failed: invalid file type: %s", tf.Type())
return tsErrInvalidFile
}
err := ts.Storage.SetManifest(tf.File)
if err != nil {
ts.t.Errorf("E: set manifest failed: %v", err)
} else {
ts.t.Logf("I: set manifest, num=%d", tf.Num())
}
return err
}
func (ts *testStorage) Close() error {
ts.CloseCheck()
err := ts.Storage.Close()
if err != nil {
ts.t.Errorf("E: closing storage failed: %v", err)
} else {
ts.t.Log("I: storage closed")
}
if ts.closeFn != nil {
if err := ts.closeFn(); err != nil {
ts.t.Errorf("E: close function: %v", err)
}
}
return err
}
func (ts *testStorage) CloseCheck() {
ts.mu.Lock()
if len(ts.opens) == 0 {
ts.t.Log("I: all files are closed")
} else {
ts.t.Errorf("E: %d files still open", len(ts.opens))
for x, writer := range ts.opens {
num, tt := x>>typeShift, storage.FileType(x)&storage.TypeAll
ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer)
}
}
ts.mu.Unlock()
}
func newTestStorage(t *testing.T) *testStorage {
var stor storage.Storage
var closeFn func() error
if tsFS {
for {
tsMU.Lock()
num := tsNum
tsNum++
tsMU.Unlock()
path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
if _, err := os.Stat(path); err != nil {
stor, err = storage.OpenFile(path)
if err != nil {
t.Fatalf("F: cannot create storage: %v", err)
}
t.Logf("I: storage created: %s", path)
closeFn = func() error {
for _, name := range []string{"LOG.old", "LOG"} {
f, err := os.Open(filepath.Join(path, name))
if err != nil {
continue
}
if log, err := ioutil.ReadAll(f); err != nil {
t.Logf("---------------------- %s ----------------------", name)
t.Logf("cannot read log: %v", err)
t.Logf("---------------------- %s ----------------------", name)
} else if len(log) > 0 {
t.Logf("---------------------- %s ----------------------\n%s", name, string(log))
t.Logf("---------------------- %s ----------------------", name)
}
f.Close()
}
if tsKeepFS {
return nil
}
return os.RemoveAll(path)
}
break
}
}
} else {
stor = storage.NewMemStorage()
}
ts := &testStorage{
t: t,
Storage: stor,
closeFn: closeFn,
opens: make(map[uint64]bool),
}
ts.cond.L = &ts.mu
return ts
}
| [
"\"GOLEVELDB_USEFS\""
]
| []
| [
"GOLEVELDB_USEFS"
]
| [] | ["GOLEVELDB_USEFS"] | go | 1 | 0 | |
ostap/utils/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file
# Module with some simple but useful utilities for
# - timing
# - memory
# - profiling
# - ...
# @author Vanya BELYAEV [email protected]
# @date 2013-02-10
#
# =============================================================================
"""Module with some simple but useful utilities for
- timing
- memory
- profiling
- etc
"""
# =============================================================================
__version__ = "$Revision$"
__author__ = "Vanya BELYAEV [email protected]"
__date__ = "2013-02-10"
# =============================================================================
__all__ = (
#
'virtualMemory' , ## context manager to count virtual memory increase
'memory' , ## ditto
'timing' , ## context manager to count time
'timer' , ## ditto
'profiler' , ## context manager to perform profiling
'rootException' , ## context manager to perform ROOT Error -> C++/Python exception
#
'Profiler' , ## context manager to perform profiling
'RootError2Exception', ## context manager to perform ROOT Error -> C++/Python exception
##
'takeIt' , ## take and later delete ...
'isatty' , ## is the stream ``isatty'' ?
'with_ipython' , ## do we run IPython?
##
'batch' , ## context manager to keep/force certain ROOT ``batch''-mode
##
'keepCanvas' , ## context manager to keep the current ROOT canvas
'invisibleCanvas' , ## context manager to use the invisible current ROOT canvas
##
'keepArgs' , ## context manager to keep sys.argv
##
'keepCWD' , ## context manager to keep current working directory
##
'implicitMT' , ## context manager to enable/disable implicit MT in ROOT
##
'Batch' , ## context manager to keep ROOT ``batch''-mode
##
'KeepCanvas' , ## context manager to keep the current ROOT canvas
'InvisibleCanvas' , ## context manager to use the invisible current ROOT canvas
##
'KeepArgs' , ## context manager to keep sys.argv
##
    'Wait' , ## context manager to wait some time before and/or after action
##
    'wait' , ## context manager to wait some time before and/or after action
##
'ImplicitMT' , ## context manager to enable/disable implicit MT in ROOT
##
'counted' , ## decorator to create 'counted'-function
##
'cmd_exists' , ## check the existence of the certain command/executable
##
'which' , ## which command (from shutil)
##
'gen_password' , ## generate password/secret
##
'vrange' , ## helper loop over values between xmin and xmax
##
'log_range' , ## helper loop over values between xmin and xmax in log
##
'lrange' , ## helper loop over values between xmin and xmax in log
##
    'split_range' , ## helper generator to split a large range into smaller chunks
##
'chunked' , ## break *iterable* into chunks of length *n*:
'divide' , ## divide the elements from *iterable* into *n* parts
    'grouper' , ## collect data into fixed-length chunks or blocks
##
'make_iterable' , ## create infinite or finite iterable
##
'checksum_files' , ## get SHA512 sum for sequence of files
##
'balanced' , ## Simple utility to check balanced parenthesis/brackets, etc...
##
'random_name' , ## get some random name
'short_hash_name' , ## get some short hash name
##
'choices' , ## `random.choices` function
##
'memoize' , ## Simple lightweight unbounded cache
'absproperty' , ## abstract property decorator
'classprop' , ## class property decorator
    'numcalls' , ## decorator for #ncalls
##
'hadd' , ## merge ROOT files using command `hadd`
)
# =============================================================================
import ROOT, time, os, sys, math, functools, abc, random ## attention here!!
from builtins import range
from itertools import repeat, chain, islice
# =============================================================================
from sys import version_info as python_version
## timing stuff
from ostap.utils.timing import timing, timer
## other useful stuff
from ostap.utils.basic import isatty, with_ipython
from ostap.core.ostap_types import integer_types
## ... and more useful stuff
from ostap.utils.memory import memory, virtualMemory, Memory
# =============================================================================
try :
from string import ascii_letters, digits
except ImportError :
from string import letters as ascii_letters
from string import digits
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger( 'ostap.utils.utils' )
else : logger = getLogger( __name__ )
del getLogger
# =============================================================================
## symbols for name generation
all_symbols = ascii_letters + digits
# =============================================================================
## @class Profiler
# Very simple profiler, based on cProfile module
# @see https://docs.python.org/2/library/profile.html
# @code
# with profiler() :
# ... some code here ...
# with profiler('output.file') :
# ... some code here ...
# @endcode
# @author Vanya Belyaev [email protected]
# @date 2016-07-25
class Profiler(object) :
"""Very simple profiler, based on cProfile module
- see https://docs.python.org/2/library/profile.html
with profiler() :
#
# ... some code here ...
#
with profiler( 'output.file' ) :
#
# ... some code here ...
#
"""
def __init__ ( self , fname = '' ) :
self.fname = fname
## enter the context
def __enter__ ( self ) :
import cProfile as profile
self._profile = profile.Profile()
self._profile.enable()
return self
## exit the context
def __exit__ ( self , *_ ) :
## end of profiling
self._profile.disable()
import pstats
if self.fname :
try :
with open ( self.fname , 'w' ) as out :
stat = pstats.Stats( self._profile , stream = out ).sort_stats( 'cumulative' )
stat.print_stats()
del self._profile
return
except : pass
## show on screen
stat = pstats.Stats( self._profile ).sort_stats( 'cumulative' )
stat.print_stats()
del self._profile
# =============================================================================
## Very simple profiler, based on cProfile module
# @see https://docs.python.org/2/library/profile.html
# @code
# with profiler() :
# ... some code here ...
# @endcode
# @author Vanya Belyaev [email protected]
# @date 2016-07-25
def profiler( name = '' ) :
"""Very simple profiler, based on cProfile module
- see https://docs.python.org/2/library/profile.html
with profiler() :
#
# ... some code here ...
#
"""
return Profiler ( name )
# =============================================================================
## @class NoContext
# Fake empty context manager to be used as empty placeholder
# @code
# with NoContext() :
# ... do_something()
# @endcode
# @author Vanya BELYAEV [email protected]
# date 2013-01-12
class NoContext(object) :
"""Fake (empty) context manager to be used as empty placeholder
>>> with NoContext() :
... do_something()
"""
def __init__ ( self , *args , **kwargs ) : pass
## context manager
def __enter__ ( self ) : return self
## context manager
def __exit__ ( self , *args ) : pass
# =============================================================================
## @class TakeIt
# Take some object, keep it and delete at the exit
# @author Vanya BELYAEV [email protected]
# date 2014-08-03
class TakeIt(object):
"""Take some object, keep it and delete at the exit
>>> ds = dataset.reduce('pt>1')
>>> with takeIt ( ds ) :
...
"""
def __init__ ( self , other ) :
self.other = other
def __enter__ ( self ) :
ROOT.SetOwnership ( self.other , True )
return self.other
def __exit__ ( self , *args ) :
o = self.other
## delete it!
del self.other
if o and hasattr ( o , 'reset' ) : o.reset ()
if o and hasattr ( o , 'Reset' ) : o.Reset ()
if o and hasattr ( o , 'Delete' ) : o.Delete ()
if o : del o
# =============================================================================
## Take some object, keep it and delete at the exit
# @author Vanya BELYAEV [email protected]
# date 2014-08-03
def takeIt ( other ):
"""Take some object, keep it and delete at the exit
>>> ds = dataset.reduce('pt>1')
>>> with takeIt ( ds ) :
...
"""
return TakeIt ( other )
# =============================================================================
## get all open file descriptors
# The actual code is copied from http://stackoverflow.com/a/13624412
def get_open_fds():
"""Get all open file descriptors
The actual code is copied from http://stackoverflow.com/a/13624412
"""
#
import resource
import fcntl
#
fds = []
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
for fd in range(0, soft):
try:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError:
continue
fds.append(fd)
return fds
# =============================================================================
## get the actual file name form file descriptor
# The actual code is copied from http://stackoverflow.com/a/13624412
# @warning: it is likely to be "Linux-only" function
def get_file_names_from_file_number(fds):
"""Get the actual file name from file descriptor
The actual code is copied from http://stackoverflow.com/a/13624412
"""
names = []
for fd in fds:
names.append(os.readlink('/proc/self/fd/%d' % fd))
return names
# =============================================================================
## helper context manager to activate ROOT Error -> Python exception converter
# @see Ostap::Utils::useErrorHandler
# @see Ostap::Utils::ErrorSentry
# @code
# with RootError2Exception() :
# .... do something here
# @endcode
class RootError2Exception (object) :
"""Helper context manager to activate ROOT Error -> Python exception converter
#
with RootError2Exception() :
... do something here
"""
def __init__ ( self ) :
import ROOT,cppyy
Ostap = cppyy.gbl.Ostap
self.e_handler = Ostap.Utils.useErrorHandler
self.m_previous = False
## context manager entry point
def __enter__ ( self ) :
self.m_previous = self.e_handler ( True )
return self
## context manager exit point
def __exit__ ( self , *_ ) :
if self.m_previous : self.e_handler ( False )
self.m_previous = False
def __del__ ( self ) :
if self.m_previous : self.e_handler ( False )
# =============================================================================
## helper context manager to activate ROOT Error -> Python exception converter
# @see Ostap::Utils::useErrorHandler
# @see Ostap::Utils::ErrorSentry
# @code
# with rootException () :
# .... do something here
# @endcode
def rootException () :
"""Helper context manager to activate ROOT Error -> Python exception converter
#
with rootException() :
... do something here
"""
return RootError2Exception()
# =============================================================================
## context manager to keep ROOT ``batch'' state
# @code
# with Batch() :
# ... do something here
# @endcode
class Batch(object) :
"""Context manager to keep ROOT ``batch'' state
>>> with Batch() :
... do something here
"""
def __init__ ( self , batch = True ) :
self.__batch = batch
## contex manahger: ENTER
def __enter__ ( self ) :
import ROOT
groot = ROOT.ROOT.GetROOT()
self.old_state = groot.IsBatch()
if self.old_state != self.__batch : groot.SetBatch ( self.__batch )
return self
## contex manager: EXIT
def __exit__ ( self , *_ ) :
import ROOT
groot = ROOT.ROOT.GetROOT()
if self.old_state != groot.IsBatch() : groot.SetBatch( self.old_state )
# =============================================================================
## context manager to keep ROOT ``batch'' state
# @code
# with batch() :
# ... do something here
# @endcode
def batch( batch = True ) :
"""Context manager to keep ROOT ``batch'' state
>>> with batch() :
... do something here
"""
return Batch ( batch )
# =============================================================================
## context manager to keep the current working directory
# @code
# with KeepCWD ( new_dir ) :
# ....
# @endcode
# - No action if no directory is specified
class KeepCWD(object) :
"""context manager to keep the current working directory
>>> with KeepCWD( new_dir ) :
...
- No action if no directory is specified
"""
def __init__ ( self , new_dir = '' ) :
self.__old_dir = os.getcwd ()
self.__new_dir = new_dir
## ENTER : context mamager
def __enter__ ( self ) :
self.__old_dir = os.getcwd()
if self.new_dir :
os.chdir ( self.new_dir )
return self
## EXIT : context mamager
def __exit__ ( self , *_ ) :
if os.path.exists ( self.old_dir ) and os.path.isdir ( self.old_dir ) :
os.chdir ( self.old_dir )
@property
def old_dir ( self ) :
"""``old_dir'' : old working directory"""
return self.__old_dir
@property
def new_dir ( self ) :
"""``new_dir'' : new current working directory"""
return self.__new_dir
# =============================================================================
## context manager to keep the current working directory
# @code
# with keepCWD ( new_dir ) :
# ....
# @endcode
# - No action if no directory is specified
def keepCWD ( new_dir = '' ) :
"""Context manager to keep the current working directory
>>> with keepCWD( new_dir ) :
...
- No action if no directory is specified
"""
return KeepCWD ( new_dir )
# =============================================================================
## @class KeepCanvas
# helper class to keep the current canvas
# @code
# with KeepCanvas() :
# ... do something here
# @endcode
class KeepCanvas(object) :
"""Helper class to keep the current canvas
>>> with KeepCanvas() :
... do something here
"""
def __init__ ( self ) :
self.__old_canvas = None
def __enter__ ( self ) :
import ROOT
cnv = ROOT.gPad.GetCanvas() if ROOT.gPad else None
self.__old_canvas = cnv if cnv else None
def __exit__ ( self , *_ ) :
if self.__old_canvas:
self.__old_canvas.cd()
self.__old_canvas = None
@property
def old_canvas ( self ) :
"""``old_canvas'': canvas to be preserved"""
return self.__old_canvas
# =============================================================================
# Keep the current canvas
# @code
# with keepCanvas() :
# ... do something here
# @endcode
def keepCanvas() :
"""Keep the current canvas
>>> with keepCanvas() :
... do something here
"""
return KeepCanvas()
# =============================================================================
## @class InvisibleCanvas
# Use context ``invisible canvas''
# @code
# with InvisibleCanvas() :
#  ... do something here
# @endcode
class InvisibleCanvas(KeepCanvas) :
"""Use context ``invisible canvas''
>>> with InvisibleCanvas() :
... do something here
"""
## context manager: ENTER
def __enter__ ( self ) :
## start from keeping the current canvas
KeepCanvas.__enter__ ( self )
## create new canvas in batch mode
with Batch( True ) :
import ROOT
self.batch_canvas = ROOT.TCanvas()
self.batch_canvas.cd ()
        return self.batch_canvas
## context manager: EXIT
def __exit__ ( self , *_ ) :
if self.batch_canvas :
self.batch_canvas.Close()
del self.batch_canvas
KeepCanvas.__exit__ ( self , *_ )
# =============================================================================
## Use context ``invisible canvas''
# @code
# with invisibleCanvas() :
# ... do something here
# @endcode
def invisibleCanvas() :
""" Use context ``invisible canvas''
>>> with invisibleCanvas() :
... do something here
"""
return InvisibleCanvas()
# =============================================================================
## @class KeepArgs
# context manager to keep/preserve sys.argv
# @code
# with KeepArgs() :
# ...
# @endcode
class KeepArgs(object) :
"""Context manager to keep/preserve sys.argv
>>> with KeepArgs() :
...
"""
## context manager ENTER
def __enter__ ( self ) :
import sys, copy
self._args = copy.deepcopy( sys.argv )
return self
## context manager EXIT
def __exit__ ( self , *_ ) :
import sys, copy
sys.argv = copy.deepcopy ( self._args )
del self._args
# =============================================================================
## context manager to keep/preserve sys.argv
# @code
# with keepArgs() :
# ...
# @endcode
def keepArgs() :
"""Context manager to keep/preserve sys.argv
>>> with keepArgs() :
...
"""
return KeepArgs()
# =============================================================================
## context manager that invokes <code>time.sleep</code> before and after action
# @code
# with Wait ( after = 5 , before = 0 ) :
# ...
# @endcode
class Wait(object):
"""Context manager that invokes <code>time.sleep</code> before and after action
>>> with Wait ( after = 5 , before = 0 ) :
>>> ...
"""
def __init__ ( self , after = 0 , before = 0 ) :
self.__after = after
self.__before = before
def __enter__ ( self ) :
if 0 < self.__before :
time.sleep ( self.__before )
def __exit__ ( self , *_ ) :
if 0 < self.__after :
time.sleep ( self.__after )
@property
def before ( self ) :
"""``before'': wait some time before the action"""
return self.__before
@property
def after ( self ) :
"""``after'': wait some time after the action"""
return self.__after
# =============================================================================
## context manager that invokes <code>time.sleep</code> before and after action
# @code
# with wait ( after = 5 , before = 0 ) :
# ...
# @endcode
def wait ( after = 0 , before = 0 ) :
"""Context manager that invokes <code>time.sleep</code> before and after action
>>> with wait ( after = 5 , before = 0 ) :
>>> ...
"""
return Wait (after = after , before = before )
# =============================================================================
## EnableImplicitMT
# Context manager to enable/disable implicit MT in ROOT
# @see ROOT::EnableImplicitMT
# @see ROOT::DisableImplicitMT
# @see ROOT::IsImplicitMTEnabled
# @code
# with ImplicitMT( True ) :
# ...
# @endcode
class ImplicitMT(object) :
"""Context manager to enable/disable implicit MT in ROOT
>>> with ImplicitMT( True ) :
...
- see ROOT::EnableImplicitMT
- see ROOT::DisableImplicitMT
- see ROOT::IsImplicitMTEnabled
"""
def __init__ ( self , enable = True ) :
if isinstance ( enable , bool ) :
self.__enable = enable
self.__nthreads = 0
elif isinstance ( enable , int ) and 0 <= enable :
self.__enable = bool ( enable )
self.__nthreads = enable
else :
raise TypeError ( "ImplicitMT: invalid ``enable'' flag :%s/%s" % ( enable , type ( enable ) ) )
@property
def enable ( self ) : return self.__enable
@property
def nthreads ( self ) : return self.__nthreads
## Context manager: ENTER
def __enter__ ( self ) :
        import ROOT
        self.__initial = ROOT.ROOT.IsImplicitMTEnabled ()
if bool ( self.__initial ) == bool ( self.enable ) : pass
elif self.enable : ROOT.ROOT.EnableImplicitMT ( self.__nthreads )
else : ROOT.ROOT.DisableImplicitMT ()
return self
## Context manager: EXIT
def __exit__ ( self , *_ ) :
        import ROOT
        _current = ROOT.ROOT.IsImplicitMTEnabled()
if _current == self.__initial : pass
elif _current : ROOT.ROOT.DisableImplicitMT ()
else : ROOT.ROOT.EnableImplicitMT ()
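## Illustrative usage (a sketch, assuming ROOT is importable): a boolean argument
#  toggles implicit MT with ROOT's default thread pool, while a positive integer
#  also requests that many threads
#  @code
#  with ImplicitMT ( 4 )     : ...   ## ROOT.ROOT.EnableImplicitMT ( 4 )
#  with ImplicitMT ( False ) : ...   ## ROOT.ROOT.DisableImplicitMT ()
#  @endcode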
# =============================================================================
## create 'counted' function to know number of function calls
# @code
# fun = ...
# func = counted ( fun ) ## use as function
#
# # alternatively use it as decorator:
# @counted
# def fun2 ( ... ) : return ...
# @endcode
def counted ( f ):
"""create 'counted' function to know number of function calls
Example
-------
>>> fun = ...
>>> func = counted ( fun ) ## use as function
>>> @counted
>>> def fun2 ( ... ) : return ...
"""
def wrapped ( *args, **kwargs ):
wrapped.calls += 1
return f( *args , **kwargs )
wrapped.calls = 0
return wrapped
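## Illustrative check of the call counter (a minimal sketch; `g` is a hypothetical function):
#  @code
#  @counted
#  def g ( x ) : return x + 1
#  g ( 1 ) ; g ( 2 ) ; g ( 3 )
#  g.calls   ## 3
#  @endcode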
# =============================================================================
## Context manager to enable/disable implicit MT in ROOT
# @see ROOT::EnableImplicitMT
# @see ROOT::DisableImplicitMT
# @see ROOT::IsImplicitMTEnabled
# @code
# with implicitMT( True ) :
# ...
# @endcode
def implicitMT ( enable = True ) :
"""Context manager to enable/disable implicit MT in ROOT
>>> with implicitMT( True ) :
...
- see ROOT::EnableImplicitMT
- see ROOT::DisableImplicitMT
- see ROOT::IsImplicitMTEnabled
"""
return ImplicitMT ( enable )
# =============================================================================
## Return the path to an executable which would be run if the given <code>cmd</code> was called.
# If no <code>cmd</code> would be called, return <code>None</code>.
# - <code>mode</code> is a permission mask passed to <code>os.access()</code>,
#    by default determining if the file exists and is executable.
#  - When no <code>path</code> is specified, <code>os.environ.get("PATH")</code> is used,
#    falling back to <code>os.defpath</code> if it is not set.
#  - copied from the <code>shutil</code> module
def local_which ( cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
# =============================================================================
try :
from shutil import which
except ImportError :
which = local_which
# =============================================================================
## check if a command/executable exists
# @code
# >>> if cmd_exists ( 'epstopdf' ) : ...
# @endcode
# @see https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def cmd_exists ( command ) :
"""Check the existence of certain command/executable
>>> if cmd_exists ( 'epstopdf' ) : ...
"""
return which ( command ) is not None
# =============================================================================
## @class VRange
# Helper looper over the values between vmin and vmax :
# @code
# for v in VRange ( vmin = 0 , vmax = 5 , n = 100 ) :
# ... print ( v )
# @endcode
class VRange(object) :
"""Helper looper over the values between vmin and vmax :
>>> for v in VRange ( vmin = 0 , vmax = 5 , n = 100 ) :
>>> ... print ( v )
"""
def __init__ ( self , vmin , vmax , n = 100 ) :
assert isinstance ( n , integer_types ) and 0 < n,\
'VRange: invalid N=%s/%s' % ( n , type ( n ) )
self.__vmin = vmin
self.__vmax = vmax
self.__n = n
@property
def vmin ( self ) :
"""``vmin'' : minimal value"""
return self.__vmin
@property
def vmax ( self ) :
"""``vmax'' : maximal value"""
return self.__vmax
@property
def n ( self ) :
"""``n'' : number of steps"""
return self.__n
def __len__ ( self ) : return self.__n + 1
def __iter__ ( self ) :
n = self.n
fn = 1.0 / float ( n )
for i in range ( n + 1 ) :
#
if 0 == i : yield self.vmin
elif n == i : yield self.vmax
else :
f2 = i * fn
f1 = 1 - f2
yield self.vmin * f1 + f2 * self.vmax
# =============================================================================
## loop over values between xmin and xmax
# @code
# for x in vrange ( xmin , xmax , 200 ) :
# print (x)
# @endcode
def vrange ( vmin , vmax , n = 100 ) :
""" Loop over range of values between xmin and xmax
>>> for v in vrange ( vmin , vmax , 200 ) :
... print (v)
"""
return VRange ( vmin , vmax , n )
# =============================================================================
## @class LRange
# Helper looper over the values between vmin and vmax using log-steps
# @code
# for v in LRange ( vmin = 1 , vmax = 5 , n = 100 ) :
# ... print ( v )
# @endcode
class LRange(VRange) :
"""Helper looper over the values between vmin and vmax using log-steps
>>> for v in LRange ( vmin = 1 , vmax = 5 , n = 100 ) :
>>> ... print ( v )
"""
def __init__ ( self , vmin , vmax , n = 100 ) :
assert 0 < vmin and 0 < vmax,\
               'LRange: invalid non-positive vmin/vmax values: %s/%s' % ( vmin , vmax )
super ( LRange , self ).__init__ ( vmin , vmax , n )
self.__lmin = math.log10 ( self.vmin )
self.__lmax = math.log10 ( self.vmax )
@property
def lmin ( self ) :
"""``lmin'' : log10(minimal value)"""
return self.__lmin
@property
def lmax ( self ) :
"""``lmax'' : log10(maximal value)"""
return self.__lmax
def __iter__ ( self ) :
n = self.n
fn = 1.0 / float ( n )
for i in range ( n + 1 ) :
#
if 0 == i : yield self.vmin
elif n == i : yield self.vmax
else :
f2 = i * fn
f1 = 1 - f2
yield 10.0 ** ( self.__lmin * f1 + f2 * self.__lmax )
# =============================================================================
## loop over values between xmin and xmax in log-scale
# @code
# for x in log_range ( xmin , xmax , 200 ) :
# print (x)
# @endcode
def log_range ( vmin , vmax , n = 100 ) :
"""Loop over values between xmin and xmax in log-scale
>>> for x in log_range ( xmin , xmax , 200 ) :
>>> print (x)
"""
return LRange ( vmin , vmax , n )
# =============================================================================
## loop over values between xmin and xmax in log-scale
# @code
# for v in lrange ( vmin , vmax , 200 ) : ## ditto
# print (v)
# @endcode
def lrange ( vmin , vmax , n = 100 ) :
""":oop over values between vmin and vmax in log-scale
>>> for v in lrange ( vmin , vmax , 200 ) : ## ditto
>>> print (v)
"""
return LRange ( vmin , vmax , n )
# =============================================================================
## split range into smaller chunks:
# @code
# for i in split_range ( 0 , 10000 , 200 ) :
# for j in range (*i) :
# ...
# @endcode
def split_range ( low , high , num ) :
"""Split range into smaller chunks:
>>> for i in split_range ( 0 , 10000 , 200 ) :
>>> for j in range (*i) :
>>> ...
"""
if high <= low or num < 1 :
yield low , low
else :
next = low + num
while next < high :
yield low , next
low = next
next += num
yield low , high
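## Worked example for split_range (illustrative values):
#  @code
#  list ( split_range ( 0 , 10 , 4 ) )    ## [ (0, 4) , (4, 8) , (8, 10) ]
#  list ( split_range ( 5 ,  5 , 4 ) )    ## [ (5, 5) ]  - empty/invalid range
#  @endcode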
# =============================================================================
if (3,6) <= sys.version_info :
choices = random.choices
else :
def choices ( population , weights = None , cum_weights = None , k = 1 ) :
""" Simple variant of `random.choice`
"""
assert weights is None and cum_weights is None,\
"choices: Neither ``weigths'' nor ``cum_weights'' are supported!"
return [ random.choice ( population ) for i in range ( k ) ]
# ========================================================================================
## Generate a random name of the given size
# @code
# name = random_name ( 5 )
# @endcode
def random_name ( size ) :
"""Generate some random name of given name
>>> name = random_name ( 5 )
"""
assert 1 <= size , 'random_name: invalid size!'
first = random.choice ( ascii_letters )
if 1 == size : return first
return first + ''.join ( choices ( sll_symbols , k = size - 1 ) )
# ========================================================================================
## generate a short pseudo-random name (4-8 symbols) from the provided hash sources
def short_hash_name ( size , name , *names ) :
"""generate some pseudo-random 6-symbol name from provided hash sources
"""
size = max ( min ( size , 8 ) , 4 )
h = size , hash ( tuple ( ord ( i ) for i in name ) )
h = hash ( h )
for n in names :
h = h , hash ( tuple ( ord ( i ) for i in n ) )
h = hash ( h )
h = abs ( h ) % ( 2 ** ( 4 * size ) )
return ( '%%0%dx' % size ) % h
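## Illustrative use of short_hash_name (the argument values here are assumptions):
#  @code
#  tag = short_hash_name ( 6 , 'histogram' , 'fit' )   ## a deterministic 6-character hex string
#  @endcode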
# =============================================================================
## Generate a random string that can be used as a password or secret word
# @code
# password = gen_password ()
# @endcode
def gen_password ( size = 12 ) :
"""Generate the random string, that can be used as password or secret word
>>> password = gen_password ()
"""
import random
## save random state
state = random.getstate ()
## reset the random seed
random.seed ()
## generate the password
result = ''.join ( choices ( all_symbols , k = size ) )
## restore the random state
random.setstate ( state )
##
return result
# =============================================================================
try :
from more_itertools import chunked, divide
except ImportError :
from itertools import islice
from functools import partial
# =========================================================================
## Return first *n* items of the iterable as a list
# @code
# take(3, range(10)) ## [0, 1, 2]
# take(5, range(3)) ## [0, 1, 2]
# @endcode
#
# The function is copied from <code>more_itertools</code>
def take(n, iterable):
"""Return first *n* items of the iterable as a list.
>>> take(3, range(10))
[0, 1, 2]
>>> take(5, range(3))
[0, 1, 2]
Effectively a short replacement for ``next`` based iterator consumption
when you want more than one item, but less than the whole iterator.
- the function is copied from `more_itertools`
"""
return list(islice(iterable, n))
# =========================================================================
## Break *iterable* into lists of length *n*:
# @code
# list(chunked([1, 2, 3, 4, 5, 6], 3)) ## [[1, 2, 3], [4, 5, 6]]
# @endcode
# If the length of *iterable* is not evenly divisible by *n*, the last
# returned list will be shorter:
# @code
# list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3)) ## [[1, 2, 3], [4, 5, 6], [7, 8]]
# @endcode
# <code>chunked</code> is useful for splitting up a computation on a large number
# of keys into batches, to be pickled and sent off to worker processes. One
# example is operations on rows in MySQL, which does not implement
# server-side cursors properly and would otherwise load the entire dataset
# into RAM on the client.
#
# The function is copied from <code>more_itertools</code>
def chunked(iterable, n):
"""Break *iterable* into lists of length *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6], 3))
[[1, 2, 3], [4, 5, 6]]
If the length of *iterable* is not evenly divisible by *n*, the last
returned list will be shorter:
>>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
[[1, 2, 3], [4, 5, 6], [7, 8]]
To use a fill-in value instead, see the :func:`grouper` recipe.
:func:`chunked` is useful for splitting up a computation on a large number
of keys into batches, to be pickled and sent off to worker processes. One
example is operations on rows in MySQL, which does not implement
server-side cursors properly and would otherwise load the entire dataset
into RAM on the client.
- the function is copied from `more_itertools`
"""
return iter(partial(take, n, iter(iterable)), [])
# =========================================================================
## Divide the elements from *iterable* into *n* parts, maintaining order.
# @code
# >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
# >>> list(group_1)
# ... [1, 2, 3]
# >>> list(group_2)
# ... [4, 5, 6]
# @endcode
# If the length of *iterable* is not evenly divisible by *n*, then the
# length of the returned iterables will not be identical:
# @code
# >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
# >>> [list(c) for c in children]
# ... [[1, 2, 3], [4, 5], [6, 7]]
# @endcode
#
# If the length of the iterable is smaller than n, then the last returned
# iterables will be empty:
# @code
# >>> children = divide(5, [1, 2, 3])
# >>> [list(c) for c in children]
# ... [[1], [2], [3], [], []]
# @endcode
#
# This function will exhaust the iterable before returning and may require
# significant storage. If order is not important, see :func:`distribute`,
# which does not first pull the iterable into memory.
#
# The function is copied from <code>more_itertools</code>
def divide ( n , iterable):
"""Divide the elements from *iterable* into *n* parts, maintaining
order.
>>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 2, 3]
>>> list(group_2)
[4, 5, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 2, 3], [4, 5], [6, 7]]
If the length of the iterable is smaller than n, then the last returned
iterables will be empty:
>>> children = divide(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function will exhaust the iterable before returning and may require
significant storage. If order is not important, see :func:`distribute`,
which does not first pull the iterable into memory.
- the function is copied from `more_itertools`
"""
if n < 1:
raise ValueError('n must be at least 1')
seq = tuple(iterable)
q, r = divmod(len(seq), n)
ret = []
for i in range(n):
start = (i * q) + (i if i < r else r)
stop = ((i + 1) * q) + (i + 1 if i + 1 < r else r)
ret.append(iter(seq[start:stop]))
return ret
# =============================================================================
if ( 3 , 0 ) <= python_version :
from itertools import zip_longest
else :
from itertools import izip_longest as zip_longest
# =============================================================================
## Collect data into fixed-length chunks or blocks"
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
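## Illustrative example for grouper with a fill value:
#  @code
#  list ( grouper ( 'ABCDEFG' , 3 , 'x' ) )   ## [('A','B','C'), ('D','E','F'), ('G','x','x')]
#  @endcode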
# =============================================================================
## Create iterable from other iterable or non-iterable
# @code
# for a,b in zip ( 'abcde' , make_iterable ( 1 ) ) : ...
# for a,b in zip ( 'abcde' , make_iterable ( [1,2,3] ) ) : ...
# for a,b in zip ( 'abcde' , make_iterable ( [1,2,3] , 0 , 2 ) ) : ...
# @endcode
def make_iterable ( what , default = None , size = -1 ) :
"""Create infinite iterable from other iterable or no-iterable
>>> for a,b in zip ( 'abcde' , make_iterable ( 1 ) ) : ...
>>> for a,b in zip ( 'abcde' , make_iterable ( [1,2,3] ) ) : ...
>>> for a,b in zip ( 'abcde' , make_iterable ( [1,2,3] , 0 , 2 ) ) : ...
"""
from ostap.core.ostap_types import iterable_types
if not isinstance ( what , iterable_types ) : what = what,
## make infinite iterable
result = chain ( what , repeat ( default ) )
## cut it, if needed
return result if size < 0 else islice ( result , size )
# =============================================================================
## calculate SHA512-checksum for the files
# @see hashlib
# @see hashlib.sha512
# @code
# s = checksum_files ( 'a.txt', 'b.bin' )
# @endcode
# Non-existing files are ignored
# @param files list of filenames
# @return checksum for these files
def checksum_files ( *files ) :
"""Calculate SHA512-checksum for the files
>>> s = checksum_files ( 'a.txt', 'b.bin' )
Non-existing files are ignored
- see `hashlib`
- see `hashlib.sha512`
"""
import hashlib
hash_obj = hashlib.sha512 ()
for fname in files :
if os.path.exists ( fname ) and os.path.isfile ( fname ) :
with open ( fname , "rb" ) as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_obj.update(chunk)
return hash_obj.hexdigest()
# =============================================================================
## Simple utility to check balanced parenthesis/brackets, etc...
# @code
# expression = ' .... '
# ok = balanced ( expression )
# @endcode
def balanced ( expression , left = '([' , right = ')]' ) :
"""Simple utility to check balanced parenthesis/brackets, etc...
>>> expression = ' .... '
>>> ok = balanced ( expression )
"""
assert left and len(left) == len ( right ) ,\
'balanced: invalid left/right arguments!'
stack = []
for i in expression :
if i in left : stack.append ( i )
elif i in right :
pos = right.index ( i )
if stack and left[ pos ] == stack [ -1 ] :
stack.pop()
else :
return False
return True if not stack else False
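## Illustrative examples for balanced (using the default '([' / ')]' bracket sets):
#  @code
#  balanced ( 'f( a[1] )'  )   ## True
#  balanced ( 'f( a[1)] )' )   ## False : brackets are interleaved
#  balanced ( 'f( a[1]'    )   ## False : '(' is never closed
#  @endcode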
# =============================================================================
# ============================================================================
if ( 3 , 9) <= sys.version_info : memoize = functools.cache
elif ( 3 , 2) <= sys.version_info :
# =========================================================================
## Simple lightweight unbounded cache
def memoize ( user_function ):
"""Simple lightweight unbounded cache"""
return functools.lru_cache(maxsize=None)(user_function)
else :
# =========================================================================
## Simple lightweight unbounded cache
class memoize(object):
"""Simple lightweight unbounded cache
"""
def __init__(self, func):
self.func = func
self.cache = {}
functools.update_wrapper ( self , func )
def __call__(self, *args, **kwargs ):
all_args = tuple ( args ) , tuple ( kwargs.iteritems() )
if all_args in self.cache:
return self.cache [ all_args ]
value = self.func( *args , **kwargs )
self.cache [ all_args] = value
return value
def __repr__(self):
return self.func.__doc__
def __get__(self, obj, objtype):
return functools.partial(self.__call__, obj)
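## Illustrative usage of memoize (a minimal sketch; `fib` is a hypothetical function):
#  @code
#  @memoize
#  def fib ( n ) : return n if n < 2 else fib ( n - 1 ) + fib ( n - 2 )
#  fib ( 30 )   ## recursive calls hit the cache
#  @endcode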
# ============================================================================
## abstract property
# @code
# @absproperty
# def A ( self ) : ...
# @endcode
if (3,3) <= sys.version_info :
    import abc
# =========================================================================
    ## abstract property decorator
# @code
# @absproperty
# def A ( self ) : ...
# @endcode
def absproperty ( func ) :
"""Abstract property
@absproperty
def A ( self ) : ...
"""
return property ( abc.abstractmethod ( func ) )
else :
import abc
# =========================================================================
    ## abstract property
# @code
# @absproperty
# def A ( self ) : ...
# @endcode
absproperty = abc.abstractproperty
# =============================================================================
if (3,9) <= sys.version_info :
# =========================================================================
## class property decorator
# @code
# @classprop
# def A ( cls ) : ...
# @endcode
def classprop ( func ) :
"""Class property
@classprop
def A ( cls ) : ...
"""
return classmethod ( property ( func ) )
elif (3,0) <= sys.version_info :
# =========================================================================
## class @classproperty
# class property decorator (copied and simplified from astropy)
# @code
# @classprop
# def A ( cls ) : ...
# @endcode
class classprop(property):
"""Class property
@classprop
def A ( cls ) : ...
"""
def __new__(cls, fget=None, doc=None):
if fget is None:
# Being used as a decorator--return a wrapper that implements
# decorator syntax
def wrapper(func):
return cls(func)
return wrapper
return super(classprop,cls).__new__(cls)
def __init__(self, fget, doc=None, ):
fget = self._wrap_fget(fget)
super(classprop,self).__init__(fget=fget, doc=doc)
# There is a buglet in Python where self.__doc__ doesn't
# get set properly on instances of property subclasses if
# the doc argument was used rather than taking the docstring
# from fget
# Related Python issue: https://bugs.python.org/issue24766
if doc is not None:
self.__doc__ = doc
def __get__(self, obj, objtype):
# The base property.__get__ will just return self here;
# instead we pass objtype through to the original wrapped
# function (which takes the class as its sole argument)
val = self.fget.__wrapped__(objtype)
return val
def getter(self, fget):
return super(classprop,self).getter(self._wrap_fget(fget))
def setter(self, fset):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties")
def deleter(self, fdel):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties")
@staticmethod
def _wrap_fget(orig_fget):
if isinstance(orig_fget, classmethod):
orig_fget = orig_fget.__func__
# Using stock functools.wraps instead of the fancier version
# found later in this module, which is overkill for this purpose
@functools.wraps(orig_fget)
def fget(obj):
return orig_fget(obj.__class__)
return fget
else :
class classprop(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, inst, cls):
return self.fget(cls)
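## Illustrative usage of classprop (a sketch; `A` is a hypothetical class):
#  @code
#  class A ( object ) :
#      @classprop
#      def name ( cls ) : return cls.__name__
#  A.name   ## 'A' , accessed on the class itself
#  @endcode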
# =============================================================================
## @class NumCalls
# Count a number of times a callable object is invoked
class NumCalls (object):
"""Count a number of times a callable object is invoked"""
def __init__ ( self , func ) :
self.__func = func
self.__count = 0
functools.update_wrapper ( self, func )
def __call__ ( self, *args , **kwargs ) :
self.__count +=1
return self.__func ( *args , **kwargs )
@property
def count ( self ) :
"""``count'': number of times the function was invoked"""
return self.__count
# ==============================================================================
# Count a number of times a callable object is invoked
numcalls = NumCalls
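## Illustrative usage of NumCalls/numcalls as a decorator (`fcn` is a hypothetical function):
#  @code
#  @numcalls
#  def fcn ( x ) : return x * x
#  fcn ( 1 ) ; fcn ( 2 )
#  fcn.count   ## 2
#  @endcode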
# =============================================================================
## Copy file with the progress
# @code
# copy_with_progress ( 'inputfilename.ext' , 'outputfilename.ext' )
# @endcode
def copy_with_progress ( source , destination ) :
"""Copy file with progress
>>> copy_with_progress ( 'inputfilename.ext' , 'outputfilename.ext' )
"""
assert os.path.exists ( source ) and os.path.isfile ( source ), \
"copy_with_progress: ``source'' %s does nto exist!" % source
total = os.stat ( source ) . st_size
BLOCK = 512 * 1024
destination = os.path.abspath ( destination )
destination = os.path.normpath ( destination )
destination = os.path.realpath ( destination )
if os.path.exists ( destination ) and os.path.isdir ( destination ) :
destination = os.path.join ( destination , os.path.basename ( source ) )
from ostap.utils.progress_bar import ProgressBar
read = 0
with ProgressBar ( total , silent = total < 3 * BLOCK ) as pbar :
with open ( source , 'rb' ) as fin :
with open ( destination , 'wb' ) as fout :
while True :
block = fin.read ( BLOCK )
fout.write ( block )
read += len ( block )
pbar.update_amount ( read )
if not block : break ## BREAK
assert os.path.exists ( destination ) and \
os.path.isfile ( destination ) and \
os.stat ( destination ).st_size == total, \
"Invalid ``destination'' %s " % destination
return os.path.realpath ( destination )
# =========================================================================
## merge all files using <code>hadd</code> script from ROOT
# @param output name of the output merged file, if None,
# the temporary name will be generated,
# that will be deleted at the end of the session
# @param opts options for command <code>hadd</code>
# @return the name of the merged file
# OPTIONS:
# -a Append to the output
# -k Skip corrupt or non-existent files, do not exit
# -T Do not merge Trees
# -O Re-optimize basket size when merging TTree
# -v Explicitly set the verbosity level: 0 request no output, 99 is the default
# -j Parallelize the execution in multiple processes
# -dbg Parallelize the execution in multiple processes in debug mode (Does not delete partial files stored inside working directory)
# -d Carry out the partial multiprocess execution in the specified directory
# -n Open at most 'maxopenedfiles' at once (use 0 to request to use the system maximum)
# -cachesize Resize the prefetching cache use to speed up I/O operations(use 0 to disable)
# -experimental-io-features Used with an argument provided, enables the corresponding experimental feature for output trees
# -f Gives the ability to specify the compression level of the target file(by default 4)
# -fk Sets the target file to contain the baskets with the same compression
# as the input files (unless -O is specified). Compresses the meta data
# using the compression level specified in the first input or the
# compression setting after fk (for example 206 when using -fk206)
# -ff The compression level use is the one specified in the first input
# -f0 Do not compress the target file
# -f6 Use compression level 6. (See TFile::SetCompressionSettings for the support range of value.)
def hadd ( files , output = None , opts = "-ff" ) :
    """Merge all files using <code>hadd</code> script from ROOT
- `output` name of the output merged file
- `opts` options for command <code>hadd</code>
It returns the name of the merged file
If no output file name is specified, the temporary name
    will be generated and the temporary file will be deleted
at the end of the session
OPTIONS:
# -a Append to the output
# -k Skip corrupt or non-existent files, do not exit
# -T Do not merge Trees
# -O Re-optimize basket size when merging TTree
# -v Explicitly set the verbosity level: 0 request no output, 99 is the default
# -j Parallelize the execution in multiple processes
# -dbg Parallelize the execution in multiple processes in debug mode (Does not delete partial files stored inside working directory)
# -d Carry out the partial multiprocess execution in the specified directory
# -n Open at most 'maxopenedfiles' at once (use 0 to request to use the system maximum)
# -cachesize Resize the prefetching cache use to speed up I/O operations(use 0 to disable)
# -experimental-io-features Used with an argument provided, enables the corresponding experimental feature for output trees
# -f Gives the ability to specify the compression level of the target file(by default 4)
# -fk Sets the target file to contain the baskets with the same compression
# as the input files (unless -O is specified). Compresses the meta data
# using the compression level specified in the first input or the
# compression setting after fk (for example 206 when using -fk206)
# -ff The compression level use is the one specified in the first input
# -f0 Do not compress the target file
# -f6 Use compression level 6. (See TFile::SetCompressionSettings for the support range of value.)
"""
if not output :
import ostap.utils.cleanup as CU
output = CU.CleanUp.tempfile ( prefix = 'ostap-hadd-' , suffix = '.root' )
import subprocess
## patterns ?
if isinstance ( files , str ) :
import glob
files = [ f for f in glob.iglob ( files ) ]
args = [ 'hadd' ] + opts.split() + [ output ] + [ f for f in files ]
subprocess.check_call ( args )
if os.path.exists ( output ) and os.path.isfile ( output ) :
return output
raise IOError ( "The output file %s does not exist!" % output )
# =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
logger.info ( 80*'*' )
# =============================================================================
## The END
# =============================================================================
| [] | [] | ["PATH", "PATHEXT"] | [] | ["PATH", "PATHEXT"] | python | 2 | 0 | |
tests/hdf5_bed.py | #!/usr/bin/env python
from optparse import OptionParser
import os
import h5py
import pysam
import basenji.dna_io
################################################################################
# hdf5_bed.py
#
# Checking that the BED regions output by basenji_hdf5.py match the one hot
# coded sequences in the HDF5.
################################################################################
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <hdf5_file> <bed_file>'
parser = OptionParser(usage)
parser.add_option('-f', dest='fasta_file', default='%s/assembly/hg19.fa'%os.environ['HG19'], help='FASTA file [Default: %default]')
parser.add_option('-n', dest='check_num', default=100, type='int', help='Number of sequences to check [Default: %default]')
(options,args) = parser.parse_args()
if len(args) != 2:
parser.error('Must provide HDF5 and BED files')
else:
hdf5_file = args[0]
bed_file = args[1]
fasta = pysam.Fastafile(options.fasta_file)
  hdf5_in = h5py.File(hdf5_file, 'r')
si = 0
for line in open(bed_file):
a = line.split()
if a[-1] == 'train':
chrom = a[0]
start = int(a[1])
end = int(a[2])
bed_seq = fasta.fetch(chrom, start, end).upper()
hdf5_seq = basenji.dna_io.hot1_dna(hdf5_in['train_in'][si:si+1])[0]
print(bed_seq[:10], len(bed_seq))
assert(bed_seq == hdf5_seq)
si += 1
if si > options.check_num:
break
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| [] | [] | ["HG19"] | [] | ["HG19"] | python | 1 | 0 | |
src/python/tests/core/crash_analysis/stack_parsing/stack_analyzer_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the stack analyzer module."""
import os
import unittest
from crash_analysis import crash_analyzer
from crash_analysis.stack_parsing import stack_analyzer
from system import environment
from tests.test_libs import helpers
DATA_DIRECTORY = os.path.join(os.path.dirname(__file__), 'stack_analyzer_data')
TEST_JOB_NAME = 'test'
class StackAnalyzerTestcase(unittest.TestCase):
"""Stack analyzer tests."""
# pylint: disable=unused-argument
@staticmethod
def _mock_symbolize_stacktrace(stacktrace, enable_inline_frames=True):
"""No-op mocked version of symbolize_stacktrace."""
return stacktrace
# pylint: enable=unused-argument
def setUp(self):
"""Set environment variables used by stack analyzer tests."""
helpers.patch_environ(self)
helpers.patch(self, [
'crash_analysis.stack_parsing.stack_symbolizer.symbolize_stacktrace',
'metrics.logs.log_error',
'platforms.android.kernel_utils.get_kernel_prefix_and_full_hash'
])
os.environ['JOB_NAME'] = TEST_JOB_NAME
self.mock.symbolize_stacktrace.side_effect = self._mock_symbolize_stacktrace
self.mock.get_kernel_prefix_and_full_hash.return_value = None, None
def _read_test_data(self, name):
"""Helper function to read test data."""
with open(os.path.join(DATA_DIRECTORY, name)) as handle:
return handle.read()
def _validate_get_crash_data(self, data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag):
"""Test all outputs from a call to get_crash_data."""
actual_state = stack_analyzer.get_crash_data(data)
actual_security_flag = crash_analyzer.is_security_issue(
data, actual_state.crash_type, actual_state.crash_address)
self.assertEqual(actual_state.crash_type, expected_type)
self.assertEqual(actual_state.crash_address, expected_address)
self.assertEqual(actual_state.crash_state, expected_state)
self.assertEqual(actual_state.crash_stacktrace, expected_stacktrace)
self.assertEqual(actual_security_flag, expected_security_flag)
def test_symbolized_asan_null_dereference(self):
"""Test for a Null-dereference derived from a simple symbolized ASan
report."""
data = self._read_test_data('symbolized_asan_null_dereference.txt')
expected_type = 'Null-dereference'
expected_address = '0x000000000018'
expected_state = ('blink::FontMetrics::ascent\n'
'blink::RenderListMarker::updateMargins\n'
'blink::RenderListItem::updateMarkerLocation\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_symbolized_asan_unknown(self):
"""Test for a simple symbolized ASan report."""
data = self._read_test_data('symbolized_asan_unknown.txt')
expected_type = 'UNKNOWN'
expected_address = '0x000000010018'
expected_state = ('blink::FontMetrics::ascent\n'
'blink::RenderListMarker::updateMargins\n'
'blink::RenderListItem::updateMarkerLocation\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_address_in_state(self):
"""Test for an unsymbolized ASan report."""
data = self._read_test_data('address_in_state.txt')
expected_state = 'GetHandleVerifier\n' * 3
actual_state = stack_analyzer.get_crash_data(data)
self.assertEqual(actual_state.crash_state, expected_state)
def test_variable_length_write(self):
"""Test that large writes are replaced with {*}."""
data = self._read_test_data('variable_length_write.txt')
expected_type = 'Stack-use-after-return\nWRITE {*}'
actual_state = stack_analyzer.get_crash_data(data)
self.assertEqual(actual_state.crash_type, expected_type)
def test_android_asan_null_dereference_read(self):
"""Test for a Null-dereference READ derived from ASan UNKNOWN READ."""
data = self._read_test_data('android_asan_null_dereference_read.txt')
expected_type = 'Null-dereference READ'
expected_address = '0x00000011'
expected_state = ('_JavaVM::AttachCurrentThread\n'
'javaAttachThread\n'
'android::AndroidRuntime::javaThreadShell\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_android_asan_null_dereference_write(self):
"""Test for a Null-dereference WRITE derived from ASan UNKNOWN WRITE."""
data = self._read_test_data('android_asan_null_dereference_write.txt')
expected_type = 'Null-dereference WRITE'
expected_address = '0x00000011'
expected_state = ('_JavaVM::AttachCurrentThread\n'
'javaAttachThread\n'
'android::AndroidRuntime::javaThreadShell\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_android_asan_uaf(self):
"""Basic test for Android ASAN format."""
data = self._read_test_data('android_asan_uaf.txt')
expected_type = 'Heap-use-after-free\nREAD 2'
expected_address = '0xac80d400'
expected_state = ('android::AString::setTo\n'
'android::AString::AString\n'
'android::MediaHTTP::connect\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_android_asan_unknown_read(self):
"""Test for an ASan UNKNOWN READ report."""
data = self._read_test_data('android_asan_unknown_read.txt')
expected_type = 'UNKNOWN READ'
expected_address = '0x74000011'
expected_state = ('_JavaVM::AttachCurrentThread\n'
'javaAttachThread\n'
'android::AndroidRuntime::javaThreadShell\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_android_asan_unknown_write(self):
"""Test for an ASan UNKNOWN WRITE report."""
data = self._read_test_data('android_asan_unknown_write.txt')
expected_type = 'UNKNOWN WRITE'
expected_address = '0x74000011'
expected_state = ('_JavaVM::AttachCurrentThread\n'
'javaAttachThread\n'
'android::AndroidRuntime::javaThreadShell\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_android_kernel(self):
"""Basic test for Android kernel format."""
data = self._read_test_data('android_kernel.txt')
expected_type = 'Kernel failure\nREAD Translation Fault, Section (5)'
expected_address = '0x12345678'
expected_state = ('top_frame\nnext_frame\nlast_frame\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_android_kernel_no_parens(self):
"""Basic test for Android kernel format with a slightly different stacktrace
format (no parentheses)."""
data = self._read_test_data('android_kernel_no_parens.txt')
expected_type = 'Kernel failure\nREAD Translation Fault, Section (5)'
expected_address = '0x12345678'
expected_state = ('top_frame\nnext_frame\nlast_frame\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_android_null_stack(self):
"""Test for a null state."""
data = self._read_test_data('android_null_stack.txt')
expected_type = 'UNKNOWN'
expected_address = '0xb6e43000'
expected_state = 'Surfaceflinger\n'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_unknown_module(self):
"""Test state the format for crashes where we only have an address."""
data = self._read_test_data('unknown_module.txt')
expected_state = 'NULL'
actual_state = stack_analyzer.get_crash_data(data)
self.assertEqual(actual_state.crash_state, expected_state)
def test_ubsan_bad_cast_downcast(self):
"""Test the ubsan bad cast downcast format."""
data = self._read_test_data('ubsan_bad_cast_downcast.txt')
expected_type = 'Bad-cast'
expected_address = '0x2aa9a6abc480'
expected_state = ('Bad-cast to blink::AXMenuList from blink::AXList\n'
'blink::RenderMenuList::didUpdateActiveOption\n'
'blink::RenderMenuList::setTextFromOption\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_bad_cast_member_call(self):
"""Test the ubsan bad cast member call format."""
data = self._read_test_data('ubsan_bad_cast_member_call.txt')
expected_type = 'Bad-cast'
expected_address = '0x15577a7fc900'
expected_state = ('Bad-cast to net::QuicSpdySession from net::QuicSession\n'
'net::QuicSpdyStream::~QuicSpdyStream\n'
'net::QuicChromiumClientStream::~QuicChromiumClientStream'
'\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_divide_by_zero(self):
"""Test the ubsan division by zero format."""
data = self._read_test_data('ubsan_divide_by_zero.txt')
expected_type = 'Divide-by-zero'
expected_state = ('mpeg_decode_postinit\n'
'decode_chunks\n'
'mpeg_decode_frame\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_incorrect_function_pointer_type(self):
"""Test the ubsan incorrect function pointer type format."""
data = self._read_test_data('ubsan_incorrect_function_pointer_type.txt')
expected_type = 'Incorrect-function-pointer-type'
expected_address = ''
expected_state = ('gl::GetGLProcAddress\n'
'gl::DriverGL::InitializeStaticBindings\n'
'gl::InitializeStaticGLBindingsGL\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_index_oob(self):
"""Test the ubsan index out-of-bounds format."""
data = self._read_test_data('ubsan_index_oob.txt')
expected_type = 'Index-out-of-bounds'
expected_address = ''
expected_state = ('CPDF_StreamParser::ParseNextElement\n'
'CPDF_StreamContentParser::Parse\n'
'CPDF_ContentParser::Continue\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_integer_overflow_addition(self):
"""Test the ubsan integer overflow due to addition format."""
data = self._read_test_data('ubsan_integer_overflow_addition.txt')
expected_type = 'Integer-overflow'
expected_address = ''
expected_state = ('gfx::Point::operator+=\n'
'gfx::Rect::Inset\n'
'cc::PictureLayerTiling::ComputeTilePriorityRects\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_integer_overflow_negation(self):
"""Test the ubsan integer overflow due to negation format."""
data = self._read_test_data('ubsan_integer_overflow_negation.txt')
expected_type = 'Integer-overflow'
expected_address = ''
expected_state = ('blink::CSSSelectorParser::consumeANPlusB\n'
'blink::CSSSelectorParser::consumePseudo\n'
'blink::CSSSelectorParser::consumeSimpleSelector\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_invalid_bool_value(self):
"""Test the ubsan bool format."""
data = self._read_test_data('ubsan_invalid_bool_value.txt')
expected_type = 'Invalid-bool-value'
expected_state = ('tsm_screen_tab_left\nparse_data\ntsm_vte_input\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_misaligned_address(self):
"""Test the ubsan alignment format."""
data = self._read_test_data('ubsan_misaligned_address.txt')
expected_type = 'Misaligned-address'
expected_state = ('pnm_decode_frame\n'
'decode_simple_internal\n'
'decode_simple_receive_frame\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_non_positive_vla_bound_value(self):
"""Test the ubsan non-positive variable length array bound format."""
data = self._read_test_data('ubsan_non_positive_vla_bound_value.txt')
expected_type = 'Non-positive-vla-bound-value'
expected_address = ''
expected_state = ('boom_internal\nanother_boom\nboom\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_null_pointer_member_access(self):
"""Test the ubsan null format for member access within null pointer."""
data = self._read_test_data('ubsan_null_pointer_member_access.txt')
expected_type = 'Null-dereference'
expected_state = ('xmlFAParseCharClassEsc\n'
'xmlFAParseAtom\n'
'xmlFAParsePiece\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_null_pointer_member_call(self):
"""Test the ubsan null format for member call on null pointer."""
data = self._read_test_data('ubsan_null_pointer_member_call.txt')
expected_type = 'Null-dereference'
expected_state = (
'base::trace_event::internal::HeapDumpWriter::AddEntryForBucket\n'
'base::trace_event::internal::HeapDumpWriter::Summarize\n'
'base::trace_event::ExportHeapDump\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_null_pointer_read(self):
"""Test the ubsan null format for load of null pointer."""
data = self._read_test_data('ubsan_null_pointer_read.txt')
expected_type = 'Null-dereference READ'
expected_state = ('SHPReadOGRObject\n'
'SHPReadOGRFeature\n'
'OGRShapeLayer::GetNextFeature\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_null_pointer_reference_binding(self):
"""Test the ubsan null format for reference binding to null pointer."""
data = self._read_test_data('ubsan_null_pointer_reference_binding.txt')
expected_type = 'Null-dereference'
expected_state = ('woff2::ConvertWOFF2ToTTF\n'
'convert_woff2ttf_fuzzer.cc\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_null_pointer_write(self):
"""Test the ubsan null format for store to null pointer."""
data = self._read_test_data('ubsan_null_pointer_write.txt')
expected_type = 'Null-dereference WRITE'
expected_state = ('SHPReadOGRObject\n'
'SHPReadOGRFeature\n'
'OGRShapeLayer::GetNextFeature\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_object_size(self):
"""Test the ubsan object-size format."""
data = self._read_test_data('ubsan_object_size.txt')
expected_type = 'Object-size'
expected_address = ''
expected_state = ('boom_internal\nanother_boom\nboom\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_pointer_overflow(self):
"""Test the ubsan pointer overflow format."""
data = self._read_test_data('ubsan_pointer_overflow.txt')
expected_type = 'Pointer-overflow'
expected_address = ''
expected_state = ('SkRasterPipelineBlitter::blitMask\n'
'blitClippedMask\n'
'draw_nine_clipped\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_pointer_overflow_null_zero_offset(self):
"""Test the null pointer with zero offset case for pointer overflow."""
data = self._read_test_data('ubsan_pointer_overflow_null_zero_offset.txt')
expected_type = 'Pointer-overflow'
expected_address = ''
expected_state = ('cff_subfont_load\ncff_font_load\ncff_face_init\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_pointer_overflow_null_nonzero_offset(self):
"""Test the null pointer with nonzero offset case for pointer overflow."""
data = self._read_test_data(
'ubsan_pointer_overflow_null_nonzero_offset.txt')
expected_type = 'Pointer-overflow'
expected_address = ''
expected_state = ('courgette::DisassemblerWin32::ParseRelocs\n'
'courgette::DisassemblerWin32::ExtractAbs32Locations\n'
'courgette::Disassembler::CreateProgram\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_java_exception(self):
"""Tests for Java exceptions found by Jazzer."""
data = self._read_test_data('java_IllegalStateException.txt')
expected_type = 'Uncaught exception'
expected_address = ''
expected_state = ('ExampleValueProfileFuzzer.mustNeverBeCalled\n'
'ExampleValueProfileFuzzer.fuzzerTestOneInput\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_java_manual_security_exception(self):
"""Tests for Java exceptions manually marked as security issues."""
data = self._read_test_data('java_severity_medium_exception.txt')
expected_type = 'Uncaught exception'
expected_address = ''
expected_state = ('com.example.JsonSanitizerFuzzer.fuzzerTestOneInput\n'
'com.google.gson.Gson.fromJson\n'
'com.google.gson.Gson.fromJson\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_java_fatal_exception(self):
"""Test for the java fatal exception format."""
data = self._read_test_data('java_fatal_exception.txt')
expected_type = 'Fatal Exception'
expected_address = ''
expected_state = ('java.util.ArrayList$ArrayListIterator.next\n'
'com.android.systemui.statusbar.policy.'
'SecurityControllerImpl.fireCallbacks\n'
'com.android.systemui.statusbar.policy.'
'SecurityControllerImpl.-wrap0\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_msan_uninitialized_value(self):
"""Test the MSan uninitialized value format."""
data = self._read_test_data('msan_uninitialized_value.txt')
expected_type = 'Use-of-uninitialized-value'
expected_address = ''
expected_state = (
'content::BrowserMessageFilter::Send\n'
'ChromeNetBenchmarkingMessageFilter::OnMessageReceived\n'
'content::BrowserMessageFilter::Internal::OnMessageReceived\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_tsan_data_race(self):
"""Test the TSan data race format."""
data = self._read_test_data('tsan_data_race.txt')
expected_type = 'Data race\nWRITE 4'
expected_address = '0x7f15d580f30c'
expected_state = ('sqlite3StatusSet\n'
'pcache1Alloc\n'
'pcache1AllocPage\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_assert(self):
"""Test the Blink assertion failure format."""
environment.set_value('ASSERTS_HAVE_SECURITY_IMPLICATION', False)
data = self._read_test_data('assert.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = ('start.compareTo(end) <= 0\n'
'void blink::normalizePositionsAlgorithm'
'<blink::PositionAlgorithm<blink::EditingS\n'
'blink::VisibleSelection::normalizePositions\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_stack_filtering(self):
"""Test ignore lists and stack frame filtering."""
data = self._read_test_data('stack_filtering.txt')
expected_state = ('base::OnTotallyStillHaveMemory\n'
'content::ChildDiscardableSharedMemoryManager::'
'AllocateLockedDiscardableSharedMemory\n'
'content::ChildDiscardableSharedMemoryManager::'
'AllocateLockedDiscardableMemory\n')
actual_state = stack_analyzer.get_crash_data(data, symbolize_flag=False)
self.assertEqual(actual_state.crash_state, expected_state)
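    # Note: unlike most tests in this file, the assertion above calls
    # stack_analyzer.get_crash_data() directly instead of going through
    # self._validate_get_crash_data(), since only the parsed crash_state
    # matters here. Illustrative sketch (not an original assertion) of the
    # same direct usage:
    #   result = stack_analyzer.get_crash_data(data, symbolize_flag=False)
    #   self.assertEqual(expected_state, result.crash_state)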
def test_ignore_abort_frames(self):
"""Test that abort frames are ignored."""
data = self._read_test_data('ignore_abort_frames.txt')
expected_type = 'Abrt'
expected_address = '0x000000000001'
expected_state = ('nlohmann::basic_json<std::__1::map, std::__1::vector, '
'std::__1::basic_string<cha\n'
'nlohmann::basic_json<std::__1::map, std::__1::vector, '
'std::__1::basic_string<cha\n'
'nlohmann::basic_json<std::__1::map, std::__1::vector, '
'std::__1::basic_string<cha\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ignore_honggfuzz(self):
"""Test that frames from honggfuzz wrappers are ignored."""
data = self._read_test_data('ignore_honggfuzz.txt')
expected_type = 'Segv on unknown address'
expected_address = ''
expected_state = ('function1\nfunction2\nfunction3\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ignore_libc_if_symbolized(self):
"""Test that we ignore certain shared libraries if symbolized."""
data = self._read_test_data('ignore_libc_if_symbolized.txt')
expected_state = (
'blink::LayoutRubyBase::adjustInlineDirectionLineBounds\n'
'blink::LayoutBlockFlow::updateLogicalWidthForAlignment\n'
'blink::LayoutBlockFlow::computeInlineDirectionPositionsForSegment\n')
actual_state = stack_analyzer.get_crash_data(data)
self.assertEqual(actual_state.crash_state, expected_state)
def test_ignore_libcplusplus_abi(self):
"""Test that we ignore libc++ frames."""
data = self._read_test_data('ignore_libcplusplus.txt')
expected_type = 'Abrt'
expected_address = '0x7fff94dd7f06'
expected_state = (
'sfntly::BitmapSizeTable::Builder::Initialize\n'
'sfntly::BitmapSizeTable::Builder::GetIndexSubTableBuilders\n'
'InitializeBitmapBuilder\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ignore_llvm(self):
"""Test that llvm frames are ignored."""
data = self._read_test_data('ignore_llvm.txt')
expected_type = 'Heap-use-after-free\nREAD 8'
expected_address = '0x6120000746b0'
expected_state = ('cc::SurfaceManager::UnregisterBeginFrameSource\n'
'cc::Display::~Display\n'
'~Display\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ignore_sanitizer(self):
"""Test that sanitizer frames are ignored."""
data = self._read_test_data('ignore_sanitizer.txt')
expected_type = 'Null-dereference READ'
expected_address = '0x000000000010'
expected_state = ('GetHandleVerifier\n'
'GetHandleVerifier\n'
'GetHandleVerifier\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ignore_vdso(self):
"""Test that vdso frames are ignored."""
data = self._read_test_data('ignore_vdso.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('static_cast<unsigned>(text_offset + text_length) '
'<= text.length() in SimplifiedB\n'
'blink::SimplifiedBackwardsTextIteratorAlgorithm'
'<blink::EditingAlgorithm<blink::N\n'
'blink::SimplifiedBackwardsTextIteratorAlgorithm'
'<blink::EditingAlgorithm<blink::N\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_ignore_win_frames(self):
    """Test that Windows-specific frames are ignored."""
data = self._read_test_data('ignore_win_frames.txt')
expected_type = 'Stack-buffer-overflow\nREAD 1'
expected_address = '0x00201b12d49f'
expected_state = ('v8::internal::GenerateSourceString\n'
'regexp-builtins.cc\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_keep_libc_if_unsymbolized(self):
"""Test that certain libraries are kept for unsymbolized stacks."""
data = self._read_test_data('keep_libc_if_unsymbolized.txt')
expected_state = ('/system/lib/libc.so+0x0003a1b0\n'
'/system/lib/libc.so+0x000173c1\n'
'/system/lib/libc.so+0x00017fd3\n')
actual_state = stack_analyzer.get_crash_data(data, symbolize_flag=False)
self.assertEqual(actual_state.crash_state, expected_state)
def test_v8_check(self):
"""Test the v8 fatal error format."""
# This logic is fairly similar to that of RUNTIME_ASSERT detection. Ensure
# that we do not falsely detect CHECKs as RUNTIME_ASSERTs.
os.environ['DETECT_V8_RUNTIME_ERRORS'] = 'True'
data = self._read_test_data('v8_check.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = '!IsImpossible(mark_bit) in mark-compact.h\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_check_eq(self):
"""Test the v8 fatal error format on a failed CHECK_EQ."""
data = self._read_test_data('v8_check_eq.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = 'a == b in verifier.cc\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_check_windows(self):
"""Test the v8 fatal error format on Windows."""
data = self._read_test_data('v8_check_windows.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('!field_type->NowStable() in objects-debug.cc\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_check_security(self):
"""Test the v8 CHECK failure with security implications."""
os.environ['CHECKS_HAVE_SECURITY_IMPLICATION'] = 'True'
data = self._read_test_data('v8_check_symbolized.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = (
'old_target->kind() == new_target->kind() in objects-debug.cc\n'
'v8::internal::Code::VerifyRecompiledCode\n'
'ReplaceCode\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_dcheck(self):
"""Test the v8 DCHECK failure."""
data = self._read_test_data('v8_dcheck_symbolized.txt')
expected_type = 'DCHECK failure'
expected_address = ''
expected_state = (
'old_target->kind() == new_target->kind() in objects-debug.cc\n'
'v8::internal::Code::VerifyRecompiledCode\n'
'ReplaceCode\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_fatal_error_no_check(self):
"""Test the v8 fatal error format for non-CHECK failures."""
data = self._read_test_data('v8_fatal_error_no_check.txt')
expected_type = 'Fatal error'
expected_address = ''
expected_state = 'v8::HandleScope::CreateHandle\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_fatal_error_partial(self):
"""Test a v8 fatal error with only part of the output printed."""
data = self._read_test_data('v8_fatal_error_partial.txt')
expected_type = 'Fatal error'
expected_address = ''
expected_state = 'objects-inl.h\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_abort_with_source(self):
"""Test the v8 abort error format with source file and line information."""
data = self._read_test_data('v8_abort_with_source.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = (
'CSA_ASSERT failed: IsFastElementsKind(LoadElementsKind(array))\n'
'code-stub-assembler.cc\n')
expected_stacktrace = data
expected_security_flag = False
environment.set_value('ASSERTS_HAVE_SECURITY_IMPLICATION', False)
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_v8_abort_without_source(self):
    """Test the v8 abort error format without source file and line
    information."""
data = self._read_test_data('v8_abort_without_source.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = (
'CSA_ASSERT failed: IsFastElementsKind(LoadElementsKind(array))\n')
expected_stacktrace = data
expected_security_flag = False
environment.set_value('ASSERTS_HAVE_SECURITY_IMPLICATION', False)
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_oom(self):
"""Test a v8 out of memory condition."""
data = self._read_test_data('v8_oom.txt')
expected_type = ''
expected_address = ''
expected_state = ''
expected_stacktrace = ''
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_representation_changer_error(self):
"""Tests a v8 RepresentationChangerError."""
data = self._read_test_data('v8_representation_changer_error.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('Int64Constant of kRepWord64 (Internal) cannot be '
'changed to kRepTagged in repres\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_unreachable_code(self):
"""Test the v8 unreachable code format."""
data = self._read_test_data('v8_unreachable_code.txt')
expected_type = 'Unreachable code'
expected_address = ''
expected_state = 'typer.cc\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_v8_unimplemented_code(self):
    """Test the v8 unimplemented code format."""
data = self._read_test_data('v8_unimplemented_code.txt')
expected_type = 'Unreachable code'
expected_address = ''
expected_state = 'simulator-arm.cc\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_unknown_fatal_error(self):
"""Test a generic fatal error."""
data = self._read_test_data('v8_unknown_fatal_error.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('something that isn\'t supported yet in '
'simulator-arm.cc\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_runtime_error(self):
"""Test a v8 runtime error."""
os.environ['DETECT_V8_RUNTIME_ERRORS'] = 'True'
data = self._read_test_data('v8_runtime_error.txt')
expected_type = 'RUNTIME_ASSERT'
expected_address = ''
expected_state = 'args[0]->IsJSFunction() in runtime-test.cc\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_runtime_error_not_detected(self):
"""Ensure that v8 runtime errors are not detected if the flag is not set."""
data = self._read_test_data('v8_runtime_error.txt')
expected_type = ''
expected_address = ''
expected_state = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_v8_runtime_error_analyze_task(self):
    """Ensure that v8 runtime errors are detected under the analyze task."""
os.environ['TASK_NAME'] = 'analyze'
data = self._read_test_data('v8_runtime_error.txt')
expected_type = 'RUNTIME_ASSERT'
expected_address = ''
expected_state = 'args[0]->IsJSFunction() in runtime-test.cc\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_correctness_error(self):
"""Test a v8 correctness fuzzer error."""
data = self._read_test_data('v8_correctness_failure.txt')
expected_type = 'V8 correctness failure'
expected_address = ''
expected_state = ('configs: x64,fullcode:x64,ignition_staging\n'
'sources: deadbeef,beefdead,abcd1234\n'
'suppression: crbug.com/123456\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_generic_segv(self):
"""Test a SEGV caught by a generic signal handler."""
data = self._read_test_data('generic_segv.txt')
expected_type = 'UNKNOWN'
expected_address = '0x7f6b0c580000'
expected_state = 'NULL'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ignore_asan_warning(self):
"""Ensure that ASan warning lines are ignored."""
data = self._read_test_data('ignore_asan_warning.txt')
actual_state = stack_analyzer.get_crash_data(data)
self.assertNotIn('Failed to allocate', actual_state.crash_type)
self.assertTrue(actual_state.crash_state and
'NULL' not in actual_state.crash_state)
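    # In the test above, 'NULL' appears to be the placeholder crash state used
    # when no usable frames can be extracted (compare test_generic_segv and
    # test_libfuzzer_deadly_signal), so asserting that it is absent confirms
    # real frames were recovered despite the ASan warning lines in the log.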
def test_lsan_direct_leak(self):
"""Test the LSan direct leak format."""
data = self._read_test_data('lsan_direct_leak.txt')
expected_type = 'Direct-leak'
expected_address = ''
expected_state = 'xmlStrndup\nxmlStrdup\nxmlGetPropNodeValueInternal\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_lsan_indirect_leak_cycle(self):
"""Test the LSan format when we only have indirect leaks."""
data = self._read_test_data('lsan_indirect_leak_cycle.txt')
expected_type = 'Indirect-leak'
expected_address = ''
expected_state = ('xmlNewDocElementContent\n'
'xmlParseElementMixedContentDecl\n'
'xmlParseElementContentDecl\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_lsan_multiple_leaks(self):
    """Test the LSan format when multiple leaks are reported."""
data = self._read_test_data('lsan_multiple_leaks.txt')
expected_type = 'Direct-leak'
expected_address = ''
expected_state = 'pepper::AutoBuffer::AllocateBuffer\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_lsan_single_frame_stacks(self):
    """Test the LSan format for single-frame stacks."""
data = self._read_test_data('lsan_single_frame_stacks.txt')
expected_type = 'Direct-leak'
expected_address = ''
expected_state = 'f\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_cfi_bad_cast_virtual_call(self):
    """Test the CFI output format for a virtual-call bad cast."""
data = self._read_test_data('cfi_bad_cast.txt')
expected_type = 'Bad-cast'
expected_address = '0x000000000000'
expected_state = ('Bad-cast to blink::LayoutObject from invalid vptr\n'
'blink::LayoutObject::containingBlock\n'
'blink::LayoutBox::topLeftLocation\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_cfi_bad_cast_indirect_function_call(self):
    """Test the CFI output format for an indirect-call bad cast."""
data = self._read_test_data('cfi_bad_cast_indirect_fc.txt')
expected_type = 'Bad-cast'
expected_address = ''
expected_state = (
'Bad-cast to void (*(struct VkInstance_T *, const char *))(void)\n'
'vkGetInstanceProcAddrStub$8d185785d173e702d91e2893e143a6d9.cfi\n'
'volkGenLoadLoader\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_cfi_bad_cast_invalid_vtable(self):
"""Test the CFI output format for an invalid vptr."""
data = self._read_test_data('cfi_invalid_vtable.txt')
expected_type = 'Bad-cast'
expected_address = '0x000000422710'
expected_state = 'Bad-cast to B from invalid vptr\n'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_cfi_unrelated_vtable(self):
"""Test the CFI output format from an unrelated vtable."""
data = self._read_test_data('cfi_unrelated_vtable.txt')
expected_type = 'Bad-cast'
expected_address = '0x000000422710'
expected_state = 'Bad-cast to B from A\n'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_cfi_nodebug(self):
"""Test the CFI output format with no debug information."""
data = self._read_test_data('cfi_nodebug.txt')
expected_type = 'Bad-cast'
expected_address = ''
expected_state = 'abc::def\nfoo\nbar\n'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_in_drt_string(self):
    """Test that "AddressSanitizer" in text doesn't cause crash detection."""
data = self._read_test_data('asan_in_drt_string.txt')
expected_type = ''
expected_address = ''
expected_state = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_assert_in_drt_string(self):
    """Test that "ASSERT" in text doesn't cause crash detection."""
data = self._read_test_data('assert_in_drt_string.txt')
expected_type = ''
expected_address = ''
expected_state = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_breakpoint(self):
"""Test the ASan breakpoint format."""
data = self._read_test_data('asan_breakpoint.txt')
expected_type = 'Breakpoint'
expected_state = ('blink::PluginInfo::GetMimeClassInfo\n'
'blink::DOMPlugin::item\n'
'blink::V8Plugin::itemMethodCallback\n')
expected_address = '0xba0f4780'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_breakpoint_with_check_failure(self):
"""Test the ASan breakpoint format with CHECK failure."""
data = self._read_test_data('asan_breakpoint_with_check.txt')
expected_type = 'CHECK failure'
expected_state = ('i < size() in Vector.h\n'
'blink::PluginInfo::GetMimeClassInfo\n'
'blink::DOMPlugin::item\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
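    # Compared to test_asan_breakpoint above, this trace also carries a CHECK
    # failure, and the CHECK failure takes precedence: the type switches from
    # 'Breakpoint' to 'CHECK failure' and the failed condition becomes the
    # first line of the crash state.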
def test_asan_bus(self):
"""Test the ASan SIGBUS format."""
data = self._read_test_data('asan_bus.txt')
expected_type = 'Bus'
expected_state = ('storeColor\n'
'glgProcessColor\n'
'__glgProcessPixelsWithProcessor_block_invoke\n')
expected_address = '0x603000250000'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_fpe(self):
"""Test the ASan FPE format."""
data = self._read_test_data('asan_fpe.txt')
expected_type = 'Floating-point-exception'
expected_state = ('ash::WindowGrid::PositionWindows\n'
'ash::WindowSelector::Init\n'
'ash::WindowSelectorController::ToggleOverview\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_fpe(self):
"""Test the UBSan FPE format."""
data = self._read_test_data('ubsan_fpe.txt')
expected_type = 'Floating-point-exception'
expected_state = ('ash::WindowGrid::PositionWindows\n'
'ash::WindowSelector::Init\n'
'ash::WindowSelectorController::ToggleOverview\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_ill(self):
"""Test the ASan ILL format."""
data = self._read_test_data('asan_ill.txt')
expected_type = 'Ill'
expected_state = ('boom_internal\nboom_intermediate\nboom\n')
expected_address = '0x631000001001'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_ill(self):
"""Test the UBSan ILL format."""
data = self._read_test_data('ubsan_ill.txt')
expected_type = 'Ill'
expected_state = ('boom_internal\nboom_intermediate\nboom\n')
expected_address = '0x631000001001'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_ill_null_address(self):
"""Test the ASan ILL format with a null address."""
data = self._read_test_data('asan_ill_null_address.txt')
expected_type = 'Ill'
expected_state = ('boom_internal\nboom_intermediate\nboom\n')
expected_address = '0x000000000000'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_gsignal(self):
"""Test the ASan gsignal format."""
data = self._read_test_data('asan_gsignal.txt')
expected_type = 'UNKNOWN'
expected_state = (
'url::UIDNAWrapper::UIDNAWrapper\n'
'base::DefaultLazyInstanceTraits<url::UIDNAWrapper>::New\n'
'base::internal::LeakyLazyInstanceTraits<url::UIDNAWrapper>::New\n')
expected_address = '0x03e9000039cd'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_stack_overflow(self):
"""Test the ASan stack overflow format."""
data = self._read_test_data('asan_stack_overflow.txt')
expected_type = 'Stack-overflow'
expected_state = ('CPDF_ColorSpace::Load\n'
'CPDF_DocPageData::GetColorSpace\n'
'CPDF_IndexedCS::v_Load\n')
expected_address = '0x7ffc533cef30'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
# In the past, we have ignored stack overflows explicitly. Ensure that
# the current behavior is to detect them.
self.assertTrue(crash_analyzer.is_memory_tool_crash(data))
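    # Illustrative sketch only (not an original assertion): the same trace can
    # be re-parsed with the API used elsewhere in this file to confirm the
    # detected type directly:
    #   self.assertEqual('Stack-overflow',
    #                    stack_analyzer.get_crash_data(data).crash_type)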
def test_asan_stack_overflow_2(self):
"""Test the ASan stack overflow format."""
data = self._read_test_data('asan_stack_overflow2.txt')
expected_type = 'Stack-overflow'
expected_state = ('begin_parse_string\n'
'finish_lithdr_notidx_v\n'
'begin_parse_string\n')
expected_address = '0x7ffca4df4b38'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_stack_overflow_3(self):
"""Test the ASan stack overflow format."""
data = self._read_test_data('asan_stack_overflow3.txt')
expected_type = 'Stack-overflow'
expected_state = ('begin_parse_string\n'
'finish_lithdr_notidx_v\n'
'begin_parse_string\n')
expected_address = '0x7ffca4df4b38'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_invalid_free(self):
"""Test the ASan bad free format."""
data = self._read_test_data('asan_invalid_free.txt')
expected_type = 'Invalid-free'
expected_state = ('_gnutls_buffer_append_printf\n'
'print_cert\n'
'gnutls_x509_crt_print\n')
expected_address = '0x00000a5742f0'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_double_free(self):
    """Test the ASan double free format."""
data = self._read_test_data('asan_double_free.txt')
expected_type = 'Heap-double-free'
expected_state = ('clear\n'
'CPDF_DocPageData::Clear\n'
'CPDF_DocPageData::~CPDF_DocPageData\n')
expected_address = '0x610000022b80'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_libfuzzer_deadly_signal(self):
"""Test for libfuzzer deadly signal."""
data = self._read_test_data('libfuzzer_deadly_signal.txt')
expected_type = 'Fatal-signal'
expected_state = 'NULL'
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_libfuzzer_fuzz_target_exited(self):
"""Test for unexpected fuzz target exit."""
data = self._read_test_data('libfuzzer_fuzz_target_exited.txt')
expected_type = 'Unexpected-exit'
expected_state = 'clearsilver_fuzzer_file.cc\n'
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_fuchsia_asan(self):
"""Test for Fuchsia ASan crashes."""
# TODO(flowerhack): Once the duplicated frames issue is fixed for Fuchsia,
# update this test to recognize proper frames.
data = self._read_test_data('fuchsia_asan.txt')
expected_type = 'Heap-buffer-overflow\nWRITE 1'
expected_state = 'foo_function\nfoo_function\nbar_function\n'
expected_address = '0x663fa3bcf198'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_fuchsia_reproducible_crash(self):
"""Test for Fuchsia ASan crashes found via reproduction."""
# TODO(flowerhack): right now, we get the logs from reproducer runs, and
# then post-process them to be in a format ClusterFuzz understands. Once we
# patch Fuchsia to emit logs properly the first time, update this test
# accordingly.
data = self._read_test_data('fuchsia_reproducible_crash.txt')
expected_type = 'Fatal-signal'
expected_state = 'foo_function\nbar_function\nbasic.cc\n'
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_fuchsia_ignore(self):
"""Test for ignoring Fuchsia frames."""
data = self._read_test_data('fuchsia_ignore.txt')
expected_type = 'Fatal-signal'
expected_state = ('frame::Fake::Fake\n'
'frame::Fake::Fake2\n'
'async::TestLoop::Run\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_windows_asan_divide_by_zero(self):
"""Test for Windows ASan divide by zero crashes."""
data = self._read_test_data('windows_asan_divide_by_zero.txt')
expected_type = 'Divide-by-zero'
expected_state = (
'blink::LayoutMultiColumnSet::PageRemainingLogicalHeightForOffset\n'
'blink::LayoutFlowThread::PageRemainingLogicalHeightForOffset\n'
'blink::LayoutBox::PageRemainingLogicalHeightForOffset\n')
expected_address = '0x00000000'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_cdb_divide_by_zero(self):
"""Test the CDB format for divide by zero crashes."""
data = self._read_test_data('cdb_divide_by_zero.txt')
expected_type = 'Divide-by-zero'
expected_state = ('ForStatementNode::DetermineLoopIterations<int>\n'
'ForStatementNode::VerifySelf\n'
'ParseTreeNode::VerifyNode\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_cdb_integer_overflow(self):
"""Test the CDB format for safe integer overflow crashes."""
data = self._read_test_data('cdb_integer_overflow.txt')
expected_type = 'Integer-overflow'
expected_state = ('Js::TaggedInt::Divide\n'
'Js::InterpreterStackFrame::ProfiledDivide\n'
'Js::InterpreterStackFrame::Process\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_cdb_read(self):
"""Test the Windows CDB format for an invalid read."""
data = self._read_test_data('cdb_read.txt')
expected_type = 'READ'
expected_state = 'crash\nggg\nfff\n'
expected_address = '0x000000000000'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_cdb_read_x64(self):
"""Test the 64-bit Windows CDB format for an invalid read."""
data = self._read_test_data('cdb_read_x64.txt')
expected_type = 'READ'
expected_state = 'Ordinal101\nCreateCoreWebView\nOrdinal107\n'
expected_address = '0x000000000010'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_cdb_other(self):
"""Test the CDB format for crashes that are not read/write AVs."""
data = self._read_test_data('cdb_other.txt')
expected_type = 'Heap-corruption'
expected_state = ('CScriptTimers::ExecuteTimer\n'
'CWindow::FireTimeOut\n'
'CPaintBeat::ProcessTimers\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_cdb_stack_overflow(self):
"""Test the CDB stack overflow format."""
data = self._read_test_data('cdb_stack_overflow.txt')
expected_type = 'Stack-overflow'
expected_state = 'RunHTMLApplication\n'
expected_address = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_gsignal_at_first_stack_frame(self):
"""Test that gsignal is at the first stack frame."""
data = self._read_test_data('gsignal_at_first_stack_frame.txt')
expected_type = 'UNKNOWN'
expected_address = '0x5668a000177a5'
expected_state = ('AbbreviatedMonthsMap\nget\nGetInstance\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_sanitizer_signal_abrt(self):
"""Test abort signal from sanitizer for functional bug."""
data = self._read_test_data('sanitizer_signal_abrt.txt')
expected_type = 'Abrt'
expected_address = ''
expected_state = ('/tmp/coredump\n/tmp/coredump\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_sanitizer_signal_abrt_unknown(self):
"""Test abort signal on unknown address from sanitizer for functional
bug."""
data = self._read_test_data('sanitizer_signal_abrt_unknown.txt')
expected_type = 'Abrt'
expected_address = '0x000000000001'
expected_state = 'NULL'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_syzkaller_kasan(self):
"""Test syzkaller kasan."""
data = self._read_test_data('kasan_syzkaller.txt')
expected_type = 'Kernel failure\nUse-after-free\nREAD 8'
expected_state = ('sock_wake_async\n'
'sock_def_readable\n'
'unix_dgram_sendmsg\n')
expected_address = '0xffffffc01640e9d0'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_syzkaller_kasan_android(self):
    """Test syzkaller kasan on Android."""
data = self._read_test_data('kasan_syzkaller_android.txt')
expected_type = 'Kernel failure\nNull-ptr-deref\nWRITE 4'
expected_state = ('sockfs_setattr\nnotify_change2\nchown_common\n')
expected_address = '0x00000000027c'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_kasan_gpf(self):
"""Test a KASan GPF."""
data = self._read_test_data('kasan_gpf.txt')
expected_type = 'Kernel failure\nGeneral-protection-fault'
expected_state = ('keyring_destroy\n'
'key_garbage_collector\n'
'process_one_work\n')
expected_address = ''
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_kasan_null(self):
"""Test a KASan NULL deref."""
data = self._read_test_data('kasan_null.txt')
expected_type = 'Kernel failure\nUser-memory-access\nWRITE 4'
expected_state = ('snd_seq_fifo_clear\n'
'snd_seq_ioctl_remove_events\n'
'snd_seq_do_ioctl\n')
expected_address = '0x000000000040'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_kasan_oob_read(self):
"""Test a KASan out-of-bounds read."""
data = self._read_test_data('kasan_oob_read.txt')
expected_type = 'Kernel failure\nOut-of-bounds-access\nREAD 1'
expected_state = ('platform_match\n'
'__device_attach_driver\n'
'bus_for_each_drv\n')
expected_address = '0xffffffc002583240'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_kasan_uaf(self):
"""Test a KASan use-after-free."""
data = self._read_test_data('kasan_uaf.txt')
expected_type = 'Kernel failure\nUse-after-free\nREAD 4'
expected_state = ('ip6_append_data\nudpv6_sendmsg\ninet_sendmsg\n')
expected_address = '0xffff88005031ee80'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_null_dereference_read(self):
    """Test a Null-dereference READ derived from ASan UNKNOWN READ access."""
data = self._read_test_data('asan_null_dereference_read.txt')
expected_type = 'Null-dereference READ'
expected_state = ('content::NavigationEntryImpl::site_instance\n'
'content::NavigationControllerImpl::ClassifyNavigation\n'
'content::NavigationControllerImpl::'
'RendererDidNavigate\n')
expected_address = '0x000000000008'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
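  # The Null-dereference tests in this group reuse the stacks from the plain
  # UNKNOWN tests further down; only the faulting address differs. Accesses
  # very close to address zero (e.g. 0x000000000008) are downgraded to
  # Null-dereference and treated as non-security, while the same stacks at
  # higher addresses (e.g. 0x000000010008) keep the UNKNOWN type and the
  # security flag.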
  def test_asan_null_dereference_write(self):
    """Test a Null-dereference WRITE derived from ASan UNKNOWN WRITE access."""
data = self._read_test_data('asan_null_dereference_write.txt')
expected_type = 'Null-dereference WRITE'
expected_state = ('SetTaskInfo\n'
'base::Timer::Start\n'
'Start<views::MenuController>\n')
expected_address = '0x000000000178'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_null_dereference_unknown(self):
"""Test a Null-dereference derived from ASan UNKNOWN access of unknown type
(READ/WRITE)."""
data = self._read_test_data('asan_null_dereference_unknown.txt')
expected_type = 'Null-dereference'
expected_state = (
'blink::Member<blink::StyleEngine>::get\n'
'blink::Document::styleEngine\n'
'blink::Document::updateLayoutTreeIgnorePendingStylesheets\n')
expected_address = '0x000000000530'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_null_dereference_win_read(self):
    """Test a Null-dereference READ derived from ASan UNKNOWN READ
    access-violation on Windows."""
data = self._read_test_data('asan_null_dereference_win_read.txt')
expected_type = 'Null-dereference READ'
expected_state = ('blink::SVGEnumerationBase::calculateAnimatedValue\n'
'blink::SVGAnimateElement::calculateAnimatedValue\n'
'blink::SVGAnimationElement::updateAnimation\n')
expected_address = '0x00000008'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_null_dereference_win_write(self):
    """Test a Null-dereference WRITE derived from ASan UNKNOWN WRITE
    access-violation on Windows."""
data = self._read_test_data('asan_null_dereference_win_write.txt')
expected_type = 'Null-dereference WRITE'
expected_state = ('blink::SVGEnumerationBase::calculateAnimatedValue\n'
'blink::SVGAnimateElement::calculateAnimatedValue\n'
'blink::SVGAnimationElement::updateAnimation\n')
expected_address = '0x00000008'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_unknown_crash_read_null(self):
    """Test an ASan Unknown-crash READ access from nullptr."""
data = self._read_test_data('asan_unknown_crash_read.txt')
expected_type = 'Null-dereference'
expected_state = ('void rawspeed::FujiDecompressor::copy_line'
'<rawspeed::FujiDecompressor::copy_line\n'
'rawspeed::FujiDecompressor::copy_line_to_xtrans\n'
'rawspeed::FujiDecompressor::fuji_decode_strip\n')
expected_address = '0x000000000006'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_unknown_crash_write(self):
    """Test an ASan Unknown-crash WRITE access."""
data = self._read_test_data('asan_unknown_crash_write.txt')
expected_type = 'UNKNOWN'
expected_state = ('void rawspeed::FujiDecompressor::copy_line'
'<rawspeed::FujiDecompressor::copy_line\n'
'rawspeed::FujiDecompressor::copy_line_to_xtrans\n'
'rawspeed::FujiDecompressor::fuji_decode_strip\n')
expected_address = '0x000000123456'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_unknown_read(self):
    """Test an ASan UNKNOWN READ access."""
data = self._read_test_data('asan_unknown_read.txt')
expected_type = 'UNKNOWN READ'
expected_state = ('content::NavigationEntryImpl::site_instance\n'
'content::NavigationControllerImpl::ClassifyNavigation\n'
'content::NavigationControllerImpl::'
'RendererDidNavigate\n')
expected_address = '0x000000010008'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_unknown_write(self):
    """Test an ASan UNKNOWN WRITE access."""
data = self._read_test_data('asan_unknown_write.txt')
expected_type = 'UNKNOWN WRITE'
expected_state = ('SetTaskInfo\n'
'base::Timer::Start\n'
'Start<views::MenuController>\n')
expected_address = '0x000000010178'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_unknown_unknown(self):
"""Test an ASan UNKNOWN access of unknown type (READ/WRITE)."""
data = self._read_test_data('asan_unknown_unknown.txt')
expected_type = 'UNKNOWN'
expected_state = (
'blink::Member<blink::StyleEngine>::get\n'
'blink::Document::styleEngine\n'
'blink::Document::updateLayoutTreeIgnorePendingStylesheets\n')
expected_address = '0x000000010530'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_unknown_win_read(self):
    """Test an ASan UNKNOWN READ access-violation on Windows."""
data = self._read_test_data('asan_unknown_win_read.txt')
expected_type = 'UNKNOWN READ'
expected_state = ('blink::SVGEnumerationBase::calculateAnimatedValue\n'
'blink::SVGAnimateElement::calculateAnimatedValue\n'
'blink::SVGAnimationElement::updateAnimation\n')
expected_address = '0x00010008'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_asan_unknown_win_write(self):
    """Test an ASan UNKNOWN WRITE access-violation on Windows."""
data = self._read_test_data('asan_unknown_win_write.txt')
expected_type = 'UNKNOWN WRITE'
expected_state = ('blink::SVGEnumerationBase::calculateAnimatedValue\n'
'blink::SVGAnimateElement::calculateAnimatedValue\n'
'blink::SVGAnimationElement::updateAnimation\n')
expected_address = '0x00010008'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_security_check_failure(self):
"""Test a security CHECK failure (i.e. Blink RELEASE_ASSERT)."""
data = self._read_test_data('security_check_failure.txt')
expected_type = 'Security CHECK failure'
expected_address = ''
expected_state = ('startPosition.compareTo(endPosition) <= 0 in '
'Serialization.cpp\n'
'blink::CreateMarkupAlgorithm<>::createMarkup\n'
'blink::createMarkup\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_security_dcheck_failure(self):
"""Test a security DCHECK failure."""
data = self._read_test_data('security_dcheck_failure.txt')
expected_type = 'Security DCHECK failure'
expected_address = ''
expected_state = ('!terminated_ in latency_info.cc\n'
'ui::LatencyInfo::AddLatencyNumberWithTimestampImpl\n'
'ui::LatencyInfo::AddLatencyNumberWithTimestamp\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_security_dcheck_failure_with_abrt(self):
"""Test a security DCHECK failure with SIGABRT stack."""
data = self._read_test_data('security_dcheck_failure_with_abrt.txt')
expected_type = 'Security DCHECK failure'
expected_address = ''
expected_state = ('!root_parent->IsSVGElement() || '
'!ToSVGElement(root_parent) ->elements_with_relat\n'
'blink::SVGElement::RemovedFrom\n'
'blink::ContainerNode::NotifyNodeRemoved\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_with_string_vs_string(self):
"""Test a check failure with string vs string."""
data = self._read_test_data('check_failure_with_string_vs_string.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('start <= end in text_iterator.cc\n'
'blink::TextIteratorAlgorithm<blink::EditingAlgorithm'
'<blink::FlatTreeTraversal> >\n'
'blink::TextIteratorAlgorithm<blink::EditingAlgorithm'
'<blink::FlatTreeTraversal> >\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_vs_no_closing(self):
"""Test a check failure with string vs string (no closing bracket)."""
data = self._read_test_data('check_failure_vs_no_closing.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('record1 == record2 in file.cc\n'
'blink::TextIteratorAlgorithm<blink::EditingAlgorithm'
'<blink::FlatTreeTraversal> >\n'
'blink::TextIteratorAlgorithm<blink::EditingAlgorithm'
'<blink::FlatTreeTraversal> >\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_with_msan_abrt(self):
"""Test a check failure with MSan SIGABRT stack."""
data = self._read_test_data('check_failure_with_msan_abrt.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('i < length_ in string_piece.h\n'
'base::BasicStringPiece<std::__1::basic_string<char, '
'std::__1::char_traits<char>,\n'
'base::internal::JSONParser::ConsumeStringRaw\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
  def test_android_security_dcheck_failure(self):
    """Test an Android security DCHECK failure."""
data = self._read_test_data('android_security_dcheck_failure.txt')
expected_type = 'Security DCHECK failure'
expected_address = ''
expected_state = ('offset + length <= impl.length() in StringView.h\n'
'set\n'
'StringView\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_android_media(self):
"""Test a CHECK failure in Android Media."""
data = self._read_test_data('check_failure_android_media.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = (
'CHECK_EQ( (unsigned)ptr[0],1u) failed in MPEG4Extractor.cpp\n'
'android::MPEG4Source::MPEG4Source\n'
'android::MPEG4Extractor::getTrack\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_android_media2(self):
"""Test a CHECK failure on Android."""
data = self._read_test_data('check_failure_android_media2.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = (
'CHECK(mFormat->findInt32(kKeyCryptoDefaultIVSize, &ivlength)) failed '
'in MPEG4Ext\n'
'android::MPEG4Source::parseSampleAuxiliaryInformationOffsets\n'
'android::MPEG4Source::parseChunk\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_chrome(self):
"""Test a CHECK failure with a Chrome symbolized stacktrace."""
data = self._read_test_data('check_failure_chrome.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('!terminated_ in latency_info.cc\n'
'ui::LatencyInfo::AddLatencyNumberWithTimestampImpl\n'
'ui::LatencyInfo::AddLatencyNumberWithTimestamp\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_chrome_android(self):
"""Test a CHECK failure with a Chrome on Android symbolized stacktrace."""
data = self._read_test_data('check_failure_chrome_android.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('Timed out waiting for GPU channel in '
'compositor_impl_android.cc\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_chrome_android2(self):
"""Test a CHECK failure with a Chrome on Android symbolized stacktrace."""
data = self._read_test_data('check_failure_chrome_android2.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('lifecycle().state() < '
'DocumentLifecycle::LayoutClean in FrameView.cpp\n'
'blink::FrameView::checkLayoutInvalidationIsAllowed\n'
'blink::FrameView::setNeedsLayout\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_chrome_mac(self):
"""Test a CHECK failure with a Chrome on Mac symbolized stacktrace."""
if not environment.is_posix():
self.skipTest('This test needs c++filt for demangling and is only '
'applicable for posix platforms.')
data = self._read_test_data('check_failure_chrome_mac.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('max_start_velocity > 0 in fling_curve.cc\n'
'ui::FlingCurve::FlingCurve\n'
'ui::WebGestureCurveImpl::CreateFromDefaultPlatformCurve'
'\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_chrome_win(self):
"""Test a CHECK failure with a Chrome on Windows symbolized stacktrace."""
data = self._read_test_data('check_failure_chrome_win.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('it != device_change_subscribers_.end() in '
'media_stream_dispatcher_host.cc\n'
'content::MediaStreamDispatcherHost::'
'OnCancelDeviceChangeNotifications\n'
'IPC::MessageT<MediaStreamHostMsg_'
'CancelDeviceChangeNotifications_Meta,std::tuple\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_with_assert_message(self):
"""Test the CHECK failure with assert message format."""
data = self._read_test_data('check_failure_with_assert_message.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = (
'host->listeners_.IsEmpty() in render_process_host_impl.cc\n'
'content::RenderProcessHostImpl::CheckAllTerminated\n'
'content::BrowserMainLoop::ShutdownThreadsAndCleanUp\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_with_comparison(self):
"""Test for special CHECK failure formats (CHECK_EQ, CHECK_LE, etc.)."""
data = self._read_test_data('check_failure_with_comparison.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = 'len > 0 in zygote_linux.cc\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_with_comparison2(self):
"""Test for special CHECK failure formats (CHECK_EQ, CHECK_LE, etc.)."""
data = self._read_test_data('check_failure_with_comparison2.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('layout_snapped_paint_offset == snapped_paint_offset '
'in compositing_layer_propert\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_with_handle_sigill_disabled(self):
"""Test the CHECK failure crash with ASAN_OPTIONS=handle_sigill=0."""
data = self._read_test_data('check_failure_with_handle_sigill=0.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = (
'length == 0 || (length > 0 && data != __null) in vector.h\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_with_handle_sigill_enabled(self):
"""Test the CHECK failure crash with ASAN_OPTIONS=handle_sigill=1."""
data = self._read_test_data('check_failure_with_handle_sigill=1.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = (
'length == 0 || (length > 0 && data != __null) in vector.h\n'
'v8::internal::Vector<unsigned char const>::Vector\n'
'v8::internal::wasm::ModuleWireBytes::ModuleWireBytes\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
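  # Contrast with the handle_sigill=0 case above: with the sanitizer handling
  # the SIGILL, the log presumably also contains a symbolized stack, so the
  # two v8 frames are appended to the state; with it disabled, only the failed
  # condition is captured.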
def test_check_failure_with_fuzzer_data(self):
"""Test the CHECK failure crash with fuzzer data."""
for data in [('F0813 00:29:27.775753 384244 file.cc:130] '
'Check failed: query failed: "fuzzed456$data"'),
('F0813 00:29:27.775753 384244 file.cc:130] '
'Check failed: query failed: \'fuzzed456$data\''),
('F0813 00:29:27.775753 384244 file.cc:130] '
'Check failed: query failed = \'fuzzed456$data\''),
('F0813 00:29:27.775753 384244 file.cc:130] '
'Check failed: query failed = "fuzzed456$data"')]:
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('query failed in file.cc\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_oom(self):
"""Test an out of memory stacktrace."""
data = self._read_test_data('oom.txt')
expected_type = ''
expected_address = ''
expected_state = ''
expected_stacktrace = ''
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_oom2(self):
"""Test an out of memory stacktrace."""
data = self._read_test_data('oom2.txt')
expected_type = ''
expected_address = ''
expected_state = ''
expected_stacktrace = ''
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_oom3(self):
"""Test an out of memory stacktrace."""
data = self._read_test_data('oom3.txt')
expected_type = ''
expected_address = ''
expected_state = ''
expected_stacktrace = ''
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_oom4(self):
"""Test an out of memory stacktrace."""
os.environ['FUZZ_TARGET'] = 'pdf_jpx_fuzzer'
os.environ['REPORT_OOMS_AND_HANGS'] = 'True'
data = self._read_test_data('oom4.txt')
expected_type = 'Out-of-memory'
expected_address = ''
expected_state = 'pdf_jpx_fuzzer\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_libfuzzer_timeout_enabled(self):
"""Test a libFuzzer timeout stacktrace (with reporting enabled)."""
os.environ['FUZZ_TARGET'] = 'pdfium_fuzzer'
os.environ['REPORT_OOMS_AND_HANGS'] = 'True'
data = self._read_test_data('libfuzzer_timeout.txt')
expected_type = 'Timeout'
expected_address = ''
expected_state = 'pdfium_fuzzer\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_libfuzzer_timeout_disabled(self):
"""Test a libFuzzer timeout stacktrace (with reporting disabled)."""
data = self._read_test_data('libfuzzer_timeout.txt')
expected_type = ''
expected_address = ''
expected_state = ''
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_libfuzzer_oom_without_redzone(self):
"""Test a libFuzzer OOM stacktrace with no redzone."""
os.environ['FUZZ_TARGET'] = 'freetype2_fuzzer'
os.environ['REPORT_OOMS_AND_HANGS'] = 'True'
data = self._read_test_data('libfuzzer_oom.txt')
expected_type = 'Out-of-memory'
expected_address = ''
expected_state = 'freetype2_fuzzer\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
data = self._read_test_data('libfuzzer_oom_malloc.txt')
expected_stacktrace = data
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_libfuzzer_oom_with_small_redzone(self):
"""Test a libFuzzer OOM stacktrace with redzone equal or smaller than 64."""
os.environ['FUZZ_TARGET'] = 'freetype2_fuzzer'
os.environ['REPORT_OOMS_AND_HANGS'] = 'True'
os.environ['REDZONE'] = '64'
data = self._read_test_data('libfuzzer_oom.txt')
expected_type = 'Out-of-memory'
expected_address = ''
expected_state = 'freetype2_fuzzer\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
data = self._read_test_data('libfuzzer_oom_malloc.txt')
expected_stacktrace = data
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_libfuzzer_oom_with_higher_redzone(self):
"""Test a libFuzzer OOM stacktrace with redzone greater than 64."""
data = self._read_test_data('libfuzzer_oom.txt')
os.environ['REPORT_OOMS_AND_HANGS'] = 'True'
os.environ['REDZONE'] = '256'
expected_type = ''
expected_address = ''
expected_state = ''
expected_stacktrace = ''
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
data = self._read_test_data('libfuzzer_oom_malloc.txt')
expected_stacktrace = ''
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_glibc_assertion(self):
"""Test assertion (glibc)."""
data = self._read_test_data('assert_glibc.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = ('record\n'
'DuplicateRecordAndInsertInterval\n'
'DoDpPhrasing\n')
expected_stacktrace = data
expected_security_flag = False
environment.set_value('ASSERTS_HAVE_SECURITY_IMPLICATION', False)
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_glibc_assertion_with_glib(self):
"""Test assertion (glibc) with glib frames."""
data = self._read_test_data('assert_glibc_with_glib.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = ('g_utf8_validate (tag, -1, NULL)\n'
'gst_tag_list_from_vorbiscomment\n'
'tag_list_from_vorbiscomment_packet\n')
expected_stacktrace = data
expected_security_flag = False
environment.set_value('ASSERTS_HAVE_SECURITY_IMPLICATION', False)
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_chromium_log_assert(self):
"""Tests assertion (chromium's LOG_ASSERT)."""
data = self._read_test_data('assert_chromium_log.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = (
'parsed_output == double_parsed_output. Parser/Writer mismatch.\n'
'correctness_fuzzer.cc\n')
expected_stacktrace = data
expected_security_flag = False
environment.set_value('ASSERTS_HAVE_SECURITY_IMPLICATION', False)
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_asan_container_overflow(self):
"""Test an ASan container overflow."""
data = self._read_test_data('asan_container_overflow_read.txt')
expected_type = 'Container-overflow\nREAD 4'
expected_address = '0x61000006be40'
expected_state = ('SkSL::Compiler::addDefinitions\n'
'SkSL::Compiler::scanCFG\n'
'SkSL::Compiler::internalConvertProgram\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_cobalt_check(self):
"""Test a cobalt check failure crash.."""
data = self._read_test_data('cobalt_check.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('space_width_ > 0 in font_list.cc\n'
'cobalt::dom::FontList::GenerateSpaceWidth\n'
'cobalt::dom::FontList::GetSpaceWidth\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ubsan_unsigned_integer_overflow(self):
"""Test that we analyze Unsigned-integer-overflow correctly."""
data = self._read_test_data('ubsan_unsigned_integer_overflow.txt')
expected_type = 'Unsigned-integer-overflow'
expected_address = ''
expected_state = ('xmlHashComputeKey\n'
'xmlHashAddEntry3\n'
'xmlAddEntity\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_libfuzzer_llvm_test_one_input(self):
"""Test that we use the filename as the crash state instead of
LLVMFuzzerTestOneInput."""
data = self._read_test_data(
'libfuzzer_llvm_fuzzer_test_one_input_crash.txt')
expected_type = 'Abrt'
expected_address = '0x03e900003b7b'
expected_state = ('deflate_set_dictionary_fuzzer.cc\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_webkit_wtfcrash(self):
"""Test that WTFCrash is ignored."""
data = self._read_test_data('wtfcrash.txt')
expected_type = 'Ill'
expected_address = '0x000002ade51c'
expected_state = (
'JSC::BuiltinExecutables::createExecutable\n'
'JSC::BuiltinExecutables::typedArrayPrototypeEveryCodeExecutable\n'
'JSC::typedArrayPrototypeEveryCodeGenerator\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_v8_javascript_assertion_should_pass(self):
"""Don't detect the string Assertion in javascript output as a failure."""
data = self._read_test_data('v8_javascript_assertion_should_pass.txt')
self._validate_get_crash_data(data, '', '', '', data, False)
def test_asan_assert_failure(self):
"""Test asan assertions formatted as 'assert failure: ...'."""
data = self._read_test_data('asan_assert_failure.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = (
'ss_len == 0 || ss_len >= offsetof(struct sockaddr_un, sun_path) + 1\n'
'Envoy::Network::Address::addressFromSockAddr\n'
'Envoy::Network::Address::addressFromFd\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_glib_assert_failure(self):
"""Test glib assertions formatted as 'assert failure: ...'."""
data = self._read_test_data('glib_assert_failure.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = ('(j < i)\n'
'ast_array_get_pattern\n'
'array_get_pattern\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_assert_with_panic_keyword(self):
"""Test assertions formatted as 'panic: ...'."""
data = self._read_test_data('assert_with_panic_keyword.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = (
'not reached\n'
'Envoy::Upstream::ClusterManagerImpl::ClusterManagerImpl\n'
'Envoy::Upstream::ValidationClusterManager::ValidationClusterManager\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_ignore_regex(self):
"""Test ignore regex work as expected."""
def _mock_config_get(_, param, default):
"""Handle test configuration options."""
if param == 'stacktrace.stack_frame_ignore_regexes':
return [r'Envoy\:\:Upstream\:\:ClusterManagerImpl']
return default
helpers.patch(self, ['config.local_config.ProjectConfig.get'])
self.mock.get.side_effect = _mock_config_get
data = self._read_test_data('assert_with_panic_keyword.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = (
'not reached\n'
'Envoy::Upstream::ValidationClusterManager::ValidationClusterManager\n'
'Envoy::Upstream::ValidationClusterManagerFactory::'
'clusterManagerFromProto\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_check_failure_google(self):
"""Test check failure format for internal google."""
data = self._read_test_data('check_failure_google.txt')
expected_type = 'CHECK failure'
expected_address = ''
expected_state = ('std::is_sorted(foo.begin(), foo.end()) in file.cc\n'
'Frame\n'
'path.cc\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_assert_google(self):
"""Test check failure format for internal google."""
data = self._read_test_data('assert_failure_google.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = ('Blah.empty() && "Failure!"\nFrame\npath.cc\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_log_fatal_google(self):
"""Test log fatal format for internal google."""
data = self._read_test_data('log_fatal_google.txt')
expected_type = 'Fatal error'
expected_address = ''
expected_state = ('Log fatal in file.h\nFrame\npath.cc\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_asan_panic(self):
"""Test golang stacktrace with panic and ASan."""
data = self._read_test_data('golang_asan_panic.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = 'asn1: string not valid UTF-8\nasn1.Fuzz\n'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_sigsegv_panic(self):
"""Test golang stacktrace with panic and SIGSEGV."""
data = self._read_test_data('golang_sigsegv_panic.txt')
expected_type = 'Invalid memory address'
expected_address = ''
expected_state = 'math.glob..func1\nmath.init.ializers\n'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_libfuzzer_panic(self):
"""Test golang stacktrace with panic and libFuzzer's deadly signal."""
data = self._read_test_data('golang_libfuzzer_panic.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = (
'parse //%B9%B9%B9%B9%B9%01%00%00%00%00%00%00%00%B9%B9%B9%B9%B9%B9%B9%B'
'9%B9%B9%B9\nurl.Fuzz\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_panic_with_type_assertions_in_frames(self):
"""Test golang stacktrace with panic with type assertions in stack frames.
"""
data = self._read_test_data(
'golang_panic_with_type_assertions_in_frames.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = ('index > windowEnd\n'
'flate.(*compressor).deflate\n'
'flate.(*compressor).syncFlush\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_fatal_error_stack_overflow(self):
"""Test golang stacktrace with fatal error caused by stack overflow."""
data = self._read_test_data('golang_fatal_error_stack_overflow.txt')
expected_type = 'Stack overflow'
expected_address = ''
expected_state = ('ast.(*scanner).next\n'
'ast.(*scanner).scanIdent\n'
'ast.(*scanner).Scan\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_panic_custom_short_message(self):
"""Test golang stacktrace with panic and custom short message."""
data = self._read_test_data('golang_panic_custom_short_message.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = 'bad hex char\nprog.fromHexChar\nprog.hexToByte\n'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_panic_runtime_error_invalid_memory_address(self):
"""Test golang stacktrace with panic caused by invalid memory address."""
data = self._read_test_data(
'golang_panic_runtime_error_invalid_memory_address.txt')
expected_type = 'Invalid memory address'
expected_address = ''
expected_state = ('repro.(*context).reproMinimizeProg\n'
'repro.(*context).repro\n'
'repro.Run\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_panic_runtime_error_index_out_of_range(self):
"""Test golang stacktrace with panic caused by index out of range."""
data = self._read_test_data(
'golang_panic_runtime_error_index_out_of_range.txt')
expected_type = 'Index out of range'
expected_address = ''
expected_state = ('http.(*conn).serve.func1\n'
'http.HandlerFunc.ServeHTTP\n'
'http.(*ServeMux).ServeHTTP\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_panic_runtime_error_index_out_of_range_with_msan(self):
"""Test golang stacktrace with panic caused by index out of range
with memory sanitizer."""
data = self._read_test_data(
'golang_panic_runtime_error_index_out_of_range_with_msan.txt')
expected_type = 'Index out of range'
expected_address = ''
expected_state = ('http.(*conn).serve.func1\n'
'http.HandlerFunc.ServeHTTP\n'
'http.(*ServeMux).ServeHTTP\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_panic_runtime_error_slice_bounds_out_of_range(self):
"""Test golang stacktrace with panic caused by slice bounds out of range."""
data = self._read_test_data(
'golang_panic_runtime_error_slice_bounds_out_of_range.txt')
expected_type = 'Slice bounds out of range'
expected_address = ''
expected_state = ('json.(*decodeState).unquoteBytes\n'
'json.(*decodeState).literalStore\n'
'json.(*decodeState).object\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_panic_runtime_error_integer_divide_by_zero(self):
"""Test golang stacktrace with panic caused by integer divide by zero."""
data = self._read_test_data(
'golang_panic_runtime_error_integer_divide_by_zero.txt')
expected_type = 'Integer divide by zero'
expected_address = ''
expected_state = ('go-bsbmp.(*SensorBMP180).ReadPressureMult10Pa\n'
'go-bsbmp.(*BMP).ReadAltitude\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_panic_runtime_error_makeslice_len_out_of_range(self):
"""Test golang stacktrace with panic caused by makeslice len out of range.
"""
data = self._read_test_data(
'golang_panic_runtime_error_makeslice_len_out_of_range.txt')
expected_type = 'Makeslice: len out of range'
expected_address = ''
expected_state = 'gc.newliveness\ngc.liveness\ngc.compile\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_generic_fatal_error_and_asan_abrt(self):
"""Test golang stacktrace with a generic fatal error and ASan's ABRT
signature that should be ignored for known golang crashes."""
data = self._read_test_data('golang_generic_fatal_error_and_asan_abrt.txt')
expected_type = 'Fatal error'
expected_address = ''
expected_state = 'error message here\njson.(*decodeState).unquoteBytes\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_generic_panic_and_asan_abrt(self):
"""Test golang stacktrace with a generic panic and ASan's ABRT signature
that should be ignored for known golang crashes."""
data = self._read_test_data('golang_generic_panic_and_asan_abrt.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = 'error message here\njson.(*decodeState).unquoteBytes\n'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_golang_new_crash_type_and_asan_abrt(self):
"""Test golang stacktrace with an unknown message and ASan's ABRT signature
that should be captured for unknown golang crashes."""
data = self._read_test_data('golang_new_crash_type_and_asan_abrt.txt')
expected_type = 'Abrt'
expected_address = '0x000000000001'
expected_state = 'NULL'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_hwasan_allocation_tail_overwritten(self):
"""Test HWASan allocation tail overwritten crash."""
data = self._read_test_data('hwasan_allocation_tail_overwritten.txt')
expected_type = 'Allocation-tail-overwritten'
expected_address = '0x00444b02b180'
expected_state = 'frame1\nframe2\nframe3\n'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_hwasan_tag_mismatch(self):
"""Test HWASan tag mismatch crash."""
data = self._read_test_data('hwasan_tag_mismatch.txt')
expected_type = 'Tag-mismatch\nREAD 8'
expected_address = '0x0040e1287be0'
expected_state = 'frame1\nframe2\nframe3\n'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_missing_library_android(self):
"""Test for missing library crash (Android)."""
data = self._read_test_data('missing_library_android.txt')
expected_type = 'Missing-library'
expected_address = ''
expected_state = 'libvts_codecoverage.so\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_missing_library_linux(self):
"""Test for missing library crash (Linux)."""
data = self._read_test_data('missing_library_linux.txt')
expected_type = 'Missing-library'
expected_address = ''
expected_state = 'libtest.so.1\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_undetected_ubsan_error_logs_error(self):
"""Ensure that we log an error if we see an unknown UBSan error type."""
data = self._read_test_data('ubsan_unknown_logs_error.txt')
expected_type = 'UNKNOWN'
expected_address = ''
expected_state = 'a\nb\nc\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
self.mock.log_error.assert_called_once_with(
'Unknown UBSan crash type: '
'unsupported ubsan error that needs a new signature')
def test_libfuzzer_overwrites_const_input(self):
"""Test for libFuzzer when target tries to overwrite const input."""
os.environ['FUZZ_TARGET'] = 'ap-mgmt'
data = self._read_test_data('libfuzzer_overwrites_const_input.txt')
expected_type = 'Overwrites-const-input'
expected_address = ''
expected_state = 'ap-mgmt\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_python_unhandled_exception(self):
"""Test python stacktrace with an unhandled exception."""
data = self._read_test_data('python_unhandled_exception.txt')
expected_type = 'Uncaught exception'
expected_address = ''
expected_state = '_read_exact\n_read_gzip_header\nread\n'
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_gdb_sigtrap(self):
"""Test for GDB stack."""
data = self._read_test_data('gdb_sigtrap.txt')
expected_type = 'SIGTRAP'
expected_address = '0x000000000ac8'
expected_state = (
'xymodem_trnasfer\nLoadImageFromUsb30\nLoadBL1FromUsb30\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_gdb_sigtrap_and_libfuzzer(self):
"""Test for GDB stack with libfuzzer."""
data = self._read_test_data('gdb_sigtrap_and_libfuzzer.txt')
expected_type = 'SIGTRAP'
expected_address = '0x000000000ac8'
expected_state = (
'xymodem_trnasfer\nLoadImageFromUsb30\nLoadBL1FromUsb30\n')
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_rust_assert(self):
"""Test for assertion in Rust."""
environment.set_value('ASSERTS_HAVE_SECURITY_IMPLICATION', False)
data = self._read_test_data('rust_assert.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = (
'Error: could not find an available port\n'
'libra_config::utils::get_available_port::h7d7baacfb554bae8\n'
'libra_json_rpc::fuzzing::fuzzer::hde487212e06dd4fd\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_rust_oom(self):
"""Test for out of memory in Rust."""
os.environ['REPORT_OOMS_AND_HANGS'] = 'True'
data = self._read_test_data('rust_oom.txt')
expected_type = 'Out-of-memory'
expected_address = ''
expected_state = (
'mp4parse::fallible::TryVec$LT$T$GT$::reserve::h1159314b25b06f7c\n'
'mp4parse::fallible::try_read_up_to::h64a356817b37893a\n'
'mp4parse::fallible::TryRead::read_into_try_vec::h1a15ad50dda088e3\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_rust_ignores(self):
"""Test that uninteresting frames are ignored for Rust."""
environment.set_value('ASSERTS_HAVE_SECURITY_IMPLICATION', False)
data = self._read_test_data('rust_ignores.txt')
expected_type = 'ASSERT'
expected_address = ''
expected_state = (
'called `Result::unwrap()` on an `Err` value: failed directive on '
'wasmtime/crates\n'
'wasmtime_fuzzing::oracles::spectest::ha380505b8ea313d4\n')
expected_stacktrace = data
expected_security_flag = False
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_linux_kernel_library_libfuzzer(self):
"""Test Linux Kernel Library fuzzed with libfuzzer."""
data = self._read_test_data('lkl_libfuzzer.txt')
expected_type = 'Kernel failure\nSlab-out-of-bounds\nWRITE 4'
expected_state = (
'hid_generic_probe\nreally_probe\n__device_attach_driver\n')
expected_address = '0x7f2a8ad06d18'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_linux_kernel_library_libfuzzer_symbolized(self):
"""Test Linux Kernel Library fuzzed with libfuzzer symbolized."""
data = self._read_test_data('lkl_libfuzzer_symbolized.txt')
expected_type = 'Kernel failure\nSlab-out-of-bounds\nWRITE 4'
expected_state = ('__hidinput_change_resolution_multipliers\n'
'hidinput_connect\n'
'hid_connect\n')
expected_address = '0x7f5256480ddc'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
def test_linux_kernel_library_libfuzzer_unsymbolized(self):
"""Test Linux Kernel Library fuzzed with libfuzzer unsymbolized."""
data = self._read_test_data('lkl_libfuzzer_unsymbolized.txt')
expected_type = 'Kernel failure\nSlab-out-of-bounds\nWRITE 4'
expected_state = ('hid_generic_probe\n'
'really_probe\n'
'__device_attach_driver\n')
expected_address = '0x7f58af2ac9ec'
expected_stacktrace = data
expected_security_flag = True
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
| []
| []
| [
"DETECT_V8_RUNTIME_ERRORS",
"FUZZ_TARGET",
"REPORT_OOMS_AND_HANGS",
"TASK_NAME",
"JOB_NAME",
"CHECKS_HAVE_SECURITY_IMPLICATION",
"REDZONE"
]
| [] | ["DETECT_V8_RUNTIME_ERRORS", "FUZZ_TARGET", "REPORT_OOMS_AND_HANGS", "TASK_NAME", "JOB_NAME", "CHECKS_HAVE_SECURITY_IMPLICATION", "REDZONE"] | python | 7 | 0 | |
pkg/vmtest/integration.go | // Copyright 2018 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package vmtest
import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/u-root/u-root/pkg/cp"
"github.com/u-root/u-root/pkg/golang"
"github.com/u-root/u-root/pkg/qemu"
"github.com/u-root/u-root/pkg/testutil"
"github.com/u-root/u-root/pkg/uio"
"github.com/u-root/u-root/pkg/ulog"
"github.com/u-root/u-root/pkg/ulog/ulogtest"
"github.com/u-root/u-root/pkg/uroot"
"github.com/u-root/u-root/pkg/uroot/initramfs"
)
// Options are integration test options.
type Options struct {
// BuildOpts are u-root initramfs options.
//
// They are used if the test needs to generate an initramfs.
// Fields that are not set are populated by QEMU and QEMUTest as
// possible.
BuildOpts uroot.Opts
// QEMUOpts are QEMU VM options for the test.
//
// Fields that are not set are populated by QEMU and QEMUTest as
// possible.
QEMUOpts qemu.Options
// DontSetEnv doesn't set the BuildOpts.Env and uses the user-supplied one.
//
// TODO: make uroot.Opts.Env a pointer?
DontSetEnv bool
// Name is the test's name.
//
// If name is left empty, the calling function's function name will be
// used as determined by runtime.Caller.
Name string
// Uinit is the uinit that should be added to a generated initramfs.
//
// If none is specified, the generic uinit will be used, which searches for
// and runs the script generated from TestCmds.
Uinit string
// TestCmds are commands to execute after init.
//
// QEMUTest generates an Elvish script with these commands. The script is
// shared with the VM, and is run from the generic uinit.
TestCmds []string
// TmpDir is the temporary directory exposed to the QEMU VM.
TmpDir string
// Logger logs build statements.
Logger ulog.Logger
// Extra environment variables to set when building (used by u-bmc)
ExtraBuildEnv []string
// Use virtual vfat rather than 9pfs
UseVVFAT bool
}
func last(s string) string {
l := strings.Split(s, ".")
return l[len(l)-1]
}
func callerName(depth int) string {
// Use the test name as the serial log's file name.
pc, _, _, ok := runtime.Caller(depth)
if !ok {
panic("runtime caller failed")
}
f := runtime.FuncForPC(pc)
return last(f.Name())
}
// TestLineWriter returns an io.WriteCloser that logs full lines of serial output to tb.
func TestLineWriter(tb testing.TB, prefix string) io.WriteCloser {
return uio.FullLineWriter(&testLineWriter{tb: tb, prefix: prefix})
}
type jsonStripper struct {
uio.LineWriter
}
func (j jsonStripper) OneLine(p []byte) {
// Poor man's JSON detector.
if len(p) == 0 || p[0] == '{' {
return
}
j.LineWriter.OneLine(p)
}
// JSONLessTestLineWriter returns an io.WriteCloser that logs full lines of
// serial output to tb, dropping lines that look like JSON records.
func JSONLessTestLineWriter(tb testing.TB, prefix string) io.WriteCloser {
return uio.FullLineWriter(jsonStripper{&testLineWriter{tb: tb, prefix: prefix}})
}
// testLineWriter is an io.Writer that logs full lines of serial to tb.
type testLineWriter struct {
tb testing.TB
prefix string
}
// replaceCtl replaces control characters other than tab (9) and newline (10)
// with '~' so that raw serial output stays printable in test logs.
func replaceCtl(str []byte) []byte {
	for i, c := range str {
		if c == 9 || c == 10 {
			// Keep tabs and newlines as-is.
		} else if c < 32 || c == 127 {
str[i] = '~'
}
}
return str
}
func (tsw *testLineWriter) OneLine(p []byte) {
tsw.tb.Logf("%s %s: %s", testutil.NowLog(), tsw.prefix, string(replaceCtl(p)))
}
// TestArch returns the architecture under test. Pass this as GOARCH when
// building Go programs to be run in the QEMU environment.
func TestArch() string {
if env := os.Getenv("UROOT_TESTARCH"); env != "" {
return env
}
return "amd64"
}
// SkipWithoutQEMU skips the test when the QEMU environment variables are not
// set. This is already called by QEMUTest(), so call it directly only if some
// expensive operations are performed before calling QEMUTest().
func SkipWithoutQEMU(t *testing.T) {
if _, ok := os.LookupEnv("UROOT_QEMU"); !ok {
t.Skip("QEMU test is skipped unless UROOT_QEMU is set")
}
if _, ok := os.LookupEnv("UROOT_KERNEL"); !ok {
t.Skip("QEMU test is skipped unless UROOT_KERNEL is set")
}
}
// QEMUTest builds and starts a u-root QEMU VM for the given test options and
// returns the VM together with a cleanup function the caller must invoke.
func QEMUTest(t *testing.T, o *Options) (*qemu.VM, func()) {
SkipWithoutQEMU(t)
if len(o.Name) == 0 {
o.Name = callerName(2)
}
if o.Logger == nil {
o.Logger = &ulogtest.Logger{TB: t}
}
if o.QEMUOpts.SerialOutput == nil {
o.QEMUOpts.SerialOutput = TestLineWriter(t, "serial")
}
	// Create or reuse a temporary directory. This is exposed to the VM.
	// Remember whether the directory was supplied by the caller, so the cleanup
	// function below only removes directories created here.
	callerSuppliedTmpDir := o.TmpDir != ""
	if !callerSuppliedTmpDir {
		tmpDir, err := ioutil.TempDir("", "uroot-integration")
		if err != nil {
			t.Fatalf("Failed to create temp dir: %v", err)
		}
		o.TmpDir = tmpDir
	}
qOpts, err := QEMU(o)
if err != nil {
t.Fatalf("Failed to create QEMU VM %s: %v", o.Name, err)
}
vm, err := qOpts.Start()
if err != nil {
t.Fatalf("Failed to start QEMU VM %s: %v", o.Name, err)
}
return vm, func() {
vm.Close()
t.Logf("QEMU command line to reproduce %s:\n%s", o.Name, vm.CmdlineQuoted())
if t.Failed() {
t.Log("Keeping temp dir: ", o.TmpDir)
		} else if !callerSuppliedTmpDir {
if err := os.RemoveAll(o.TmpDir); err != nil {
t.Logf("failed to remove temporary directory %s: %v", o.TmpDir, err)
}
}
}
}
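// Illustrative sketch (not part of the original u-root source): a minimal
// integration test built on QEMUTest. The test name, the echoed marker and the
// shutdown command are made-up examples, and vm.Expect is assumed to be the
// serial-console matcher exposed by pkg/qemu.
func exampleQEMUTestUsage(t *testing.T) {
	vm, cleanup := QEMUTest(t, &Options{
		TestCmds: []string{
			"echo HELLO WORLD",
			"shutdown -h",
		},
	})
	defer cleanup()
	// Wait until the marker printed by the guest appears on the serial console.
	if err := vm.Expect("HELLO WORLD"); err != nil {
		t.Errorf("marker not seen on serial console: %v", err)
	}
}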
// QEMU builds the u-root environment and prepares QEMU options given the test
// options and environment variables.
//
// QEMU will augment o.BuildOpts and o.QEMUOpts with configuration that the
// caller either requested (through the Options.Uinit field, for example) or
// that the caller did not set.
//
// QEMU returns the QEMU launch options or an error.
func QEMU(o *Options) (*qemu.Options, error) {
if len(o.Name) == 0 {
o.Name = callerName(2)
}
// Generate Elvish shell script of test commands in o.TmpDir.
if len(o.TestCmds) > 0 {
testFile := filepath.Join(o.TmpDir, "test.elv")
if err := ioutil.WriteFile(
testFile, []byte(strings.Join(o.TestCmds, "\n")), 0777); err != nil {
return nil, err
}
}
// Set the initramfs.
if len(o.QEMUOpts.Initramfs) == 0 {
o.QEMUOpts.Initramfs = filepath.Join(o.TmpDir, "initramfs.cpio")
if err := ChooseTestInitramfs(o.DontSetEnv, o.BuildOpts, o.Uinit, o.QEMUOpts.Initramfs); err != nil {
return nil, err
}
}
if len(o.QEMUOpts.Kernel) == 0 {
// Copy kernel to o.TmpDir for tests involving kexec.
kernel := filepath.Join(o.TmpDir, "kernel")
if err := cp.Copy(os.Getenv("UROOT_KERNEL"), kernel); err != nil {
return nil, err
}
o.QEMUOpts.Kernel = kernel
}
switch TestArch() {
case "amd64":
o.QEMUOpts.KernelArgs += " console=ttyS0 earlyprintk=ttyS0"
case "arm":
o.QEMUOpts.KernelArgs += " console=ttyAMA0"
}
o.QEMUOpts.KernelArgs += " uroot.vmtest"
var dir qemu.Device
if o.UseVVFAT {
dir = qemu.ReadOnlyDirectory{Dir: o.TmpDir}
} else {
dir = qemu.P9Directory{Dir: o.TmpDir, Arch: TestArch()}
}
o.QEMUOpts.Devices = append(o.QEMUOpts.Devices, qemu.VirtioRandom{}, dir)
return &o.QEMUOpts, nil
}
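// Illustrative sketch (not part of the original file): using QEMU directly when
// no *testing.T is available, e.g. from a standalone helper binary. UROOT_QEMU
// and UROOT_KERNEL must still be set; the directory name, marker string and the
// vm.Expect call are assumptions made for this example.
func exampleQEMUWithoutTesting() error {
	tmp, err := ioutil.TempDir("", "uroot-manual")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmp)
	o := &Options{
		Name:     "manual-run",
		TmpDir:   tmp,
		TestCmds: []string{"echo MANUAL RUN OK", "shutdown -h"},
	}
	qopts, err := QEMU(o)
	if err != nil {
		return err
	}
	vm, err := qopts.Start()
	if err != nil {
		return err
	}
	defer vm.Close()
	return vm.Expect("MANUAL RUN OK")
}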
// ChooseTestInitramfs chooses which initramfs will be used for a given test and
// places it at the location given by outputFile.
// Default to the override initramfs if one is specified in the UROOT_INITRAMFS
// environment variable. Else, build an initramfs with the given parameters.
// If no uinit was provided, the generic one is used.
func ChooseTestInitramfs(dontSetEnv bool, o uroot.Opts, uinit, outputFile string) error {
override := os.Getenv("UROOT_INITRAMFS")
if len(override) > 0 {
log.Printf("Overriding with initramfs %q", override)
return cp.Copy(override, outputFile)
}
if len(uinit) == 0 {
log.Printf("Defaulting to generic initramfs")
uinit = "github.com/u-root/u-root/integration/testcmd/generic/uinit"
}
_, err := CreateTestInitramfs(dontSetEnv, o, uinit, outputFile)
return err
}
// CreateTestInitramfs creates an initramfs with the given build options and
// uinit, and writes it to the given output file. If no output file is provided,
// one will be created.
// The output file name is returned. It is the caller's responsibility to remove
// the initramfs file after use.
func CreateTestInitramfs(dontSetEnv bool, o uroot.Opts, uinit, outputFile string) (string, error) {
if !dontSetEnv {
env := golang.Default()
env.CgoEnabled = false
env.GOARCH = TestArch()
o.Env = env
}
logger := log.New(os.Stderr, "", 0)
// If build opts don't specify any commands, include all commands. Else,
// always add init and elvish.
var cmds []string
if len(o.Commands) == 0 {
cmds = []string{
"github.com/u-root/u-root/cmds/core/*",
"github.com/u-root/u-root/cmds/exp/*",
}
}
if len(uinit) != 0 {
cmds = append(cmds, uinit)
}
// Add our commands to the build opts.
o.AddBusyBoxCommands(cmds...)
// Fill in the default build options if not specified.
if o.BaseArchive == nil {
o.BaseArchive = uroot.DefaultRamfs().Reader()
}
if len(o.InitCmd) == 0 {
o.InitCmd = "init"
}
if len(o.DefaultShell) == 0 {
o.DefaultShell = "elvish"
}
if len(o.TempDir) == 0 {
tempDir, err := ioutil.TempDir("", "initramfs-tempdir")
if err != nil {
return "", fmt.Errorf("Failed to create temp dir: %v", err)
}
defer os.RemoveAll(tempDir)
o.TempDir = tempDir
}
// Create an output file if one was not provided.
if len(outputFile) == 0 {
f, err := ioutil.TempFile("", "initramfs.cpio")
if err != nil {
return "", fmt.Errorf("failed to create output file: %v", err)
}
outputFile = f.Name()
}
w, err := initramfs.CPIO.OpenWriter(logger, outputFile)
if err != nil {
return "", fmt.Errorf("Failed to create initramfs writer: %v", err)
}
o.OutputFile = w
return outputFile, uroot.CreateInitramfs(logger, o)
}
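// Illustrative sketch (not part of the original file): building a standalone
// initramfs with CreateTestInitramfs outside of a VM run. The output path is a
// placeholder; with empty build options all core and exp commands are included,
// and the empty uinit string adds no extra uinit package.
func exampleBuildInitramfs() (string, error) {
	return CreateTestInitramfs(false, uroot.Opts{}, "", "/tmp/test-initramfs.cpio")
}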
| [
"\"UROOT_TESTARCH\"",
"\"UROOT_KERNEL\"",
"\"UROOT_INITRAMFS\""
]
| []
| [
"UROOT_INITRAMFS",
"UROOT_KERNEL",
"UROOT_TESTARCH"
]
| [] | ["UROOT_INITRAMFS", "UROOT_KERNEL", "UROOT_TESTARCH"] | go | 3 | 0 | |
pkg/config/config.go | package config
import (
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path"
"runtime"
"gopkg.in/yaml.v2"
)
const (
configDir string = "dlv"
configDirHidden string = ".dlv"
configFile string = "config.yml"
)
// SubstitutePathRule describes a rule for substitution of path to source code file.
type SubstitutePathRule struct {
// Directory path will be substituted if it matches `From`.
From string
// Path to which substitution is performed.
To string
}
// SubstitutePathRules is a slice of source code path substitution rules.
type SubstitutePathRules []SubstitutePathRule
// Config defines all configuration options available to be set through the config file.
type Config struct {
// Commands aliases.
Aliases map[string][]string `yaml:"aliases"`
// Source code path substitution rules.
SubstitutePath SubstitutePathRules `yaml:"substitute-path"`
// MaxStringLen is the maximum string length that the commands print,
// locals, args and vars should read (in verbose mode).
MaxStringLen *int `yaml:"max-string-len,omitempty"`
// MaxArrayValues is the maximum number of array items that the commands
// print, locals, args and vars should read (in verbose mode).
MaxArrayValues *int `yaml:"max-array-values,omitempty"`
	// MaxVariableRecurse is the output evaluation depth for nested struct members,
	// array and slice items, and dereferenced pointers.
MaxVariableRecurse *int `yaml:"max-variable-recurse,omitempty"`
	// DisassembleFlavor allows the user to specify the output syntax flavor of
	// assembly, one of: "intel" (default), "gnu", "go".
DisassembleFlavor *string `yaml:"disassemble-flavor,omitempty"`
// If ShowLocationExpr is true whatis will print the DWARF location
// expression for its argument.
ShowLocationExpr bool `yaml:"show-location-expr"`
// Source list line-number color (3/4 bit color codes as defined
// here: https://en.wikipedia.org/wiki/ANSI_escape_code#Colors)
SourceListLineColor int `yaml:"source-list-line-color"`
// DebugFileDirectories is the list of directories Delve will use
// in order to resolve external debug info files.
DebugInfoDirectories []string `yaml:"debug-info-directories"`
}
// LoadConfig attempts to populate a Config object from the config.yml file.
func LoadConfig() *Config {
err := createConfigPath()
if err != nil {
fmt.Printf("Could not create config directory: %v.", err)
return &Config{}
}
fullConfigFile, err := GetConfigFilePath(configFile)
if err != nil {
fmt.Printf("Unable to get config file path: %v.", err)
return &Config{}
}
hasOldConfig, err := hasOldConfig()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to determine if old config exists: %v\n", err)
}
if hasOldConfig {
userHomeDir := getUserHomeDir()
oldLocation := path.Join(userHomeDir, configDirHidden)
if err := moveOldConfig(); err != nil {
fmt.Fprintf(os.Stderr, "Unable to move old config: %v\n", err)
return &Config{}
}
if err := os.RemoveAll(oldLocation); err != nil {
fmt.Fprintf(os.Stderr, "Unable to remove old config location: %v\n", err)
return &Config{}
}
fmt.Fprintf(os.Stderr, "Successfully moved config from: %s to: %s\n", oldLocation, fullConfigFile)
}
f, err := os.Open(fullConfigFile)
if err != nil {
f, err = createDefaultConfig(fullConfigFile)
if err != nil {
fmt.Printf("Error creating default config file: %v", err)
return &Config{}
}
}
defer func() {
err := f.Close()
if err != nil {
fmt.Printf("Closing config file failed: %v.", err)
}
}()
data, err := ioutil.ReadAll(f)
if err != nil {
fmt.Printf("Unable to read config data: %v.", err)
return &Config{}
}
var c Config
err = yaml.Unmarshal(data, &c)
if err != nil {
fmt.Printf("Unable to decode config file: %v.", err)
return &Config{}
}
if len(c.DebugInfoDirectories) == 0 {
c.DebugInfoDirectories = []string{"/usr/lib/debug/.build-id"}
}
return &c
}
// SaveConfig will marshal and save the config struct
// to disk.
func SaveConfig(conf *Config) error {
fullConfigFile, err := GetConfigFilePath(configFile)
if err != nil {
return err
}
out, err := yaml.Marshal(*conf)
if err != nil {
return err
}
f, err := os.Create(fullConfigFile)
if err != nil {
return err
}
defer f.Close()
_, err = f.Write(out)
return err
}
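// Illustrative sketch (not part of the original Delve source): a typical
// load-modify-save round trip. The option values below are arbitrary examples.
func exampleConfigRoundTrip() error {
	conf := LoadConfig()
	maxLen := 128
	conf.MaxStringLen = &maxLen
	conf.Aliases = map[string][]string{"continue": {"c"}}
	return SaveConfig(conf)
}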
// moveOldConfig attempts to move the config from the old location
// ($HOME/.dlv) to the new location ($XDG_CONFIG_HOME/dlv).
func moveOldConfig() error {
if os.Getenv("XDG_CONFIG_HOME") == "" && runtime.GOOS != "linux" {
return nil
}
userHomeDir := getUserHomeDir()
p := path.Join(userHomeDir, configDirHidden, configFile)
_, err := os.Stat(p)
if err != nil {
return fmt.Errorf("unable to read config file located at: %s", p)
}
newFile, err := GetConfigFilePath(configFile)
if err != nil {
return fmt.Errorf("unable to read config file located at: %s", err)
}
if err := os.Rename(p, newFile); err != nil {
return fmt.Errorf("unable to move %s to %s", p, newFile)
}
return nil
}
func createDefaultConfig(path string) (*os.File, error) {
f, err := os.Create(path)
if err != nil {
return nil, fmt.Errorf("unable to create config file: %v", err)
}
err = writeDefaultConfig(f)
if err != nil {
return nil, fmt.Errorf("unable to write default configuration: %v", err)
}
f.Seek(0, io.SeekStart)
return f, nil
}
func writeDefaultConfig(f *os.File) error {
_, err := f.WriteString(
`# Configuration file for the delve debugger.
# This is the default configuration file. Available options are provided, but disabled.
# Delete the leading hash mark to enable an item.
# Uncomment the following line and set your preferred ANSI foreground color
# for source line numbers in the (list) command (if unset, default is 34,
# dark blue) See https://en.wikipedia.org/wiki/ANSI_escape_code#3/4_bit
# source-list-line-color: 34
# Provided aliases will be added to the default aliases for a given command.
aliases:
# command: ["alias1", "alias2"]
# Define sources path substitution rules. Can be used to rewrite a source path stored
# in program's debug information, if the sources were moved to a different place
# between compilation and debugging.
# Note that substitution rules will not be used for paths passed to "break" and "trace"
# commands.
substitute-path:
# - {from: path, to: path}
# Maximum number of elements loaded from an array.
# max-array-values: 64
# Maximum loaded string length.
# max-string-len: 64
# Output evaluation.
# max-variable-recurse: 1
# Uncomment the following line to make the whatis command also print the DWARF location expression of its argument.
# show-location-expr: true
# Allow user to specify output syntax flavor of assembly, one of this list "intel"(default), "gnu", "go".
# disassemble-flavor: intel
# List of directories to use when searching for separate debug info files.
debug-info-directories: ["/usr/lib/debug/.build-id"]
`)
return err
}
// createConfigPath creates the directory structure at which all config files are saved.
func createConfigPath() error {
path, err := GetConfigFilePath("")
if err != nil {
return err
}
return os.MkdirAll(path, 0700)
}
// GetConfigFilePath gets the full path to the given config file name.
func GetConfigFilePath(file string) (string, error) {
if configPath := os.Getenv("XDG_CONFIG_HOME"); configPath != "" {
return path.Join(configPath, configDir, file), nil
}
userHomeDir := getUserHomeDir()
if runtime.GOOS == "linux" {
return path.Join(userHomeDir, ".config", configDir, file), nil
}
return path.Join(userHomeDir, configDirHidden, file), nil
}
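// Illustrative sketch (not part of the original file): where the config file
// resolves to, depending on platform and environment:
//   any OS with XDG_CONFIG_HOME set: $XDG_CONFIG_HOME/dlv/config.yml
//   Linux without XDG_CONFIG_HOME:   $HOME/.config/dlv/config.yml
//   other operating systems:         $HOME/.dlv/config.yml
func exampleConfigLocation() {
	if p, err := GetConfigFilePath(configFile); err == nil {
		fmt.Println("delve config file:", p)
	}
}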
// Checks if the user has a config at the old location: $HOME/.dlv
func hasOldConfig() (bool, error) {
// If you don't have XDG_CONFIG_HOME set and aren't on Linux you have nothing to move
if os.Getenv("XDG_CONFIG_HOME") == "" && runtime.GOOS != "linux" {
return false, nil
}
userHomeDir := getUserHomeDir()
o := path.Join(userHomeDir, configDirHidden, configFile)
_, err := os.Stat(o)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}
func getUserHomeDir() string {
userHomeDir := "."
usr, err := user.Current()
if err == nil {
userHomeDir = usr.HomeDir
}
return userHomeDir
}
| [
"\"XDG_CONFIG_HOME\"",
"\"XDG_CONFIG_HOME\"",
"\"XDG_CONFIG_HOME\""
]
| []
| [
"XDG_CONFIG_HOME"
]
| [] | ["XDG_CONFIG_HOME"] | go | 1 | 0 | |
demo_semi_supervised_learning_low_dim_no_hyperparameters.py | # -*- coding: utf-8 -*-
# %reset -f
"""
@author: Hiromasa Kaneko
"""
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dcekit.learning import SemiSupervisedLearningLowDimension
from sklearn.decomposition import PCA
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, RBF, ConstantKernel
from sklearn.model_selection import cross_val_predict, train_test_split
number_of_training_samples = 30
number_of_pca_components = 5
fold_number = 10
# load dataset
dataset = pd.read_csv('descriptors8_with_boiling_point.csv', encoding='SHIFT-JIS', index_col=0)
y = dataset.iloc[:, 0]
x = dataset.iloc[:, 1:]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=x.shape[0] - number_of_training_samples,
random_state=0)
fold_number = min(fold_number, len(y_train))
# Gaussian process regression and PCA
regression_model = GaussianProcessRegressor(ConstantKernel() * RBF() + WhiteKernel(), alpha=0)
low_dimension_model = PCA(n_components=number_of_pca_components)
model = SemiSupervisedLearningLowDimension(base_estimator=regression_model,
base_dimension_reductioner=low_dimension_model,
x_unsupervised=x_test, autoscaling_flag=True, cv_flag=False)
model.fit(x_train, y_train)
# calculate y in training data
calculated_y_train = model.predict(x_train)
# yy-plot
plt.rcParams['font.size'] = 18  # font size for text such as axis labels
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y_train, calculated_y_train, c='blue')
y_max = np.max(np.array([np.array(y_train), calculated_y_train]))
y_min = np.min(np.array([np.array(y_train), calculated_y_train]))
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('Actual Y')
plt.ylabel('Calculated Y')
plt.show()
# r2, RMSE, MAE
print('r2: {0}'.format(float(1 - sum((y_train - calculated_y_train) ** 2) / sum((y_train - y_train.mean()) ** 2))))
print('RMSE: {0}'.format(float((sum((y_train - calculated_y_train) ** 2) / len(y_train)) ** 0.5)))
print('MAE: {0}'.format(float(sum(abs(y_train - calculated_y_train)) / len(y_train))))
# estimate y in cross-validation in training data
estimated_y_in_cv = cross_val_predict(model, x_train, y_train, cv=fold_number)
# yy-plot
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y_train, estimated_y_in_cv, c='blue')
y_max = np.max(np.array([np.array(y_train), estimated_y_in_cv]))
y_min = np.min(np.array([np.array(y_train), estimated_y_in_cv]))
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('Actual Y')
plt.ylabel('Estimated Y in CV')
plt.show()
# r2cv, RMSEcv, MAEcv
print('r2cv: {0}'.format(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2))))
print('RMSEcv: {0}'.format(float((sum((y_train - estimated_y_in_cv) ** 2) / len(y_train)) ** 0.5)))
print('MAEcv: {0}'.format(float(sum(abs(y_train - estimated_y_in_cv)) / len(y_train))))
# prediction
if x_test.shape[0]:
predicted_y_test = model.predict(x_test)
# yy-plot
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y_test, predicted_y_test, c='blue')
y_max = np.max(np.array([np.array(y_test), predicted_y_test]))
y_min = np.min(np.array([np.array(y_test), predicted_y_test]))
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('Actual Y')
plt.ylabel('Predicted Y')
plt.show()
# r2p, RMSEp, MAEp
print('r2p: {0}'.format(float(1 - sum((y_test - predicted_y_test) ** 2) / sum((y_test - y_test.mean()) ** 2))))
print('RMSEp: {0}'.format(float((sum((y_test - predicted_y_test) ** 2) / len(y_test)) ** 0.5)))
print('MAEp: {0}'.format(float(sum(abs(y_test - predicted_y_test)) / len(y_test))))
| []
| []
| []
| [] | [] | python | null | null | null |
scripting/asgi.py | """
ASGI config for scripting project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scripting.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
hack/update_crdb_versions/main.go | /*
Copyright 2022 The Cockroach Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"regexp"
"sort"
"time"
"github.com/Masterminds/semver/v3"
"gopkg.in/yaml.v2"
)
const crdbVersionsInvertedRegexp = "^v19|^v21.1.8$|latest|ubi$"
const CrdbVersionsFileName = "crdb-versions.yaml"
// TODO(rail): we may need to add pagination handling in case we exceed 500 versions
// Use anonymous API to get the list of published images from the RedHat Catalog.
const crdbVersionsUrl = "https://catalog.redhat.com/api/containers/v1/repositories/registry/" +
"registry.connect.redhat.com/repository/cockroachdb/cockroach/images?" +
"exclude=data.repositories.comparison.advisory_rpm_mapping,data.brew," +
"data.cpe_ids,data.top_layer_id&page_size=500&page=0"
const crdbVersionsDefaultTimeout = 30
const CrdbVersionsFileDescription = `#
# Supported CockroachDB versions.
#
# This file contains a list of CockroachDB versions that are supported by the
# operator. hack/crdbversions/main.go uses this list to generate various
# manifests.
# Please update this file when CockroachDB releases new versions.
#
# Generated. DO NOT EDIT. This file is created via make release/gen-templates
`
type CrdbVersionsResponse struct {
Data []struct {
Repositories []struct {
Tags []struct {
Name string `json:"name"`
} `json:"tags"`
} `json:"repositories"`
} `json:"data"`
}
// GetData fetches the published CockroachDB image metadata from the RedHat
// Catalog API and decodes the JSON response into data.
func GetData(data *CrdbVersionsResponse) error {
client := http.Client{Timeout: crdbVersionsDefaultTimeout * time.Second}
r, err := client.Get(crdbVersionsUrl)
if err != nil {
return fmt.Errorf("Cannot make a get request: %s", err)
}
defer r.Body.Close()
return json.NewDecoder(r.Body).Decode(data)
}
// GetVersions extracts the image tag names from the catalog response,
// keeping only the supported versions.
func GetVersions(data CrdbVersionsResponse) []string {
var versions []string
for _, data := range data.Data {
for _, repo := range data.Repositories {
for _, tag := range repo.Tags {
if IsValid(tag.Name) {
versions = append(versions, tag.Name)
}
}
}
}
return versions
}
// IsValid reports whether a tag name is a supported version, i.e. it does
// not match crdbVersionsInvertedRegexp.
func IsValid(version string) bool {
match, _ := regexp.MatchString(crdbVersionsInvertedRegexp, version)
return !match
}
// SortVersions converts the slice of version strings to semver.Versions,
// sorts them, and converts back to a slice of version strings.
func SortVersions(versions []string) []string {
vs := make([]*semver.Version, len(versions))
for i, r := range versions {
v, err := semver.NewVersion(r)
if err != nil {
log.Fatalf("Cannot parse version : %s", err)
}
vs[i] = v
}
sort.Sort(semver.Collection(vs))
var sortedVersions []string
for _, v := range vs {
sortedVersions = append(sortedVersions, v.Original())
}
return sortedVersions
}
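// Illustrative sketch (not part of the original file): SortVersions orders the
// catalog tags semantically rather than lexically. The tag names below are
// made-up examples of the published "vMAJOR.MINOR.PATCH" tags.
func exampleSortVersions() {
	tags := []string{"v21.1.9", "v20.2.5", "v21.2.0", "v20.1.11"}
	// Semver ordering yields: v20.1.11, v20.2.5, v21.1.9, v21.2.0.
	fmt.Println(SortVersions(tags))
}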
// GenerateCrdbVersionsFile writes the supported versions to a YAML file at
// path, prefixed with CrdbVersionsFileDescription.
func GenerateCrdbVersionsFile(versions []string, path string) error {
f, err := os.Create(path)
if err != nil {
log.Fatalf("Cannot create %s file: %s", path, err)
}
defer f.Close()
versionsMap := map[string][]string{"CrdbVersions": versions}
yamlVersions, err := yaml.Marshal(&versionsMap)
if err != nil {
log.Fatalf("error while converting to yaml: %v", err)
}
result := append([]byte(CrdbVersionsFileDescription), yamlVersions...)
return ioutil.WriteFile(path, result, 0)
}
func main() {
responseData := CrdbVersionsResponse{}
err := GetData(&responseData)
if err != nil {
log.Fatalf("Cannot parse response: %s", err)
}
rawVersions := GetVersions(responseData)
sortedVersions := SortVersions(rawVersions)
outputFile := filepath.Join(os.Getenv("BUILD_WORKSPACE_DIRECTORY"), CrdbVersionsFileName)
err = GenerateCrdbVersionsFile(sortedVersions, outputFile)
if err != nil {
log.Fatalf("Cannot write %s file: %s", outputFile, err)
}
}
| [
"\"BUILD_WORKSPACE_DIRECTORY\""
]
| []
| [
"BUILD_WORKSPACE_DIRECTORY"
]
| [] | ["BUILD_WORKSPACE_DIRECTORY"] | go | 1 | 0 | |
LOAD Network v1.01/src/testing/S3Querying.java | package testing;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.List;
import java.util.Properties;
import java.util.Scanner;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
import com.amazonaws.*;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.util.StringUtils;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.Bucket;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.CompressionType;
import com.amazonaws.services.s3.model.GeneratePresignedUrlRequest;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.InputSerialization;
import com.amazonaws.services.s3.model.JSONInput;
import com.amazonaws.services.s3.model.JSONOutput;
import com.amazonaws.services.s3.model.ExpressionType;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.OutputSerialization;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.ListObjectsV2Request;
import com.amazonaws.services.s3.model.ListObjectsV2Result;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.services.s3.model.SelectObjectContentEvent;
import com.amazonaws.services.s3.model.SelectObjectContentEventStream;
import com.amazonaws.services.s3.model.SelectObjectContentEventVisitor;
import com.amazonaws.services.s3.model.SelectObjectContentRequest;
import com.amazonaws.services.s3.model.SelectObjectContentResult;
import com.amazonaws.services.s3.model.SelectRecordsInputStream;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.fasterxml.jackson.databind.*;
import com.fasterxml.jackson.core.*;
import com.fasterxml.jackson.annotation.*;
public class S3Querying {
public static void main(String[] args) throws IOException {
Properties prop=new Properties();
String propFilePath = "../resources/config.properties";
FileInputStream inputStream;
try {
inputStream = new FileInputStream(propFilePath);
prop.load(inputStream);
} catch (IOException e1) {
e1.printStackTrace();
}
String accessKey = System.getenv("s3Accesskey");
String secretKey = System.getenv("s3Secretkey");
AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
// Set S3 Client Endpoint
AwsClientBuilder.EndpointConfiguration switchEndpoint = new AwsClientBuilder.EndpointConfiguration(
prop.getProperty("s3BaseName"),"");
// Set signer type and http scheme
ClientConfiguration conf = new ClientConfiguration();
conf.setSignerOverride("S3SignerType");
conf.setProtocol(Protocol.HTTPS);
AmazonS3 S3Client = AmazonS3ClientBuilder.standard()
.withEndpointConfiguration(switchEndpoint)
.withCredentials(new AWSStaticCredentialsProvider(credentials))
.withClientConfiguration(conf)
.withPathStyleAccessEnabled(true)
.build();
System.out.println("===========================================");
System.out.println(" Connection to the S3 ");
System.out.println("===========================================\n");
S3Object fullObject = null;
try {
/*
*/
String bucketName = "processed-canonical-data"; //Name of the bucket
String prefix = "linguistic-processing/2020-03-11/";
ListObjectsV2Request req = new
ListObjectsV2Request().withBucketName(bucketName).withPrefix(prefix).withDelimiter("/").withMaxKeys(1);
ListObjectsV2Result result = S3Client.listObjectsV2(req);
String key = result.getObjectSummaries().get(0).getKey();
//Testing selecting the object's content of a particular bucket
SelectObjectContentRequest request = new SelectObjectContentRequest();
request.setBucketName(bucketName);
request.setKey(key);
request.setExpression("SELECT TOP 1 FROM S3Object s");
request.setExpressionType(ExpressionType.SQL);
InputSerialization inputSerialization = new InputSerialization();
inputSerialization.setJson(new JSONInput());
inputSerialization.setCompressionType(CompressionType.BZIP2);
request.setInputSerialization(inputSerialization);
OutputSerialization outputSerialization = new OutputSerialization();
outputSerialization.setJson(new JSONOutput());
request.setOutputSerialization(outputSerialization);
final AtomicBoolean isResultComplete = new AtomicBoolean(false);
//SelectObjectContentResult results = S3Client.selectObjectContent(request);
GetObjectRequest object_request = new GetObjectRequest(bucketName, key);
fullObject = S3Client.getObject(object_request);
System.out.println("Content-Type: " + fullObject.getObjectMetadata().getContentType());
System.out.println("Content: ");
displayTextInputStream(fullObject.getObjectContent());
/*
SelectRecordsInputStream resultInputStream = results.getPayload().getRecordsInputStream(new SelectObjectContentEventVisitor() {
@Override
public void visit(SelectObjectContentEvent.StatsEvent event)
{
System.out.println(
"Received Stats, Bytes Scanned: " + event.getDetails().getBytesScanned()
+ " Bytes Processed: " + event.getDetails().getBytesProcessed());
}
@Override
public void visit(SelectObjectContentEvent.EndEvent event)
{
isResultComplete.set(true);
System.out.println("Received End Event. Result is complete.");
}
}
);*/
//Select Records from the Input Stream
/*for (S3ObjectSummary objectSummary : result.getObjectSummaries())
{
if(objectSummary.getKey() == "linguistic-processing/2020-03-11/JDG-1880.ling.annotation.jsonl.bz2") {
}
System.out.println(" --- " + objectSummary.getKey() +" "
+ "(size = " + objectSummary.getSize() + ")" +" "
+ "(eTag = " + objectSummary.getETag() + ")");
System.out.println();
}*/
}
catch (AmazonServiceException ase)
{
System.out.println("Caught an AmazonServiceException, which means your request made it to S3, but was rejected with an error response for some reason.");
System.out.println("Error Message: " + ase.getMessage());
System.out.println("HTTP Status Code: " + ase.getStatusCode());
System.out.println("AWS Error Code: " + ase.getErrorCode());
System.out.println("Error Type: " + ase.getErrorType());
System.out.println("Request ID: " + ase.getRequestId());
}
catch (AmazonClientException ace)
{
System.out.println("Caught an AmazonClientException, which means the client encountered "
+ "a serious internal problem while trying to communicate with S3 such as not being able to access the network.");
System.out.println("Error Message: " + ace.getMessage());
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} finally {
// To ensure that the network connection doesn't remain open, close any open input streams.
if (fullObject != null) {
fullObject.close();
}
}
}
private static void displayTextInputStream(InputStream input) throws IOException {
// Read the text input stream one line at a time and display each line.
Scanner fileIn = new Scanner(new BZip2CompressorInputStream(input));
if (null != fileIn) {
while (fileIn.hasNext()) {
System.out.println("Line: " + fileIn.nextLine());
}
}
}
}
| [
"\"s3Accesskey\"",
"\"s3Secretkey\""
]
| []
| [
"s3Secretkey",
"s3Accesskey"
]
| [] | ["s3Secretkey", "s3Accesskey"] | java | 2 | 0 | |
implementations/dcgan/dcgan.py | import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
from tensorboardX import SummaryWriter # used to log training information
import sys # needed so the parent package can be imported; fine when run from PyCharm, but a bare "python *.py" cannot find it otherwise
sys.path.append("../..")
from implementations import global_config
# Build the output folders from the global paths defined one level up; every GAN in this repo uses this layout
os.makedirs(global_config.generated_image_root, exist_ok=True)
os.makedirs(global_config.checkpoint_root, exist_ok=True)
os.makedirs(global_config.pretrained_generator_root, exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation") #只涉及数据读取,并不是用cpu去训练
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space") #拟合的分辨率越大,用于表示信息的隐空间一般也需要设置的越大
parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels") #与 有关
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
# arguments related to saving/loading pretrained models
parser.add_argument("--generator_interval",type=int,default=20,help="interval between saving generators, epoch")
# for multi-GPU training
parser.add_argument("--gpus",type=str,default=None,help="gpus you want to use, e.g. \"0,1\"")
opt = parser.parse_args()
print(opt)
if opt.gpus is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus # 设置可见GPU
print("Now using gpu " + opt.gpus +" for training...")
cuda = True if torch.cuda.is_available() else False
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.init_size = opt.img_size // 8 # 4*4
self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 256 * self.init_size ** 2)) # together with out.view in forward(), this turns the latent code into the low-resolution, high-depth tensor expected by the first conv layer
self.conv_blocks = nn.Sequential(
nn.BatchNorm2d(256),
# 4x4 -> 8x8
nn.ConvTranspose2d(256,128,4,2,1,bias=False), # transposed convolution: learnable upsampling, the added pixels are learned
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
# 8x8 -> 16x16
nn.ConvTranspose2d(128, 64,4,2, 1, bias=False),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2, inplace=True),
# 16x16 -> 32x32
nn.ConvTranspose2d(64, opt.channels,4,2, 1, bias=False),
nn.Tanh(),
)
def forward(self, z):
out = self.l1(z)
out = out.view(out.shape[0], 256, self.init_size, self.init_size) # batch*256*4*4 (B*C*H*W)
img = self.conv_blocks(out)
return img
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, bn=True):
block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
if bn:
block.append(nn.BatchNorm2d(out_filters, momentum=0.8))
return block
self.model = nn.Sequential( # 1. a stack of conv blocks: depth doubles while resolution shrinks
*discriminator_block(opt.channels, 16, bn=False), # * unpacks (or packs) arguments; here it unpacks the list returned by discriminator_block
*discriminator_block(16, 32),
*discriminator_block(32, 64),
*discriminator_block(64, 128),
)
# The height and width of downsampled image
ds_size = opt.img_size // 2 ** 4
self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid()) # 2. then a fully connected layer maps it to a single scalar
def forward(self, img):
out = self.model(img)
out = out.view(out.shape[0], -1)
validity = self.adv_layer(out) # the scalar is the probability that the image is real; since a sigmoid is already applied, plain BCELoss against the labels is enough
return validity
# Loss function
adversarial_loss = torch.nn.BCELoss()
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
# for multi-GPU runs, wrap the networks in DataParallel (applied to the nn.Module, not to the losses); each forward pass then only sees part of the batch
if opt.gpus is not None:
generator = nn.DataParallel(generator)
discriminator = nn.DataParallel(discriminator)
if cuda:
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Configure data loader
# os.makedirs("../../data/mnist", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
datasets.MNIST(
global_config.data_root,
train=True,
download=False,
transform=transforms.Compose(
[transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
),
),
batch_size=opt.batch_size,
shuffle=True,
)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# initialize the log writer
writer = SummaryWriter(log_dir=os.path.join(global_config.log_root,"log"))
# use a fixed latent code to generate images, which makes the model's evolution easier to observe
fixed_z =Variable(Tensor(np.random.normal(0, 1, (opt.batch_size, opt.latent_dim))))
# ----------
# Training
# ----------
for epoch in range(opt.n_epochs):
for i, (imgs, _) in enumerate(dataloader):
# Adversarial ground truths: shape (batch, 1); valid is all ones, fake is all zeros
valid = Variable(Tensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(imgs.shape[0], 1).fill_(0.0), requires_grad=False)
# Configure input: the real images fed to D
real_imgs = Variable(imgs.type(Tensor))
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad() # clear gradients before each network's update step
# Sample noise as generator input: a fresh latent vector every step (the fixed one defined earlier is kept for generating the same monitoring samples)
z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
# Generate a batch of images by feeding z to the generator
gen_imgs = generator(z)
# Loss measures generator's ability to fool the discriminator: compare D(G(z)) against the "real" labels (ones) with BCE loss
g_loss = adversarial_loss(discriminator(gen_imgs), valid)
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Measure discriminator's ability to classify real from generated samples
# Since maximizing the objective directly is awkward to implement, we minimize 1/2 * { -E_{x~P_data}[log D(x)] - E_{x~P_g}[log(1 - D(x))] } instead
real_loss = adversarial_loss(discriminator(real_imgs), valid)
fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)
d_loss = (real_loss + fake_loss) / 2
d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())
)
batches_done = epoch * len(dataloader) + i
if batches_done % opt.sample_interval == 0:
sampled_imgs = generator(fixed_z) # images sampled from the fixed tensor show how the model evolves over training
save_image(sampled_imgs.data[:25], os.path.join(global_config.generated_image_root,"%d.png" % batches_done), nrow=5, normalize=True)
writer.add_scalar("loss/G_loss", g_loss.item(), global_step=batches_done) # 横轴iter纵轴G_loss
writer.add_scalar("loss/D_loss", d_loss.item(), global_step=batches_done) # 横轴iter纵轴D_loss
writer.add_scalars("loss/loss", {"g_loss":g_loss.item(),"d_loss":d_loss.item()}, global_step=batches_done) # 两个loss画在一张图里
if epoch % opt.generator_interval == 0:
# save the generator
torch.save(generator.state_dict(),os.path.join(global_config.pretrained_generator_root,"%05d_ckpt_g.pth"%epoch)) # save only the generator
# finally save everything once more
# collect every parameter that needs to be saved and restored into a dict
state = {
'epoch': epoch,
'G_state_dict': generator.module.state_dict() if opt.gpus is not None else generator.state_dict(),
'D_state_dict': discriminator.module.state_dict() if opt.gpus is not None else discriminator.state_dict(),
'optimizer_G': optimizer_G.state_dict(),
'optimizer_D': optimizer_D.state_dict(),
}
torch.save(state,os.path.join(global_config.checkpoint_root,"%05d_ckpt.pth"%epoch)) # save the checkpoint as a dict so training can be resumed
torch.save(generator.state_dict(), os.path.join(global_config.pretrained_generator_root, "%05d_ckpt_g.pth" % epoch)) # also save just the generator weights for generating images later
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
core/chaincode/platforms/java/platform.go | /*
Copyright DTCC 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package java
import (
"archive/tar"
"bytes"
"compress/gzip"
"errors"
"fmt"
"net/url"
"strings"
"github.com/hyperledger/fabric/core/chaincode/platforms/ccmetadata"
cutil "github.com/hyperledger/fabric/core/container/util"
pb "github.com/hyperledger/fabric/protos/peer"
// "path/filepath"
)
// Platform for chaincodes written in java
type Platform struct {
}
var buildCmds = map[string]string{
"src/build.gradle": "gradle -b build.gradle clean && gradle -b build.gradle build",
"src/pom.xml": "mvn -f pom.xml clean && mvn -f pom.xml package",
}
//getBuildCmd returns the build command (gradle or maven) based on the build file
//found in the java chaincode project root; the first match found is returned:
//src/build.gradle - gradle
//src/pom.xml - maven
is := bytes.NewReader(codePackage)
gr, err := gzip.NewReader(is)
if err != nil {
return "", fmt.Errorf("failure opening gzip stream: %s", err)
}
defer gr.Close()
tr := tar.NewReader(gr)
for {
header, err := tr.Next()
if err != nil {
return "", errors.New("build file not found")
}
if cmd, ok := buildCmds[header.Name]; ok {
return cmd, nil
}
}
}
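// For example (paths assumed for illustration): a code package whose tar stream
// contains an entry named "src/build.gradle" resolves to the gradle command above,
// while one containing "src/pom.xml" resolves to the maven command; if neither
// entry appears before the stream ends, getBuildCmd returns "build file not found".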
//ValidateSpec validates the java chaincode specs
func (javaPlatform *Platform) ValidateSpec(spec *pb.ChaincodeSpec) error {
path, err := url.Parse(spec.ChaincodeId.Path)
fmt.Printf("URL: %+v\n", path)
if err != nil || path == nil {
return fmt.Errorf("invalid path: %s", err)
}
//we have no real good way of checking existence of remote urls except by downloading and testing
//which we do later anyway. But we *can* - and *should* - test for existence of local paths.
//Treat empty scheme as a local filesystem path
// if url.Scheme == "" {
// pathToCheck := filepath.Join(os.Getenv("GOPATH"), "src", spec.ChaincodeId.Path)
// exists, err := pathExists(pathToCheck)
// if err != nil {
// return fmt.Errorf("Error validating chaincode path: %s", err)
// }
// if !exists {
// return fmt.Errorf("Path to chaincode does not exist: %s", spec.ChaincodeId.Path)
// }
// }
return nil
}
func (javaPlatform *Platform) ValidateDeploymentSpec(cds *pb.ChaincodeDeploymentSpec) error {
// FIXME: Java platform needs to implement its own validation similar to GOLANG
return nil
}
// GetDeploymentPayload packages the java chaincode into a deployment payload
func (javaPlatform *Platform) GetDeploymentPayload(spec *pb.ChaincodeSpec) ([]byte, error) {
var err error
inputbuf := bytes.NewBuffer(nil)
gw := gzip.NewWriter(inputbuf)
tw := tar.NewWriter(gw)
//ignore the generated hash. Just use the tw
//The hash could be used in a future enhancement
//to check, warn of duplicate installs etc.
_, err = collectChaincodeFiles(spec, tw)
if err != nil {
return nil, err
}
err = writeChaincodePackage(spec, tw)
tw.Close()
gw.Close()
if err != nil {
return nil, err
}
payload := inputbuf.Bytes()
return payload, nil
}
func (javaPlatform *Platform) GenerateDockerfile(cds *pb.ChaincodeDeploymentSpec) (string, error) {
var err error
var buf []string
buildCmd, err := getBuildCmd(cds.CodePackage)
if err != nil {
return "", err
}
buf = append(buf, cutil.GetDockerfileFromConfig("chaincode.java.Dockerfile"))
buf = append(buf, "ADD codepackage.tgz /root/chaincode")
buf = append(buf, "RUN cd /root/chaincode/src && "+buildCmd)
buf = append(buf, "RUN cp /root/chaincode/src/build/chaincode.jar /root")
buf = append(buf, "RUN cp /root/chaincode/src/build/libs/* /root/libs")
dockerFileContents := strings.Join(buf, "\n")
return dockerFileContents, nil
}
func (javaPlatform *Platform) GenerateDockerBuild(cds *pb.ChaincodeDeploymentSpec, tw *tar.Writer) error {
return cutil.WriteBytesToPackage("codepackage.tgz", cds.CodePackage, tw)
}
//GetMetadataProvider fetches metadata provider given deployment spec
func (javaPlatform *Platform) GetMetadataProvider(cds *pb.ChaincodeDeploymentSpec) ccmetadata.MetadataProvider {
return &ccmetadata.TargzMetadataProvider{cds}
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
contrib/spendfrom/spendfrom.py | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 15555 if testnet else 5555
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
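# For example (illustrative values): Decimal("11.00") is sent as float(Decimal("11.00")) == 11.0,
# so bitcoind receives a plain JSON number instead of a Decimal the encoder may reject.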
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| []
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | python | 1 | 0 | |
test/integration/sagemaker/test_tfs.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import pytest
import util
NON_P3_REGIONS = ['ap-southeast-1', 'ap-southeast-2', 'ap-south-1',
'ca-central-1', 'eu-central-1', 'eu-west-2', 'us-west-1']
@pytest.fixture(params=os.environ['TEST_VERSIONS'].split(','))
def version(request):
return request.param
@pytest.fixture(scope='session')
def repo(request):
return request.config.getoption('--repo') or 'sagemaker-tensorflow-serving'
@pytest.fixture
def tag(request, version, instance_type):
if request.config.getoption('--tag'):
return request.config.getoption('--tag')
arch = 'gpu' if instance_type.startswith('ml.p') else 'cpu'
return f'{version}-{arch}'
@pytest.fixture
def image_uri(registry, region, repo, tag):
return util.image_uri(registry, region, repo, tag)
@pytest.fixture(params=os.environ['TEST_INSTANCE_TYPES'].split(','))
def instance_type(request, region):
return request.param
@pytest.fixture(scope='module')
def accelerator_type():
return None
@pytest.fixture(scope='session')
def tfs_model(region, boto_session):
return util.find_or_put_model_data(region,
boto_session,
'test/data/tfs-model.tar.gz')
@pytest.fixture(scope='session')
def python_model_with_requirements(region, boto_session):
return util.find_or_put_model_data(region,
boto_session,
'test/data/python-with-requirements.tar.gz')
@pytest.fixture(scope='session')
def python_model_with_lib(region, boto_session):
return util.find_or_put_model_data(region,
boto_session,
'test/data/python-with-lib.tar.gz')
def test_tfs_model(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name, tfs_model,
image_uri, instance_type, accelerator_type):
input_data = {'instances': [1.0, 2.0, 5.0]}
util.create_and_invoke_endpoint(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name, tfs_model,
image_uri, instance_type, accelerator_type, input_data)
def test_batch_transform(region, boto_session, sagemaker_client,
model_name, tfs_model, image_uri,
instance_type):
results = util.run_batch_transform_job(region=region,
boto_session=boto_session,
model_data=tfs_model,
image_uri=image_uri,
model_name=model_name,
sagemaker_client=sagemaker_client,
instance_type=instance_type)
assert len(results) == 10
for r in results:
assert r == [3.5, 4.0, 5.5]
def test_python_model_with_requirements(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name,
python_model_with_requirements, image_uri, instance_type,
accelerator_type):
if 'p3' in instance_type:
pytest.skip('skip for p3 instance')
# the python service needs to transform this to get a valid prediction
input_data = {'x': [1.0, 2.0, 5.0]}
output_data = util.create_and_invoke_endpoint(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name,
python_model_with_requirements, image_uri,
instance_type, accelerator_type, input_data)
# python service adds this to tfs response
assert output_data['python'] is True
assert output_data['pillow'] == '6.0.0'
def test_python_model_with_lib(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name, python_model_with_lib,
image_uri, instance_type, accelerator_type):
if 'p3' in instance_type:
pytest.skip('skip for p3 instance')
# the python service needs to transform this to get a valid prediction
input_data = {'x': [1.0, 2.0, 5.0]}
output_data = util.create_and_invoke_endpoint(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name, python_model_with_lib,
image_uri, instance_type, accelerator_type, input_data)
# python service adds this to tfs response
assert output_data['python'] is True
assert output_data['dummy_module'] == '0.1'
| []
| []
| [
"TEST_VERSIONS",
"TEST_INSTANCE_TYPES"
]
| [] | ["TEST_VERSIONS", "TEST_INSTANCE_TYPES"] | python | 2 | 0 | |
contrib/spendfrom/spendfrom.py | #!/usr/bin/env python
#
# Use the raw transactions API to spend VPs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a vpubd or vpub-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the vpub data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Vpub/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Vpub")
return os.path.expanduser("~/.vpub")
def read_bitcoin_config(dbdir):
"""Read the vpub.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "vpub.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a vpub JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 11774 if testnet else 11772
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the vpubd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(vpubd):
info = vpubd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
vpubd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = vpubd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(vpubd):
address_summary = dict()
address_to_account = dict()
for info in vpubd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = vpubd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = vpubd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-vpub-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(vpubd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(vpubd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to vpubd.
#
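# For example (illustrative values): Decimal("11.00") is passed on as float(Decimal("11.00")) == 11.0,
# so vpubd receives a plain JSON number rather than a Decimal the encoder may not handle.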
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = vpubd.createrawtransaction(inputs, outputs)
signed_rawtx = vpubd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(vpubd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = vpubd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(vpubd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = vpubd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(vpubd, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get VPs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send VPs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of vpub.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
vpubd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(vpubd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(vpubd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(vpubd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(vpubd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = vpubd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| []
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | python | 1 | 0 | |
vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/client.go | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sdk
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"net/url"
"os"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
)
var debug utils.Debug
func init() {
debug = utils.Init("sdk")
}
// Version this value will be replaced at build time: -ldflags="-X sdk.version=x.x.x"
var Version = "0.0.1"
var defaultConnectTimeout = 5 * time.Second
var defaultReadTimeout = 10 * time.Second
var DefaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Core/%s", runtime.GOOS, runtime.GOARCH, strings.Trim(runtime.Version(), "go"), Version)
var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
return fn
}
// Client the type Client
type Client struct {
SourceIp string
SecureTransport string
isInsecure bool
regionId string
config *Config
httpProxy string
httpsProxy string
noProxy string
logger *Logger
userAgent map[string]string
signer auth.Signer
httpClient *http.Client
asyncTaskQueue chan func()
readTimeout time.Duration
connectTimeout time.Duration
EndpointMap map[string]string
EndpointType string
Network string
Domain string
isOpenAsync bool
}
func (client *Client) Init() (err error) {
panic("not support yet")
}
func (client *Client) SetEndpointRules(endpointMap map[string]string, endpointType string, netWork string) {
client.EndpointMap = endpointMap
client.Network = netWork
client.EndpointType = endpointType
}
func (client *Client) SetHTTPSInsecure(isInsecure bool) {
client.isInsecure = isInsecure
}
func (client *Client) GetHTTPSInsecure() bool {
return client.isInsecure
}
func (client *Client) SetHttpsProxy(httpsProxy string) {
client.httpsProxy = httpsProxy
}
func (client *Client) GetHttpsProxy() string {
return client.httpsProxy
}
func (client *Client) SetHttpProxy(httpProxy string) {
client.httpProxy = httpProxy
}
func (client *Client) GetHttpProxy() string {
return client.httpProxy
}
func (client *Client) SetNoProxy(noProxy string) {
client.noProxy = noProxy
}
func (client *Client) GetNoProxy() string {
return client.noProxy
}
func (client *Client) SetTransport(transport http.RoundTripper) {
if client.httpClient == nil {
client.httpClient = &http.Client{}
}
client.httpClient.Transport = transport
}
// InitWithProviderChain will get the credential from the providerChain;
// the RsaKeyPairCredential is only applicable to regionID `ap-northeast-1`.
// If your providerChain may return a credential of type RsaKeyPairCredential,
// please ensure your regionID is `ap-northeast-1`.
func (client *Client) InitWithProviderChain(regionId string, provider provider.Provider) (err error) {
config := client.InitClientConfig()
credential, err := provider.Resolve()
if err != nil {
return
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithOptions(regionId string, config *Config, credential auth.Credential) (err error) {
if regionId != "" {
match, _ := regexp.MatchString("^[a-zA-Z0-9_-]+$", regionId)
if !match {
return fmt.Errorf("regionId contains invalid characters")
}
}
client.regionId = regionId
client.config = config
client.httpClient = &http.Client{}
if config.Transport != nil {
client.httpClient.Transport = config.Transport
} else if config.HttpTransport != nil {
client.httpClient.Transport = config.HttpTransport
}
if config.Timeout > 0 {
client.httpClient.Timeout = config.Timeout
}
if config.EnableAsync {
client.EnableAsync(config.GoRoutinePoolSize, config.MaxTaskQueueSize)
}
client.signer, err = auth.NewSignerWithCredential(credential, client.ProcessCommonRequestWithSigner)
return
}
func (client *Client) SetReadTimeout(readTimeout time.Duration) {
client.readTimeout = readTimeout
}
func (client *Client) SetConnectTimeout(connectTimeout time.Duration) {
client.connectTimeout = connectTimeout
}
func (client *Client) GetReadTimeout() time.Duration {
return client.readTimeout
}
func (client *Client) GetConnectTimeout() time.Duration {
return client.connectTimeout
}
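// getHttpProxy resolves the proxy URL for the given scheme: an explicitly configured
// proxy on the client wins, otherwise the HTTPS_PROXY/https_proxy (or HTTP_PROXY/http_proxy)
// environment variables are consulted.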
func (client *Client) getHttpProxy(scheme string) (proxy *url.URL, err error) {
if scheme == "https" {
if client.GetHttpsProxy() != "" {
proxy, err = url.Parse(client.httpsProxy)
} else if rawurl := os.Getenv("HTTPS_PROXY"); rawurl != "" {
proxy, err = url.Parse(rawurl)
} else if rawurl := os.Getenv("https_proxy"); rawurl != "" {
proxy, err = url.Parse(rawurl)
}
} else {
if client.GetHttpProxy() != "" {
proxy, err = url.Parse(client.httpProxy)
} else if rawurl := os.Getenv("HTTP_PROXY"); rawurl != "" {
proxy, err = url.Parse(rawurl)
} else if rawurl := os.Getenv("http_proxy"); rawurl != "" {
proxy, err = url.Parse(rawurl)
}
}
return proxy, err
}
func (client *Client) getNoProxy(scheme string) []string {
var urls []string
if client.GetNoProxy() != "" {
urls = strings.Split(client.noProxy, ",")
} else if rawurl := os.Getenv("NO_PROXY"); rawurl != "" {
urls = strings.Split(rawurl, ",")
} else if rawurl := os.Getenv("no_proxy"); rawurl != "" {
urls = strings.Split(rawurl, ",")
}
return urls
}
// EnableAsync enable the async task queue
func (client *Client) EnableAsync(routinePoolSize, maxTaskQueueSize int) {
if client.isOpenAsync {
fmt.Println("warning: Please not call EnableAsync repeatedly")
return
}
client.isOpenAsync = true
client.asyncTaskQueue = make(chan func(), maxTaskQueueSize)
for i := 0; i < routinePoolSize; i++ {
go func() {
for {
task, notClosed := <-client.asyncTaskQueue
if !notClosed {
return
} else {
task()
}
}
}()
}
}
func (client *Client) InitWithAccessKey(regionId, accessKeyId, accessKeySecret string) (err error) {
config := client.InitClientConfig()
credential := &credentials.AccessKeyCredential{
AccessKeyId: accessKeyId,
AccessKeySecret: accessKeySecret,
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithStsToken(regionId, accessKeyId, accessKeySecret, securityToken string) (err error) {
config := client.InitClientConfig()
credential := &credentials.StsTokenCredential{
AccessKeyId: accessKeyId,
AccessKeySecret: accessKeySecret,
AccessKeyStsToken: securityToken,
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (err error) {
config := client.InitClientConfig()
credential := &credentials.RamRoleArnCredential{
AccessKeyId: accessKeyId,
AccessKeySecret: accessKeySecret,
RoleArn: roleArn,
RoleSessionName: roleSessionName,
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithRamRoleArnAndPolicy(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string) (err error) {
config := client.InitClientConfig()
credential := &credentials.RamRoleArnCredential{
AccessKeyId: accessKeyId,
AccessKeySecret: accessKeySecret,
RoleArn: roleArn,
RoleSessionName: roleSessionName,
Policy: policy,
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithRsaKeyPair(regionId, publicKeyId, privateKey string, sessionExpiration int) (err error) {
config := client.InitClientConfig()
credential := &credentials.RsaKeyPairCredential{
PrivateKey: privateKey,
PublicKeyId: publicKeyId,
SessionExpiration: sessionExpiration,
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithEcsRamRole(regionId, roleName string) (err error) {
config := client.InitClientConfig()
credential := &credentials.EcsRamRoleCredential{
RoleName: roleName,
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithBearerToken(regionId, bearerToken string) (err error) {
config := client.InitClientConfig()
credential := &credentials.BearerTokenCredential{
BearerToken: bearerToken,
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitClientConfig() (config *Config) {
if client.config != nil {
return client.config
} else {
return NewConfig()
}
}
func (client *Client) DoAction(request requests.AcsRequest, response responses.AcsResponse) (err error) {
if (client.SecureTransport == "false" || client.SecureTransport == "true") && client.SourceIp != "" {
t := reflect.TypeOf(request).Elem()
v := reflect.ValueOf(request).Elem()
for i := 0; i < t.NumField(); i++ {
value := v.FieldByName(t.Field(i).Name)
if t.Field(i).Name == "requests.RoaRequest" {
request.GetHeaders()["x-acs-proxy-source-ip"] = client.SourceIp
request.GetHeaders()["x-acs-proxy-secure-transport"] = client.SecureTransport
return client.DoActionWithSigner(request, response, nil)
} else if t.Field(i).Name == "PathPattern" && !value.IsZero() {
request.GetHeaders()["x-acs-proxy-source-ip"] = client.SourceIp
request.GetHeaders()["x-acs-proxy-secure-transport"] = client.SecureTransport
return client.DoActionWithSigner(request, response, nil)
} else if i == t.NumField()-1 {
request.GetQueryParams()["SourceIp"] = client.SourceIp
request.GetQueryParams()["SecureTransport"] = client.SecureTransport
return client.DoActionWithSigner(request, response, nil)
}
}
}
return client.DoActionWithSigner(request, response, nil)
}
func (client *Client) GetEndpointRules(regionId string, product string) (endpointRaw string, err error) {
if client.EndpointType == "regional" {
if regionId == "" {
err = fmt.Errorf("RegionId is empty, please set a valid RegionId.")
return "", err
}
endpointRaw = strings.Replace("<product><network>.<region_id>.aliyuncs.com", "<region_id>", regionId, 1)
} else {
endpointRaw = "<product><network>.aliyuncs.com"
}
endpointRaw = strings.Replace(endpointRaw, "<product>", strings.ToLower(product), 1)
if client.Network == "" || client.Network == "public" {
endpointRaw = strings.Replace(endpointRaw, "<network>", "", 1)
} else {
endpointRaw = strings.Replace(endpointRaw, "<network>", "-"+client.Network, 1)
}
return endpointRaw, nil
}
func (client *Client) buildRequestWithSigner(request requests.AcsRequest, signer auth.Signer) (httpRequest *http.Request, err error) {
// add clientVersion
request.GetHeaders()["x-sdk-core-version"] = Version
regionId := client.regionId
if len(request.GetRegionId()) > 0 {
regionId = request.GetRegionId()
}
// resolve endpoint
endpoint := request.GetDomain()
if endpoint == "" && client.Domain != "" {
endpoint = client.Domain
}
if endpoint == "" {
endpoint = endpoints.GetEndpointFromMap(regionId, request.GetProduct())
}
if endpoint == "" && client.EndpointType != "" &&
(request.GetProduct() != "Sts" || len(request.GetQueryParams()) == 0) {
if client.EndpointMap != nil && client.Network == "" || client.Network == "public" {
endpoint = client.EndpointMap[regionId]
}
if endpoint == "" {
endpoint, err = client.GetEndpointRules(regionId, request.GetProduct())
if err != nil {
return
}
}
}
if endpoint == "" {
resolveParam := &endpoints.ResolveParam{
Domain: request.GetDomain(),
Product: request.GetProduct(),
RegionId: regionId,
LocationProduct: request.GetLocationServiceCode(),
LocationEndpointType: request.GetLocationEndpointType(),
CommonApi: client.ProcessCommonRequest,
}
endpoint, err = endpoints.Resolve(resolveParam)
if err != nil {
return
}
}
request.SetDomain(endpoint)
if request.GetScheme() == "" {
request.SetScheme(client.config.Scheme)
}
// init request params
err = requests.InitParams(request)
if err != nil {
return
}
// signature
var finalSigner auth.Signer
if signer != nil {
finalSigner = signer
} else {
finalSigner = client.signer
}
httpRequest, err = buildHttpRequest(request, finalSigner, regionId)
if err == nil {
userAgent := DefaultUserAgent + getSendUserAgent(client.config.UserAgent, client.userAgent, request.GetUserAgent())
httpRequest.Header.Set("User-Agent", userAgent)
}
return
}
func getSendUserAgent(configUserAgent string, clientUserAgent, requestUserAgent map[string]string) string {
realUserAgent := ""
for key1, value1 := range clientUserAgent {
for key2 := range requestUserAgent {
if key1 == key2 {
key1 = ""
}
}
if key1 != "" {
realUserAgent += fmt.Sprintf(" %s/%s", key1, value1)
}
}
for key, value := range requestUserAgent {
realUserAgent += fmt.Sprintf(" %s/%s", key, value)
}
if configUserAgent != "" {
return realUserAgent + fmt.Sprintf(" Extra/%s", configUserAgent)
}
return realUserAgent
}
func (client *Client) AppendUserAgent(key, value string) {
newkey := true
if client.userAgent == nil {
client.userAgent = make(map[string]string)
}
if strings.ToLower(key) != "core" && strings.ToLower(key) != "go" {
for tag := range client.userAgent {
if tag == key {
client.userAgent[tag] = value
newkey = false
}
}
if newkey {
client.userAgent[key] = value
}
}
}
func (client *Client) BuildRequestWithSigner(request requests.AcsRequest, signer auth.Signer) (err error) {
_, err = client.buildRequestWithSigner(request, signer)
return
}
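// getTimeout resolves the read and connect timeouts in priority order: the per-request
// values first, then the client-level settings, then (for the read timeout) the shared
// http.Client timeout or a product/action-specific maximum, and finally the package defaults.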
func (client *Client) getTimeout(request requests.AcsRequest) (time.Duration, time.Duration) {
readTimeout := defaultReadTimeout
connectTimeout := defaultConnectTimeout
reqReadTimeout := request.GetReadTimeout()
reqConnectTimeout := request.GetConnectTimeout()
if reqReadTimeout != 0*time.Millisecond {
readTimeout = reqReadTimeout
} else if client.readTimeout != 0*time.Millisecond {
readTimeout = client.readTimeout
} else if client.httpClient.Timeout != 0 {
readTimeout = client.httpClient.Timeout
} else if timeout, ok := getAPIMaxTimeout(request.GetProduct(), request.GetActionName()); ok {
readTimeout = timeout
}
if reqConnectTimeout != 0*time.Millisecond {
connectTimeout = reqConnectTimeout
} else if client.connectTimeout != 0*time.Millisecond {
connectTimeout = client.connectTimeout
}
return readTimeout, connectTimeout
}
func Timeout(connectTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) {
return func(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{
Timeout: connectTimeout,
DualStack: true,
}).DialContext(ctx, network, address)
}
}
func (client *Client) setTimeout(request requests.AcsRequest) {
readTimeout, connectTimeout := client.getTimeout(request)
client.httpClient.Timeout = readTimeout
if trans, ok := client.httpClient.Transport.(*http.Transport); ok && trans != nil {
trans.DialContext = Timeout(connectTimeout)
client.httpClient.Transport = trans
} else if client.httpClient.Transport == nil {
client.httpClient.Transport = &http.Transport{
DialContext: Timeout(connectTimeout),
}
}
}
func (client *Client) getHTTPSInsecure(request requests.AcsRequest) (insecure bool) {
if request.GetHTTPSInsecure() != nil {
insecure = *request.GetHTTPSInsecure()
} else {
insecure = client.GetHTTPSInsecure()
}
return insecure
}
func (client *Client) DoActionWithSigner(request requests.AcsRequest, response responses.AcsResponse, signer auth.Signer) (err error) {
if client.Network != "" {
match, _ := regexp.MatchString("^[a-zA-Z0-9_-]+$", client.Network)
if !match {
return fmt.Errorf("netWork contains invalid characters")
}
}
fieldMap := make(map[string]string)
initLogMsg(fieldMap)
defer func() {
client.printLog(fieldMap, err)
}()
httpRequest, err := client.buildRequestWithSigner(request, signer)
if err != nil {
return
}
client.setTimeout(request)
proxy, err := client.getHttpProxy(httpRequest.URL.Scheme)
if err != nil {
return err
}
noProxy := client.getNoProxy(httpRequest.URL.Scheme)
var flag bool
for _, value := range noProxy {
if strings.HasPrefix(value, "*") {
value = fmt.Sprintf(".%s", value)
}
noProxyReg, err := regexp.Compile(value)
if err != nil {
return err
}
if noProxyReg.MatchString(httpRequest.Host) {
flag = true
break
}
}
// Set whether to ignore certificate validation.
// Default InsecureSkipVerify is false.
if trans, ok := client.httpClient.Transport.(*http.Transport); ok && trans != nil {
if trans.TLSClientConfig != nil {
trans.TLSClientConfig.InsecureSkipVerify = client.getHTTPSInsecure(request)
} else {
trans.TLSClientConfig = &tls.Config{
InsecureSkipVerify: client.getHTTPSInsecure(request),
}
}
if proxy != nil && !flag {
trans.Proxy = http.ProxyURL(proxy)
}
client.httpClient.Transport = trans
}
var httpResponse *http.Response
for retryTimes := 0; retryTimes <= client.config.MaxRetryTime; retryTimes++ {
if proxy != nil && proxy.User != nil {
if password, passwordSet := proxy.User.Password(); passwordSet {
httpRequest.SetBasicAuth(proxy.User.Username(), password)
}
}
if retryTimes > 0 {
client.printLog(fieldMap, err)
initLogMsg(fieldMap)
}
putMsgToMap(fieldMap, httpRequest)
debug("> %s %s %s", httpRequest.Method, httpRequest.URL.RequestURI(), httpRequest.Proto)
debug("> Host: %s", httpRequest.Host)
for key, value := range httpRequest.Header {
debug("> %s: %v", key, strings.Join(value, ""))
}
debug(">")
debug(" Retry Times: %d.", retryTimes)
startTime := time.Now()
fieldMap["{start_time}"] = startTime.Format("2006-01-02 15:04:05")
httpResponse, err = hookDo(client.httpClient.Do)(httpRequest)
fieldMap["{cost}"] = time.Since(startTime).String()
if err == nil {
fieldMap["{code}"] = strconv.Itoa(httpResponse.StatusCode)
fieldMap["{res_headers}"] = TransToString(httpResponse.Header)
debug("< %s %s", httpResponse.Proto, httpResponse.Status)
for key, value := range httpResponse.Header {
debug("< %s: %v", key, strings.Join(value, ""))
}
}
debug("<")
// receive error
if err != nil {
debug(" Error: %s.", err.Error())
if !client.config.AutoRetry {
return
} else if retryTimes >= client.config.MaxRetryTime {
// timeout but reached the max retry times, return
times := strconv.Itoa(retryTimes + 1)
timeoutErrorMsg := fmt.Sprintf(errors.TimeoutErrorMessage, times, times)
if strings.Contains(err.Error(), "Client.Timeout") {
timeoutErrorMsg += " Read timeout. Please set a valid ReadTimeout."
} else {
timeoutErrorMsg += " Connect timeout. Please set a valid ConnectTimeout."
}
err = errors.NewClientError(errors.TimeoutErrorCode, timeoutErrorMsg, err)
return
}
}
if isCertificateError(err) {
return
}
		// if the status code is >= 500 or the request timed out, trigger a retry
if client.config.AutoRetry && (err != nil || isServerError(httpResponse)) {
client.setTimeout(request)
// rewrite signatureNonce and signature
httpRequest, err = client.buildRequestWithSigner(request, signer)
// buildHttpRequest(request, finalSigner, regionId)
if err != nil {
return
}
continue
}
break
}
err = responses.Unmarshal(response, httpResponse, request.GetAcceptFormat())
fieldMap["{res_body}"] = response.GetHttpContentString()
debug("%s", response.GetHttpContentString())
// wrap server errors
if serverErr, ok := err.(*errors.ServerError); ok {
var wrapInfo = map[string]string{}
serverErr.RespHeaders = response.GetHttpHeaders()
wrapInfo["StringToSign"] = request.GetStringToSign()
err = errors.WrapServerError(serverErr, wrapInfo)
}
return
}
func isCertificateError(err error) bool {
if err != nil && strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
return true
}
return false
}
func putMsgToMap(fieldMap map[string]string, request *http.Request) {
fieldMap["{host}"] = request.Host
fieldMap["{method}"] = request.Method
fieldMap["{uri}"] = request.URL.RequestURI()
fieldMap["{pid}"] = strconv.Itoa(os.Getpid())
fieldMap["{version}"] = strings.Split(request.Proto, "/")[1]
hostname, _ := os.Hostname()
fieldMap["{hostname}"] = hostname
fieldMap["{req_headers}"] = TransToString(request.Header)
fieldMap["{target}"] = request.URL.Path + request.URL.RawQuery
}
func buildHttpRequest(request requests.AcsRequest, singer auth.Signer, regionId string) (httpRequest *http.Request, err error) {
err = auth.Sign(request, singer, regionId)
if err != nil {
return
}
requestMethod := request.GetMethod()
requestUrl := request.BuildUrl()
body := request.GetBodyReader()
httpRequest, err = http.NewRequest(requestMethod, requestUrl, body)
if err != nil {
return
}
for key, value := range request.GetHeaders() {
httpRequest.Header[key] = []string{value}
}
// host is a special case
if host, containsHost := request.GetHeaders()["Host"]; containsHost {
httpRequest.Host = host
}
return
}
func isServerError(httpResponse *http.Response) bool {
return httpResponse.StatusCode >= http.StatusInternalServerError
}
/**
only blocks when one of the following occurs:
1. the asyncTaskQueue is full; increase the queue size to avoid this
2. Shutdown() is in progress and the client is being closed
**/
func (client *Client) AddAsyncTask(task func()) (err error) {
if client.asyncTaskQueue != nil {
if client.isOpenAsync {
client.asyncTaskQueue <- task
}
} else {
err = errors.NewClientError(errors.AsyncFunctionNotEnabledCode, errors.AsyncFunctionNotEnabledMessage, nil)
}
return
}
func (client *Client) GetConfig() *Config {
return client.config
}
func (client *Client) GetSigner() auth.Signer {
return client.signer
}
func (client *Client) SetSigner(signer auth.Signer) {
client.signer = signer
}
func NewClient() (client *Client, err error) {
client = &Client{}
err = client.Init()
return
}
func NewClientWithProvider(regionId string, providers ...provider.Provider) (client *Client, err error) {
client = &Client{}
var pc provider.Provider
if len(providers) == 0 {
pc = provider.DefaultChain
} else {
pc = provider.NewProviderChain(providers)
}
err = client.InitWithProviderChain(regionId, pc)
return
}
func NewClientWithOptions(regionId string, config *Config, credential auth.Credential) (client *Client, err error) {
client = &Client{}
err = client.InitWithOptions(regionId, config, credential)
return
}
func NewClientWithAccessKey(regionId, accessKeyId, accessKeySecret string) (client *Client, err error) {
client = &Client{}
err = client.InitWithAccessKey(regionId, accessKeyId, accessKeySecret)
return
}
func NewClientWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken string) (client *Client, err error) {
client = &Client{}
err = client.InitWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken)
return
}
func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) {
client = &Client{}
err = client.InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName)
return
}
func NewClientWithRamRoleArnAndPolicy(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string) (client *Client, err error) {
client = &Client{}
err = client.InitWithRamRoleArnAndPolicy(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy)
return
}
func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) {
client = &Client{}
err = client.InitWithEcsRamRole(regionId, roleName)
return
}
func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, sessionExpiration int) (client *Client, err error) {
client = &Client{}
err = client.InitWithRsaKeyPair(regionId, publicKeyId, privateKey, sessionExpiration)
return
}
func NewClientWithBearerToken(regionId, bearerToken string) (client *Client, err error) {
client = &Client{}
err = client.InitWithBearerToken(regionId, bearerToken)
return
}
func (client *Client) ProcessCommonRequest(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
request.TransToAcsRequest()
response = responses.NewCommonResponse()
err = client.DoAction(request, response)
return
}
func (client *Client) ProcessCommonRequestWithSigner(request *requests.CommonRequest, signerInterface interface{}) (response *responses.CommonResponse, err error) {
if signer, isSigner := signerInterface.(auth.Signer); isSigner {
request.TransToAcsRequest()
response = responses.NewCommonResponse()
err = client.DoActionWithSigner(request, response, signer)
return
}
panic("should not be here")
}
func (client *Client) Shutdown() {
if client.asyncTaskQueue != nil {
close(client.asyncTaskQueue)
}
client.isOpenAsync = false
}
// Deprecated: Use NewClientWithRamRoleArn in this package instead.
func NewClientWithStsRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) {
return NewClientWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName)
}
// Deprecated: Use NewClientWithEcsRamRole in this package instead.
func NewClientWithStsRoleNameOnEcs(regionId string, roleName string) (client *Client, err error) {
return NewClientWithEcsRamRole(regionId, roleName)
}
| [
"\"HTTPS_PROXY\"",
"\"https_proxy\"",
"\"HTTP_PROXY\"",
"\"http_proxy\"",
"\"NO_PROXY\"",
"\"no_proxy\""
]
| []
| [
"NO_PROXY",
"https_proxy",
"HTTP_PROXY",
"HTTPS_PROXY",
"http_proxy",
"no_proxy"
]
| [] | ["NO_PROXY", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "no_proxy"] | go | 6 | 0 | |
gcsfs/gcs.go | // Copyright © 2021 Vasily Ovchinnikov <[email protected]>.
//
// The code in this file is derived from afero fork github.com/Zatte/afero by Mikael Rapp
// licensed under Apache License 2.0.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsfs
import (
"context"
"os"
"time"
"cloud.google.com/go/storage"
"github.com/googleapis/google-cloud-go-testing/storage/stiface"
"github.com/tbhartman/afero-lite"
"google.golang.org/api/option"
)
type GcsFs struct {
source *Fs
}
// NewGcsFS creates a GCS file system, automatically instantiating and decorating the storage client.
// You can provide additional options to be passed to the client creation, as per
// cloud.google.com/go/storage documentation
func NewGcsFS(ctx context.Context, opts ...option.ClientOption) (afero.Fs, error) {
if json := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON"); json != "" {
opts = append(opts, option.WithCredentialsJSON([]byte(json)))
}
client, err := storage.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
return NewGcsFSFromClient(ctx, client)
}
// NewGcsFSWithSeparator is the same as NewGcsFS, but the file system will use the provided folder separator.
func NewGcsFSWithSeparator(ctx context.Context, folderSeparator string, opts ...option.ClientOption) (afero.Fs, error) {
client, err := storage.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
return NewGcsFSFromClientWithSeparator(ctx, client, folderSeparator)
}
// NewGcsFSFromClient creates a GCS file system from a given storage client
func NewGcsFSFromClient(ctx context.Context, client *storage.Client) (afero.Fs, error) {
c := stiface.AdaptClient(client)
return &GcsFs{NewGcsFs(ctx, c)}, nil
}
// NewGcsFSFromClientWithSeparator is the same as NewGcsFSFromClient, but the file system will use the provided folder separator.
func NewGcsFSFromClientWithSeparator(ctx context.Context, client *storage.Client, folderSeparator string) (afero.Fs, error) {
c := stiface.AdaptClient(client)
return &GcsFs{NewGcsFsWithSeparator(ctx, c, folderSeparator)}, nil
}
// Wraps gcs.GcsFs and converts some return types to afero interfaces.
func (fs *GcsFs) Name() string {
return fs.source.Name()
}
func (fs *GcsFs) Create(name string) (afero.File, error) {
return fs.source.Create(name)
}
func (fs *GcsFs) Mkdir(name string, perm os.FileMode) error {
return fs.source.Mkdir(name, perm)
}
func (fs *GcsFs) MkdirAll(path string, perm os.FileMode) error {
return fs.source.MkdirAll(path, perm)
}
func (fs *GcsFs) Open(name string) (afero.File, error) {
return fs.source.Open(name)
}
func (fs *GcsFs) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
return fs.source.OpenFile(name, flag, perm)
}
func (fs *GcsFs) Remove(name string) error {
return fs.source.Remove(name)
}
func (fs *GcsFs) RemoveAll(path string) error {
return fs.source.RemoveAll(path)
}
func (fs *GcsFs) Rename(oldname, newname string) error {
return fs.source.Rename(oldname, newname)
}
func (fs *GcsFs) Stat(name string) (os.FileInfo, error) {
return fs.source.Stat(name)
}
func (fs *GcsFs) Chmod(name string, mode os.FileMode) error {
return fs.source.Chmod(name, mode)
}
func (fs *GcsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return fs.source.Chtimes(name, atime, mtime)
}
func (fs *GcsFs) Chown(name string, uid, gid int) error {
return fs.source.Chown(name, uid, gid)
}
| [
"\"GOOGLE_APPLICATION_CREDENTIALS_JSON\""
]
| []
| [
"GOOGLE_APPLICATION_CREDENTIALS_JSON"
]
| [] | ["GOOGLE_APPLICATION_CREDENTIALS_JSON"] | go | 1 | 0 | |
src/app.py | import os
import unittest
import config
import web
from orm.cleanup import clean_up_unused_images, resize_uploads
from orm.setup import seed_db
from migrate.data_migrate import migrate_data as migrate_data_
app = web.app
@app.cli.command()
def setup_db():
web.init_app()
seed_db()
@app.cli.command()
def clean_uploads():
web.init_app()
clean_up_unused_images()
@app.cli.command()
def resize_images():
web.init_app()
resize_uploads()
@app.cli.command()
def migrate_data():
web.init_app()
os.chdir(app.root_path)
migrate_data_(config.DATABASE_PATH)
@app.cli.command()
def test():
suite = unittest.TestLoader().discover('.')
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
# web.ocr.test()
app.run(host=os.getenv('HOST'), port=os.getenv('PORT'), debug=True)
pass
| []
| []
| [
"PORT",
"HOST"
]
| [] | ["PORT", "HOST"] | python | 2 | 0 | |
examples/v1/service-level-objectives/GetSLOCorrections.go | // Get Corrections For an SLO returns "OK" response
package main
import (
"context"
"encoding/json"
"fmt"
"os"
datadog "github.com/DataDog/datadog-api-client-go/api/v1/datadog"
)
func main() {
// there is a valid "slo" in the system
SloData0ID := os.Getenv("SLO_DATA_0_ID")
ctx := datadog.NewDefaultContext(context.Background())
configuration := datadog.NewConfiguration()
configuration.SetUnstableOperationEnabled("GetSLOCorrections", true)
apiClient := datadog.NewAPIClient(configuration)
resp, r, err := apiClient.ServiceLevelObjectivesApi.GetSLOCorrections(ctx, SloData0ID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `ServiceLevelObjectivesApi.GetSLOCorrections`: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
responseContent, _ := json.MarshalIndent(resp, "", " ")
fmt.Fprintf(os.Stdout, "Response from `ServiceLevelObjectivesApi.GetSLOCorrections`:\n%s\n", responseContent)
}
| [
"\"SLO_DATA_0_ID\""
]
| []
| [
"SLO_DATA_0_ID"
]
| [] | ["SLO_DATA_0_ID"] | go | 1 | 0 | |
src/third_party/wiredtiger/examples/java/com/wiredtiger/examples/ex_thread.java | /*-
* Public Domain 2014-2016 MongoDB, Inc.
* Public Domain 2008-2014 WiredTiger, Inc.
*
* This is free and unencumbered software released into the public domain.
*
* Anyone is free to copy, modify, publish, use, compile, sell, or
* distribute this software, either in source code form or as a compiled
* binary, for any purpose, commercial or non-commercial, and by any
* means.
*
* In jurisdictions that recognize copyright laws, the author or authors
* of this software dedicate any and all copyright interest in the
* software to the public domain. We make this dedication for the benefit
* of the public at large and to the detriment of our heirs and
* successors. We intend this dedication to be an overt act of
* relinquishment in perpetuity of all present and future rights to this
* software under copyright law.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* ex_thread.java
* This is an example demonstrating how to create and access a simple
* table from multiple threads.
*/
package com.wiredtiger.examples;
import com.wiredtiger.db.*;
import java.io.*;
import java.util.*;
/*! [thread scan] */
class ScanThread extends Thread {
private Connection conn;
public ScanThread(Connection conn) {
this.conn = conn;
}
public void run()
{
try {
int ret;
Session session = conn.open_session(null);
Cursor cursor = session.open_cursor("table:access", null, null);
/* Show all records. */
while ((ret = cursor.next()) == 0) {
String key = cursor.getKeyString();
String value = cursor.getValueString();
System.out.println("Got record: " + key + " : " + value);
}
if (ret != wiredtiger.WT_NOTFOUND)
System.err.println("Cursor.next: " +
wiredtiger.wiredtiger_strerror(ret));
cursor.close();
session.close(null);
} catch (WiredTigerException wte) {
System.err.println("Exception " + wte);
}
}
}
/*! [thread scan] */
public class ex_thread {
public static String home;
public static final int NUM_THREADS = 10;
/*! [thread main] */
public static void main(String[] argv)
{
try {
Thread[] threads = new Thread[NUM_THREADS];
int i, ret;
Connection conn;
/*
* Create a clean test directory for this run of the test program if the
* environment variable isn't already set (as is done by make check).
*/
if (System.getenv("WIREDTIGER_HOME") == null) {
home = "WT_HOME";
try {
Process proc = Runtime.getRuntime().exec("/bin/rm -rf " + home);
BufferedReader br = new BufferedReader(
new InputStreamReader(proc.getInputStream()));
while(br.ready())
System.out.println(br.readLine());
br.close();
proc.waitFor();
if (!(new File(home)).mkdir())
System.err.println("mkdir: failed");
} catch (Exception ex) {
System.err.println("Exception: " + home + ": " + ex);
System.exit(1);
}
} else
home = null;
if ((conn = wiredtiger.open(home, "create")) == null) {
System.err.println("Error connecting to " + home);
System.exit(1);
}
/* Note: further error checking omitted for clarity. */
Session session = conn.open_session(null);
ret = session.create("table:access", "key_format=S,value_format=S");
Cursor cursor = session.open_cursor("table:access", null, "overwrite");
cursor.putKeyString("key1");
cursor.putValueString("value1");
ret = cursor.insert();
cursor.close();
ret = session.close(null);
for (i = 0; i < NUM_THREADS; i++) {
threads[i] = new ScanThread(conn);
threads[i].start();
}
for (i = 0; i < NUM_THREADS; i++)
try {
threads[i].join();
ret = -1;
}
catch (InterruptedException ie) {
}
ret = conn.close(null);
System.exit(ret);
}
catch (WiredTigerException wte) {
System.err.println("Exception: " + wte);
wte.printStackTrace();
System.exit(1);
}
}
/*! [thread main] */
}
| [
"\"WIREDTIGER_HOME\""
]
| []
| [
"WIREDTIGER_HOME"
]
| [] | ["WIREDTIGER_HOME"] | java | 1 | 0 | |
tests/test_api_render.py | """
This module tests the test API. These are high-level integration tests. Lower level unit tests
should go in test_render.py
"""
import os
import re
import sys
from unittest import mock
import pytest
import yaml
from conda_build import api, render
from conda_build.conda_interface import subdir, cc_conda_build
from tests import utils
from .utils import metadata_dir, thisdir
def test_render_need_download(testing_workdir, testing_config):
# first, test that the download/render system renders all it can,
# and accurately returns its needs
with pytest.raises((ValueError, SystemExit)):
metadata, need_download, need_reparse_in_env = api.render(
os.path.join(metadata_dir, "source_git_jinja2"),
config=testing_config,
no_download_source=True)[0]
assert need_download
assert need_reparse_in_env
    # Test that allowing source download lets it do the right thing.
metadata, need_download, need_reparse_in_env = api.render(
os.path.join(metadata_dir, "source_git_jinja2"),
config=testing_config,
no_download_source=False,
finalize=False)[0]
assert not need_download
assert metadata.meta["package"]["version"] == "1.20.2"
def test_render_yaml_output(testing_workdir, testing_config):
metadata, need_download, need_reparse_in_env = api.render(
os.path.join(metadata_dir, "source_git_jinja2"),
config=testing_config)[0]
yaml_metadata = api.output_yaml(metadata)
assert "package:" in yaml_metadata
# writes file with yaml data in it
api.output_yaml(metadata, os.path.join(testing_workdir, "output.yaml"))
assert "package:" in open(os.path.join(testing_workdir, "output.yaml")).read()
def test_get_output_file_path(testing_workdir, testing_metadata):
testing_metadata = render.finalize_metadata(testing_metadata)
api.output_yaml(testing_metadata, 'recipe/meta.yaml')
build_path = api.get_output_file_paths(os.path.join(testing_workdir, 'recipe'),
config=testing_metadata.config,
no_download_source=True)[0]
assert build_path == os.path.join(testing_metadata.config.croot,
testing_metadata.config.host_subdir,
"test_get_output_file_path-1.0-1.tar.bz2")
def test_get_output_file_path_metadata_object(testing_metadata):
testing_metadata.final = True
build_path = api.get_output_file_paths(testing_metadata)[0]
assert build_path == os.path.join(testing_metadata.config.croot,
testing_metadata.config.host_subdir,
"test_get_output_file_path_metadata_object-1.0-1.tar.bz2")
def test_get_output_file_path_jinja2(testing_workdir, testing_config):
# If this test does not raise, it's an indicator that the workdir is not
# being cleaned as it should.
recipe = os.path.join(metadata_dir, "source_git_jinja2")
# First get metadata with a recipe that is known to need a download:
with pytest.raises((ValueError, SystemExit)):
build_path = api.get_output_file_paths(recipe,
config=testing_config,
no_download_source=True)[0]
metadata, need_download, need_reparse_in_env = api.render(
recipe,
config=testing_config,
no_download_source=False)[0]
build_path = api.get_output_file_paths(metadata)[0]
_hash = metadata.hash_dependencies()
python = ''.join(metadata.config.variant['python'].split('.')[:2])
assert build_path == os.path.join(testing_config.croot, testing_config.host_subdir,
"conda-build-test-source-git-jinja2-1.20.2-"
"py{}{}_0_g262d444.tar.bz2".format(python, _hash))
@mock.patch('conda_build.source')
def test_output_without_jinja_does_not_download(mock_source, testing_workdir, testing_config):
api.get_output_file_path(os.path.join(metadata_dir, "source_git"), config=testing_config)[0]
mock_source.provide.assert_not_called()
def test_pin_compatible_semver(testing_config):
recipe_dir = os.path.join(metadata_dir, '_pin_compatible')
metadata = api.render(recipe_dir, config=testing_config)[0][0]
assert 'zlib >=1.2.11,<2.0a0' in metadata.get_value('requirements/run')
@pytest.mark.slow
@pytest.mark.skipif(
utils.on_win and sys.version_info < (3, 6),
reason="Failing tests on Azure for Python 2.7"
)
def test_resolved_packages_recipe(testing_config):
recipe_dir = os.path.join(metadata_dir, '_resolved_packages_host_build')
metadata = api.render(recipe_dir, config=testing_config)[0][0]
assert all(len(pkg.split()) == 3 for pkg in metadata.get_value('requirements/run'))
run_requirements = {x.split()[0] for x in metadata.get_value('requirements/run')}
for package in [
'curl', # direct dependency
'numpy', # direct dependency
'zlib', # indirect dependency of curl
'python', # indirect dependency of numpy
]:
assert package in run_requirements
@pytest.mark.slow
def test_host_entries_finalized(testing_config):
recipe = os.path.join(metadata_dir, '_host_entries_finalized')
metadata = api.render(recipe, config=testing_config)
assert len(metadata) == 2
outputs = api.get_output_file_paths(metadata)
assert any('py27' in out for out in outputs)
assert any('py39' in out for out in outputs)
def test_hash_no_apply_to_custom_build_string(testing_metadata, testing_workdir):
testing_metadata.meta['build']['string'] = 'steve'
testing_metadata.meta['requirements']['build'] = ['zlib 1.2.8']
api.output_yaml(testing_metadata, 'meta.yaml')
metadata = api.render(testing_workdir)[0][0]
assert metadata.build_id() == 'steve'
def test_pin_depends(testing_config):
"""This is deprecated functionality - replaced by the more general variants pinning scheme"""
recipe = os.path.join(metadata_dir, '_pin_depends_strict')
m = api.render(recipe, config=testing_config)[0][0]
# the recipe python is not pinned, but having pin_depends set will force it to be.
assert any(re.search(r'python\s+[23]\.', dep) for dep in m.meta['requirements']['run'])
def test_cross_recipe_with_only_build_section(testing_config):
recipe = os.path.join(metadata_dir, '_cross_prefix_elision_compiler_used')
metadata = api.render(recipe, config=testing_config, bypass_env_check=True)[0][0]
assert metadata.config.host_subdir != subdir
assert metadata.config.build_prefix != metadata.config.host_prefix
assert not metadata.build_is_host
def test_cross_info_index_platform(testing_config):
recipe = os.path.join(metadata_dir, '_cross_build_unix_windows')
metadata = api.render(recipe, config=testing_config, bypass_env_check=True)[0][0]
info_index = metadata.info_index()
assert metadata.config.host_subdir != subdir
assert metadata.config.host_subdir == info_index['subdir']
assert metadata.config.host_platform != metadata.config.platform
assert metadata.config.host_platform == info_index['platform']
def test_setting_condarc_vars_with_env_var_expansion(testing_workdir):
os.makedirs('config')
# python won't be used - the stuff in the recipe folder will override it
python_versions = ['2.6', '3.4', '3.10']
config = {'python': python_versions,
'bzip2': ['0.9', '1.0']}
with open(os.path.join('config', 'conda_build_config.yaml'), 'w') as f:
yaml.dump(config, f, default_flow_style=False)
cc_conda_build_backup = cc_conda_build.copy()
# hacky equivalent of changing condarc
# careful, this is global and affects other tests! make sure to clear it!
cc_conda_build.update({'config_file': '${TEST_WORKDIR}/config/conda_build_config.yaml'})
os.environ['TEST_WORKDIR'] = testing_workdir
try:
m = api.render(os.path.join(thisdir, 'test-recipes', 'variants', '19_used_variables'),
bypass_env_check=True, finalize=False)[0][0]
# this one should have gotten clobbered by the values in the recipe
assert m.config.variant['python'] not in python_versions
# this confirms that we loaded the config file correctly
assert len(m.config.squished_variants['bzip2']) == 2
finally:
cc_conda_build.clear()
cc_conda_build.update(cc_conda_build_backup)
def test_self_reference_run_exports_pin_subpackage_picks_up_version_correctly():
recipe = os.path.join(metadata_dir, '_self_reference_run_exports')
m = api.render(recipe)[0][0]
run_exports = m.meta.get('build', {}).get('run_exports', [])
assert run_exports
assert len(run_exports) == 1
assert run_exports[0].split()[1] == '>=1.0.0,<2.0a0'
def test_run_exports_with_pin_compatible_in_subpackages(testing_config):
recipe = os.path.join(metadata_dir, '_run_exports_in_outputs')
ms = api.render(recipe, config=testing_config)
for m, _, _ in ms:
if m.name().startswith('gfortran_'):
run_exports = set(m.meta.get('build', {}).get('run_exports', {}).get('strong', []))
assert len(run_exports) == 1
# len after splitting should be more than one because of pin_compatible. If it's only zlib, we've lost the
# compatibility bound info. This is generally due to lack of rendering of an output, such that the
# compatibility bounds just aren't added in.
assert all(len(export.split()) > 1 for export in run_exports), run_exports
def test_ignore_build_only_deps(testing_config):
ms = api.render(os.path.join(thisdir, 'test-recipes', 'variants', 'python_in_build_only'),
bypass_env_check=True, finalize=False)
assert len(ms) == 1
def test_merge_build_host_build_key(testing_workdir, testing_metadata):
m = api.render(os.path.join(metadata_dir, '_no_merge_build_host'))[0][0]
assert not any('bzip2' in dep for dep in m.meta['requirements']['run'])
def test_merge_build_host_empty_host_section(testing_config):
m = api.render(os.path.join(metadata_dir, '_empty_host_avoids_merge'))[0][0]
assert not any('bzip2' in dep for dep in m.meta['requirements']['run'])
@pytest.mark.skipif(sys.platform != "linux2", reason="package on remote end is only on linux")
@pytest.mark.xfail(reason="It needs to be fixed for Python v2.7. #3681")
def test_run_exports_from_repo_without_channeldata(testing_config):
ms = api.render(os.path.join(metadata_dir, '_run_export_no_channeldata'), config=testing_config)
assert ms[0][0].meta['requirements']['build'] == ["exporty"]
# these two will be missing if run_exports has failed.
assert ms[0][0].meta['requirements']['host'] == ["exporty"]
assert ms[0][0].meta['requirements']['run'] == ["exporty"]
def test_pin_expression_works_with_prereleases(testing_config):
recipe = os.path.join(metadata_dir, '_pinning_prerelease')
ms = api.render(recipe, config=testing_config)
assert len(ms) == 2
m = next(m_[0] for m_ in ms if m_[0].meta['package']['name'] == 'bar')
assert 'foo >=3.10.0.rc1,<3.11.0a0' in m.meta['requirements']['run']
def test_pin_expression_works_with_python_prereleases(testing_config):
recipe = os.path.join(metadata_dir, '_pinning_prerelease_python')
ms = api.render(recipe, config=testing_config)
assert len(ms) == 2
m = next(m_[0] for m_ in ms if m_[0].meta['package']['name'] == 'bar')
assert 'python >=3.10.0rc1,<3.11.0a0' in m.meta['requirements']['run']
| []
| []
| [
"TEST_WORKDIR"
]
| [] | ["TEST_WORKDIR"] | python | 1 | 0 | |
common/archiver/gcloud/connector/clientDelegate.go | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination clientDelegate_mock.go
package connector
import (
"context"
"io/ioutil"
"os"
"cloud.google.com/go/storage"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
)
type (
// GcloudStorageClient is an interface that expose some methods from gcloud storage client
GcloudStorageClient interface {
Bucket(URI string) BucketHandleWrapper
}
clientDelegate struct {
nativeClient *storage.Client
}
)
type (
// BucketHandleWrapper is an interface that expose some methods from gcloud storage bucket
BucketHandleWrapper interface {
Object(name string) ObjectHandleWrapper
Objects(ctx context.Context, q *storage.Query) ObjectIteratorWrapper
Attrs(ctx context.Context) (*storage.BucketAttrs, error)
}
bucketDelegate struct {
bucket *storage.BucketHandle
}
)
type (
// ObjectHandleWrapper is an interface that expose some methods from gcloud storage object
ObjectHandleWrapper interface {
NewWriter(ctx context.Context) WriterWrapper
NewReader(ctx context.Context) (ReaderWrapper, error)
Attrs(ctx context.Context) (*storage.ObjectAttrs, error)
}
objectDelegate struct {
object *storage.ObjectHandle
}
)
type (
// WriterWrapper is an interface that expose some methods from gcloud storage writer
WriterWrapper interface {
Close() error
Write(p []byte) (n int, err error)
CloseWithError(err error) error
}
writerDelegate struct {
writer *storage.Writer
}
)
type (
// ReaderWrapper is an interface that expose some methods from gcloud storage reader
ReaderWrapper interface {
Close() error
Read(p []byte) (int, error)
}
readerDelegate struct {
reader *storage.Reader
}
)
type (
// ObjectIteratorWrapper is an interface that expose some methods from gcloud storage objectIterator
ObjectIteratorWrapper interface {
Next() (*storage.ObjectAttrs, error)
}
objectIteratorDelegate struct {
iterator *storage.ObjectIterator
}
)
func newClientDelegate() (*clientDelegate, error) {
ctx := context.Background()
if credentialsPath := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"); credentialsPath != "" {
return newClientDelegateWithCredentials(ctx, credentialsPath)
}
return newDefaultClientDelegate(ctx)
}
func newDefaultClientDelegate(ctx context.Context) (*clientDelegate, error) {
nativeClient, err := storage.NewClient(ctx)
return &clientDelegate{nativeClient: nativeClient}, err
}
func newClientDelegateWithCredentials(ctx context.Context, credentialsPath string) (*clientDelegate, error) {
jsonKey, err := ioutil.ReadFile(credentialsPath)
if err != nil {
return newDefaultClientDelegate(ctx)
}
conf, err := google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl)
if err != nil {
return newDefaultClientDelegate(ctx)
}
nativeClient, err := storage.NewClient(ctx, option.WithTokenSource(conf.TokenSource(ctx)))
return &clientDelegate{nativeClient: nativeClient}, err
}
// Bucket returns a BucketHandle, which provides operations on the named bucket.
// This call does not perform any network operations.
//
// The supplied name must contain only lowercase letters, numbers, dashes,
// underscores, and dots. The full specification for valid bucket names can be
// found at:
// https://cloud.google.com/storage/docs/bucket-naming
func (c *clientDelegate) Bucket(bucketName string) BucketHandleWrapper {
return &bucketDelegate{bucket: c.nativeClient.Bucket(bucketName)}
}
// Object returns an ObjectHandle, which provides operations on the named object.
// This call does not perform any network operations.
//
// name must consist entirely of valid UTF-8-encoded runes. The full specification
// for valid object names can be found at:
// https://cloud.google.com/storage/docs/bucket-naming
func (b *bucketDelegate) Object(name string) ObjectHandleWrapper {
return &objectDelegate{object: b.bucket.Object(name)}
}
// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *bucketDelegate) Objects(ctx context.Context, q *storage.Query) ObjectIteratorWrapper {
return b.bucket.Objects(ctx, q)
}
// Attrs returns the metadata for the bucket.
func (b *bucketDelegate) Attrs(ctx context.Context) (*storage.BucketAttrs, error) {
return b.bucket.Attrs(ctx)
}
// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
//
// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
// have a non-empty Prefix field, and a zero value for all other fields. These
// represent prefixes.
func (o *objectIteratorDelegate) Next() (*storage.ObjectAttrs, error) {
return o.iterator.Next()
}
// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
// A new object will be created unless an object with this name already exists.
// Otherwise any previous object with the same name will be replaced.
// The object will not be available (and any previous object will remain)
// until Close has been called.
//
// Attributes can be set on the object by modifying the returned Writer's
// ObjectAttrs field before the first call to Write. If no ContentType
// attribute is specified, the content type will be automatically sniffed
// using net/http.DetectContentType.
//
// It is the caller's responsibility to call Close when writing is done. To
// stop writing without saving the data, cancel the context.
func (o *objectDelegate) NewWriter(ctx context.Context) WriterWrapper {
return &writerDelegate{writer: o.object.NewWriter(ctx)}
}
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *objectDelegate) NewReader(ctx context.Context) (ReaderWrapper, error) {
r, err := o.object.NewReader(ctx)
return &readerDelegate{reader: r}, err
}
func (o *objectDelegate) Attrs(ctx context.Context) (attrs *storage.ObjectAttrs, err error) {
return o.object.Attrs(ctx)
}
// Close completes the write operation and flushes any buffered data.
// If Close doesn't return an error, metadata about the written object
// can be retrieved by calling Attrs.
func (w *writerDelegate) Close() error {
return w.writer.Close()
}
// Write appends to w. It implements the io.Writer interface.
//
// Since writes happen asynchronously, Write may return a nil
// error even though the write failed (or will fail). Always
// use the error returned from Writer.Close to determine if
// the upload was successful.
func (w *writerDelegate) Write(p []byte) (int, error) {
return w.writer.Write(p)
}
// CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil.
//
// Deprecated: cancel the context passed to NewWriter instead.
func (w *writerDelegate) CloseWithError(err error) error {
return w.writer.CloseWithError(err)
}
// Close closes the Reader. It must be called when done reading.
func (r *readerDelegate) Close() error {
return r.reader.Close()
}
func (r *readerDelegate) Read(p []byte) (int, error) {
return r.reader.Read(p)
}
| [
"\"GOOGLE_APPLICATION_CREDENTIALS\""
]
| []
| [
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | go | 1 | 0 | |
recipes/recipe_modules/bot_update/resources/bot_update.py | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(hinoka): Use logging.
import cStringIO
import codecs
from contextlib import contextmanager
import copy
import ctypes
import json
import optparse
import os
import pprint
import random
import re
import subprocess
import sys
import tempfile
import threading
import time
import urllib2
import urlparse
import uuid
import os.path as path
# How many bytes at a time to read from pipes.
BUF_SIZE = 256
# Define a bunch of directory paths.
# Relative to this script's filesystem path.
THIS_DIR = path.dirname(path.abspath(__file__))
DEPOT_TOOLS_DIR = path.abspath(path.join(THIS_DIR, '..', '..', '..', '..'))
CHROMIUM_GIT_HOST = 'https://chromium.googlesource.com'
CHROMIUM_SRC_URL = CHROMIUM_GIT_HOST + '/chromium/src.git'
BRANCH_HEADS_REFSPEC = '+refs/branch-heads/*'
TAGS_REFSPEC = '+refs/tags/*'
# Regular expression to match sha1 git revision.
COMMIT_HASH_RE = re.compile(r'[0-9a-f]{5,40}', re.IGNORECASE)
# Regular expression that matches a single commit footer line.
COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s*(.*)')
# Footer metadata keys for regular and gsubtreed mirrored commit positions.
COMMIT_POSITION_FOOTER_KEY = 'Cr-Commit-Position'
COMMIT_ORIGINAL_POSITION_FOOTER_KEY = 'Cr-Original-Commit-Position'
# Regular expression to parse gclient's revinfo entries.
REVINFO_RE = re.compile(r'^([^:]+):\s+([^@]+)@(.+)$')
# Copied from scripts/recipes/chromium.py.
GOT_REVISION_MAPPINGS = {
CHROMIUM_SRC_URL: {
'got_revision': 'src/',
'got_nacl_revision': 'src/native_client/',
'got_swarm_client_revision': 'src/tools/swarm_client/',
'got_swarming_client_revision': 'src/tools/swarming_client/',
'got_v8_revision': 'src/v8/',
'got_webkit_revision': 'src/third_party/WebKit/',
'got_webrtc_revision': 'src/third_party/webrtc/',
}
}
GCLIENT_TEMPLATE = """solutions = %(solutions)s
cache_dir = r%(cache_dir)s
%(target_os)s
%(target_os_only)s
%(target_cpu)s
"""
# How many times to try before giving up.
ATTEMPTS = 5
GIT_CACHE_PATH = path.join(DEPOT_TOOLS_DIR, 'git_cache.py')
GCLIENT_PATH = path.join(DEPOT_TOOLS_DIR, 'gclient.py')
class SubprocessFailed(Exception):
def __init__(self, message, code, output):
Exception.__init__(self, message)
self.code = code
self.output = output
class PatchFailed(SubprocessFailed):
pass
class GclientSyncFailed(SubprocessFailed):
pass
class InvalidDiff(Exception):
pass
RETRY = object()
OK = object()
FAIL = object()
class PsPrinter(object):
def __init__(self, interval=300):
self.interval = interval
self.active = sys.platform.startswith('linux2')
self.thread = None
@staticmethod
def print_pstree():
"""Debugging function used to print "ps auxwwf" for stuck processes."""
subprocess.call(['ps', 'auxwwf'])
def poke(self):
if self.active:
self.cancel()
self.thread = threading.Timer(self.interval, self.print_pstree)
self.thread.start()
def cancel(self):
if self.active and self.thread is not None:
self.thread.cancel()
self.thread = None
def call(*args, **kwargs): # pragma: no cover
"""Interactive subprocess call."""
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT
kwargs.setdefault('bufsize', BUF_SIZE)
cwd = kwargs.get('cwd', os.getcwd())
stdin_data = kwargs.pop('stdin_data', None)
if stdin_data:
kwargs['stdin'] = subprocess.PIPE
out = cStringIO.StringIO()
new_env = kwargs.get('env', {})
env = os.environ.copy()
env.update(new_env)
kwargs['env'] = env
if new_env:
print '===Injecting Environment Variables==='
for k, v in sorted(new_env.items()):
print '%s: %s' % (k, v)
print '===Running %s ===' % (' '.join(args),)
print 'In directory: %s' % cwd
start_time = time.time()
proc = subprocess.Popen(args, **kwargs)
if stdin_data:
proc.stdin.write(stdin_data)
proc.stdin.close()
psprinter = PsPrinter()
# This is here because passing 'sys.stdout' into stdout for proc will
# produce out of order output.
hanging_cr = False
while True:
psprinter.poke()
buf = proc.stdout.read(BUF_SIZE)
if not buf:
break
if hanging_cr:
buf = '\r' + buf
hanging_cr = buf.endswith('\r')
if hanging_cr:
buf = buf[:-1]
buf = buf.replace('\r\n', '\n').replace('\r', '\n')
sys.stdout.write(buf)
out.write(buf)
if hanging_cr:
sys.stdout.write('\n')
out.write('\n')
psprinter.cancel()
code = proc.wait()
elapsed_time = ((time.time() - start_time) / 60.0)
outval = out.getvalue()
if code:
print '===Failed in %.1f mins of %s ===' % (elapsed_time, ' '.join(args))
print
raise SubprocessFailed('%s failed with code %d in %s.' %
(' '.join(args), code, cwd),
code, outval)
print '===Succeeded in %.1f mins of %s ===' % (elapsed_time, ' '.join(args))
print
return outval
def git(*args, **kwargs): # pragma: no cover
"""Wrapper around call specifically for Git commands."""
if args and args[0] == 'cache':
# Rewrite "git cache" calls into "python git_cache.py".
cmd = (sys.executable, '-u', GIT_CACHE_PATH) + args[1:]
else:
git_executable = 'git'
# On windows, subprocess doesn't fuzzy-match 'git' to 'git.bat', so we
# have to do it explicitly. This is better than passing shell=True.
if sys.platform.startswith('win'):
git_executable += '.bat'
cmd = (git_executable,) + args
return call(*cmd, **kwargs)
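# Illustrative sketch (not part of the original script; paths and URLs below
# are hypothetical): the wrapper above dispatches "git cache ..." invocations
# to depot_tools' git_cache.py, while everything else goes to the plain git
# (or git.bat) executable.
#   git('cache', 'exists', '--cache-dir', '/b/git_cache', 'https://host/repo')
#     -> runs: <python> -u <DEPOT_TOOLS_DIR>/git_cache.py exists --cache-dir ...
#   git('fetch', 'origin', cwd='/b/build/src')
#     -> runs: git fetch origin   (git.bat on Windows)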
def get_gclient_spec(solutions, target_os, target_os_only, target_cpu,
git_cache_dir):
return GCLIENT_TEMPLATE % {
'solutions': pprint.pformat(solutions, indent=4),
'cache_dir': '"%s"' % git_cache_dir,
'target_os': ('\ntarget_os=%s' % target_os) if target_os else '',
'target_os_only': '\ntarget_os_only=%s' % target_os_only,
'target_cpu': ('\ntarget_cpu=%s' % target_cpu) if target_cpu else ''
}
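# Example of the generated spec, for illustration only (the solution values
# and cache path are hypothetical; the real text comes from GCLIENT_TEMPLATE
# above, with the solutions list pretty-printed by pprint):
#   get_gclient_spec(
#       [{'name': 'src', 'url': CHROMIUM_SRC_URL, 'managed': False}],
#       target_os=['android'], target_os_only=False, target_cpu=None,
#       git_cache_dir='/b/git_cache')
# produces roughly:
#   solutions = [{'managed': False, 'name': 'src', 'url': '...src.git'}]
#   cache_dir = r"/b/git_cache"
#   target_os=['android']
#   target_os_only=False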
def solutions_printer(solutions):
"""Prints gclient solution to stdout."""
print 'Gclient Solutions'
print '================='
for solution in solutions:
name = solution.get('name')
url = solution.get('url')
print '%s (%s)' % (name, url)
if solution.get('deps_file'):
print ' Dependencies file is %s' % solution['deps_file']
if 'managed' in solution:
print ' Managed mode is %s' % ('ON' if solution['managed'] else 'OFF')
custom_vars = solution.get('custom_vars')
if custom_vars:
print ' Custom Variables:'
for var_name, var_value in sorted(custom_vars.iteritems()):
print ' %s = %s' % (var_name, var_value)
custom_deps = solution.get('custom_deps')
if 'custom_deps' in solution:
print ' Custom Dependencies:'
for deps_name, deps_value in sorted(custom_deps.iteritems()):
if deps_value:
print ' %s -> %s' % (deps_name, deps_value)
else:
print ' %s: Ignore' % deps_name
for k, v in solution.iteritems():
# Print out all the keys we don't know about.
if k in ['name', 'url', 'deps_file', 'custom_vars', 'custom_deps',
'managed']:
continue
print ' %s is %s' % (k, v)
print
def modify_solutions(input_solutions):
"""Modifies urls in solutions to point at Git repos.
  Returns: a new list of solution dictionaries
"""
assert input_solutions
solutions = copy.deepcopy(input_solutions)
for solution in solutions:
original_url = solution['url']
parsed_url = urlparse.urlparse(original_url)
parsed_path = parsed_url.path
solution['managed'] = False
    # We don't want gclient to be using a safesync URL. Instead it should be
    # using the lkgr/lkcr branches/tags.
if 'safesync_url' in solution:
print 'Removing safesync url %s from %s' % (solution['safesync_url'],
parsed_path)
del solution['safesync_url']
return solutions
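# Illustrative example (hypothetical input): a solution carrying a
# 'safesync_url' comes out unmanaged and without that key.
#   modify_solutions([{'name': 'src',
#                      'url': 'https://chromium.googlesource.com/chromium/src.git',
#                      'safesync_url': 'https://example.com/lkgr'}])
#   -> [{'name': 'src',
#        'url': 'https://chromium.googlesource.com/chromium/src.git',
#        'managed': False}]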
def remove(target, cleanup_dir):
"""Remove a target by moving it into cleanup_dir."""
if not path.exists(cleanup_dir):
os.makedirs(cleanup_dir)
dest = path.join(cleanup_dir, '%s_%s' % (
path.basename(target), uuid.uuid4().hex))
print 'Marking for removal %s => %s' % (target, dest)
try:
os.rename(target, dest)
except Exception as e:
print 'Error renaming %s to %s: %s' % (target, dest, str(e))
raise
def ensure_no_checkout(dir_names, cleanup_dir):
"""Ensure that there is no undesired checkout under build/."""
build_dir = os.getcwd()
has_checkout = any(path.exists(path.join(build_dir, dir_name, '.git'))
for dir_name in dir_names)
if has_checkout:
for filename in os.listdir(build_dir):
deletion_target = path.join(build_dir, filename)
print '.git detected in checkout, deleting %s...' % deletion_target,
remove(deletion_target, cleanup_dir)
print 'done'
def call_gclient(*args, **kwargs):
"""Run the "gclient.py" tool with the supplied arguments.
Args:
args: command-line arguments to pass to gclient.
kwargs: keyword arguments to pass to call.
"""
cmd = [sys.executable, '-u', GCLIENT_PATH]
cmd.extend(args)
# Disable metrics collection on bots, since it's not supported anyway.
kwargs.setdefault('env', {})['DEPOT_TOOLS_METRICS'] = '0'
return call(*cmd, **kwargs)
def gclient_configure(solutions, target_os, target_os_only, target_cpu,
git_cache_dir):
"""Should do the same thing as gclient --spec='...'."""
with codecs.open('.gclient', mode='w', encoding='utf-8') as f:
f.write(get_gclient_spec(
solutions, target_os, target_os_only, target_cpu, git_cache_dir))
@contextmanager
def git_config_if_not_set(key, value):
"""Set git config for key equal to value if key was not set.
If key was not set, unset it once we're done."""
should_unset = True
try:
git('config', '--global', key)
should_unset = False
except SubprocessFailed as e:
git('config', '--global', key, value)
try:
yield
finally:
if should_unset:
git('config', '--global', '--unset', key)
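# Sketch of intended usage (the key/value are hypothetical): the identity is
# only set for the duration of the block, and only if it was not already
# configured globally.
#   with git_config_if_not_set('user.email', '[email protected]'):
#     git('commit', '-m', 'message', cwd=checkout_dir)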
def gclient_sync(
with_branch_heads, with_tags, revisions, break_repo_locks,
disable_syntax_validation, patch_refs, gerrit_reset,
gerrit_rebase_patch_ref):
# We just need to allocate a filename.
fd, gclient_output_file = tempfile.mkstemp(suffix='.json')
os.close(fd)
args = ['sync', '--verbose', '--reset', '--force',
'--ignore_locks', '--output-json', gclient_output_file,
'--nohooks', '--noprehooks', '--delete_unversioned_trees']
if with_branch_heads:
args += ['--with_branch_heads']
if with_tags:
args += ['--with_tags']
if break_repo_locks:
args += ['--break_repo_locks']
if disable_syntax_validation:
args += ['--disable-syntax-validation']
for name, revision in sorted(revisions.iteritems()):
if revision.upper() == 'HEAD':
revision = 'origin/master'
args.extend(['--revision', '%s@%s' % (name, revision)])
if patch_refs:
for patch_ref in patch_refs:
args.extend(['--patch-ref', patch_ref])
if not gerrit_reset:
args.append('--no-reset-patch-ref')
if not gerrit_rebase_patch_ref:
args.append('--no-rebase-patch-ref')
try:
call_gclient(*args)
except SubprocessFailed as e:
# If gclient sync is handling patching, parse the output for a patch error
# message.
if 'Failed to apply patch.' in e.output:
raise PatchFailed(e.message, e.code, e.output)
# Throw a GclientSyncFailed exception so we can catch this independently.
raise GclientSyncFailed(e.message, e.code, e.output)
else:
with open(gclient_output_file) as f:
return json.load(f)
finally:
os.remove(gclient_output_file)
def gclient_revinfo():
return call_gclient('revinfo', '-a') or ''
def normalize_git_url(url):
"""Normalize a git url to be consistent.
  This recognizes urls to the googlesource.com domain. It ensures that
  the url:
    * Does not end in .git
    * Does not contain /a/ in its path.
"""
try:
p = urlparse.urlparse(url)
except Exception:
# Not a url, just return it back.
return url
if not p.netloc.endswith('.googlesource.com'):
# Not a googlesource.com URL, can't normalize this, just return as is.
return url
upath = p.path
if upath.startswith('/a'):
upath = upath[len('/a'):]
if upath.endswith('.git'):
upath = upath[:-len('.git')]
return 'https://%s%s' % (p.netloc, upath)
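# Illustrative examples (URLs are hypothetical):
#   normalize_git_url('https://chromium.googlesource.com/a/chromium/src.git')
#     -> 'https://chromium.googlesource.com/chromium/src'
#   normalize_git_url('https://example.com/foo.git')
#     -> 'https://example.com/foo.git'   (non-googlesource.com URLs pass through)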
# TODO(hinoka): Remove this once all downstream recipes stop using this format.
def create_manifest_old():
manifest = {}
output = gclient_revinfo()
for line in output.strip().splitlines():
match = REVINFO_RE.match(line.strip())
if match:
manifest[match.group(1)] = {
'repository': match.group(2),
'revision': match.group(3),
}
else:
print "WARNING: Couldn't match revinfo line:\n%s" % line
return manifest
# TODO(hinoka): Include patch revision.
def create_manifest(gclient_output, patch_root):
"""Return the JSONPB equivilent of the source manifest proto.
The source manifest proto is defined here:
https://chromium.googlesource.com/infra/luci/recipes-py/+/master/recipe_engine/source_manifest.proto
This is based off of:
* The gclient_output (from calling gclient.py --output-json) which contains
the directory -> repo:revision mapping.
* Gerrit Patch info which contains info about patched revisions.
We normalize the URLs using the normalize_git_url function.
"""
manifest = {
'version': 0, # Currently the only valid version is 0.
}
dirs = {}
if patch_root:
patch_root = patch_root.strip('/') # Normalize directory names.
for directory, info in gclient_output.get('solutions', {}).iteritems():
directory = directory.strip('/') # Normalize the directory name.
# The format of the url is "https://repo.url/blah.git@abcdefabcdef" or
# just "https://repo.url/blah.git"
url = info.get('url') or ''
repo, _, url_revision = url.partition('@')
repo = normalize_git_url(repo)
    # There are two places to get the revision from; we check them in this order:
# 1. In the "revision" field
# 2. At the end of the URL, after @
revision = info.get('revision') or url_revision
if repo and revision:
dirs[directory] = {
'git_checkout': {
'repo_url': repo,
'revision': revision,
}
}
manifest['directories'] = dirs
return manifest
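# Sketch of the resulting structure for a single hypothetical checkout entry
# in gclient_output (the repo URL and revision are made up for illustration):
#   {'solutions': {'src/': {'url': 'https://host.googlesource.com/repo.git@deadbeef'}}}
# maps to:
#   {'version': 0,
#    'directories': {'src': {'git_checkout': {
#        'repo_url': 'https://host.googlesource.com/repo',
#        'revision': 'deadbeef'}}}}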
def get_commit_message_footer_map(message):
"""Returns: (dict) A dictionary of commit message footer entries.
"""
footers = {}
# Extract the lines in the footer block.
lines = []
for line in message.strip().splitlines():
line = line.strip()
if len(line) == 0:
del lines[:]
continue
lines.append(line)
# Parse the footer
for line in lines:
m = COMMIT_FOOTER_ENTRY_RE.match(line)
if not m:
# If any single line isn't valid, continue anyway for compatibility with
# Gerrit (which itself uses JGit for this).
continue
footers[m.group(1)] = m.group(2).strip()
return footers
def get_commit_message_footer(message, key):
"""Returns: (str/None) The footer value for 'key', or None if none was found.
"""
return get_commit_message_footer_map(message).get(key)
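# Illustrative example (the commit message is hypothetical): only the last
# footer block of the message is parsed, and each "Key: value" line becomes
# an entry.
#   msg = 'Fix a bug\n\nCr-Commit-Position: refs/heads/master@{#12345}'
#   get_commit_message_footer_map(msg)
#     -> {'Cr-Commit-Position': 'refs/heads/master@{#12345}'}
#   get_commit_message_footer(msg, COMMIT_POSITION_FOOTER_KEY)
#     -> 'refs/heads/master@{#12345}'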
# Derived from:
# http://code.activestate.com/recipes/577972-disk-usage/?in=user-4178764
def get_total_disk_space():
cwd = os.getcwd()
# Windows is the only platform that doesn't support os.statvfs, so
# we need to special case this.
if sys.platform.startswith('win'):
_, total, free = (ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong())
if sys.version_info >= (3,) or isinstance(cwd, unicode):
fn = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
fn = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fn(cwd, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
# WinError() will fetch the last error code.
raise ctypes.WinError()
return (total.value, free.value)
else:
st = os.statvfs(cwd)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
return (total, free)
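# Example usage (illustrative only): both values are returned in bytes on
# every platform, so converting to GiB is up to the caller.
#   total, free = get_total_disk_space()
#   print 'disk: %.1f GiB free of %.1f GiB' % (
#       free / (1024.0 ** 3), total / (1024.0 ** 3))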
def _get_target_branch_and_revision(solution_name, git_url, revisions):
normalized_name = solution_name.strip('/')
if normalized_name in revisions:
configured = revisions[normalized_name]
elif git_url in revisions:
configured = revisions[git_url]
else:
return 'master', 'HEAD'
parts = configured.split(':', 1)
if len(parts) == 2:
# Support for "branch:revision" syntax.
return parts
if COMMIT_HASH_RE.match(configured):
return 'master', configured
return configured, 'HEAD'
def get_target_pin(solution_name, git_url, revisions):
"""Returns revision to be checked out if it is pinned, else None."""
_, revision = _get_target_branch_and_revision(
solution_name, git_url, revisions)
if COMMIT_HASH_RE.match(revision):
return revision
return None
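# Illustrative examples (revision values are hypothetical): a bare commit hash
# pins the solution, "branch:revision" selects both, and anything else is
# treated as a branch checked out at HEAD.
#   _get_target_branch_and_revision('src', url, {'src': 40 * 'a'})
#     -> ('master', 'aaaa...a')               # get_target_pin returns the hash
#   _get_target_branch_and_revision('src', url, {'src': 'refs/branch-heads/4472:HEAD'})
#     -> ('refs/branch-heads/4472', 'HEAD')   # get_target_pin returns None
#   _get_target_branch_and_revision('src', url, {})
#     -> ('master', 'HEAD')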
def force_solution_revision(solution_name, git_url, revisions, cwd):
branch, revision = _get_target_branch_and_revision(
solution_name, git_url, revisions)
if revision and revision.upper() != 'HEAD':
treeish = revision
else:
# TODO(machenbach): This won't work with branch-heads, as Gerrit's
# destination branch would be e.g. refs/branch-heads/123. But here
# we need to pass refs/remotes/branch-heads/123 to check out.
# This will also not work if somebody passes a local refspec like
# refs/heads/master. It needs to translate to refs/remotes/origin/master
# first. See also https://crbug.com/740456 .
if branch.startswith(('refs/', 'origin/')):
treeish = branch
else:
treeish = 'origin/' + branch
# Note that -- argument is necessary to ensure that git treats `treeish`
# argument as revision or ref, and not as a file/directory which happens to
# have the exact same name.
git('checkout', '--force', treeish, '--', cwd=cwd)
def _has_in_git_cache(revision_sha1, refs, git_cache_dir, url):
"""Returns whether given revision_sha1 is contained in cache of a given repo.
"""
try:
mirror_dir = git(
'cache', 'exists', '--quiet', '--cache-dir', git_cache_dir, url).strip()
git('cat-file', '-e', revision_sha1, cwd=mirror_dir)
for ref in refs:
git('cat-file', '-e', ref, cwd=mirror_dir)
return True
except SubprocessFailed:
return False
def is_broken_repo_dir(repo_dir):
# Treat absence of 'config' as a signal of a partially deleted repo.
return not path.exists(os.path.join(repo_dir, '.git', 'config'))
def _maybe_break_locks(checkout_path, tries=3):
"""This removes all .lock files from this repo's .git directory.
  In particular, this will clean up index.lock files, as well as ref lock
  files.
"""
def attempt():
git_dir = os.path.join(checkout_path, '.git')
for dirpath, _, filenames in os.walk(git_dir):
for filename in filenames:
if filename.endswith('.lock'):
to_break = os.path.join(dirpath, filename)
print 'breaking lock: %s' % to_break
try:
os.remove(to_break)
except OSError as ex:
print 'FAILED to break lock: %s: %s' % (to_break, ex)
raise
for _ in xrange(tries):
try:
attempt()
return
except Exception:
pass
def git_checkouts(solutions, revisions, refs, git_cache_dir, cleanup_dir):
build_dir = os.getcwd()
first_solution = True
for sln in solutions:
sln_dir = path.join(build_dir, sln['name'])
_git_checkout(sln, sln_dir, revisions, refs, git_cache_dir, cleanup_dir)
if first_solution:
git_ref = git('log', '--format=%H', '--max-count=1',
cwd=path.join(build_dir, sln['name'])
).strip()
first_solution = False
return git_ref
def _git_checkout(sln, sln_dir, revisions, refs, git_cache_dir, cleanup_dir):
name = sln['name']
url = sln['url']
populate_cmd = (['cache', 'populate', '--ignore_locks', '-v',
'--cache-dir', git_cache_dir, url, '--reset-fetch-config'])
for ref in refs:
populate_cmd.extend(['--ref', ref])
env = {}
if url == CHROMIUM_SRC_URL or url + '.git' == CHROMIUM_SRC_URL:
# This is for performance investigation of `git fetch` in chromium/src.
env = {
'GIT_TRACE': 'true',
'GIT_TRACE_PERFORMANCE': 'true',
}
# Step 1: populate/refresh cache, if necessary.
pin = get_target_pin(name, url, revisions)
if not pin:
# Refresh only once.
git(*populate_cmd, env=env)
elif _has_in_git_cache(pin, refs, git_cache_dir, url):
# No need to fetch at all, because we already have needed revision.
pass
else:
    # We may need to retry a bit due to eventual consistency in replication of
# git servers.
soft_deadline = time.time() + 60
attempt = 0
while True:
attempt += 1
# TODO(tandrii): propagate the pin to git server per recommendation of
# maintainers of *.googlesource.com (workaround git server replication
# lag).
git(*populate_cmd, env=env)
if _has_in_git_cache(pin, refs, git_cache_dir, url):
break
overrun = time.time() - soft_deadline
# Only kick in deadline after second attempt to ensure we retry at least
# once after initial fetch from not-yet-replicated server.
if attempt >= 2 and overrun > 0:
print 'Ran %s seconds past deadline. Aborting.' % (overrun,)
# TODO(tandrii): raise exception immediately here, instead of doing
# useless step 2 trying to fetch something that we know doesn't exist
# in cache **after production data gives us confidence to do so**.
break
sleep_secs = min(60, 2**attempt)
print 'waiting %s seconds and trying to fetch again...' % sleep_secs
time.sleep(sleep_secs)
# Step 2: populate a checkout from local cache. All operations are local.
mirror_dir = git(
'cache', 'exists', '--quiet', '--cache-dir', git_cache_dir, url).strip()
first_try = True
while True:
try:
# If repo deletion was aborted midway, it may have left .git in broken
# state.
if path.exists(sln_dir) and is_broken_repo_dir(sln_dir):
print 'Git repo %s appears to be broken, removing it' % sln_dir
remove(sln_dir, cleanup_dir)
# Use "tries=1", since we retry manually in this loop.
if not path.isdir(sln_dir):
git('clone', '--no-checkout', '--local', '--shared', mirror_dir,
sln_dir)
_git_disable_gc(sln_dir)
else:
_git_disable_gc(sln_dir)
git('remote', 'set-url', 'origin', mirror_dir, cwd=sln_dir)
git('fetch', 'origin', cwd=sln_dir)
git('remote', 'set-url', '--push', 'origin', url, cwd=sln_dir)
for ref in refs:
refspec = '%s:%s' % (ref, ref.lstrip('+'))
git('fetch', 'origin', refspec, cwd=sln_dir)
# Windows sometimes has trouble deleting files.
# This can make git commands that rely on locks fail.
# Try a few times in case Windows has trouble again (and again).
if sys.platform.startswith('win'):
_maybe_break_locks(sln_dir, tries=3)
force_solution_revision(name, url, revisions, sln_dir)
git('clean', '-dff', cwd=sln_dir)
return
except SubprocessFailed as e:
      # Exited abnormally, there's probably something wrong.
print 'Something failed: %s.' % str(e)
if first_try:
first_try = False
        # Let's wipe the checkout and try again.
remove(sln_dir, cleanup_dir)
else:
raise
def _git_disable_gc(cwd):
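  """Disables automatic git garbage collection for the checkout at |cwd|."""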
git('config', 'gc.auto', '0', cwd=cwd)
git('config', 'gc.autodetach', '0', cwd=cwd)
git('config', 'gc.autopacklimit', '0', cwd=cwd)
def _download(url):
"""Fetch url and return content, with retries for flake."""
for attempt in xrange(ATTEMPTS):
try:
return urllib2.urlopen(url).read()
except Exception:
if attempt == ATTEMPTS - 1:
raise
def get_commit_position(git_path, revision='HEAD'):
"""Dumps the 'git' log for a specific revision and parses out the commit
position.
If a commit position metadata key is found, its value will be returned.
"""
# TODO(iannucci): Use git-footers for this.
git_log = git('log', '--format=%B', '-n1', revision, cwd=git_path)
footer_map = get_commit_message_footer_map(git_log)
# Search for commit position metadata
value = (footer_map.get(COMMIT_POSITION_FOOTER_KEY) or
footer_map.get(COMMIT_ORIGINAL_POSITION_FOOTER_KEY))
if value:
return value
return None
def parse_got_revision(gclient_output, got_revision_mapping):
"""Translate git gclient revision mapping to build properties."""
properties = {}
solutions_output = {
# Make sure path always ends with a single slash.
'%s/' % path.rstrip('/') : solution_output for path, solution_output
in gclient_output['solutions'].iteritems()
}
for property_name, dir_name in got_revision_mapping.iteritems():
# Make sure dir_name always ends with a single slash.
dir_name = '%s/' % dir_name.rstrip('/')
if dir_name not in solutions_output:
continue
solution_output = solutions_output[dir_name]
if solution_output.get('scm') is None:
# This is an ignored DEPS, so the output got_revision should be 'None'.
revision = commit_position = None
else:
# Since we are using .DEPS.git, everything had better be git.
assert solution_output.get('scm') == 'git'
revision = git('rev-parse', 'HEAD', cwd=dir_name).strip()
commit_position = get_commit_position(dir_name)
properties[property_name] = revision
if commit_position:
properties['%s_cp' % property_name] = commit_position
return properties
def emit_json(out_file, did_run, gclient_output=None, **kwargs):
"""Write run information into a JSON file."""
output = {}
output.update(gclient_output if gclient_output else {})
output.update({'did_run': did_run})
output.update(kwargs)
with open(out_file, 'wb') as f:
f.write(json.dumps(output, sort_keys=True))
def ensure_checkout(solutions, revisions, first_sln, target_os, target_os_only,
target_cpu, patch_root, patch_refs,
gerrit_rebase_patch_ref, refs, git_cache_dir,
cleanup_dir, gerrit_reset, disable_syntax_validation):
# Get a checkout of each solution, without DEPS or hooks.
# Calling git directly because there is no way to run Gclient without
# invoking DEPS.
print 'Fetching Git checkout'
git_checkouts(solutions, revisions, refs, git_cache_dir, cleanup_dir)
# Ensure our build/ directory is set up with the correct .gclient file.
gclient_configure(solutions, target_os, target_os_only, target_cpu,
git_cache_dir)
# Windows sometimes has trouble deleting files. This can make git commands
# that rely on locks fail.
  break_repo_locks = sys.platform.startswith('win')
# We want to pass all non-solution revisions into the gclient sync call.
solution_dirs = {sln['name'] for sln in solutions}
gc_revisions = {
dirname: rev for dirname, rev in revisions.iteritems()
if dirname not in solution_dirs}
# Gclient sometimes ignores "unmanaged": "False" in the gclient solution
# if --revision <anything> is passed (for example, for subrepos).
# This forces gclient to always treat solutions deps as unmanaged.
for solution_name in list(solution_dirs):
gc_revisions[solution_name] = 'unmanaged'
with git_config_if_not_set('user.name', 'chrome-bot'), \
git_config_if_not_set('user.email', '[email protected]'):
# Let gclient do the DEPS syncing.
# The branch-head refspec is a special case because it's possible Chrome
# src, which contains the branch-head refspecs, is DEPSed in.
gclient_output = gclient_sync(
BRANCH_HEADS_REFSPEC in refs,
TAGS_REFSPEC in refs,
gc_revisions,
break_repo_locks,
disable_syntax_validation,
patch_refs,
gerrit_reset,
gerrit_rebase_patch_ref)
# Now that gclient_sync has finished, we should revert any .DEPS.git so that
# presubmit doesn't complain about it being modified.
if git('ls-files', '.DEPS.git', cwd=first_sln).strip():
git('checkout', 'HEAD', '--', '.DEPS.git', cwd=first_sln)
# Reset the deps_file point in the solutions so that hooks get run properly.
for sln in solutions:
sln['deps_file'] = sln.get('deps_file', 'DEPS').replace('.DEPS.git', 'DEPS')
gclient_configure(solutions, target_os, target_os_only, target_cpu,
git_cache_dir)
return gclient_output
def parse_revisions(revisions, root):
"""Turn a list of revision specs into a nice dictionary.
  We will always return a dict with {root: something}. By default, if root is
  unspecified or revisions is [], the root's revision will be 'HEAD'.
"""
results = {root.strip('/'): 'HEAD'}
expanded_revisions = []
for revision in revisions:
# Allow rev1,rev2,rev3 format.
# TODO(hinoka): Delete this when webkit switches to recipes.
expanded_revisions.extend(revision.split(','))
for revision in expanded_revisions:
split_revision = revision.split('@', 1)
if len(split_revision) == 1:
# This is just a plain revision, set it as the revision for root.
results[root] = split_revision[0]
else:
# This is an alt_root@revision argument.
current_root, current_rev = split_revision
parsed_root = urlparse.urlparse(current_root)
if parsed_root.scheme in ['http', 'https']:
# We want to normalize git urls into .git urls.
normalized_root = 'https://' + parsed_root.netloc + parsed_root.path
if not normalized_root.endswith('.git'):
normalized_root += '.git'
elif parsed_root.scheme:
print 'WARNING: Unrecognized scheme %s, ignoring' % parsed_root.scheme
continue
else:
# This is probably a local path.
normalized_root = current_root.strip('/')
results[normalized_root] = current_rev
return results
def parse_args():
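  """Parses command-line options and returns (options, args)."""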
parse = optparse.OptionParser()
parse.add_option('--root', dest='patch_root',
help='DEPRECATED: Use --patch_root.')
parse.add_option('--patch_root', help='Directory to patch on top of.')
parse.add_option('--patch_ref', dest='patch_refs', action='append', default=[],
help='Git repository & ref to apply, as REPO@REF.')
parse.add_option('--gerrit_no_rebase_patch_ref', action='store_true',
help='Bypass rebase of Gerrit patch ref after checkout.')
parse.add_option('--gerrit_no_reset', action='store_true',
help='Bypass calling reset after applying a gerrit ref.')
  parse.add_option('--specs', help='Gclient spec.')
  parse.add_option('--spec-path', help='Path to a Gclient spec file.')
parse.add_option('--revision_mapping_file',
help=('Path to a json file of the form '
'{"property_name": "path/to/repo/"}'))
parse.add_option('--revision', action='append', default=[],
help='Revision to check out. Can be any form of git ref. '
'Can prepend root@<rev> to specify which repository, '
'where root is either a filesystem path or git https '
'url. To specify Tip of Tree, set rev to HEAD. ')
# TODO(machenbach): Remove the flag when all uses have been removed.
parse.add_option('--output_manifest', action='store_true',
help=('Deprecated.'))
parse.add_option('--clobber', action='store_true',
help='Delete checkout first, always')
parse.add_option('--output_json',
help='Output JSON information into a specified file')
parse.add_option('--refs', action='append',
help='Also fetch this refspec for the main solution(s). '
'Eg. +refs/branch-heads/*')
parse.add_option('--with_branch_heads', action='store_true',
help='Always pass --with_branch_heads to gclient. This '
'does the same thing as --refs +refs/branch-heads/*')
parse.add_option('--with_tags', action='store_true',
help='Always pass --with_tags to gclient. This '
'does the same thing as --refs +refs/tags/*')
parse.add_option('--git-cache-dir', help='Path to git cache directory.')
parse.add_option('--cleanup-dir',
help='Path to a cleanup directory that can be used for '
'deferred file cleanup.')
parse.add_option(
'--disable-syntax-validation', action='store_true',
help='Disable validation of .gclient and DEPS syntax.')
options, args = parse.parse_args()
if options.spec_path:
if options.specs:
parse.error('At most one of --spec-path and --specs may be specified.')
with open(options.spec_path, 'r') as fd:
options.specs = fd.read()
if not options.output_json:
parse.error('--output_json is required')
if not options.git_cache_dir:
parse.error('--git-cache-dir is required')
if not options.refs:
options.refs = []
if options.with_branch_heads:
options.refs.append(BRANCH_HEADS_REFSPEC)
del options.with_branch_heads
if options.with_tags:
options.refs.append(TAGS_REFSPEC)
del options.with_tags
try:
if not options.revision_mapping_file:
parse.error('--revision_mapping_file is required')
with open(options.revision_mapping_file, 'r') as f:
options.revision_mapping = json.load(f)
except Exception as e:
print (
        'WARNING: Caught exception while parsing revision_mapping*: %s'
% (str(e),)
)
# Because we print CACHE_DIR out into a .gclient file, and then later run
# eval() on it, backslashes need to be escaped, otherwise "E:\b\build" gets
# parsed as "E:[\x08][\x08]uild".
if sys.platform.startswith('win'):
options.git_cache_dir = options.git_cache_dir.replace('\\', '\\\\')
return options, args
def prepare(options, git_slns, active):
"""Prepares the target folder before we checkout."""
dir_names = [sln.get('name') for sln in git_slns if 'name' in sln]
if options.clobber:
ensure_no_checkout(dir_names, options.cleanup_dir)
# Make sure we tell recipes that we didn't run if the script exits here.
emit_json(options.output_json, did_run=active)
total_disk_space, free_disk_space = get_total_disk_space()
total_disk_space_gb = int(total_disk_space / (1024 * 1024 * 1024))
used_disk_space_gb = int((total_disk_space - free_disk_space)
/ (1024 * 1024 * 1024))
percent_used = int(used_disk_space_gb * 100 / total_disk_space_gb)
step_text = '[%dGB/%dGB used (%d%%)]' % (used_disk_space_gb,
total_disk_space_gb,
percent_used)
# The first solution is where the primary DEPS file resides.
first_sln = dir_names[0]
# Split all the revision specifications into a nice dict.
print 'Revisions: %s' % options.revision
revisions = parse_revisions(options.revision, first_sln)
print 'Fetching Git checkout at %s@%s' % (first_sln, revisions[first_sln])
return revisions, step_text
def checkout(options, git_slns, specs, revisions, step_text):
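  """Runs the checkout for all solutions and writes the results to the output JSON file."""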
print 'Using Python version: %s' % (sys.version,)
print 'Checking git version...'
ver = git('version').strip()
print 'Using %s' % ver
try:
protocol = git('config', '--get', 'protocol.version')
print 'Using git protocol version %s' % protocol
except SubprocessFailed as e:
print 'git protocol version is not specified.'
first_sln = git_slns[0]['name']
dir_names = [sln.get('name') for sln in git_slns if 'name' in sln]
try:
# Outer try is for catching patch failures and exiting gracefully.
# Inner try is for catching gclient failures and retrying gracefully.
try:
checkout_parameters = dict(
# First, pass in the base of what we want to check out.
solutions=git_slns,
revisions=revisions,
first_sln=first_sln,
# Also, target os variables for gclient.
target_os=specs.get('target_os', []),
target_os_only=specs.get('target_os_only', False),
# Also, target cpu variables for gclient.
target_cpu=specs.get('target_cpu', []),
# Then, pass in information about how to patch.
patch_root=options.patch_root,
patch_refs=options.patch_refs,
gerrit_rebase_patch_ref=not options.gerrit_no_rebase_patch_ref,
# Finally, extra configurations cleanup dir location.
refs=options.refs,
git_cache_dir=options.git_cache_dir,
cleanup_dir=options.cleanup_dir,
gerrit_reset=not options.gerrit_no_reset,
disable_syntax_validation=options.disable_syntax_validation)
gclient_output = ensure_checkout(**checkout_parameters)
except GclientSyncFailed:
print 'We failed gclient sync, lets delete the checkout and retry.'
ensure_no_checkout(dir_names, options.cleanup_dir)
gclient_output = ensure_checkout(**checkout_parameters)
except PatchFailed as e:
# Tell recipes information such as root, got_revision, etc.
emit_json(options.output_json,
did_run=True,
root=first_sln,
patch_apply_return_code=e.code,
patch_root=options.patch_root,
patch_failure=True,
failed_patch_body=e.output,
step_text='%s PATCH FAILED' % step_text,
fixed_revisions=revisions)
raise
# Take care of got_revisions outputs.
revision_mapping = GOT_REVISION_MAPPINGS.get(git_slns[0]['url'], {})
if options.revision_mapping:
revision_mapping.update(options.revision_mapping)
# If the repo is not in the default GOT_REVISION_MAPPINGS and no
# revision_mapping were specified on the command line then
# default to setting 'got_revision' based on the first solution.
if not revision_mapping:
revision_mapping['got_revision'] = first_sln
got_revisions = parse_got_revision(gclient_output, revision_mapping)
if not got_revisions:
# TODO(hinoka): We should probably bail out here, but in the interest
# of giving mis-configured bots some time to get fixed use a dummy
# revision here.
got_revisions = { 'got_revision': 'BOT_UPDATE_NO_REV_FOUND' }
#raise Exception('No got_revision(s) found in gclient output')
# Tell recipes information such as root, got_revision, etc.
emit_json(options.output_json,
did_run=True,
root=first_sln,
patch_root=options.patch_root,
step_text=step_text,
fixed_revisions=revisions,
properties=got_revisions,
manifest=create_manifest_old(),
source_manifest=create_manifest(
gclient_output, options.patch_root))
def print_debug_info():
print "Debugging info:"
debug_params = {
'CURRENT_DIR': path.abspath(os.getcwd()),
'THIS_DIR': THIS_DIR,
'DEPOT_TOOLS_DIR': DEPOT_TOOLS_DIR,
}
for k, v in sorted(debug_params.iteritems()):
print "%s: %r" % (k, v)
def main():
# Get inputs.
options, _ = parse_args()
# Check if this script should activate or not.
active = True
  # Print a helpful message to tell developers what's going on with this step.
print_debug_info()
# Parse, manipulate, and print the gclient solutions.
specs = {}
exec(options.specs, specs)
orig_solutions = specs.get('solutions', [])
git_slns = modify_solutions(orig_solutions)
solutions_printer(git_slns)
try:
# Dun dun dun, the main part of bot_update.
# gn creates hardlinks during the build. By default, this makes
# `git reset` overwrite the sources of the hardlinks, which causes
# unnecessary rebuilds. (See crbug.com/330461#c13 for an explanation.)
with git_config_if_not_set('core.trustctime', 'false'):
revisions, step_text = prepare(options, git_slns, active)
checkout(options, git_slns, specs, revisions, step_text)
except PatchFailed as e:
# Return a specific non-zero exit code for patch failure (because it is
# a failure), but make it different than other failures to distinguish
# between infra failures (independent from patch author), and patch
# failures (that patch author can fix). However, PatchFailure due to
# download patch failure is still an infra problem.
if e.code == 3:
# Patch download problem.
return 87
# Genuine patch problem.
return 88
if __name__ == '__main__':
sys.exit(main())
| []
| []
| []
| [] | [] | python | 0 | 0 | |
example/example_create_servicegateway_test.go | // Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Example code for creating a Service Gateway
package example
import (
"context"
"fmt"
"os"
"github.com/oracle/oci-go-sdk/v36/common"
"github.com/oracle/oci-go-sdk/v36/core"
"github.com/oracle/oci-go-sdk/v36/example/helpers"
)
func ExampleCreateServiceGateway() {
displayName := "OCI-GOSDK-CreateServiceGateway-Example" // displayName for created VCN and ServiceGateway
compartmentID := os.Getenv("OCI_COMPARTMENT_ID") // OCI_COMPARTMENT_ID env variable must be defined
// initialize VirtualNetworkClient
client, err := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)
ctx := context.Background()
// create VCN
createVcnRequest := core.CreateVcnRequest{
RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
}
createVcnRequest.CompartmentId = common.String(compartmentID)
createVcnRequest.DisplayName = common.String(displayName)
createVcnRequest.CidrBlock = common.String("10.0.0.0/16")
createVcnResponse, err := client.CreateVcn(ctx, createVcnRequest)
helpers.FatalIfError(err)
// create ServiceGateway
createServiceGatewayRequest := core.CreateServiceGatewayRequest{
RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
}
createServiceGatewayRequest.CompartmentId = common.String(compartmentID)
createServiceGatewayRequest.DisplayName = common.String(displayName)
createServiceGatewayRequest.VcnId = createVcnResponse.Id
createServiceGatewayRequest.Services = []core.ServiceIdRequestDetails{}
_, err = client.CreateServiceGateway(ctx, createServiceGatewayRequest)
helpers.FatalIfError(err)
fmt.Println("ServiceGateway created")
// Output:
// ServiceGateway created
}
| [
"\"OCI_COMPARTMENT_ID\""
]
| []
| [
"OCI_COMPARTMENT_ID"
]
| [] | ["OCI_COMPARTMENT_ID"] | go | 1 | 0 | |
datastore/tasks/tasks.go | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A simple command-line task list manager to demonstrate using the
// cloud.google.com/go/datastore package.
package main
import (
"bufio"
"context"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
"text/tabwriter"
"time"
"cloud.google.com/go/datastore"
)
func main() {
projID := os.Getenv("DATASTORE_PROJECT_ID")
if projID == "" {
log.Fatal(`You need to set the environment variable "DATASTORE_PROJECT_ID"`)
}
// [START datastore_build_service]
ctx := context.Background()
client, err := datastore.NewClient(ctx, projID)
// [END datastore_build_service]
if err != nil {
log.Fatalf("Could not create datastore client: %v", err)
}
// Print welcome message.
fmt.Println("Cloud Datastore Task List")
fmt.Println()
usage()
// Read commands from stdin.
scanner := bufio.NewScanner(os.Stdin)
fmt.Print("> ")
for scanner.Scan() {
cmd, args, n := parseCmd(scanner.Text())
switch cmd {
case "new":
if args == "" {
log.Printf("Missing description in %q command", cmd)
usage()
break
}
key, err := AddTask(ctx, client, args)
if err != nil {
log.Printf("Failed to create task: %v", err)
break
}
fmt.Printf("Created new task with ID %d\n", key.ID)
case "done":
if n == 0 {
log.Printf("Missing numerical task ID in %q command", cmd)
usage()
break
}
if err := MarkDone(ctx, client, n); err != nil {
log.Printf("Failed to mark task done: %v", err)
break
}
fmt.Printf("Task %d marked done\n", n)
case "list":
tasks, err := ListTasks(ctx, client)
if err != nil {
log.Printf("Failed to fetch task list: %v", err)
break
}
PrintTasks(os.Stdout, tasks)
case "delete":
if n == 0 {
log.Printf("Missing numerical task ID in %q command", cmd)
usage()
break
}
if err := DeleteTask(ctx, client, n); err != nil {
log.Printf("Failed to delete task: %v", err)
break
}
fmt.Printf("Task %d deleted\n", n)
default:
log.Printf("Unknown command %q", cmd)
usage()
}
fmt.Print("> ")
}
if err := scanner.Err(); err != nil {
log.Fatalf("Failed reading stdin: %v", err)
}
}
// [START datastore_add_entity]
// Task is the model used to store tasks in the datastore.
type Task struct {
Desc string `datastore:"description"`
Created time.Time `datastore:"created"`
Done bool `datastore:"done"`
id int64 // The integer ID used in the datastore.
}
// AddTask adds a task with the given description to the datastore,
// returning the key of the newly created entity.
func AddTask(ctx context.Context, client *datastore.Client, desc string) (*datastore.Key, error) {
task := &Task{
Desc: desc,
Created: time.Now(),
}
key := datastore.IncompleteKey("Task", nil)
return client.Put(ctx, key, task)
}
// [END datastore_add_entity]
// [START datastore_update_entity]
// MarkDone marks the task done with the given ID.
func MarkDone(ctx context.Context, client *datastore.Client, taskID int64) error {
// Create a key using the given integer ID.
key := datastore.IDKey("Task", taskID, nil)
// In a transaction load each task, set done to true and store.
_, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
var task Task
if err := tx.Get(key, &task); err != nil {
return err
}
task.Done = true
_, err := tx.Put(key, &task)
return err
})
return err
}
// [END datastore_update_entity]
// [START datastore_retrieve_entities]
// ListTasks returns all the tasks in ascending order of creation time.
func ListTasks(ctx context.Context, client *datastore.Client) ([]*Task, error) {
var tasks []*Task
// Create a query to fetch all Task entities, ordered by "created".
query := datastore.NewQuery("Task").Order("created")
keys, err := client.GetAll(ctx, query, &tasks)
if err != nil {
return nil, err
}
// Set the id field on each Task from the corresponding key.
for i, key := range keys {
tasks[i].id = key.ID
}
return tasks, nil
}
// [END datastore_retrieve_entities]
// [START datastore_delete_entity]
// DeleteTask deletes the task with the given ID.
func DeleteTask(ctx context.Context, client *datastore.Client, taskID int64) error {
return client.Delete(ctx, datastore.IDKey("Task", taskID, nil))
}
// [END datastore_delete_entity]
// PrintTasks prints the tasks to the given writer.
func PrintTasks(w io.Writer, tasks []*Task) {
// Use a tab writer to help make results pretty.
tw := tabwriter.NewWriter(w, 8, 8, 1, ' ', 0) // Min cell size of 8.
fmt.Fprintf(tw, "ID\tDescription\tStatus\n")
for _, t := range tasks {
if t.Done {
fmt.Fprintf(tw, "%d\t%s\tdone\n", t.id, t.Desc)
} else {
fmt.Fprintf(tw, "%d\t%s\tcreated %v\n", t.id, t.Desc, t.Created)
}
}
tw.Flush()
}
func usage() {
fmt.Print(`Usage:
new <description> Adds a task with a description <description>
done <task-id> Marks a task as done
list Lists all tasks by creation time
delete <task-id> Deletes a task
`)
}
// parseCmd splits a line into a command and optional extra args.
// n will be set if the extra args can be parsed as an int64.
func parseCmd(line string) (cmd, args string, n int64) {
if f := strings.Fields(line); len(f) > 0 {
cmd = f[0]
args = strings.Join(f[1:], " ")
}
if i, err := strconv.ParseInt(args, 10, 64); err == nil {
n = i
}
return cmd, args, n
}
| [
"\"DATASTORE_PROJECT_ID\""
]
| []
| [
"DATASTORE_PROJECT_ID"
]
| [] | ["DATASTORE_PROJECT_ID"] | go | 1 | 0 | |
app.py | import os
import uuid
from flask import Flask, request, render_template, jsonify, redirect, url_for
from utils import get_parsed_file
app = Flask(__name__)
IS_PROD = os.environ.get("IS_PROD", False)
def allowed_file(filename):
allowed_filetypes = ['txt', 'json']
return '.' in filename and filename.rsplit('.', 1)[1].lower() in allowed_filetypes
@app.route('/parse-file', methods=['POST'])
def parse_file():
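    """Parses the uploaded chat export file and returns the parsed chat as JSON."""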
file = request.files['0']
if not allowed_file(file.filename):
response = {
"success": False,
"error_message": "Please upload a valid file!",
}
else:
        _, file_extension = os.path.splitext(file.filename)
filename = str(uuid.uuid4())
tmp_filepath = os.path.join("conversations", filename + file_extension)
file.save(tmp_filepath)
try:
parsed_items, persons_list = get_parsed_file(tmp_filepath)
response = {
"success": True,
"chat": parsed_items,
"users": persons_list
}
except Exception as e:
response = {
"success": False,
"error_message": str(e)
}
os.remove(tmp_filepath)
return jsonify(response), 200
@app.route('/', methods=['GET'])
def main():
ctx = {
'is_prod': IS_PROD
}
if request.args.get('redirect'):
message = "Sorry, we couldn't find the page"
return render_template("index.html", data=ctx, error_message=message)
else:
return render_template("index.html", data=ctx)
@app.errorhandler(404)
def not_found(e):
return redirect(url_for('main', redirect='home'))
if __name__ == "__main__":
app.run(debug=not IS_PROD, host="0.0.0.0", threaded=True)
| []
| []
| [
"IS_PROD"
]
| [] | ["IS_PROD"] | python | 1 | 0 | |
projects/robots/gctronic/e-puck/transfer/populate_xc16_directory.py | #!/usr/bin/env python
# Copyright 1996-2018 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
xc16_version = '1.24'
import glob, os, errno, zipfile, shutil, platform, sys
def mkdir_p(path):
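    """Creates a directory (including parents), ignoring the error if it already exists."""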
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
            raise RuntimeError("Cannot create directory %s" % path)
def zipdir(path, zip):
for root, dirs, files in os.walk(path):
for file in files:
zip.write(os.path.join(root, file))
scriptdir = os.path.dirname(os.path.realpath(__file__)) + os.sep
dstdir = scriptdir + 'xc16' + os.sep
if platform.system() == 'Darwin':
srcdir = '/Applications/microchip/xc16/v' + xc16_version + '/'
elif platform.system() == 'Linux':
srcdir = '/opt/microchip/xc16/v' + xc16_version + '/'
elif platform.system() == 'Windows':
srcdir = os.environ["PROGRAMFILES"] + '\\Microchip\\xc16\\v' + xc16_version + '\\'
else:
raise RuntimeError('Unsupported platform')
files = [
'bin/bin/elf-ar.exe',
'bin/bin/elf-as.exe',
'bin/bin/elf-bin2hex.exe',
'bin/bin/elf-cc1.exe',
'bin/bin/elf-gcc.exe',
'bin/bin/elf-ld.exe',
'bin/device_files/30F6014A.info',
'bin/c30_device.info',
'bin/xc16-ar.exe',
'bin/xc16-as.exe',
'bin/xc16-bin2hex.exe',
'bin/xc16-cc1.exe',
'bin/xc16-gcc.exe',
'bin/xc16-ld.exe',
'include/*.h',
'lib/dsPIC30F/libp30F6014A-elf.a',
'lib/libc-elf.a',
'lib/libdsp-elf.a',
'lib/libm-elf.a',
'lib/libpic30-elf.a',
'support/dsPIC30F/gld/p30F6014A.gld',
'support/dsPIC30F/h/p30F6014A.h',
'support/dsPIC30F/h/p30Fxxxx.h',
'support/dsPIC30F/inc/p30F6014A.inc',
'support/dsPIC30F/inc/p30Fxxxx.inc'
]
print ('copying files...')
sys.stdout.flush()
for f in files:
if platform.system() != 'Windows':
f = f.replace('.exe', '')
f = f.replace('/', os.sep)
dirpath = os.path.dirname(f) + os.sep
dstfile = dstdir + f
dstdirpath = os.path.dirname(dstfile) + os.sep
mkdir_p(dstdirpath)
absolutepaths = glob.glob(srcdir + f)
if len(absolutepaths) == 0:
raise RuntimeError('Could not find ' + srcdir + f)
for path in absolutepaths:
basename = os.path.basename(path)
try:
shutil.copy(path, dstdirpath + basename)
    except IOError as e:
raise RuntimeError("Unable to copy file. %s" % e)
print ('zipping xc16 directory...')
sys.stdout.flush()
zipf = zipfile.ZipFile('xc16-' + xc16_version + '.zip', 'w')
zipdir('xc16', zipf)
zipf.close()
print ('done.')
| []
| []
| [
"PROGRAMFILES"
]
| [] | ["PROGRAMFILES"] | python | 1 | 0 | |
localtileserver/configure.py | # flake8: noqa: W503
import os
def get_default_client_params(host: str = None, port: int = None, prefix: str = None):
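    """Returns (host, port, prefix) for the tile client, filling any unset value
    from the corresponding LOCALTILESERVER_CLIENT_* environment variable."""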
if (
host is None
and "LOCALTILESERVER_CLIENT_HOST" in os.environ
and os.environ["LOCALTILESERVER_CLIENT_HOST"]
):
host = str(os.environ["LOCALTILESERVER_CLIENT_HOST"])
if (
port is None
and "LOCALTILESERVER_CLIENT_PORT" in os.environ
and os.environ["LOCALTILESERVER_CLIENT_PORT"]
):
port = int(os.environ["LOCALTILESERVER_CLIENT_PORT"])
if (
prefix is None
and "LOCALTILESERVER_CLIENT_PREFIX" in os.environ
and os.environ["LOCALTILESERVER_CLIENT_PREFIX"]
):
prefix = str(os.environ["LOCALTILESERVER_CLIENT_PREFIX"])
return host, port, prefix
| []
| []
| [
"LOCALTILESERVER_CLIENT_PREFIX",
"LOCALTILESERVER_CLIENT_HOST",
"LOCALTILESERVER_CLIENT_PORT"
]
| [] | ["LOCALTILESERVER_CLIENT_PREFIX", "LOCALTILESERVER_CLIENT_HOST", "LOCALTILESERVER_CLIENT_PORT"] | python | 3 | 0 | |
libminiooni/libminiooni.go | // Package libminiooni implements the cmd/miniooni CLI. Miniooni is our
// experimental client used for research and QA testing.
//
// This CLI has CLI options that do not conflict with Measurement Kit
// v0.10.x CLI options. There are some options conflict with the legacy
// OONI Probe CLI options. Perfect backwards compatibility is not a
// design goal for miniooni. Rather, we aim to have as little conflict
// as possible such that we can run side by side QA checks.
//
// We extracted this package from cmd/miniooni to allow us to further
// integrate the miniooni CLI into other binaries (see for example the
// code at github.com/bassosimone/aladdin). In retrospect, this isn't
// particularly simple to keep up to date because it is complex to sync
// the dependencies used by Psiphon, which need precise pinning.
package libminiooni
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/apex/log"
engine "github.com/ooni/probe-engine"
"github.com/ooni/probe-engine/internal/fsx"
"github.com/ooni/probe-engine/internal/humanizex"
"github.com/ooni/probe-engine/model"
"github.com/ooni/probe-engine/netx/selfcensor"
"github.com/pborman/getopt/v2"
)
// Options contains the options you can set from the CLI.
type Options struct {
Annotations []string
ExtraOptions []string
HomeDir string
Inputs []string
InputFilePath string
NoBouncer bool
NoGeoIP bool
NoJSON bool
NoCollector bool
ProbeServicesURL string
Proxy string
ReportFile string
SelfCensorSpec string
TorArgs []string
TorBinary string
Tunnel string
Verbose bool
}
const (
softwareName = "miniooni"
softwareVersion = engine.Version
)
var (
globalOptions Options
startTime = time.Now()
)
func init() {
getopt.FlagLong(
		&globalOptions.Annotations, "annotation", 'A', "Add annotation", "KEY=VALUE",
)
getopt.FlagLong(
&globalOptions.ExtraOptions, "option", 'O',
"Pass an option to the experiment", "KEY=VALUE",
)
getopt.FlagLong(
&globalOptions.InputFilePath, "file", 'f',
"Path to input file to supply test-dependent input. File must contain one input per line.", "PATH",
)
getopt.FlagLong(
&globalOptions.HomeDir, "home", 0,
"Force specific home directory", "PATH",
)
getopt.FlagLong(
&globalOptions.Inputs, "input", 'i',
"Add test-dependent input to the test input", "INPUT",
)
getopt.FlagLong(
&globalOptions.NoBouncer, "no-bouncer", 0, "Don't use the OONI bouncer",
)
getopt.FlagLong(
&globalOptions.NoGeoIP, "no-geoip", 'g',
"Disable including ASN information into the report",
)
getopt.FlagLong(
&globalOptions.NoJSON, "no-json", 'N', "Disable writing to disk",
)
getopt.FlagLong(
&globalOptions.NoCollector, "no-collector", 'n', "Don't use a collector",
)
getopt.FlagLong(
&globalOptions.ProbeServicesURL, "probe-services", 0,
"Set the URL of the probe-services instance you want to use", "URL",
)
getopt.FlagLong(
&globalOptions.Proxy, "proxy", 0, "Set the proxy URL", "URL",
)
getopt.FlagLong(
&globalOptions.ReportFile, "reportfile", 'o',
"Set the report file path", "PATH",
)
getopt.FlagLong(
&globalOptions.SelfCensorSpec, "self-censor-spec", 0,
"Enable and configure self censorship", "JSON",
)
getopt.FlagLong(
&globalOptions.TorArgs, "tor-args", 0,
"Extra args for tor binary (may be specified multiple times)",
)
getopt.FlagLong(
&globalOptions.TorBinary, "tor-binary", 0,
"Specify path to a specific tor binary",
)
getopt.FlagLong(
&globalOptions.Tunnel, "tunnel", 0,
"Name of the tunnel to use (one of `tor`, `psiphon`)",
)
getopt.FlagLong(
&globalOptions.Verbose, "verbose", 'v', "Increase verbosity",
)
}
func fatalWithString(msg string) {
panic(msg)
}
func fatalIfFalse(cond bool, msg string) {
if !cond {
log.Warn(msg)
panic(msg)
}
}
// Main is the main function of miniooni. This function parses the command line
// options and uses a global state. Use MainWithConfiguration if you want to avoid
// using any global state and relying on command line options.
//
// This function will panic in case of a fatal error. It is up to the caller
// integrating this function to either handle the panic or ignore it.
func Main() {
getopt.Parse()
fatalIfFalse(len(getopt.Args()) == 1, "Missing experiment name")
MainWithConfiguration(getopt.Arg(0), globalOptions)
}
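// split parses a string of the form KEY=VALUE and returns the key and the value.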
func split(s string) (string, string, error) {
v := strings.SplitN(s, "=", 2)
if len(v) != 2 {
return "", "", errors.New("invalid key-value pair")
}
return v[0], v[1], nil
}
func fatalOnError(err error, msg string) {
if err != nil {
log.WithError(err).Warn(msg)
panic(msg)
}
}
func warnOnError(err error, msg string) {
if err != nil {
log.WithError(err).Warn(msg)
}
}
func mustMakeMap(input []string) (output map[string]string) {
output = make(map[string]string)
for _, opt := range input {
key, value, err := split(opt)
fatalOnError(err, "cannot split key-value pair")
output[key] = value
}
return
}
func mustParseURL(URL string) *url.URL {
rv, err := url.Parse(URL)
fatalOnError(err, "cannot parse URL")
return rv
}
type logHandler struct {
io.Writer
}
func (h *logHandler) HandleLog(e *log.Entry) (err error) {
s := fmt.Sprintf("[%14.6f] <%s> %s", time.Since(startTime).Seconds(), e.Level, e.Message)
if len(e.Fields) > 0 {
s += fmt.Sprintf(": %+v", e.Fields)
}
s += "\n"
_, err = h.Writer.Write([]byte(s))
return
}
// See https://gist.github.com/miguelmota/f30a04a6d64bd52d7ab59ea8d95e54da
func gethomedir(optionsHome string) string {
if optionsHome != "" {
return optionsHome
}
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
return home
}
if runtime.GOOS == "linux" {
home := os.Getenv("XDG_CONFIG_HOME")
if home != "" {
return home
}
// fallthrough
}
return os.Getenv("HOME")
}
func loadFileInputs(opts *Options) {
if len(opts.InputFilePath) != 0 {
if len(opts.Inputs) != 0 {
fatalWithString("inputs can either be supplied through file or command line, but not both")
}
file, err := fsx.Open(opts.InputFilePath)
fatalOnError(err, "cannot read input file")
defer file.Close()
		// Implementation note: when you save a file with vim, there is a newline at
		// the end of the file that should not be treated as an input line. While
		// we're at it, also ignore any other empty lines inside the file.
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
if line != "" {
opts.Inputs = append(opts.Inputs, line)
}
}
}
}
// MainWithConfiguration is the miniooni main with a specific configuration
// represented by the experiment name and the current options.
//
// This function will panic in case of a fatal error. It is up to the caller
// integrating this function to either handle the panic or ignore it.
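//
// A minimal, hypothetical invocation (the experiment name and options shown
// here are assumptions for illustration, not a documented contract):
//
//	libminiooni.MainWithConfiguration("example", libminiooni.Options{
//		NoCollector: true,
//	})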
func MainWithConfiguration(experimentName string, currentOptions Options) {
extraOptions := mustMakeMap(currentOptions.ExtraOptions)
annotations := mustMakeMap(currentOptions.Annotations)
err := selfcensor.MaybeEnable(currentOptions.SelfCensorSpec)
fatalOnError(err, "cannot parse --self-censor-spec argument")
logger := &log.Logger{Level: log.InfoLevel, Handler: &logHandler{Writer: os.Stderr}}
if currentOptions.Verbose {
logger.Level = log.DebugLevel
}
if currentOptions.ReportFile == "" {
currentOptions.ReportFile = "report.jsonl"
}
log.Log = logger
homeDir := gethomedir(currentOptions.HomeDir)
fatalIfFalse(homeDir != "", "home directory is empty")
miniooniDir := path.Join(homeDir, ".miniooni")
assetsDir := path.Join(miniooniDir, "assets")
err = os.MkdirAll(assetsDir, 0700)
fatalOnError(err, "cannot create assets directory")
log.Infof("miniooni state directory: %s", miniooniDir)
var proxyURL *url.URL
if currentOptions.Proxy != "" {
proxyURL = mustParseURL(currentOptions.Proxy)
}
kvstore2dir := filepath.Join(miniooniDir, "kvstore2")
kvstore, err := engine.NewFileSystemKVStore(kvstore2dir)
fatalOnError(err, "cannot create kvstore2 directory")
config := engine.SessionConfig{
AssetsDir: assetsDir,
KVStore: kvstore,
Logger: logger,
PrivacySettings: model.PrivacySettings{
// See https://github.com/ooni/explorer/issues/495#issuecomment-704101604
			IncludeASN:     !currentOptions.NoGeoIP,
IncludeCountry: true,
},
ProxyURL: proxyURL,
SoftwareName: softwareName,
SoftwareVersion: softwareVersion,
TorArgs: currentOptions.TorArgs,
TorBinary: currentOptions.TorBinary,
}
if currentOptions.ProbeServicesURL != "" {
config.AvailableProbeServices = []model.Service{{
Address: currentOptions.ProbeServicesURL,
Type: "https",
}}
}
sess, err := engine.NewSession(config)
fatalOnError(err, "cannot create measurement session")
defer func() {
sess.Close()
log.Infof("whole session: recv %s, sent %s",
humanizex.SI(sess.KibiBytesReceived()*1024, "byte"),
humanizex.SI(sess.KibiBytesSent()*1024, "byte"),
)
}()
log.Infof("miniooni temporary directory: %s", sess.TempDir())
err = sess.MaybeStartTunnel(context.Background(), currentOptions.Tunnel)
fatalOnError(err, "cannot start session tunnel")
if !currentOptions.NoBouncer {
log.Info("Looking up OONI backends; please be patient...")
err := sess.MaybeLookupBackends()
fatalOnError(err, "cannot lookup OONI backends")
}
log.Info("Looking up your location; please be patient...")
err = sess.MaybeLookupLocation()
fatalOnError(err, "cannot lookup your location")
log.Infof("- IP: %s", sess.ProbeIP())
log.Infof("- country: %s", sess.ProbeCC())
log.Infof("- network: %s (%s)", sess.ProbeNetworkName(), sess.ProbeASNString())
log.Infof("- resolver's IP: %s", sess.ResolverIP())
log.Infof("- resolver's network: %s (%s)", sess.ResolverNetworkName(),
sess.ResolverASNString())
builder, err := sess.NewExperimentBuilder(experimentName)
fatalOnError(err, "cannot create experiment builder")
// load inputs from file, if present
loadFileInputs(¤tOptions)
if builder.InputPolicy() == engine.InputRequired {
if len(currentOptions.Inputs) <= 0 {
log.Info("Fetching test lists")
client, err := sess.NewOrchestraClient(context.Background())
fatalOnError(err, "cannot create new orchestra client")
list, err := client.FetchURLList(context.Background(), model.URLListConfig{
CountryCode: sess.ProbeCC(),
Limit: 17,
})
fatalOnError(err, "cannot fetch test lists")
for _, entry := range list {
currentOptions.Inputs = append(currentOptions.Inputs, entry.URL)
}
}
} else if builder.InputPolicy() == engine.InputOptional {
if len(currentOptions.Inputs) == 0 {
currentOptions.Inputs = append(currentOptions.Inputs, "")
}
} else if len(currentOptions.Inputs) != 0 {
fatalWithString("this experiment does not expect any input")
} else {
// Tests that do not expect input internally require an empty input to run
currentOptions.Inputs = append(currentOptions.Inputs, "")
}
intregexp := regexp.MustCompile("^[0-9]+$")
for key, value := range extraOptions {
if value == "true" || value == "false" {
err := builder.SetOptionBool(key, value == "true")
fatalOnError(err, "cannot set boolean option")
} else if intregexp.MatchString(value) {
number, err := strconv.ParseInt(value, 10, 64)
fatalOnError(err, "cannot parse integer option")
err = builder.SetOptionInt(key, number)
fatalOnError(err, "cannot set integer option")
} else {
err := builder.SetOptionString(key, value)
fatalOnError(err, "cannot set string option")
}
}
experiment := builder.NewExperiment()
defer func() {
log.Infof("experiment: recv %s, sent %s",
humanizex.SI(experiment.KibiBytesReceived()*1024, "byte"),
humanizex.SI(experiment.KibiBytesSent()*1024, "byte"),
)
}()
if !currentOptions.NoCollector {
log.Info("Opening report; please be patient...")
err := experiment.OpenReport()
fatalOnError(err, "cannot open report")
defer experiment.CloseReport()
log.Infof("Report ID: %s", experiment.ReportID())
}
inputCount := len(currentOptions.Inputs)
inputCounter := 0
for _, input := range currentOptions.Inputs {
inputCounter++
if input != "" {
log.Infof("[%d/%d] running with input: %s", inputCounter, inputCount, input)
}
measurement, err := experiment.Measure(input)
warnOnError(err, "measurement failed")
measurement.AddAnnotations(annotations)
measurement.Options = currentOptions.ExtraOptions
if !currentOptions.NoCollector {
log.Infof("submitting measurement to OONI collector; please be patient...")
err := experiment.SubmitAndUpdateMeasurement(measurement)
warnOnError(err, "submitting measurement failed")
}
if !currentOptions.NoJSON {
// Note: must be after submission because submission modifies
// the measurement to include the report ID.
log.Infof("saving measurement to disk")
err := experiment.SaveMeasurement(measurement, currentOptions.ReportFile)
warnOnError(err, "saving measurement failed")
}
}
}
| [
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"XDG_CONFIG_HOME\"",
"\"HOME\""
]
| []
| [
"HOMEPATH",
"HOMEDRIVE",
"USERPROFILE",
"HOME",
"XDG_CONFIG_HOME"
]
| [] | ["HOMEPATH", "HOMEDRIVE", "USERPROFILE", "HOME", "XDG_CONFIG_HOME"] | go | 5 | 0 | |
btcutil/appdata_test.go | // Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcutil_test
import (
"os"
"os/user"
"path/filepath"
"runtime"
"testing"
"unicode"
"github.com/TrueNodes/btcd/btcutil"
)
// TestAppDataDir tests the API for AppDataDir to ensure it gives expected
// results for various operating systems.
func TestAppDataDir(t *testing.T) {
// App name plus upper and lowercase variants.
appName := "myapp"
appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]
// When we're on Windows, set the expected local and roaming directories
// per the environment vars. When we aren't on Windows, the function
// should return the current directory when forced to provide the
// Windows path since the environment variables won't exist.
winLocal := "."
winRoaming := "."
if runtime.GOOS == "windows" {
localAppData := os.Getenv("LOCALAPPDATA")
roamingAppData := os.Getenv("APPDATA")
if localAppData == "" {
localAppData = roamingAppData
}
winLocal = filepath.Join(localAppData, appNameUpper)
winRoaming = filepath.Join(roamingAppData, appNameUpper)
}
// Get the home directory to use for testing expected results.
var homeDir string
usr, err := user.Current()
if err != nil {
t.Errorf("user.Current: %v", err)
return
}
homeDir = usr.HomeDir
// Mac app data directory.
macAppData := filepath.Join(homeDir, "Library", "Application Support")
tests := []struct {
goos string
appName string
roaming bool
want string
}{
// Various combinations of application name casing, leading
// period, operating system, and roaming flags.
{"windows", appNameLower, false, winLocal},
{"windows", appNameUpper, false, winLocal},
{"windows", "." + appNameLower, false, winLocal},
{"windows", "." + appNameUpper, false, winLocal},
{"windows", appNameLower, true, winRoaming},
{"windows", appNameUpper, true, winRoaming},
{"windows", "." + appNameLower, true, winRoaming},
{"windows", "." + appNameUpper, true, winRoaming},
{"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
// No application name provided, so expect current directory.
{"windows", "", false, "."},
{"windows", "", true, "."},
{"linux", "", false, "."},
{"darwin", "", false, "."},
{"openbsd", "", false, "."},
{"freebsd", "", false, "."},
{"netbsd", "", false, "."},
{"plan9", "", false, "."},
{"unrecognized", "", false, "."},
// Single dot provided for application name, so expect current
// directory.
{"windows", ".", false, "."},
{"windows", ".", true, "."},
{"linux", ".", false, "."},
{"darwin", ".", false, "."},
{"openbsd", ".", false, "."},
{"freebsd", ".", false, "."},
{"netbsd", ".", false, "."},
{"plan9", ".", false, "."},
{"unrecognized", ".", false, "."},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
ret := btcutil.TstAppDataDir(test.goos, test.appName, test.roaming)
if ret != test.want {
t.Errorf("appDataDir #%d (%s) does not match - "+
"expected got %s, want %s", i, test.goos, ret,
test.want)
continue
}
}
}
| [
"\"LOCALAPPDATA\"",
"\"APPDATA\""
]
| []
| [
"APPDATA",
"LOCALAPPDATA"
]
| [] | ["APPDATA", "LOCALAPPDATA"] | go | 2 | 0 | |
pkg/cnidel/cnidel.go | package cnidel
import (
"errors"
"log"
"net"
"os"
"path/filepath"
"strings"
"encoding/json"
"io/ioutil"
"github.com/containernetworking/cni/pkg/invoke"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/nokia/danm/pkg/ipam"
danmtypes "github.com/nokia/danm/pkg/crd/apis/danm/v1"
danmclientset "github.com/nokia/danm/pkg/crd/client/clientset/versioned"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
ipamType = "fakeipam"
defaultDataDir = "/var/lib/cni/networks"
flannelBridge = getEnv("FLANNEL_BRIDGE", "cbr0")
)
type cniBackendConfig struct {
danmtypes.CniBackend
readConfig cniConfigReader
ipamNeeded bool
}
type cniConfigReader func(netInfo *danmtypes.DanmNet, ipam danmtypes.IpamConfig) ([]byte, error)
// sriovNet represent the configuration of sriov plugin
type sriovNet struct {
// the name of the network
Name string `json:"name"`
// currently constant "sriov"
Type string `json:"type"`
// name of the PF
PfName string `json:"if0"`
// interface name in the Container
IfName string `json:"if0name,omitEmpty"`
// if true then add VF as L2 mode only, IPAM will not be executed
L2Mode bool `json:"l2enable,omitEmpty"`
// VLAN ID to assign for the VF
Vlan int `json:"vlan,omitEmpty"`
// IPAM configuration to be used for this network.
Ipam danmtypes.IpamConfig `json:"ipam,omitEmpty"`
// DPDK configuration
Dpdk DpdkOption `json:"dpdk,omitEmpty"`
}
// DpdkOption represents the DPDK options for the sriov plugin
type DpdkOption struct {
// The name of kernel NIC driver
NicDriver string `json:"kernel_driver"`
// The name of DPDK capable driver
DpdkDriver string `json:"dpdk_driver"`
// Path to the dpdk-devbind.py script
DpdkTool string `json:"dpdk_tool"`
}
var (
dpdkNicDriver = os.Getenv("DPDK_NIC_DRIVER")
dpdkDriver = os.Getenv("DPDK_DRIVER")
dpdkTool = os.Getenv("DPDK_TOOL")
)
var (
supportedNativeCnis = []*cniBackendConfig {
&cniBackendConfig {
danmtypes.CniBackend {
BackendName: "sriov",
CniVersion: "0.3.1",
},
cniConfigReader(getSriovCniConfig),
true,
},
}
)
// IsDelegationRequired decides if the interface creation operations should be delegated to a 3rd party CNI, or can be handled by DANM
// Decision is made based on the NetworkType parameter of the DanmNet object
func IsDelegationRequired(danmClient danmclientset.Interface, nid, namespace string) (bool,*danmtypes.DanmNet,error) {
netInfo, err := danmClient.DanmV1().DanmNets(namespace).Get(nid, meta_v1.GetOptions{})
if err != nil {
return false, nil, err
}
neType := netInfo.Spec.NetworkType
if neType == "ipvlan" || neType == "" {
return false, netInfo, nil
}
return true, netInfo, nil
}
// DelegateInterfaceSetup delegates the K8s Pod network interface setup task to the input 3rd party CNI plugin
// Returns the CNI compatible result object, or an error if interface creation was unsuccessful, or if the 3rd party CNI config could not be loaded
func DelegateInterfaceSetup(danmClient danmclientset.Interface, netInfo *danmtypes.DanmNet, iface danmtypes.Interface) (types.Result,error) {
var (
ip4 string
ip6 string
err error
ipamOptions danmtypes.IpamConfig
)
if isIpamNeeded(netInfo.Spec.NetworkType) {
ip4, ip6, _, err = ipam.Reserve(danmClient, *netInfo, iface.Ip, iface.Ip6)
if err != nil {
return nil, errors.New("IP address reservation failed for network:" + netInfo.Spec.NetworkID + " with error:" + err.Error())
}
ipamOptions = getCniIpamConfig(netInfo.Spec.Options, ip4, ip6)
}
rawConfig, err := getCniPluginConfig(netInfo, ipamOptions)
if err != nil {
if isIpamNeeded(netInfo.Spec.NetworkType) {
ipam.GarbageCollectIps(danmClient, netInfo, ip4, ip6)
}
return nil, err
}
cniType := netInfo.Spec.NetworkType
cniResult, err := invoke.DelegateAdd(cniType, rawConfig)
if err != nil {
if isIpamNeeded(netInfo.Spec.NetworkType) {
ipam.GarbageCollectIps(danmClient, netInfo, ip4, ip6)
}
return nil, errors.New("Error delegating ADD to CNI plugin:" + cniType + " because:" + err.Error())
}
return cniResult, nil
}
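// isIpamNeeded returns whether the given CNI backend type needs DANM to perform IP address
// management, based on the list of supported native CNIs.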
func isIpamNeeded(cniType string) bool {
for _, cni := range supportedNativeCnis {
if cni.BackendName == cniType {
return cni.ipamNeeded
}
}
return false
}
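// getCniIpamConfig assembles the fake IPAM configuration handed to the delegated CNI plugin,
// using the IPv4 allocation when the network defines a v4 CIDR and the IPv6 allocation otherwise.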
func getCniIpamConfig(options danmtypes.DanmNetOption, ip4 string, ip6 string) danmtypes.IpamConfig {
var (
subnet string
routes []danmtypes.IpamRoute
defaultGw string
ip string
)
if options.Cidr != "" {
ip = ip4
subnet = options.Cidr
routes, defaultGw = parseRoutes(options.Routes, subnet)
} else {
ip = ip6
subnet = options.Net6
routes, defaultGw = parseRoutes(options.Routes6, subnet)
}
return danmtypes.IpamConfig {
Type: ipamType,
Subnet: subnet,
Routes: routes,
DefaultGw: defaultGw,
Ip: strings.Split(ip, "/")[0],
}
}
func getCniPluginConfig(netInfo *danmtypes.DanmNet, ipamOptions danmtypes.IpamConfig) ([]byte, error) {
cniType := netInfo.Spec.NetworkType
for _, cni := range supportedNativeCnis {
if cni.BackendName == cniType {
return cni.readConfig(netInfo, ipamOptions)
}
}
return readCniConfigFile(netInfo)
}
func getSriovCniConfig(netInfo *danmtypes.DanmNet, ipamOptions danmtypes.IpamConfig) ([]byte, error) {
vlanid := netInfo.Spec.Options.Vlan
sriovConfig := sriovNet {
Name: netInfo.Spec.NetworkID,
Type: "sriov",
PfName: netInfo.Spec.Options.Device,
IfName: netInfo.Spec.Options.Prefix,
L2Mode: true,
Vlan: vlanid,
Dpdk: DpdkOption{},
Ipam: ipamOptions,
}
if ipamOptions.Ip != "" {
sriovConfig.L2Mode = false
}
if netInfo.Spec.Options.Dpdk {
sriovConfig.Dpdk = DpdkOption {
NicDriver: dpdkNicDriver,
DpdkDriver: dpdkDriver,
DpdkTool: dpdkTool,
}
}
rawConfig, err := json.Marshal(sriovConfig)
if err != nil {
return nil, errors.New("Error getting sriov plugin config: " + err.Error())
}
return rawConfig, nil
}
func readCniConfigFile(netInfo *danmtypes.DanmNet) ([]byte, error) {
cniType := netInfo.Spec.NetworkType
//TODO: the path from where the config is read should not be hard-coded
rawConfig, err := ioutil.ReadFile("/etc/cni/net.d/" + cniType + ".conf")
if err != nil {
return nil, errors.New("Could not load CNI config file for plugin:" + cniType)
}
return rawConfig, nil
}
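// parseRoutes converts the raw route map into IPAM route entries and returns the gateway of
// the route whose destination equals the network CIDR as the default gateway.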
func parseRoutes(rawRoutes map[string]string, netCidr string) ([]danmtypes.IpamRoute, string) {
defaultGw := ""
routes := []danmtypes.IpamRoute{}
for dst, gw := range rawRoutes {
routes = append(routes, danmtypes.IpamRoute{
Dst: dst,
Gw: gw,
})
if _, sn, _ := net.ParseCIDR(dst); sn.String() == netCidr {
defaultGw = gw
}
}
return routes, defaultGw
}
// DelegateInterfaceDelete delegates the K8s Pod network interface delete task to the input 3rd party CNI plugin
// Returns an error if interface deletion was unsuccessful, or if the 3rd party CNI config could not be loaded
func DelegateInterfaceDelete(danmClient danmclientset.Interface, netInfo *danmtypes.DanmNet, ip string) error {
rawConfig, err := getCniPluginConfig(netInfo, danmtypes.IpamConfig{})
if err != nil {
return err
}
cniType := netInfo.Spec.NetworkType
err = invoke.DelegateDel(cniType, rawConfig)
if err != nil {
//Best-effort clean-up because we know how to handle exceptions
freeDelegatedIps(danmClient, netInfo, ip)
return errors.New("Error delegating DEL to CNI plugin:" + cniType + " because:" + err.Error())
}
return freeDelegatedIps(danmClient, netInfo, ip)
}
func freeDelegatedIps(danmClient danmclientset.Interface, netInfo *danmtypes.DanmNet, ip string) error {
if netInfo.Spec.NetworkType == "flannel" && ip != ""{
flannelIpExhaustionWorkaround(ip)
}
if isIpamNeeded(netInfo.Spec.NetworkType) && ip != "" {
err := ipam.Free(danmClient, *netInfo, ip)
if err != nil {
return errors.New("cannot give back ip address for NID:" + netInfo.Spec.NetworkID + " addr:" + ip)
}
}
return nil
}
// The host-local IPAM management plugin uses a disk-local Store by default.
// Right now it is buggy in the sense that it does not try to free IPs if the container being deleted no longer exists.
// But it should!
// Exception handling 101, dear readers: ALWAYS try to reset your environment to the best of your ability during an exception.
// TODO: remove this once the problem is solved upstream
func flannelIpExhaustionWorkaround(ip string) {
var dataDir = filepath.Join(defaultDataDir, flannelBridge)
os.Remove(filepath.Join(dataDir, ip))
}
// ConvertCniResult converts a CNI result from an older API version to the latest format
// Returns nil if conversion is unsuccessful
func ConvertCniResult(rawCniResult types.Result) *current.Result {
convertedResult, err := current.NewResultFromResult(rawCniResult)
if err != nil {
log.Println("Delegated CNI result could not be converted:" + err.Error())
return nil
}
return convertedResult
}
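// getEnv returns the value of the environment variable named by key, or fallback if the variable is not set.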
func getEnv(key, fallback string) string {
if value, doesExist := os.LookupEnv(key); doesExist {
return value
}
return fallback
}
| [
"\"DPDK_NIC_DRIVER\"",
"\"DPDK_DRIVER\"",
"\"DPDK_TOOL\""
]
| []
| [
"DPDK_DRIVER",
"DPDK_NIC_DRIVER",
"DPDK_TOOL"
]
| [] | ["DPDK_DRIVER", "DPDK_NIC_DRIVER", "DPDK_TOOL"] | go | 3 | 0 | |
config.py | # -*- coding: utf-8 -*-
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
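# Config holds the application settings: secrets, service information, and the opeg_* integer settings come from environment variables, while the chart colors are fixed constants.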
class Config:
secret_key = os.environ.get("SECRET_KEY")
team_info = os.environ.get("TEAM_INFO_JSON")
service_provider = os.environ.get("SERVICE_PROVIDER")
tinydb_document = os.environ.get("TINYDB_DOCUMENT")
background_color = [
'rgba(255, 99, 132, 0.2)',
'rgba(54, 162, 235, 0.2)',
'rgba(255, 206, 86, 0.2)',
'rgba(75, 192, 192, 0.2)',
'rgba(153, 102, 255, 0.2)',
'rgba(255, 159, 64, 0.2)'
]
border_color = [
'rgba(255,99,132,1)',
'rgba(54, 162, 235, 1)',
'rgba(255, 206, 86, 1)',
'rgba(75, 192, 192, 1)',
'rgba(153, 102, 255, 1)',
'rgba(255, 159, 64, 1)'
]
opeg_commit = int(os.environ.get("OPEG_COMMIT"))
opeg_issue = int(os.environ.get("OPEG_ISSUE"))
opeg_pr = int(os.environ.get("OPEG_PR"))
opeg_license = int(os.environ.get("OPEG_LICENSE"))
opeg_contributor = int(os.environ.get("OPEG_CONTRIBUTOR"))
opeg_branch = int(os.environ.get("OPEG_BRANCH"))
| []
| []
| [
"OPEG_CONTRIBUTOR",
"TEAM_INFO_JSON",
"OPEG_ISSUE",
"SERVICE_PROVIDER",
"OPEG_COMMIT",
"SECRET_KEY",
"OPEG_LICENSE",
"OPEG_BRANCH",
"OPEG_PR",
"TINYDB_DOCUMENT"
]
| [] | ["OPEG_CONTRIBUTOR", "TEAM_INFO_JSON", "OPEG_ISSUE", "SERVICE_PROVIDER", "OPEG_COMMIT", "SECRET_KEY", "OPEG_LICENSE", "OPEG_BRANCH", "OPEG_PR", "TINYDB_DOCUMENT"] | python | 10 | 0 | |
train_color_human.py | import tensorflow as tf
import numpy as np
import os
import sys
sys.path.append('./utils')
sys.path.append('./models')
import dataset_human as dataset
import model_color as model
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', './train_color_human',
"""Directory where to write summaries and checkpoint.""")
tf.app.flags.DEFINE_string('base_dir', './data/human_im2avatar',
"""The path containing all the samples.""")
tf.app.flags.DEFINE_string('data_list_path', './data_list',
"""The path containing data lists.""")
tf.app.flags.DEFINE_integer('train_epochs', 501, """""")
tf.app.flags.DEFINE_integer('batch_size', 55, """""")
tf.app.flags.DEFINE_integer('gpu', 1, """""")
tf.app.flags.DEFINE_float('learning_rate', 0.0003, """""")
tf.app.flags.DEFINE_float('wd', 0.00001, """""")
tf.app.flags.DEFINE_integer('epochs_to_save',20, """""")
tf.app.flags.DEFINE_integer('decay_step',20000, """for lr""")
tf.app.flags.DEFINE_float('decay_rate', 0.7, """for lr""")
IM_DIM = 128
VOL_DIM = 64
BATCH_SIZE = FLAGS.batch_size
TRAIN_EPOCHS = FLAGS.train_epochs
GPU_INDEX = FLAGS.gpu
BASE_LEARNING_RATE = FLAGS.learning_rate
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
os.environ["CUDA_VISIBLE_DEVICES"] = str(GPU_INDEX)
TRAIN_DIR = FLAGS.train_dir
if not os.path.exists(TRAIN_DIR):
os.makedirs(TRAIN_DIR)
LOG_FOUT = open(os.path.join(TRAIN_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(tf.flags._global_parser.parse_args())+'\n')
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
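# Learning-rate schedule: exponential decay driven by the number of processed samples, with a lower bound of 1e-5.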
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
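# Batch-norm decay schedule: the momentum decays exponentially over training and the resulting decay value (1 - momentum) is capped at BN_DECAY_CLIP.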
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
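# Build the color-prediction graph, restore from an existing checkpoint if one is present, then run the training loop, saving a checkpoint every FLAGS.epochs_to_save epochs.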
def train(dataset_):
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
is_train_pl = tf.placeholder(tf.bool)
img_pl, vol_clr_pl, vol_flow_pl = model.placeholder_inputs(BATCH_SIZE, IM_DIM, VOL_DIM)
global_step = tf.Variable(0)
bn_decay = get_bn_decay(global_step)
pred_reg_clr, pred_conf, pred_flow, pred_blended_clr = model.get_model(img_pl, is_train_pl, weight_decay=FLAGS.wd, bn_decay=bn_decay)
loss = model.get_loss(pred_reg_clr, pred_blended_clr, vol_clr_pl, pred_flow, vol_flow_pl)
learning_rate = get_learning_rate(global_step)
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
with tf.Session(config=config) as sess:
model_path = os.path.join(TRAIN_DIR, "trained_models")
if tf.gfile.Exists(os.path.join(model_path, "checkpoint")):
ckpt = tf.train.get_checkpoint_state(model_path)
restorer = tf.train.Saver()
restorer.restore(sess, ckpt.model_checkpoint_path)
print ("Load parameters from checkpoint.")
else:
sess.run(tf.global_variables_initializer())
train_sample_size = dataset_.getTrainSampleSize()
train_batches = train_sample_size // BATCH_SIZE
for epoch in range(TRAIN_EPOCHS):
dataset_.shuffleTrainNames()
for batch_idx in range(train_batches):
imgs, vols_flow, vols_clr = dataset_.next_flow_batch(batch_idx * BATCH_SIZE, BATCH_SIZE, vol_dim=VOL_DIM)
feed_dict = {img_pl: imgs, vol_clr_pl: vols_clr, vol_flow_pl: vols_flow, is_train_pl: True}
step = sess.run(global_step)
_, loss_val = sess.run([train_op, loss], feed_dict=feed_dict)
log_string("<TRAIN> Epoch {} - Batch {}: loss: {}.".format(epoch, batch_idx, loss_val))
if epoch % FLAGS.epochs_to_save == 0:
checkpoint_path = os.path.join(model_path, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=epoch)
def main():
train_dataset = dataset.Dataset(base_path=FLAGS.base_dir,
data_list_path=FLAGS.data_list_path)
train(train_dataset)
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
api/server/server.go | package server
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"expvar"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/pprof"
"os"
"strconv"
"strings"
"syscall"
"code.google.com/p/go.net/websocket"
"github.com/docker/libcontainer/user"
"github.com/gorilla/mux"
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/api"
"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/listenbuffer"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/systemd"
"github.com/docker/docker/pkg/version"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
)
var (
activationLock chan struct{}
)
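// HttpApiFunc is the signature shared by all remote API handlers: each receives the engine, the negotiated API version, the response writer, the request, and the mux route variables.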
type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error
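// hijackServer takes over the underlying TCP connection of the ResponseWriter so the caller can stream raw data in both directions (used by attach and exec).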
func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
conn, _, err := w.(http.Hijacker).Hijack()
if err != nil {
return nil, nil, err
}
// Flush the options to make sure the client sets the raw mode
conn.Write([]byte{})
return conn, conn, nil
}
// Check to make sure request's Content-Type is application/json
func checkForJson(r *http.Request) error {
ct := r.Header.Get("Content-Type")
// No Content-Type header is ok as long as there's no Body
if ct == "" {
if r.Body == nil || r.ContentLength == 0 {
return nil
}
}
// Otherwise it better be json
if api.MatchesContentType(ct, "application/json") {
return nil
}
return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct)
}
// If we don't do this, a POST request without a Content-Type (even with an empty body) will fail
func parseForm(r *http.Request) error {
if r == nil {
return nil
}
if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
return err
}
return nil
}
func parseMultipartForm(r *http.Request) error {
if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
return err
}
return nil
}
func httpError(w http.ResponseWriter, err error) {
statusCode := http.StatusInternalServerError
// FIXME: this is brittle and should not be necessary.
// If we need to differentiate between different possible error types, we should
// create appropriate error types with clearly defined meaning.
errStr := strings.ToLower(err.Error())
if strings.Contains(errStr, "no such") {
statusCode = http.StatusNotFound
} else if strings.Contains(errStr, "bad parameter") {
statusCode = http.StatusBadRequest
} else if strings.Contains(errStr, "conflict") {
statusCode = http.StatusConflict
} else if strings.Contains(errStr, "impossible") {
statusCode = http.StatusNotAcceptable
} else if strings.Contains(errStr, "wrong login/password") {
statusCode = http.StatusUnauthorized
} else if strings.Contains(errStr, "hasn't been activated") {
statusCode = http.StatusForbidden
}
if err != nil {
log.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
http.Error(w, err.Error(), statusCode)
}
}
func writeJSON(w http.ResponseWriter, code int, v engine.Env) error {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
return v.Encode(w)
}
func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) {
w.Header().Set("Content-Type", "application/json")
if flush {
job.Stdout.Add(utils.NewWriteFlusher(w))
} else {
job.Stdout.Add(w)
}
}
func getBoolParam(value string) (bool, error) {
if value == "" {
return false, nil
}
ret, err := strconv.ParseBool(value)
if err != nil {
return false, fmt.Errorf("Bad parameter")
}
return ret, nil
}
func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var (
authConfig, err = ioutil.ReadAll(r.Body)
job = eng.Job("auth")
stdoutBuffer = bytes.NewBuffer(nil)
)
if err != nil {
return err
}
job.Setenv("authConfig", string(authConfig))
job.Stdout.Add(stdoutBuffer)
if err = job.Run(); err != nil {
return err
}
if status := engine.Tail(stdoutBuffer, 1); status != "" {
var env engine.Env
env.Set("Status", status)
return writeJSON(w, http.StatusOK, env)
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
eng.ServeHTTP(w, r)
return nil
}
func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := parseForm(r); err != nil {
return err
}
job := eng.Job("kill", vars["name"])
if sig := r.Form.Get("signal"); sig != "" {
job.Args = append(job.Args, sig)
}
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := parseForm(r); err != nil {
return err
}
job := eng.Job("pause", vars["name"])
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := parseForm(r); err != nil {
return err
}
job := eng.Job("unpause", vars["name"])
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
job := eng.Job("export", vars["name"])
job.Stdout.Add(w)
if err := job.Run(); err != nil {
return err
}
return nil
}
func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
var (
err error
outs *engine.Table
job = eng.Job("images")
)
job.Setenv("filters", r.Form.Get("filters"))
// FIXME this parameter could just be a match filter
job.Setenv("filter", r.Form.Get("filter"))
job.Setenv("all", r.Form.Get("all"))
if version.GreaterThanOrEqualTo("1.7") {
streamJSON(job, w, false)
} else if outs, err = job.Stdout.AddListTable(); err != nil {
return err
}
if err := job.Run(); err != nil {
return err
}
if version.LessThan("1.7") && outs != nil { // Convert to legacy format
outsLegacy := engine.NewTable("Created", 0)
for _, out := range outs.Data {
for _, repoTag := range out.GetList("RepoTags") {
repo, tag := parsers.ParseRepositoryTag(repoTag)
outLegacy := &engine.Env{}
outLegacy.Set("Repository", repo)
outLegacy.SetJson("Tag", tag)
outLegacy.Set("Id", out.Get("Id"))
outLegacy.SetInt64("Created", out.GetInt64("Created"))
outLegacy.SetInt64("Size", out.GetInt64("Size"))
outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize"))
outsLegacy.Add(outLegacy)
}
}
w.Header().Set("Content-Type", "application/json")
if _, err := outsLegacy.WriteListTo(w); err != nil {
return err
}
}
return nil
}
func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if version.GreaterThan("1.6") {
w.WriteHeader(http.StatusNotFound)
return fmt.Errorf("This is now implemented in the client.")
}
eng.ServeHTTP(w, r)
return nil
}
func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
eng.ServeHTTP(w, r)
return nil
}
func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
var job = eng.Job("events")
streamJSON(job, w, true)
job.Setenv("since", r.Form.Get("since"))
job.Setenv("until", r.Form.Get("until"))
return job.Run()
}
func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var job = eng.Job("history", vars["name"])
streamJSON(job, w, false)
if err := job.Run(); err != nil {
return err
}
return nil
}
func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var job = eng.Job("container_changes", vars["name"])
streamJSON(job, w, false)
return job.Run()
}
func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if version.LessThan("1.4") {
return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.")
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := parseForm(r); err != nil {
return err
}
job := eng.Job("top", vars["name"], r.Form.Get("ps_args"))
streamJSON(job, w, false)
return job.Run()
}
func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
var (
err error
outs *engine.Table
job = eng.Job("containers")
)
job.Setenv("all", r.Form.Get("all"))
job.Setenv("size", r.Form.Get("size"))
job.Setenv("since", r.Form.Get("since"))
job.Setenv("before", r.Form.Get("before"))
job.Setenv("limit", r.Form.Get("limit"))
job.Setenv("filters", r.Form.Get("filters"))
if version.GreaterThanOrEqualTo("1.5") {
streamJSON(job, w, false)
} else if outs, err = job.Stdout.AddTable(); err != nil {
return err
}
if err = job.Run(); err != nil {
return err
}
if version.LessThan("1.5") { // Convert to legacy format
for _, out := range outs.Data {
ports := engine.NewTable("", 0)
ports.ReadListFrom([]byte(out.Get("Ports")))
out.Set("Ports", api.DisplayablePorts(ports))
}
w.Header().Set("Content-Type", "application/json")
if _, err = outs.WriteListTo(w); err != nil {
return err
}
}
return nil
}
func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var (
inspectJob = eng.Job("container_inspect", vars["name"])
logsJob = eng.Job("logs", vars["name"])
c, err = inspectJob.Stdout.AddEnv()
)
if err != nil {
return err
}
logsJob.Setenv("follow", r.Form.Get("follow"))
logsJob.Setenv("tail", r.Form.Get("tail"))
logsJob.Setenv("stdout", r.Form.Get("stdout"))
logsJob.Setenv("stderr", r.Form.Get("stderr"))
logsJob.Setenv("timestamps", r.Form.Get("timestamps"))
	// Validate args here, because we cannot return a non-StatusOK status code once job.Run() has been called
stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr")
if !(stdout || stderr) {
return fmt.Errorf("Bad parameters: you must choose at least one stream")
}
if err = inspectJob.Run(); err != nil {
return err
}
var outStream, errStream io.Writer
outStream = utils.NewWriteFlusher(w)
if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
} else {
errStream = outStream
}
logsJob.Stdout.Add(outStream)
logsJob.Stderr.Set(errStream)
if err := logsJob.Run(); err != nil {
fmt.Fprintf(outStream, "Error running logs job: %s\n", err)
}
return nil
}
func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag"))
job.Setenv("force", r.Form.Get("force"))
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusCreated)
return nil
}
func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
var (
config engine.Env
env engine.Env
job = eng.Job("commit", r.Form.Get("container"))
stdoutBuffer = bytes.NewBuffer(nil)
)
if err := checkForJson(r); err != nil {
return err
}
if err := config.Decode(r.Body); err != nil {
log.Errorf("%s", err)
}
if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {
job.Setenv("pause", "1")
} else {
job.Setenv("pause", r.FormValue("pause"))
}
job.Setenv("repo", r.Form.Get("repo"))
job.Setenv("tag", r.Form.Get("tag"))
job.Setenv("author", r.Form.Get("author"))
job.Setenv("comment", r.Form.Get("comment"))
job.SetenvSubEnv("config", &config)
job.Stdout.Add(stdoutBuffer)
if err := job.Run(); err != nil {
return err
}
env.Set("Id", engine.Tail(stdoutBuffer, 1))
return writeJSON(w, http.StatusCreated, env)
}
// postImagesCreate creates an image, either by pulling it from a registry or by importing it from a source
func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
var (
image = r.Form.Get("fromImage")
repo = r.Form.Get("repo")
tag = r.Form.Get("tag")
job *engine.Job
)
authEncoded := r.Header.Get("X-Registry-Auth")
authConfig := ®istry.AuthConfig{}
if authEncoded != "" {
authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
authConfig = ®istry.AuthConfig{}
}
}
if image != "" { //pull
if tag == "" {
image, tag = parsers.ParseRepositoryTag(image)
}
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
metaHeaders[k] = v
}
}
job = eng.Job("pull", image, tag)
job.SetenvBool("parallel", version.GreaterThan("1.3"))
job.SetenvJson("metaHeaders", metaHeaders)
job.SetenvJson("authConfig", authConfig)
} else { //import
if tag == "" {
repo, tag = parsers.ParseRepositoryTag(repo)
}
job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag)
job.Stdin.Add(r.Body)
}
if version.GreaterThan("1.0") {
job.SetenvBool("json", true)
streamJSON(job, w, true)
} else {
job.Stdout.Add(utils.NewWriteFlusher(w))
}
if err := job.Run(); err != nil {
if !job.Stdout.Used() {
return err
}
sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
w.Write(sf.FormatError(err))
}
return nil
}
func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
var (
authEncoded = r.Header.Get("X-Registry-Auth")
authConfig = ®istry.AuthConfig{}
metaHeaders = map[string][]string{}
)
if authEncoded != "" {
authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// for a search it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
authConfig = ®istry.AuthConfig{}
}
}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
metaHeaders[k] = v
}
}
var job = eng.Job("search", r.Form.Get("term"))
job.SetenvJson("metaHeaders", metaHeaders)
job.SetenvJson("authConfig", authConfig)
streamJSON(job, w, false)
return job.Run()
}
func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
metaHeaders[k] = v
}
}
if err := parseForm(r); err != nil {
return err
}
authConfig := ®istry.AuthConfig{}
authEncoded := r.Header.Get("X-Registry-Auth")
if authEncoded != "" {
// the new format is to handle the authConfig as a header
authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// to increase compatibility to existing api it is defaulting to be empty
authConfig = ®istry.AuthConfig{}
}
} else {
// the old format is supported for compatibility if there was no authConfig header
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
return err
}
}
job := eng.Job("push", vars["name"])
job.SetenvJson("metaHeaders", metaHeaders)
job.SetenvJson("authConfig", authConfig)
job.Setenv("tag", r.Form.Get("tag"))
if version.GreaterThan("1.0") {
job.SetenvBool("json", true)
streamJSON(job, w, true)
} else {
job.Stdout.Add(utils.NewWriteFlusher(w))
}
if err := job.Run(); err != nil {
if !job.Stdout.Used() {
return err
}
sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
w.Write(sf.FormatError(err))
}
return nil
}
func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := parseForm(r); err != nil {
return err
}
if version.GreaterThan("1.0") {
w.Header().Set("Content-Type", "application/x-tar")
}
var job *engine.Job
if name, ok := vars["name"]; ok {
job = eng.Job("image_export", name)
} else {
job = eng.Job("image_export", r.Form["names"]...)
}
job.Stdout.Add(w)
return job.Run()
}
func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
job := eng.Job("load")
job.Stdin.Add(r.Body)
return job.Run()
}
func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
		return err
}
var (
out engine.Env
job = eng.Job("create", r.Form.Get("name"))
outWarnings []string
stdoutBuffer = bytes.NewBuffer(nil)
warnings = bytes.NewBuffer(nil)
)
if err := checkForJson(r); err != nil {
return err
}
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
// Read container ID from the first line of stdout
job.Stdout.Add(stdoutBuffer)
// Read warnings from stderr
job.Stderr.Add(warnings)
if err := job.Run(); err != nil {
return err
}
// Parse warnings from stderr
scanner := bufio.NewScanner(warnings)
for scanner.Scan() {
outWarnings = append(outWarnings, scanner.Text())
}
out.Set("Id", engine.Tail(stdoutBuffer, 1))
out.SetList("Warnings", outWarnings)
return writeJSON(w, http.StatusCreated, out)
}
func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
job := eng.Job("restart", vars["name"])
job.Setenv("t", r.Form.Get("t"))
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
job := eng.Job("rm", vars["name"])
job.Setenv("forceRemove", r.Form.Get("force"))
job.Setenv("removeVolume", r.Form.Get("v"))
job.Setenv("removeLink", r.Form.Get("link"))
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var job = eng.Job("image_delete", vars["name"])
streamJSON(job, w, false)
job.Setenv("force", r.Form.Get("force"))
job.Setenv("noprune", r.Form.Get("noprune"))
return job.Run()
}
func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var (
name = vars["name"]
job = eng.Job("start", name)
)
	// If contentLength is -1, we can assume chunked encoding
// or more technically that the length is unknown
// http://golang.org/src/pkg/net/http/request.go#L139
// net/http otherwise seems to swallow any headers related to chunked encoding
// including r.TransferEncoding
// allow a nil body for backwards compatibility
if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) {
if err := checkForJson(r); err != nil {
return err
}
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
}
if err := job.Run(); err != nil {
if err.Error() == "Container already started" {
w.WriteHeader(http.StatusNotModified)
return nil
}
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
job := eng.Job("stop", vars["name"])
job.Setenv("t", r.Form.Get("t"))
if err := job.Run(); err != nil {
if err.Error() == "Container already stopped" {
w.WriteHeader(http.StatusNotModified)
return nil
}
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var (
env engine.Env
stdoutBuffer = bytes.NewBuffer(nil)
job = eng.Job("wait", vars["name"])
)
job.Stdout.Add(stdoutBuffer)
if err := job.Run(); err != nil {
return err
}
env.Set("StatusCode", engine.Tail(stdoutBuffer, 1))
return writeJSON(w, http.StatusOK, env)
}
func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil {
return err
}
return nil
}
func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var (
job = eng.Job("container_inspect", vars["name"])
c, err = job.Stdout.AddEnv()
)
if err != nil {
return err
}
if err = job.Run(); err != nil {
return err
}
inStream, outStream, err := hijackServer(w)
if err != nil {
return err
}
defer func() {
if tcpc, ok := inStream.(*net.TCPConn); ok {
tcpc.CloseWrite()
} else {
inStream.Close()
}
}()
defer func() {
if tcpc, ok := outStream.(*net.TCPConn); ok {
tcpc.CloseWrite()
} else if closer, ok := outStream.(io.Closer); ok {
closer.Close()
}
}()
var errStream io.Writer
fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
} else {
errStream = outStream
}
job = eng.Job("attach", vars["name"])
job.Setenv("logs", r.Form.Get("logs"))
job.Setenv("stream", r.Form.Get("stream"))
job.Setenv("stdin", r.Form.Get("stdin"))
job.Setenv("stdout", r.Form.Get("stdout"))
job.Setenv("stderr", r.Form.Get("stderr"))
job.Stdin.Add(inStream)
job.Stdout.Add(outStream)
job.Stderr.Set(errStream)
if err := job.Run(); err != nil {
fmt.Fprintf(outStream, "Error attaching: %s\n", err)
}
return nil
}
func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := eng.Job("container_inspect", vars["name"]).Run(); err != nil {
return err
}
h := websocket.Handler(func(ws *websocket.Conn) {
defer ws.Close()
job := eng.Job("attach", vars["name"])
job.Setenv("logs", r.Form.Get("logs"))
job.Setenv("stream", r.Form.Get("stream"))
job.Setenv("stdin", r.Form.Get("stdin"))
job.Setenv("stdout", r.Form.Get("stdout"))
job.Setenv("stderr", r.Form.Get("stderr"))
job.Stdin.Add(ws)
job.Stdout.Add(ws)
job.Stderr.Set(ws)
if err := job.Run(); err != nil {
log.Errorf("Error attaching websocket: %s", err)
}
})
h.ServeHTTP(w, r)
return nil
}
func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var job = eng.Job("container_inspect", vars["name"])
if version.LessThan("1.12") {
job.SetenvBool("raw", true)
}
streamJSON(job, w, false)
return job.Run()
}
func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var job = eng.Job("image_inspect", vars["name"])
if version.LessThan("1.12") {
job.SetenvBool("raw", true)
}
streamJSON(job, w, false)
return job.Run()
}
func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if version.LessThan("1.3") {
return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
}
var (
authEncoded = r.Header.Get("X-Registry-Auth")
authConfig = ®istry.AuthConfig{}
configFileEncoded = r.Header.Get("X-Registry-Config")
configFile = ®istry.ConfigFile{}
job = eng.Job("build")
)
// This block can be removed when API versions prior to 1.9 are deprecated.
// Both headers will be parsed and sent along to the daemon, but if a non-empty
// ConfigFile is present, any value provided as an AuthConfig directly will
// be overridden. See BuildFile::CmdFrom for details.
if version.LessThan("1.9") && authEncoded != "" {
authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
authConfig = ®istry.AuthConfig{}
}
}
if configFileEncoded != "" {
configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded))
if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
configFile = ®istry.ConfigFile{}
}
}
if version.GreaterThanOrEqualTo("1.8") {
job.SetenvBool("json", true)
streamJSON(job, w, true)
} else {
job.Stdout.Add(utils.NewWriteFlusher(w))
}
if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") {
job.Setenv("rm", "1")
} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
job.Setenv("rm", "1")
} else {
job.Setenv("rm", r.FormValue("rm"))
}
job.Stdin.Add(r.Body)
job.Setenv("remote", r.FormValue("remote"))
job.Setenv("t", r.FormValue("t"))
job.Setenv("q", r.FormValue("q"))
job.Setenv("nocache", r.FormValue("nocache"))
job.Setenv("forcerm", r.FormValue("forcerm"))
job.SetenvJson("authConfig", authConfig)
job.SetenvJson("configFile", configFile)
if err := job.Run(); err != nil {
if !job.Stdout.Used() {
return err
}
sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8"))
w.Write(sf.FormatError(err))
}
return nil
}
func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var copyData engine.Env
if err := checkForJson(r); err != nil {
return err
}
if err := copyData.Decode(r.Body); err != nil {
return err
}
if copyData.Get("Resource") == "" {
return fmt.Errorf("Path cannot be empty")
}
origResource := copyData.Get("Resource")
if copyData.Get("Resource")[0] == '/' {
copyData.Set("Resource", copyData.Get("Resource")[1:])
}
job := eng.Job("container_copy", vars["name"], copyData.Get("Resource"))
job.Stdout.Add(w)
w.Header().Set("Content-Type", "application/x-tar")
if err := job.Run(); err != nil {
log.Errorf("%s", err.Error())
if strings.Contains(strings.ToLower(err.Error()), "no such container") {
w.WriteHeader(http.StatusNotFound)
} else if strings.Contains(err.Error(), "no such file or directory") {
return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"])
}
}
return nil
}
func postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
		return err
}
var (
out engine.Env
name = vars["name"]
job = eng.Job("execCreate", name)
stdoutBuffer = bytes.NewBuffer(nil)
)
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
job.Stdout.Add(stdoutBuffer)
// Register an instance of Exec in container.
if err := job.Run(); err != nil {
fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err)
return err
}
// Return the ID
out.Set("Id", engine.Tail(stdoutBuffer, 1))
return writeJSON(w, http.StatusCreated, out)
}
// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start.
func postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
		return err
}
var (
name = vars["name"]
job = eng.Job("execStart", name)
errOut io.Writer = os.Stderr
)
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
if !job.GetenvBool("Detach") {
// Setting up the streaming http interface.
inStream, outStream, err := hijackServer(w)
if err != nil {
return err
}
defer func() {
if tcpc, ok := inStream.(*net.TCPConn); ok {
tcpc.CloseWrite()
} else {
inStream.Close()
}
}()
defer func() {
if tcpc, ok := outStream.(*net.TCPConn); ok {
tcpc.CloseWrite()
} else if closer, ok := outStream.(io.Closer); ok {
closer.Close()
}
}()
var errStream io.Writer
fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
} else {
errStream = outStream
}
job.Stdin.Add(inStream)
job.Stdout.Add(outStream)
job.Stderr.Set(errStream)
errOut = outStream
}
// Now run the user process in container.
job.SetCloseIO(false)
if err := job.Run(); err != nil {
fmt.Fprintf(errOut, "Error starting exec command in container %s: %s\n", name, err)
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := eng.Job("execResize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil {
return err
}
return nil
}
func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.WriteHeader(http.StatusOK)
return nil
}
func writeCorsHeaders(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Access-Control-Allow-Origin", "*")
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
}
func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
_, err := w.Write([]byte{'O', 'K'})
return err
}
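// makeHttpHandler wraps an HttpApiFunc into an http.HandlerFunc: it logs the request, warns on client/server version mismatch, optionally adds CORS headers, resolves the requested API version, and converts handler errors into HTTP error responses.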
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// log the request
log.Debugf("Calling %s %s", localMethod, localRoute)
if logging {
log.Infof("%s %s", r.Method, r.RequestURI)
}
if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
}
}
version := version.Version(mux.Vars(r)["version"])
if version == "" {
version = api.APIVERSION
}
if enableCors {
writeCorsHeaders(w, r)
}
if version.GreaterThan(api.APIVERSION) {
http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound)
return
}
if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
httpError(w, err)
}
}
}
// Replicated from expvar.go because it is not public.
func expvarHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
func AttachProfiler(router *mux.Router) {
router.HandleFunc("/debug/vars", expvarHandler)
router.HandleFunc("/debug/pprof/", pprof.Index)
router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
router.HandleFunc("/debug/pprof/profile", pprof.Profile)
router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP)
router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
}
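// createRouter builds the mux router for the remote API: it registers the profiling endpoints when DEBUG is set and maps every method/route pair to a handler on both the versioned and unversioned paths.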
func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) (*mux.Router, error) {
r := mux.NewRouter()
if os.Getenv("DEBUG") != "" {
AttachProfiler(r)
}
m := map[string]map[string]HttpApiFunc{
"GET": {
"/_ping": ping,
"/events": getEvents,
"/info": getInfo,
"/version": getVersion,
"/images/json": getImagesJSON,
"/images/viz": getImagesViz,
"/images/search": getImagesSearch,
"/images/get": getImagesGet,
"/images/{name:.*}/get": getImagesGet,
"/images/{name:.*}/history": getImagesHistory,
"/images/{name:.*}/json": getImagesByName,
"/containers/ps": getContainersJSON,
"/containers/json": getContainersJSON,
"/containers/{name:.*}/export": getContainersExport,
"/containers/{name:.*}/changes": getContainersChanges,
"/containers/{name:.*}/json": getContainersByName,
"/containers/{name:.*}/top": getContainersTop,
"/containers/{name:.*}/logs": getContainersLogs,
"/containers/{name:.*}/attach/ws": wsContainersAttach,
},
"POST": {
"/auth": postAuth,
"/commit": postCommit,
"/build": postBuild,
"/images/create": postImagesCreate,
"/images/load": postImagesLoad,
"/images/{name:.*}/push": postImagesPush,
"/images/{name:.*}/tag": postImagesTag,
"/containers/create": postContainersCreate,
"/containers/{name:.*}/kill": postContainersKill,
"/containers/{name:.*}/pause": postContainersPause,
"/containers/{name:.*}/unpause": postContainersUnpause,
"/containers/{name:.*}/restart": postContainersRestart,
"/containers/{name:.*}/start": postContainersStart,
"/containers/{name:.*}/stop": postContainersStop,
"/containers/{name:.*}/wait": postContainersWait,
"/containers/{name:.*}/resize": postContainersResize,
"/containers/{name:.*}/attach": postContainersAttach,
"/containers/{name:.*}/copy": postContainersCopy,
"/containers/{name:.*}/exec": postContainerExecCreate,
"/exec/{name:.*}/start": postContainerExecStart,
"/exec/{name:.*}/resize": postContainerExecResize,
},
"DELETE": {
"/containers/{name:.*}": deleteContainers,
"/images/{name:.*}": deleteImages,
},
"OPTIONS": {
"": optionsHandler,
},
}
for method, routes := range m {
for route, fct := range routes {
log.Debugf("Registering %s, %s", method, route)
// NOTE: scope issue, make sure the variables are local and won't be changed
localRoute := route
localFct := fct
localMethod := method
// build the handler function
f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, enableCors, version.Version(dockerVersion))
// add the new route
if localRoute == "" {
r.Methods(localMethod).HandlerFunc(f)
} else {
r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f)
r.Path(localRoute).Methods(localMethod).HandlerFunc(f)
}
}
}
return r, nil
}
// ServeRequest processes a single http request to the docker remote api.
// FIXME: refactor this to be part of Server and not require re-creating a new
// router each time. This requires first moving ListenAndServe into Server.
func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) error {
router, err := createRouter(eng, false, true, "")
if err != nil {
return err
}
// Insert APIVERSION into the request as a convenience
req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path)
router.ServeHTTP(w, req)
return nil
}
// ServeFd creates an http.Server and sets it up to serve given a socket-activated
// argument.
func ServeFd(addr string, handle http.Handler) error {
ls, e := systemd.ListenFD(addr)
if e != nil {
return e
}
chErrors := make(chan error, len(ls))
// We don't want to start serving on these sockets until the
// daemon is initialized and installed. Otherwise required handlers
// won't be ready.
<-activationLock
// Since ListenFD will return one or more sockets we have
// to create a go func to spawn off multiple serves
for i := range ls {
listener := ls[i]
go func() {
httpSrv := http.Server{Handler: handle}
chErrors <- httpSrv.Serve(listener)
}()
}
for i := 0; i < len(ls); i++ {
err := <-chErrors
if err != nil {
return err
}
}
return nil
}
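// lookupGidByName resolves a group name or numeric gid string into a gid using the system group file.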
func lookupGidByName(nameOrGid string) (int, error) {
groupFile, err := user.GetGroupFile()
if err != nil {
return -1, err
}
groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
})
if err != nil {
return -1, err
}
if groups != nil && len(groups) > 0 {
return groups[0].Gid, nil
}
return -1, fmt.Errorf("Group %s not found", nameOrGid)
}
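// changeGroup changes the group ownership of the given unix socket path to the resolved group.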
func changeGroup(addr string, nameOrGid string) error {
gid, err := lookupGidByName(nameOrGid)
if err != nil {
return err
}
log.Debugf("%s group found. gid: %d", nameOrGid, gid)
return os.Chown(addr, 0, gid)
}
// ListenAndServe sets up the required http.Server and gets it listening for
// each addr passed in and does protocol specific checking.
func ListenAndServe(proto, addr string, job *engine.Job) error {
var l net.Listener
r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
if err != nil {
return err
}
if proto == "fd" {
return ServeFd(addr, r)
}
if proto == "unix" {
if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
return err
}
}
var oldmask int
if proto == "unix" {
oldmask = syscall.Umask(0777)
}
if job.GetenvBool("BufferRequests") {
l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock)
} else {
l, err = net.Listen(proto, addr)
}
if proto == "unix" {
syscall.Umask(oldmask)
}
if err != nil {
return err
}
if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) {
tlsCert := job.Getenv("TlsCert")
tlsKey := job.Getenv("TlsKey")
cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey)
if err != nil {
return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?",
tlsCert, tlsKey, err)
}
tlsConfig := &tls.Config{
NextProtos: []string{"http/1.1"},
Certificates: []tls.Certificate{cert},
// Avoid fallback on insecure SSL protocols
MinVersion: tls.VersionTLS10,
}
if job.GetenvBool("TlsVerify") {
certPool := x509.NewCertPool()
file, err := ioutil.ReadFile(job.Getenv("TlsCa"))
if err != nil {
return fmt.Errorf("Couldn't read CA certificate: %s", err)
}
certPool.AppendCertsFromPEM(file)
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
tlsConfig.ClientCAs = certPool
}
l = tls.NewListener(l, tlsConfig)
}
// Basic error and sanity checking
switch proto {
case "tcp":
if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
}
case "unix":
socketGroup := job.Getenv("SocketGroup")
if socketGroup != "" {
if err := changeGroup(addr, socketGroup); err != nil {
if socketGroup == "docker" {
// if the user hasn't explicitly specified the group ownership, don't fail on errors.
log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
} else {
return err
}
}
}
if err := os.Chmod(addr, 0660); err != nil {
return err
}
default:
return fmt.Errorf("Invalid protocol format.")
}
httpSrv := http.Server{Addr: addr, Handler: r}
return httpSrv.Serve(l)
}
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a go routine to setup a serving http.Server for each.
func ServeApi(job *engine.Job) engine.Status {
if len(job.Args) == 0 {
return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
}
var (
protoAddrs = job.Args
chErrors = make(chan error, len(protoAddrs))
)
activationLock = make(chan struct{})
for _, protoAddr := range protoAddrs {
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
if len(protoAddrParts) != 2 {
return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
}
go func() {
log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
}()
}
for i := 0; i < len(protoAddrs); i++ {
err := <-chErrors
if err != nil {
return job.Error(err)
}
}
return engine.StatusOK
}
func AcceptConnections(job *engine.Job) engine.Status {
// Tell the init daemon we are accepting requests
go systemd.SdNotify("READY=1")
// close the lock so the listeners start accepting connections
if activationLock != nil {
close(activationLock)
}
return engine.StatusOK
}
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
clubChinois/wsgi.py | """
WSGI config for clubChinois project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "clubChinois.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/src/github.com/zenazn/goji/graceful/einhorn.go | // +build !windows
package graceful
import (
"os"
"strconv"
"syscall"
)
func init() {
// This is a little unfortunate: goji/bind already knows whether we're
// running under einhorn, but we don't want to introduce a dependency
// between the two packages. Since the check is short enough, inlining
// it here seems "fine."
mpid, err := strconv.Atoi(os.Getenv("EINHORN_MASTER_PID"))
if err != nil || mpid != os.Getppid() {
return
}
stdSignals = append(stdSignals, syscall.SIGUSR2)
}
| [
"\"EINHORN_MASTER_PID\""
]
| []
| [
"EINHORN_MASTER_PID"
]
| [] | ["EINHORN_MASTER_PID"] | go | 1 | 0 | |
src/python/tests/test_libs/untrusted_runner_helpers.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helpers for untrusted_runner."""
import os
import shutil
import subprocess
import tempfile
import unittest
from bot.tasks import commands
from bot.untrusted_runner import file_host
from bot.untrusted_runner import host
from bot.untrusted_runner import untrusted
from datastore import data_types
from datastore import ndb
from datastore import ndb_patcher
from metrics import logs
from system import environment
from system import shell
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
TEST_LIBS_DIR = os.path.dirname(os.path.realpath(__file__))
TEST_LIBS_DATA_DIR = os.path.join(TEST_LIBS_DIR, 'data')
def untrusted_process():
"""Start an untrusted process."""
os.environ['BOT_NAME'] = 'localhost'
untrusted.start_server()
def _test_data_dir():
"""Return path to directory for bot and server data."""
root_dir = os.environ['ROOT_DIR']
return os.path.join(root_dir, '_test_data')
def _create_test_bot():
"""Start test bot."""
# TODO(ochang): Use Docker container instead.
bot_path = os.path.join(_test_data_dir(), 'worker_bot')
if os.path.exists(bot_path):
shutil.rmtree(bot_path, ignore_errors=True)
env = os.environ.copy()
env['UNTRUSTED_WORKER'] = 'True'
env['BOT_NAME'] = 'localhost'
bot_proc = subprocess.Popen(
['python', 'butler.py', 'run_bot', '--skip-install-deps', bot_path],
env=env)
return bot_proc, os.path.join(bot_path, 'clusterfuzz')
def _create_test_root():
"""Create test ROOT_DIR for the trusted host."""
root_path = os.path.join(_test_data_dir(), 'test_root')
if os.path.exists(root_path):
shutil.rmtree(root_path, ignore_errors=True)
real_root = os.environ['ROOT_DIR']
os.makedirs(root_path)
# TODO(ochang): Make sure we don't copy files that aren't tracked in git.
shutil.copytree(
os.path.join(real_root, 'bot'), os.path.join(root_path, 'bot'))
shutil.copytree(
os.path.join(real_root, 'resources'), os.path.join(
root_path, 'resources'))
os.mkdir(os.path.join(root_path, 'src'))
shutil.copytree(
os.path.join(real_root, 'src', 'appengine'),
os.path.join(root_path, 'src', 'appengine'))
shutil.copytree(
os.path.join(real_root, 'src', 'python'),
os.path.join(root_path, 'src', 'python'))
shutil.copytree(
os.path.join(real_root, 'src', 'third_party'),
os.path.join(root_path, 'src', 'third_party'))
return root_path
def _which(prog):
"""Return full path to |prog| (based on $PATH)."""
for path in os.getenv('PATH', '').split(':'):
candidate = os.path.join(path, prog)
if os.path.exists(candidate):
return candidate
return None
@unittest.skipIf(not os.getenv('UNTRUSTED_RUNNER_TESTS'),
'Skipping untrusted runner tests.')
@test_utils.with_cloud_emulators('datastore')
class UntrustedRunnerIntegrationTest(unittest.TestCase):
"""Base class for doing integration testing of untrusted_runner."""
@classmethod
def setUpClass(cls):
logs.configure_for_tests()
os.environ['HOST_INSTANCE_NAME'] = 'host'
os.environ['HOST_INSTANCE_NUM'] = '0'
os.environ['BOT_NAME'] = 'host-0'
os.environ['LOCAL_DEVELOPMENT'] = 'True'
os.environ['SOURCE_VERSION_OVERRIDE'] = 'VERSION'
os.environ['CONFIG_DIR_OVERRIDE'] = os.path.abspath(
os.path.join(os.environ['ROOT_DIR'], 'configs', 'test'))
cert_location = os.path.join(TEST_LIBS_DATA_DIR, 'untrusted_cert.pem')
key_location = os.path.join(TEST_LIBS_DATA_DIR, 'untrusted_key.pem')
os.environ['UNTRUSTED_TLS_CERT_FOR_TESTING'] = cert_location
os.environ['UNTRUSTED_TLS_KEY_FOR_TESTING'] = key_location
cls.bot_proc, bot_root_dir = _create_test_bot()
os.environ['TRUSTED_HOST'] = 'True'
os.environ['WORKER_ROOT_DIR'] = bot_root_dir
os.environ['WORKER_BOT_TMPDIR'] = os.path.join(bot_root_dir, 'bot_tmpdir')
# Explicitly patch datastore.ndb here, as otherwise we patch
# google.appengine.ext.ndb which is not what is imported everywhere.
ndb_patcher.patch_ndb(ndb)
environment.set_default_vars()
data_types.HostWorkerAssignment(
host_name='host',
instance_num=0,
worker_name='localhost',
project_name='project',
id='host-0').put()
with open(cert_location) as f:
cert_contents = f.read()
with open(key_location) as f:
key_contents = f.read()
data_types.WorkerTlsCert(
project_name='project',
cert_contents=cert_contents,
key_contents=key_contents,
id='project').put()
host.init()
@classmethod
def tearDownClass(cls):
if cls.bot_proc:
try:
cls.bot_proc.terminate()
except OSError:
# Could already be killed.
pass
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
os.environ['BOT_TMPDIR'] = os.path.join(self.tmp_dir, 'bot_tmpdir')
test_helpers.patch(self, [
'datastore.data_handler.get_data_bundle_bucket_name',
'system.environment.set_environment_parameters_from_file',
])
test_helpers.patch_environ(self)
# Our tests write data/logs into subdirs of ROOT_DIR. Pivot the ROOT_DIR to
# a temporary one.
new_root = _create_test_root()
os.environ['ROOT_DIR'] = new_root
self.saved_cwd = os.getcwd()
os.chdir(new_root)
environment.set_bot_environment()
fuzz_inputs = os.environ['FUZZ_INPUTS']
shell.remove_directory(fuzz_inputs, recreate=True)
worker_fuzz_inputs = file_host.rebase_to_worker_root(fuzz_inputs)
shell.remove_directory(worker_fuzz_inputs, recreate=True)
environment.set_value('GSUTIL_PATH', os.path.dirname(_which('gsutil')))
def tearDown(self):
shutil.rmtree(self.tmp_dir)
os.chdir(self.saved_cwd)
def _setup_env(self, job_type=None):
"""Set up bot environment."""
if not job_type:
return
job = data_types.Job.query(data_types.Job.name == job_type).get()
environment.set_value('JOB_NAME', job_type)
commands.update_environment_for_job(job.environment_string)
| []
| []
| [
"TRUSTED_HOST",
"CONFIG_DIR_OVERRIDE",
"UNTRUSTED_TLS_CERT_FOR_TESTING",
"FUZZ_INPUTS",
"BOT_NAME",
"ROOT_DIR",
"BOT_TMPDIR",
"SOURCE_VERSION_OVERRIDE",
"WORKER_ROOT_DIR",
"LOCAL_DEVELOPMENT",
"UNTRUSTED_TLS_KEY_FOR_TESTING",
"HOST_INSTANCE_NUM",
"UNTRUSTED_RUNNER_TESTS",
"PATH",
"WORKER_BOT_TMPDIR",
"HOST_INSTANCE_NAME"
]
| [] | ["TRUSTED_HOST", "CONFIG_DIR_OVERRIDE", "UNTRUSTED_TLS_CERT_FOR_TESTING", "FUZZ_INPUTS", "BOT_NAME", "ROOT_DIR", "BOT_TMPDIR", "SOURCE_VERSION_OVERRIDE", "WORKER_ROOT_DIR", "LOCAL_DEVELOPMENT", "UNTRUSTED_TLS_KEY_FOR_TESTING", "HOST_INSTANCE_NUM", "UNTRUSTED_RUNNER_TESTS", "PATH", "WORKER_BOT_TMPDIR", "HOST_INSTANCE_NAME"] | python | 16 | 0 | |
venv/Lib/site-packages/zmq/tests/__init__.py | # Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
import os
import platform
import sys
import time
import signal
from functools import partial
from threading import Thread
from typing import List
from unittest import SkipTest, TestCase
from pytest import mark
import zmq
from zmq.utils import jsonapi
try:
import gevent
from zmq import green as gzmq
have_gevent = True
except ImportError:
have_gevent = False
PYPY = platform.python_implementation() == 'PyPy'
# -----------------------------------------------------------------------------
# skip decorators (directly from unittest)
# -----------------------------------------------------------------------------
_id = lambda x: x
skip_pypy = mark.skipif(PYPY, reason="Doesn't work on PyPy")
require_zmq_4 = mark.skipif(zmq.zmq_version_info() < (4,), reason="requires zmq >= 4")
# -----------------------------------------------------------------------------
# Base test class
# -----------------------------------------------------------------------------
def term_context(ctx, timeout):
"""Terminate a context with a timeout"""
t = Thread(target=ctx.term)
t.daemon = True
t.start()
t.join(timeout=timeout)
if t.is_alive():
# reset Context.instance, so the failure to term doesn't corrupt subsequent tests
zmq.sugar.context.Context._instance = None
raise RuntimeError(
"context could not terminate, open sockets likely remain in test"
)
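# Base test class shared by the pyzmq suites: it tracks every socket and context
# created during a test so tearDown can close them, and (where SIGALRM is
# available) enforces a per-test timeout to avoid hangs.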
class BaseZMQTestCase(TestCase):
green = False
teardown_timeout = 10
test_timeout_seconds = int(os.environ.get("ZMQ_TEST_TIMEOUT") or 60)
sockets: List[zmq.Socket]
@property
def _is_pyzmq_test(self):
return self.__class__.__module__.split(".", 1)[0] == __name__.split(".", 1)[0]
@property
def _should_test_timeout(self):
return (
self._is_pyzmq_test
and hasattr(signal, 'SIGALRM')
and self.test_timeout_seconds
)
@property
def Context(self):
if self.green:
return gzmq.Context
else:
return zmq.Context
def socket(self, socket_type):
s = self.context.socket(socket_type)
self.sockets.append(s)
return s
def _alarm_timeout(self, timeout, *args):
raise TimeoutError(f"Test did not complete in {timeout} seconds")
def setUp(self):
super(BaseZMQTestCase, self).setUp()
if self.green and not have_gevent:
raise SkipTest("requires gevent")
self.context = self.Context.instance()
self.sockets = []
if self._should_test_timeout:
# use SIGALRM to avoid test hangs
signal.signal(
signal.SIGALRM, partial(self._alarm_timeout, self.test_timeout_seconds)
)
signal.alarm(self.test_timeout_seconds)
def tearDown(self):
if self._should_test_timeout:
# cancel the timeout alarm, if there was one
signal.alarm(0)
contexts = set([self.context])
while self.sockets:
sock = self.sockets.pop()
contexts.add(sock.context) # in case additional contexts are created
sock.close(0)
for ctx in contexts:
try:
term_context(ctx, self.teardown_timeout)
except Exception:
# reset Context.instance, so the failure to term doesn't corrupt subsequent tests
zmq.sugar.context.Context._instance = None
raise
super(BaseZMQTestCase, self).tearDown()
def create_bound_pair(
self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'
):
"""Create a bound socket pair using a random port."""
s1 = self.context.socket(type1)
s1.setsockopt(zmq.LINGER, 0)
port = s1.bind_to_random_port(interface)
s2 = self.context.socket(type2)
s2.setsockopt(zmq.LINGER, 0)
s2.connect('%s:%s' % (interface, port))
self.sockets.extend([s1, s2])
return s1, s2
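    # Illustrative usage inside a test method (using the helpers defined below):
    #   s1, s2 = self.create_bound_pair(zmq.PUSH, zmq.PULL)
    #   s1.send(b'ping')
    #   assert self.recv(s2) == b'ping'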
def ping_pong(self, s1, s2, msg):
s1.send(msg)
msg2 = s2.recv()
s2.send(msg2)
msg3 = s1.recv()
return msg3
def ping_pong_json(self, s1, s2, o):
if jsonapi.jsonmod is None:
raise SkipTest("No json library")
s1.send_json(o)
o2 = s2.recv_json()
s2.send_json(o2)
o3 = s1.recv_json()
return o3
def ping_pong_pyobj(self, s1, s2, o):
s1.send_pyobj(o)
o2 = s2.recv_pyobj()
s2.send_pyobj(o2)
o3 = s1.recv_pyobj()
return o3
def assertRaisesErrno(self, errno, func, *args, **kwargs):
try:
func(*args, **kwargs)
except zmq.ZMQError as e:
self.assertEqual(
e.errno,
errno,
"wrong error raised, expected '%s' \
got '%s'"
% (zmq.ZMQError(errno), zmq.ZMQError(e.errno)),
)
else:
self.fail("Function did not raise any error")
def _select_recv(self, multipart, socket, **kwargs):
"""call recv[_multipart] in a way that raises if there is nothing to receive"""
if zmq.zmq_version_info() >= (3, 1, 0):
# zmq 3.1 has a bug, where poll can return false positives,
# so we wait a little bit just in case
# See LIBZMQ-280 on JIRA
time.sleep(0.1)
r, w, x = zmq.select([socket], [], [], timeout=kwargs.pop('timeout', 5))
assert len(r) > 0, "Should have received a message"
kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0)
recv = socket.recv_multipart if multipart else socket.recv
return recv(**kwargs)
def recv(self, socket, **kwargs):
"""call recv in a way that raises if there is nothing to receive"""
return self._select_recv(False, socket, **kwargs)
def recv_multipart(self, socket, **kwargs):
"""call recv_multipart in a way that raises if there is nothing to receive"""
return self._select_recv(True, socket, **kwargs)
class PollZMQTestCase(BaseZMQTestCase):
pass
class GreenTest:
"""Mixin for making green versions of test classes"""
green = True
teardown_timeout = 10
def assertRaisesErrno(self, errno, func, *args, **kwargs):
if errno == zmq.EAGAIN:
raise SkipTest("Skipping because we're green.")
try:
func(*args, **kwargs)
except zmq.ZMQError:
e = sys.exc_info()[1]
self.assertEqual(
e.errno,
errno,
"wrong error raised, expected '%s' \
got '%s'"
% (zmq.ZMQError(errno), zmq.ZMQError(e.errno)),
)
else:
self.fail("Function did not raise any error")
def tearDown(self):
if self._should_test_timeout:
# cancel the timeout alarm, if there was one
signal.alarm(0)
contexts = set([self.context])
while self.sockets:
sock = self.sockets.pop()
contexts.add(sock.context) # in case additional contexts are created
sock.close()
try:
gevent.joinall(
[gevent.spawn(ctx.term) for ctx in contexts],
timeout=self.teardown_timeout,
raise_error=True,
)
except gevent.Timeout:
raise RuntimeError(
"context could not terminate, open sockets likely remain in test"
)
def skip_green(self):
raise SkipTest("Skipping because we are green")
def skip_green(f):
def skipping_test(self, *args, **kwargs):
if self.green:
raise SkipTest("Skipping because we are green")
else:
return f(self, *args, **kwargs)
return skipping_test
| []
| []
| [
"ZMQ_TEST_TIMEOUT"
]
| [] | ["ZMQ_TEST_TIMEOUT"] | python | 1 | 0 | |
ProyectoFinal/ProyectoFinal/wsgi.py | """
WSGI config for ProyectoFinal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProyectoFinal.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
setup.py | from setuptools import setup, find_packages
with open("README.md", "r") as f:
long_description = f.read()
setup(name='zomato_distribution_api',
version='0.2.3',
description='provides wrapper for the zomato web api',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
author='Chetan Raj Rupakheti',
author_email='[email protected]',
python_requires='>=3',
zip_safe=False)
| []
| []
| []
| [] | [] | python | null | null | null |
setup.py | import setuptools
import os
# Conditionally include additional modules for docs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
requirements = list()
if on_rtd:
requirements.append('gevent')
requirements.append('tornado')
requirements.append('twisted')
long_description = ('Pika is a pure-Python implementation of the AMQP 0-9-1 '
'protocol that tries to stay fairly independent of the '
'underlying network support library. Pika was developed '
'primarily for use with RabbitMQ, but should also work '
'with other AMQP 0-9-1 brokers.')
setuptools.setup(
name='pika',
version='1.2.0',
description='Pika Python AMQP Client Library',
long_description=open('README.rst').read(),
maintainer='Gavin M. Roy',
maintainer_email='[email protected]',
url='https://pika.readthedocs.io',
packages=setuptools.find_packages(include=['pika', 'pika.*']),
license='BSD',
install_requires=requirements,
package_data={'': ['LICENSE', 'README.rst']},
extras_require={
'gevent': ['gevent'],
'tornado': ['tornado'],
'twisted': ['twisted'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: Jython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Communications', 'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking'
],
zip_safe=True)
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java | /*
* Copyright 2017-2018, Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.operator.cluster.model;
import io.fabric8.kubernetes.api.model.Affinity;
import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
import io.fabric8.kubernetes.api.model.ConfigMapVolumeSource;
import io.fabric8.kubernetes.api.model.ConfigMapVolumeSourceBuilder;
import io.fabric8.kubernetes.api.model.Container;
import io.fabric8.kubernetes.api.model.ContainerBuilder;
import io.fabric8.kubernetes.api.model.ContainerPort;
import io.fabric8.kubernetes.api.model.ContainerPortBuilder;
import io.fabric8.kubernetes.api.model.EnvVar;
import io.fabric8.kubernetes.api.model.EnvVarBuilder;
import io.fabric8.kubernetes.api.model.EnvVarSource;
import io.fabric8.kubernetes.api.model.EnvVarSourceBuilder;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.LabelSelector;
import io.fabric8.kubernetes.api.model.LabelSelectorBuilder;
import io.fabric8.kubernetes.api.model.OwnerReference;
import io.fabric8.kubernetes.api.model.OwnerReferenceBuilder;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaimBuilder;
import io.fabric8.kubernetes.api.model.PodSecurityContext;
import io.fabric8.kubernetes.api.model.PodSecurityContextBuilder;
import io.fabric8.kubernetes.api.model.Probe;
import io.fabric8.kubernetes.api.model.ProbeBuilder;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecretBuilder;
import io.fabric8.kubernetes.api.model.SecretVolumeSource;
import io.fabric8.kubernetes.api.model.SecretVolumeSourceBuilder;
import io.fabric8.kubernetes.api.model.Service;
import io.fabric8.kubernetes.api.model.ServiceBuilder;
import io.fabric8.kubernetes.api.model.ServicePort;
import io.fabric8.kubernetes.api.model.ServicePortBuilder;
import io.fabric8.kubernetes.api.model.Toleration;
import io.fabric8.kubernetes.api.model.Volume;
import io.fabric8.kubernetes.api.model.VolumeBuilder;
import io.fabric8.kubernetes.api.model.VolumeMount;
import io.fabric8.kubernetes.api.model.VolumeMountBuilder;
import io.fabric8.kubernetes.api.model.extensions.Deployment;
import io.fabric8.kubernetes.api.model.extensions.DeploymentBuilder;
import io.fabric8.kubernetes.api.model.extensions.DeploymentStrategy;
import io.fabric8.kubernetes.api.model.extensions.StatefulSet;
import io.fabric8.kubernetes.api.model.extensions.StatefulSetBuilder;
import io.fabric8.kubernetes.api.model.extensions.StatefulSetUpdateStrategyBuilder;
import io.strimzi.api.kafka.model.CpuMemory;
import io.strimzi.api.kafka.model.ExternalLogging;
import io.strimzi.api.kafka.model.InlineLogging;
import io.strimzi.api.kafka.model.JvmOptions;
import io.strimzi.api.kafka.model.KafkaResources;
import io.strimzi.api.kafka.model.Logging;
import io.strimzi.api.kafka.model.PersistentClaimStorage;
import io.strimzi.api.kafka.model.Resources;
import io.strimzi.api.kafka.model.Storage;
import io.strimzi.operator.common.Annotations;
import io.strimzi.operator.common.model.Labels;
import io.vertx.core.json.JsonObject;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static java.util.Arrays.asList;
public abstract class AbstractModel {
protected static final Logger log = LogManager.getLogger(AbstractModel.class.getName());
protected static final int CERTS_EXPIRATION_DAYS = 365;
protected static final String DEFAULT_JVM_XMS = "128M";
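    // Init container used to fix data volume ownership on Kubernetes (see createStatefulSet);
    // its image can be overridden through the STRIMZI_VOLUME_MOUNT_INIT_IMAGE environment variable.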
private static final String VOLUME_MOUNT_HACK_IMAGE =
System.getenv().getOrDefault("STRIMZI_VOLUME_MOUNT_INIT_IMAGE", "busybox");
protected static final String VOLUME_MOUNT_HACK_NAME = "volume-mount-hack";
private static final Long VOLUME_MOUNT_HACK_USERID = 1001L;
private static final Long VOLUME_MOUNT_HACK_GROUPID = 0L;
public static final String ANCILLARY_CM_KEY_METRICS = "metrics-config.yml";
public static final String ANCILLARY_CM_KEY_LOG_CONFIG = "log4j.properties";
public static final String ENV_VAR_DYNAMIC_HEAP_FRACTION = "DYNAMIC_HEAP_FRACTION";
public static final String ENV_VAR_KAFKA_HEAP_OPTS = "KAFKA_HEAP_OPTS";
public static final String ENV_VAR_KAFKA_JVM_PERFORMANCE_OPTS = "KAFKA_JVM_PERFORMANCE_OPTS";
public static final String ENV_VAR_DYNAMIC_HEAP_MAX = "DYNAMIC_HEAP_MAX";
public static final String NETWORK_POLICY_KEY_SUFFIX = "-network-policy";
public static final String ENV_VAR_STRIMZI_KAFKA_GC_LOG_OPTS = "STRIMZI_KAFKA_GC_LOG_OPTS";
public static final String ENV_VAR_STRIMZI_GC_LOG_OPTS = "STRIMZI_GC_LOG_OPTS";
private static final String ANNO_STRIMZI_IO_DELETE_CLAIM = Annotations.STRIMZI_DOMAIN + "/delete-claim";
@Deprecated
private static final String ANNO_CO_STRIMZI_IO_DELETE_CLAIM = "cluster.operator.strimzi.io/delete-claim";
protected static final String DEFAULT_KAFKA_GC_LOGGING = "-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps";
protected static final String DEFAULT_STRIMZI_GC_LOGGING = "-XX:NativeMemoryTracking=summary -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps";
protected final String cluster;
protected final String namespace;
protected final Labels labels;
// Docker image configuration
protected String image;
// Number of replicas
protected int replicas;
protected String readinessPath;
protected int readinessTimeout;
protected int readinessInitialDelay;
protected String livenessPath;
protected int livenessTimeout;
protected int livenessInitialDelay;
protected String serviceName;
protected String headlessServiceName;
protected String name;
protected static final int METRICS_PORT = 9404;
protected static final String METRICS_PORT_NAME = "metrics";
protected boolean isMetricsEnabled;
protected Iterable<Map.Entry<String, Object>> metricsConfig;
protected String ancillaryConfigName;
protected String logConfigName;
protected Storage storage;
protected AbstractConfiguration configuration;
protected String mountPath;
public static final String VOLUME_NAME = "data";
protected String logAndMetricsConfigMountPath;
protected String logAndMetricsConfigVolumeName;
private JvmOptions jvmOptions;
private Resources resources;
private Affinity userAffinity;
private List<Toleration> tolerations;
protected Map validLoggerFields;
private final String[] validLoggerValues = new String[]{"INFO", "ERROR", "WARN", "TRACE", "DEBUG", "FATAL", "OFF" };
private Logging logging;
protected boolean gcLoggingEnabled = true;
// Templates
protected Map<String, String> templateStatefulSetLabels;
protected Map<String, String> templateStatefulSetAnnotations;
protected Map<String, String> templateDeploymentLabels;
protected Map<String, String> templateDeploymentAnnotations;
protected Map<String, String> templatePodLabels;
protected Map<String, String> templatePodAnnotations;
protected Map<String, String> templateServiceLabels;
protected Map<String, String> templateServiceAnnotations;
protected Map<String, String> templateHeadlessServiceLabels;
protected Map<String, String> templateHeadlessServiceAnnotations;
// Owner Reference information
private String ownerApiVersion;
private String ownerKind;
private String ownerUid;
/**
* Constructor
*
* @param namespace Kubernetes/OpenShift namespace where cluster resources are going to be created
* @param cluster overall cluster name
*/
protected AbstractModel(String namespace, String cluster, Labels labels) {
this.cluster = cluster;
this.namespace = namespace;
this.labels = labels.withCluster(cluster);
}
public Labels getLabels() {
return labels;
}
public int getReplicas() {
return replicas;
}
protected void setReplicas(int replicas) {
this.replicas = replicas;
}
protected void setImage(String image) {
this.image = image;
}
protected void setReadinessTimeout(int readinessTimeout) {
this.readinessTimeout = readinessTimeout;
}
protected void setReadinessInitialDelay(int readinessInitialDelay) {
this.readinessInitialDelay = readinessInitialDelay;
}
protected void setLivenessTimeout(int livenessTimeout) {
this.livenessTimeout = livenessTimeout;
}
protected void setLivenessInitialDelay(int livenessInitialDelay) {
this.livenessInitialDelay = livenessInitialDelay;
}
/**
     * Returns the name of this component, which is used to name the Kubernetes resources created for it
*
* @return
*/
public String getName() {
return name;
}
public String getServiceName() {
return serviceName;
}
public String getHeadlessServiceName() {
return headlessServiceName;
}
protected Map<String, String> getSelectorLabels() {
return labels.withName(name).strimziLabels().toMap();
}
protected Map<String, String> getLabelsWithName() {
return getLabelsWithName(name);
}
protected Map<String, String> getLabelsWithName(Map<String, String> userLabels) {
return getLabelsWithName(name, userLabels);
}
protected Map<String, String> getLabelsWithName(String name) {
return labels.withName(name).toMap();
}
protected Map<String, String> getLabelsWithName(String name, Map<String, String> userLabels) {
return labels.withName(name).withUserLabels(userLabels).toMap();
}
public boolean isMetricsEnabled() {
return isMetricsEnabled;
}
protected void setMetricsEnabled(boolean isMetricsEnabled) {
this.isMetricsEnabled = isMetricsEnabled;
}
public String getGcLoggingOptions() {
return gcLoggingEnabled ? DEFAULT_KAFKA_GC_LOGGING : " ";
}
protected void setGcLoggingEnabled(boolean gcLoggingEnabled) {
this.gcLoggingEnabled = gcLoggingEnabled;
}
protected abstract String getDefaultLogConfigFileName();
/**
     * Returns a map with all available loggers for the current pod and their default values.
* @return
*/
protected Properties getDefaultLogConfig() {
Properties properties = new Properties();
String defaultLogConfigFileName = getDefaultLogConfigFileName();
try {
properties = getDefaultLoggingProperties(defaultLogConfigFileName);
} catch (IOException e) {
log.warn("Unable to read default log config from '{}'", defaultLogConfigFileName);
}
return properties;
}
/**
     * Takes a resource file containing the default log4j properties and returns them as a Properties object.
     * @param defaultConfigResourceFileName name of the file where the default log4j properties are stored
* @return
*/
protected Properties getDefaultLoggingProperties(String defaultConfigResourceFileName) throws IOException {
Properties defaultSettings = new Properties();
InputStream is = null;
try {
is = AbstractModel.class.getResourceAsStream("/" + defaultConfigResourceFileName);
defaultSettings.load(is);
} finally {
if (is != null) {
is.close();
}
}
return defaultSettings;
}
/**
* Transforms map to log4j properties file format
* @param newSettings map with properties
* @return
*/
protected static String createPropertiesString(Properties newSettings) {
StringWriter sw = new StringWriter();
try {
newSettings.store(sw, "Do not change this generated file. Logging can be configured in the corresponding kubernetes/openshift resource.");
} catch (IOException e) {
log.warn("Error creating properties", e);
}
// remove date comment, because it is updated with each reconciliation which leads to restarting pods
return sw.toString().replaceAll("#[A-Za-z]+ [A-Za-z]+ [0-9]+ [0-9]+:[0-9]+:[0-9]+ [A-Z]+ [0-9]+", "");
}
public Logging getLogging() {
return logging;
}
protected void setLogging(Logging logging) {
this.logging = logging;
}
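    /**
     * Validates the logging configuration and renders it as a log4j properties string.
     * Inline loggers are checked against the known logger names and levels; external logging
     * is read from the provided ConfigMap, falling back to the default configuration when
     * the ConfigMap does not exist.
     */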
public String parseLogging(Logging logging, ConfigMap externalCm) {
if (logging instanceof InlineLogging) {
// validate all entries
((InlineLogging) logging).getLoggers().forEach((key, tmpEntry) -> {
if (validLoggerFields.containsKey(key)) {
// correct logger, test appender appearance for log4j.rootLogger
String appender = tmpEntry.replaceAll(" ", "");
if (key.equals("log4j.rootLogger") && !appender.contains(",CONSOLE")) {
((InlineLogging) logging).getLoggers().replace(key, tmpEntry + ", CONSOLE");
log.warn("Appender for {} was not set. Using \"{}: {}, CONSOLE\"", key, key, tmpEntry);
}
} else {
// incorrect logger
log.warn(key + " is not a valid logger");
return;
}
if (key.toString().contains("log4j.appender.CONSOLE")) {
log.warn("You cannot set appender");
return;
}
if ((asList(validLoggerValues).contains(tmpEntry.toString().replaceAll(",[ ]+CONSOLE", ""))) || (asList(validLoggerValues).contains(tmpEntry))) {
// correct value
} else {
Pattern p = Pattern.compile("\\$\\{(.*)\\}, ([A-Z]+)");
Matcher m = p.matcher(tmpEntry.toString());
String logger = "";
String value = "";
boolean regexMatch = false;
while (m.find()) {
logger = m.group(1);
value = m.group(2);
regexMatch = true;
}
if (regexMatch) {
if (!validLoggerFields.containsKey(logger)) {
log.warn(logger + " is not a valid logger");
return;
}
if (!value.equals("CONSOLE")) {
log.warn(value + " is not a valid value.");
return;
}
} else {
log.warn(tmpEntry + " is not a valid value. Use one of " + Arrays.toString(validLoggerValues));
return;
}
}
});
// update fields otherwise use default values
Properties newSettings = getDefaultLogConfig();
newSettings.putAll(((InlineLogging) logging).getLoggers());
return createPropertiesString(newSettings);
} else if (logging instanceof ExternalLogging) {
if (externalCm != null) {
return externalCm.getData().get(getAncillaryConfigMapKeyLogConfig());
} else {
log.warn("Configmap " + ((ExternalLogging) getLogging()).getName() + " does not exist. Default settings are used");
return createPropertiesString(getDefaultLogConfig());
}
} else {
// field is not in the cluster CM
return createPropertiesString(getDefaultLogConfig());
}
}
/**
* Generates a metrics and logging ConfigMap according to configured defaults
* @return The generated ConfigMap
*/
public ConfigMap generateMetricsAndLogConfigMap(ConfigMap cm) {
Map<String, String> data = new HashMap<>();
data.put(getAncillaryConfigMapKeyLogConfig(), parseLogging(getLogging(), cm));
if (isMetricsEnabled()) {
            HashMap<String, Object> m = new HashMap<>();
for (Map.Entry<String, Object> entry : getMetricsConfig()) {
m.put(entry.getKey(), entry.getValue());
}
data.put(ANCILLARY_CM_KEY_METRICS, new JsonObject(m).toString());
}
return createConfigMap(getAncillaryConfigName(), data);
}
public String getLogConfigName() {
return logConfigName;
}
/**
     * Sets the name of the field in the cluster config map where the logging configuration is stored
* @param logConfigName
*/
protected void setLogConfigName(String logConfigName) {
this.logConfigName = logConfigName;
}
protected Iterable<Map.Entry<String, Object>> getMetricsConfig() {
return metricsConfig;
}
protected void setMetricsConfig(Iterable<Map.Entry<String, Object>> metricsConfig) {
this.metricsConfig = metricsConfig;
}
/**
* Returns name of config map used for storing metrics and logging configuration
* @return
*/
public String getAncillaryConfigName() {
return ancillaryConfigName;
}
protected void setMetricsConfigName(String metricsAndLogsConfigName) {
this.ancillaryConfigName = metricsAndLogsConfigName;
}
protected List<EnvVar> getEnvVars() {
return null;
}
public Storage getStorage() {
return storage;
}
protected void setStorage(Storage storage) {
this.storage = storage;
}
/**
* Returns the Configuration object which is passed to the cluster as EnvVar
*
* @return Configuration object with cluster configuration
*/
public AbstractConfiguration getConfiguration() {
return configuration;
}
/**
* Set the configuration object which might be passed to the cluster as EnvVar
*
* @param configuration Configuration object with cluster configuration
*/
protected void setConfiguration(AbstractConfiguration configuration) {
this.configuration = configuration;
}
public String getVolumeName() {
        return VOLUME_NAME;
}
public String getImage() {
return this.image;
}
/**
* @return the service account used by the deployed cluster for Kubernetes/OpenShift API operations
*/
protected String getServiceAccountName() {
return null;
}
/**
* @return the cluster name
*/
public String getCluster() {
return cluster;
}
public String getPersistentVolumeClaimName(int podId) {
return getPersistentVolumeClaimName(name, podId);
}
public static String getPersistentVolumeClaimName(String name, int podId) {
return VOLUME_NAME + "-" + name + "-" + podId;
}
public String getPodName(int podId) {
return name + "-" + podId;
}
/**
* Sets the affinity as configured by the user in the cluster CR
* @param affinity
*/
protected void setUserAffinity(Affinity affinity) {
this.userAffinity = affinity;
}
/**
* Gets the affinity as configured by the user in the cluster CR
*/
protected Affinity getUserAffinity() {
return this.userAffinity;
}
/**
* Gets the tolerations as configured by the user in the cluster CR
*/
public List<Toleration> getTolerations() {
return tolerations;
}
/**
* Sets the tolerations as configured by the user in the cluster CR
*
* @param tolerations
*/
public void setTolerations(List<Toleration> tolerations) {
this.tolerations = tolerations;
}
/**
     * Gets the affinity to use in a template Pod (in a StatefulSet or Deployment).
     * In general this may include extra rules beyond just the {@link #userAffinity}.
* By default it is just the {@link #userAffinity}.
*/
protected Affinity getMergedAffinity() {
return getUserAffinity();
}
/**
* @return a list of init containers to add to the StatefulSet/Deployment
*/
protected List<Container> getInitContainers() {
return null;
}
/**
* @return a list of containers to add to the StatefulSet/Deployment
*/
protected abstract List<Container> getContainers();
protected VolumeMount createVolumeMount(String name, String path) {
VolumeMount volumeMount = new VolumeMountBuilder()
.withName(name)
.withMountPath(path)
.build();
log.trace("Created volume mount {}", volumeMount);
return volumeMount;
}
protected ContainerPort createContainerPort(String name, int port, String protocol) {
ContainerPort containerPort = new ContainerPortBuilder()
.withName(name)
.withProtocol(protocol)
.withContainerPort(port)
.build();
log.trace("Created container port {}", containerPort);
return containerPort;
}
protected ServicePort createServicePort(String name, int port, int targetPort, String protocol) {
ServicePort servicePort = new ServicePortBuilder()
.withName(name)
.withProtocol(protocol)
.withPort(port)
.withNewTargetPort(targetPort)
.build();
log.trace("Created service port {}", servicePort);
return servicePort;
}
protected PersistentVolumeClaim createPersistentVolumeClaim(String name) {
PersistentClaimStorage storage = (PersistentClaimStorage) this.storage;
Map<String, Quantity> requests = new HashMap<>();
requests.put("storage", new Quantity(storage.getSize(), null));
LabelSelector selector = null;
if (storage.getSelector() != null && !storage.getSelector().isEmpty()) {
selector = new LabelSelector(null, storage.getSelector());
}
PersistentVolumeClaimBuilder pvcb = new PersistentVolumeClaimBuilder()
.withNewMetadata()
.withName(name)
.endMetadata()
.withNewSpec()
.withAccessModes("ReadWriteOnce")
.withNewResources()
.withRequests(requests)
.endResources()
.withStorageClassName(storage.getStorageClass())
.withSelector(selector)
.endSpec();
return pvcb.build();
}
protected Volume createEmptyDirVolume(String name) {
Volume volume = new VolumeBuilder()
.withName(name)
.withNewEmptyDir()
.endEmptyDir()
.build();
log.trace("Created emptyDir Volume named '{}'", name);
return volume;
}
protected Volume createConfigMapVolume(String name, String configMapName) {
ConfigMapVolumeSource configMapVolumeSource = new ConfigMapVolumeSourceBuilder()
.withName(configMapName)
.build();
Volume volume = new VolumeBuilder()
.withName(name)
.withConfigMap(configMapVolumeSource)
.build();
log.trace("Created configMap Volume named '{}' with source configMap '{}'", name, configMapName);
return volume;
}
protected ConfigMap createConfigMap(String name, Map<String, String> data) {
return new ConfigMapBuilder()
.withNewMetadata()
.withName(name)
.withNamespace(namespace)
.withLabels(labels.toMap())
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withData(data)
.build();
}
protected Volume createSecretVolume(String name, String secretName, boolean isOpenshift) {
int mode = 0444;
if (isOpenshift) {
mode = 0440;
}
SecretVolumeSource secretVolumeSource = new SecretVolumeSourceBuilder()
.withDefaultMode(mode)
.withSecretName(secretName)
.build();
Volume volume = new VolumeBuilder()
.withName(name)
.withSecret(secretVolumeSource)
.build();
log.trace("Created secret Volume named '{}' with source secret '{}'", name, secretName);
return volume;
}
protected Secret createSecret(String name, Map<String, String> data) {
Secret s = new SecretBuilder()
.withNewMetadata()
.withName(name)
.withNamespace(namespace)
.withLabels(labels.toMap())
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withData(data)
.build();
return s;
}
protected Probe createTcpSocketProbe(int port, int initialDelay, int timeout) {
Probe probe = new ProbeBuilder()
.withNewTcpSocket()
.withNewPort()
.withIntVal(port)
.endPort()
.endTcpSocket()
.withInitialDelaySeconds(initialDelay)
.withTimeoutSeconds(timeout)
.build();
log.trace("Created TCP socket probe {}", probe);
return probe;
}
protected Probe createHttpProbe(String path, String port, int initialDelay, int timeout) {
Probe probe = new ProbeBuilder().withNewHttpGet()
.withPath(path)
.withNewPort(port)
.endHttpGet()
.withInitialDelaySeconds(initialDelay)
.withTimeoutSeconds(timeout)
.build();
log.trace("Created http probe {}", probe);
return probe;
}
protected Service createService(String type, List<ServicePort> ports, Map<String, String> annotations) {
return createService(serviceName, type, ports, getLabelsWithName(serviceName, templateServiceLabels), getSelectorLabels(), annotations);
}
protected Service createService(String name, String type, List<ServicePort> ports, Map<String, String> labels, Map<String, String> selector, Map<String, String> annotations) {
Service service = new ServiceBuilder()
.withNewMetadata()
.withName(name)
.withLabels(labels)
.withNamespace(namespace)
.withAnnotations(annotations)
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withNewSpec()
.withType(type)
.withSelector(selector)
.withPorts(ports)
.endSpec()
.build();
log.trace("Created service {}", service);
return service;
}
protected Service createHeadlessService(List<ServicePort> ports, Map<String, String> annotations) {
Service service = new ServiceBuilder()
.withNewMetadata()
.withName(headlessServiceName)
.withLabels(getLabelsWithName(headlessServiceName, templateHeadlessServiceLabels))
.withNamespace(namespace)
.withAnnotations(mergeAnnotations(annotations, templateHeadlessServiceAnnotations))
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withNewSpec()
.withType("ClusterIP")
.withClusterIP("None")
.withSelector(getSelectorLabels())
.withPorts(ports)
.endSpec()
.build();
log.trace("Created headless service {}", service);
return service;
}
protected StatefulSet createStatefulSet(
Map<String, String> annotations,
List<Volume> volumes,
List<PersistentVolumeClaim> volumeClaims,
List<VolumeMount> volumeMounts,
Affinity affinity,
List<Container> initContainers,
List<Container> containers,
boolean isOpenShift) {
annotations = new HashMap<>(annotations);
annotations.put(ANNO_STRIMZI_IO_DELETE_CLAIM,
String.valueOf(storage instanceof PersistentClaimStorage
&& ((PersistentClaimStorage) storage).isDeleteClaim()));
List<Container> initContainersInternal = new ArrayList<>();
PodSecurityContext securityContext = null;
        // if a persistent volume claim is requested and the cluster is running on plain Kubernetes (not OpenShift),
        // a hack is needed to fix the volume mount ownership, which is done by an "init-container"
if (this.storage instanceof PersistentClaimStorage && !isOpenShift) {
String chown = String.format("chown -R %d:%d %s",
AbstractModel.VOLUME_MOUNT_HACK_USERID,
AbstractModel.VOLUME_MOUNT_HACK_GROUPID,
volumeMounts.get(0).getMountPath());
Container initContainer = new ContainerBuilder()
.withName(AbstractModel.VOLUME_MOUNT_HACK_NAME)
.withImage(AbstractModel.VOLUME_MOUNT_HACK_IMAGE)
.withVolumeMounts(volumeMounts.get(0))
.withCommand("sh", "-c", chown)
.build();
initContainersInternal.add(initContainer);
securityContext = new PodSecurityContextBuilder()
.withFsGroup(AbstractModel.VOLUME_MOUNT_HACK_GROUPID)
.build();
}
// add all the other init containers provided by the specific model implementation
if (initContainers != null) {
initContainersInternal.addAll(initContainers);
}
StatefulSet statefulSet = new StatefulSetBuilder()
.withNewMetadata()
.withName(name)
.withLabels(getLabelsWithName(templateStatefulSetLabels))
.withNamespace(namespace)
.withAnnotations(mergeAnnotations(annotations, templateStatefulSetAnnotations))
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withNewSpec()
.withPodManagementPolicy("Parallel")
.withUpdateStrategy(new StatefulSetUpdateStrategyBuilder().withType("OnDelete").build())
.withSelector(new LabelSelectorBuilder().withMatchLabels(getSelectorLabels()).build())
.withServiceName(headlessServiceName)
.withReplicas(replicas)
.withNewTemplate()
.withNewMetadata()
.withName(name)
.withLabels(getLabelsWithName(templatePodLabels))
.withAnnotations(mergeAnnotations(null, templatePodAnnotations))
.endMetadata()
.withNewSpec()
.withServiceAccountName(getServiceAccountName())
.withAffinity(affinity)
.withSecurityContext(securityContext)
.withInitContainers(initContainersInternal)
.withContainers(containers)
.withVolumes(volumes)
.withTolerations(getTolerations())
.endSpec()
.endTemplate()
.withVolumeClaimTemplates(volumeClaims)
.endSpec()
.build();
return statefulSet;
}
protected Deployment createDeployment(
DeploymentStrategy updateStrategy,
Map<String, String> deploymentAnnotations,
Map<String, String> podAnnotations,
Affinity affinity,
List<Container> initContainers,
List<Container> containers,
List<Volume> volumes) {
Deployment dep = new DeploymentBuilder()
.withNewMetadata()
.withName(name)
.withLabels(getLabelsWithName(templateDeploymentLabels))
.withNamespace(namespace)
.withAnnotations(mergeAnnotations(deploymentAnnotations, templateDeploymentAnnotations))
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withNewSpec()
.withStrategy(updateStrategy)
.withReplicas(replicas)
.withSelector(new LabelSelectorBuilder().withMatchLabels(getSelectorLabels()).build())
.withNewTemplate()
.withNewMetadata()
.withLabels(getLabelsWithName(templatePodLabels))
.withAnnotations(mergeAnnotations(podAnnotations, templatePodAnnotations))
.endMetadata()
.withNewSpec()
.withAffinity(affinity)
.withServiceAccountName(getServiceAccountName())
.withInitContainers(initContainers)
.withContainers(containers)
.withVolumes(volumes)
.withTolerations(getTolerations())
.endSpec()
.endTemplate()
.endSpec()
.build();
return dep;
}
/**
* Build an environment variable instance with the provided name and value
*
* @param name The name of the environment variable
* @param value The value of the environment variable
* @return The environment variable instance
*/
protected static EnvVar buildEnvVar(String name, String value) {
return new EnvVarBuilder().withName(name).withValue(value).build();
}
/**
* Build an environment variable instance with the provided name from a field reference
* using Downward API
*
* @param name The name of the environment variable
* @param field The field path from which getting the value
* @return The environment variable instance
*/
protected static EnvVar buildEnvVarFromFieldRef(String name, String field) {
EnvVarSource envVarSource = new EnvVarSourceBuilder()
.withNewFieldRef()
.withFieldPath(field)
.endFieldRef()
.build();
return new EnvVarBuilder().withName(name).withValueFrom(envVarSource).build();
}
/**
* Gets the given container's environment.
*/
public static Map<String, String> containerEnvVars(Container container) {
return container.getEnv().stream().collect(
Collectors.toMap(EnvVar::getName, EnvVar::getValue,
// On duplicates, last in wins
(u, v) -> v));
}
public void setResources(Resources resources) {
this.resources = resources;
}
public Resources getResources() {
return resources;
}
public void setJvmOptions(JvmOptions jvmOptions) {
this.jvmOptions = jvmOptions;
}
/**
* Adds KAFKA_HEAP_OPTS variable to the EnvVar list if any heap related options were specified.
*
* @param envVars List of Environment Variables
*/
protected void heapOptions(List<EnvVar> envVars, double dynamicHeapFraction, long dynamicHeapMaxBytes) {
StringBuilder kafkaHeapOpts = new StringBuilder();
String xms = jvmOptions != null ? jvmOptions.getXms() : null;
if (xms != null) {
kafkaHeapOpts.append("-Xms").append(xms);
}
String xmx = jvmOptions != null ? jvmOptions.getXmx() : null;
if (xmx != null) {
// Honour explicit max heap
kafkaHeapOpts.append(' ').append("-Xmx").append(xmx);
} else {
Resources resources = getResources();
CpuMemory cpuMemory = resources == null ? null : resources.getLimits();
            // Only delegate heap sizing to the container when CGroup memory limits are defined,
            // to prevent allocating too much memory on the kubelet.
if (cpuMemory != null && cpuMemory.getMemory() != null) {
envVars.add(buildEnvVar(ENV_VAR_DYNAMIC_HEAP_FRACTION, Double.toString(dynamicHeapFraction)));
if (dynamicHeapMaxBytes > 0) {
envVars.add(buildEnvVar(ENV_VAR_DYNAMIC_HEAP_MAX, Long.toString(dynamicHeapMaxBytes)));
}
// When no memory limit, `Xms`, and `Xmx` are defined then set a default `Xms` and
// leave `Xmx` undefined.
} else if (xms == null) {
kafkaHeapOpts.append("-Xms").append(DEFAULT_JVM_XMS);
}
}
String trim = kafkaHeapOpts.toString().trim();
if (!trim.isEmpty()) {
envVars.add(buildEnvVar(ENV_VAR_KAFKA_HEAP_OPTS, trim));
}
}
/**
* Adds KAFKA_JVM_PERFORMANCE_OPTS variable to the EnvVar list if any performance related options were specified.
*
* @param envVars List of Environment Variables
*/
protected void jvmPerformanceOptions(List<EnvVar> envVars) {
StringBuilder jvmPerformanceOpts = new StringBuilder();
Boolean server = jvmOptions != null ? jvmOptions.isServer() : null;
if (server != null && server) {
jvmPerformanceOpts.append("-server");
}
Map<String, String> xx = jvmOptions != null ? jvmOptions.getXx() : null;
if (xx != null) {
xx.forEach((k, v) -> {
jvmPerformanceOpts.append(' ').append("-XX:");
if ("true".equalsIgnoreCase(v)) {
jvmPerformanceOpts.append("+").append(k);
} else if ("false".equalsIgnoreCase(v)) {
jvmPerformanceOpts.append("-").append(k);
} else {
jvmPerformanceOpts.append(k).append("=").append(v);
}
});
}
String trim = jvmPerformanceOpts.toString().trim();
if (!trim.isEmpty()) {
envVars.add(buildEnvVar(ENV_VAR_KAFKA_JVM_PERFORMANCE_OPTS, trim));
}
}
/**
* Generate the OwnerReference object to link newly created objects to their parent (the custom resource)
*
* @return
*/
protected OwnerReference createOwnerReference() {
return new OwnerReferenceBuilder()
.withApiVersion(ownerApiVersion)
.withKind(ownerKind)
.withName(cluster)
.withUid(ownerUid)
.withBlockOwnerDeletion(false)
.withController(false)
.build();
}
/**
* Set fields needed to generate the OwnerReference object
*
     * @param parent The resource which should be used as parent. It will be used to gather the data needed for generating OwnerReferences.
*/
protected void setOwnerReference(HasMetadata parent) {
this.ownerApiVersion = parent.getApiVersion();
this.ownerKind = parent.getKind();
this.ownerUid = parent.getMetadata().getUid();
}
public static boolean deleteClaim(StatefulSet ss) {
if (!ss.getSpec().getVolumeClaimTemplates().isEmpty()) {
return Annotations.booleanAnnotation(ss, ANNO_STRIMZI_IO_DELETE_CLAIM,
false, ANNO_CO_STRIMZI_IO_DELETE_CLAIM);
} else {
return false;
}
}
/**
     * Generates a Map with Prometheus annotations
*
* @return Map with Prometheus annotations using the default port (9404) and path (/metrics)
*/
protected Map<String, String> getPrometheusAnnotations() {
Map<String, String> annotations = new HashMap<String, String>(3);
annotations.put("prometheus.io/port", String.valueOf(METRICS_PORT));
annotations.put("prometheus.io/scrape", "true");
annotations.put("prometheus.io/path", "/metrics");
return annotations;
}
String getAncillaryConfigMapKeyLogConfig() {
return ANCILLARY_CM_KEY_LOG_CONFIG;
}
public static String clusterCaCertSecretName(String cluster) {
return KafkaResources.clusterCaCertificateSecretName(cluster);
}
public static String clusterCaKeySecretName(String cluster) {
return KafkaResources.clusterCaKeySecretName(cluster);
}
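    /**
     * Merges the operator-generated annotations with the user-supplied template annotations.
     * Template annotations must not use the strimzi.io domain; such keys are rejected.
     */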
protected static Map<String, String> mergeAnnotations(Map<String, String> internal, Map<String, String> template) {
Map<String, String> merged = new HashMap<>();
if (internal != null) {
merged.putAll(internal);
}
if (template != null) {
for (String key : template.keySet()) {
if (key.contains("strimzi.io")) {
throw new IllegalArgumentException("User annotations includes a Strimzi annotation: " + key);
}
}
merged.putAll(template);
}
return merged;
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
ytvi_test.go | package ytvi
import (
"os"
"testing"
)
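// TestGetVideoInfo expects the VIDEO_ID environment variable to contain a video ID
// and only checks that the lookup completes without error.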
func TestGetVideoInfo(t *testing.T) {
_, err := GetVideoInfo(os.Getenv("VIDEO_ID"))
if err != nil {
t.Fatal(err)
}
}
| [
"\"VIDEO_ID\""
]
| []
| [
"VIDEO_ID"
]
| [] | ["VIDEO_ID"] | go | 1 | 0 | |
tools/test.py | import argparse
import os
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
from thirdparty.mtransformer import build_mtransformer
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase'
'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
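    # Distributed launchers typically pass the rank via --local_rank; mirror it into the
    # LOCAL_RANK environment variable for code paths that read it from the environment.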
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
# in case the test dataset is concatenated
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
        # Replace 'ImageToTensor' with 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
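    # Optionally wrap the model with the quantization transformer when the config defines one.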
if hasattr(cfg, "quant_transformer"):
model_transformer = build_mtransformer(cfg.quant_transformer)
model = model_transformer(model, logger= None)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
args.show_score_thr)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
| []
| []
| [
"LOCAL_RANK"
]
| [] | ["LOCAL_RANK"] | python | 1 | 0 | |
test/kind/image.go | // Copyright Jetstack Ltd. See LICENSE for details.
package kind
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
log "github.com/sirupsen/logrus"
"sigs.k8s.io/kind/pkg/cluster/nodeutils"
)
const (
ProxyImageName = "kube-oidc-proxy-e2e"
IssuerImageName = "oidc-issuer-e2e"
FakeAPIServerImageName = "fake-apiserver-e2e"
AuditWebhookImageName = "audit-webhook-e2e"
)
func (k *Kind) LoadAllImages() error {
if err := k.LoadKubeOIDCProxy(); err != nil {
return err
}
if err := k.LoadIssuer(); err != nil {
return err
}
if err := k.LoadFakeAPIServer(); err != nil {
return err
}
if err := k.LoadAuditWebhook(); err != nil {
return err
}
return nil
}
func (k *Kind) LoadKubeOIDCProxy() error {
binPath := filepath.Join(k.rootPath, "./bin/kube-oidc-proxy")
mainPath := filepath.Join(k.rootPath, "./cmd/.")
return k.loadImage(binPath, mainPath, ProxyImageName, k.rootPath)
}
func (k *Kind) LoadIssuer() error {
binPath := filepath.Join(k.rootPath, "./test/tools/issuer/bin/oidc-issuer-linux")
dockerfilePath := filepath.Join(k.rootPath, "./test/tools/issuer")
mainPath := filepath.Join(dockerfilePath, "cmd")
return k.loadImage(binPath, mainPath, IssuerImageName, dockerfilePath)
}
func (k *Kind) LoadFakeAPIServer() error {
binPath := filepath.Join(k.rootPath, "./test/tools/fake-apiserver/bin/fake-apiserver-linux")
dockerfilePath := filepath.Join(k.rootPath, "./test/tools/fake-apiserver")
mainPath := filepath.Join(dockerfilePath, "cmd")
return k.loadImage(binPath, mainPath, FakeAPIServerImageName, dockerfilePath)
}
func (k *Kind) LoadAuditWebhook() error {
binPath := filepath.Join(k.rootPath, "./test/tools/audit-webhook/bin/audit-webhook")
dockerfilePath := filepath.Join(k.rootPath, "./test/tools/audit-webhook")
mainPath := filepath.Join(dockerfilePath, "cmd")
return k.loadImage(binPath, mainPath, AuditWebhookImageName, dockerfilePath)
}
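// loadImage builds the Go binary, builds a Docker image from the given Dockerfile
// directory, saves the image to a tar archive and loads that archive into every
// node of the kind cluster.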
func (k *Kind) loadImage(binPath, mainPath, image, dockerfilePath string) error {
log.Infof("kind: building %q", mainPath)
if err := os.MkdirAll(filepath.Dir(binPath), 0755); err != nil {
return err
}
err := k.runCmd("go", "build", "-v", "-o", binPath, mainPath)
if err != nil {
return err
}
err = k.runCmd("docker", "build", "-t", image, dockerfilePath)
if err != nil {
return err
}
tmpDir, err := ioutil.TempDir(os.TempDir(), "kube-oidc-proxy-e2e")
if err != nil {
return err
}
defer os.RemoveAll(tmpDir)
imageArchive := filepath.Join(tmpDir, fmt.Sprintf("%s-e2e.tar", image))
log.Infof("kind: saving image to archive %q", imageArchive)
err = k.runCmd("docker", "save", "--output="+imageArchive, image)
if err != nil {
return err
}
nodes, err := k.Nodes()
if err != nil {
return err
}
b, err := ioutil.ReadFile(imageArchive)
if err != nil {
return err
}
for _, node := range nodes {
log.Infof("kind: loading image %q to node %q", image, node.String())
r := bytes.NewBuffer(b)
if err := nodeutils.LoadImageArchive(node, r); err != nil {
return err
}
err := node.Command("mkdir", "-p", "/tmp/kube-oidc-proxy").Run()
if err != nil {
return fmt.Errorf("failed to create directory %q: %s",
"/tmp/kube-oidc-proxy", err)
}
}
return nil
}
func (k *Kind) runCmd(command string, args ...string) error {
return k.runCmdWithOut(os.Stdout, command, args...)
}
func (k *Kind) runCmdWithOut(w io.Writer, command string, args ...string) error {
log.Infof("kind: running command '%s %s'", command, strings.Join(args, " "))
cmd := exec.Command(command, args...)
cmd.Stderr = os.Stderr
cmd.Stdout = w
cmd.Env = append(cmd.Env,
"GO111MODULE=on", "CGO_ENABLED=0", "HOME="+os.Getenv("HOME"),
"PATH="+os.Getenv("PATH"),
"GOARCH=amd64", "GOOS=linux")
if err := cmd.Start(); err != nil {
return err
}
if err := cmd.Wait(); err != nil {
return err
}
return nil
}
| [
"\"HOME\"",
"\"PATH\""
]
| []
| [
"HOME",
"PATH"
]
| [] | ["HOME", "PATH"] | go | 2 | 0 | |
engine/etcdv2ng/etcd_test.go | package etcdv2ng
import (
"os"
"strings"
"testing"
etcd "github.com/coreos/etcd/client"
"github.com/vulcand/vulcand/engine/test"
"github.com/vulcand/vulcand/plugin/registry"
"github.com/vulcand/vulcand/secret"
"golang.org/x/net/context"
. "gopkg.in/check.v1"
)
func TestEtcd(t *testing.T) { TestingT(t) }
type EtcdSuite struct {
ng *ng
suite test.EngineSuite
nodes []string
etcdPrefix string
consistency string
client etcd.Client
kapi etcd.KeysAPI
context context.Context
changesC chan interface{}
key string
stopC chan bool
}
var _ = Suite(&EtcdSuite{
etcdPrefix: "/vulcandtest",
consistency: "STRONG",
})
func (s *EtcdSuite) SetUpSuite(c *C) {
key, err := secret.NewKeyString()
if err != nil {
panic(err)
}
s.key = key
nodes_string := os.Getenv("VULCAND_TEST_ETCD_NODES")
if nodes_string == "" {
// Skips the entire suite
c.Skip("This test requires etcd, provide comma separated nodes in VULCAND_TEST_ETCD_NODES environment variable")
return
}
s.nodes = strings.Split(nodes_string, ",")
}
func (s *EtcdSuite) SetUpTest(c *C) {
// Initiate a backend with a registry
key, err := secret.KeyFromString(s.key)
c.Assert(err, IsNil)
box, err := secret.NewBox(key)
c.Assert(err, IsNil)
engine, err := New(
s.nodes,
s.etcdPrefix,
registry.GetRegistry(),
Options{
EtcdConsistency: s.consistency,
Box: box,
})
c.Assert(err, IsNil)
s.ng = engine.(*ng)
s.client = s.ng.client
s.kapi = s.ng.kapi
// Delete all values under the given prefix
_, err = s.kapi.Get(s.context, s.etcdPrefix, &etcd.GetOptions{Recursive: false, Sort: false})
if err != nil {
// There's no key like this
if !notFound(err) {
// We haven't expected this error, oops
c.Assert(err, IsNil)
}
} else {
_, err = s.ng.kapi.Delete(s.context, s.etcdPrefix, &etcd.DeleteOptions{Recursive: true})
c.Assert(err, IsNil)
}
s.changesC = make(chan interface{})
s.stopC = make(chan bool)
go s.ng.Subscribe(s.changesC, s.stopC)
s.suite.ChangesC = s.changesC
s.suite.Engine = engine
}
func (s *EtcdSuite) TearDownTest(c *C) {
close(s.stopC)
s.ng.Close()
}
func (s *EtcdSuite) TestEmptyParams(c *C) {
s.suite.EmptyParams(c)
}
func (s *EtcdSuite) TestHostCRUD(c *C) {
s.suite.HostCRUD(c)
}
func (s *EtcdSuite) TestHostWithKeyPair(c *C) {
s.suite.HostWithKeyPair(c)
}
func (s *EtcdSuite) TestHostUpsertKeyPair(c *C) {
s.suite.HostUpsertKeyPair(c)
}
func (s *EtcdSuite) TestHostWithOCSP(c *C) {
s.suite.HostWithOCSP(c)
}
func (s *EtcdSuite) TestListenerCRUD(c *C) {
s.suite.ListenerCRUD(c)
}
func (s *EtcdSuite) TestListenerSettingsCRUD(c *C) {
s.suite.ListenerSettingsCRUD(c)
}
func (s *EtcdSuite) TestBackendCRUD(c *C) {
s.suite.BackendCRUD(c)
}
func (s *EtcdSuite) TestBackendDeleteUsed(c *C) {
s.suite.BackendDeleteUsed(c)
}
func (s *EtcdSuite) TestBackendDeleteUnused(c *C) {
s.suite.BackendDeleteUnused(c)
}
func (s *EtcdSuite) TestServerCRUD(c *C) {
s.suite.ServerCRUD(c)
}
func (s *EtcdSuite) TestServerExpire(c *C) {
s.suite.ServerExpire(c)
}
func (s *EtcdSuite) TestFrontendCRUD(c *C) {
s.suite.FrontendCRUD(c)
}
func (s *EtcdSuite) TestFrontendExpire(c *C) {
s.suite.FrontendExpire(c)
}
func (s *EtcdSuite) TestFrontendBadBackend(c *C) {
s.suite.FrontendBadBackend(c)
}
func (s *EtcdSuite) TestMiddlewareCRUD(c *C) {
s.suite.MiddlewareCRUD(c)
}
func (s *EtcdSuite) TestMiddlewareExpire(c *C) {
s.suite.MiddlewareExpire(c)
}
func (s *EtcdSuite) TestMiddlewareBadFrontend(c *C) {
s.suite.MiddlewareBadFrontend(c)
}
func (s *EtcdSuite) TestMiddlewareBadType(c *C) {
s.suite.MiddlewareBadType(c)
}
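// Illustrative note, not in the original file: SetUpSuite skips the whole
// suite unless the etcd node list is provided, so a typical run (with a
// placeholder local endpoint) might look like:
//
//	VULCAND_TEST_ETCD_NODES=http://127.0.0.1:2379 go test ./engine/etcdv2ng/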
| [
"\"VULCAND_TEST_ETCD_NODES\""
]
| []
| [
"VULCAND_TEST_ETCD_NODES"
]
| [] | ["VULCAND_TEST_ETCD_NODES"] | go | 1 | 0 | |
vendor/github.com/lib/pq/ssl_test.go | package pq
// This file contains SSL tests
import (
_ "crypto/sha256"
"crypto/x509"
"database/sql"
"fmt"
"os"
"path/filepath"
"testing"
)
func maybeSkipSSLTests(t *testing.T) {
// Require some special variables for testing certificates
if os.Getenv("PQSSLCERTTEST_PATH") == "" {
t.Skip("PQSSLCERTTEST_PATH not set, skipping SSL tests")
}
value := os.Getenv("PQGOSSLTESTS")
if value == "" || value == "0" {
t.Skip("PQGOSSLTESTS not enabled, skipping SSL tests")
} else if value != "1" {
t.Fatalf("unexpected value %q for PQGOSSLTESTS", value)
}
}
func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) {
db, err := openTestConnConninfo(conninfo)
if err != nil {
// should never fail
t.Fatal(err)
}
// Do something with the connection to see whether it's working or not.
tx, err := db.Begin()
if err == nil {
return db, tx.Rollback()
}
_ = db.Close()
return nil, err
}
func checkSSLSetup(t *testing.T, conninfo string) {
db, err := openSSLConn(t, conninfo)
if err == nil {
db.Close()
t.Fatalf("expected error with conninfo=%q", conninfo)
}
}
// Connect over SSL and run a simple query to test the basics
func TestSSLConnection(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
db, err := openSSLConn(t, "sslmode=require user=pqgossltest")
if err != nil {
t.Fatal(err)
}
rows, err := db.Query("SELECT 1")
if err != nil {
t.Fatal(err)
}
rows.Close()
}
// Test sslmode=verify-full
func TestSSLVerifyFull(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
// Not OK according to the system CA
_, err := openSSLConn(t, "host=postgres sslmode=verify-full user=pqgossltest")
if err == nil {
t.Fatal("expected error")
}
_, ok := err.(x509.UnknownAuthorityError)
if !ok {
t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err)
}
rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
rootCert := "sslrootcert=" + rootCertPath + " "
// No match on Common Name
_, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-full user=pqgossltest")
if err == nil {
t.Fatal("expected error")
}
_, ok = err.(x509.HostnameError)
if !ok {
t.Fatalf("expected x509.HostnameError, got %#+v", err)
}
// OK
_, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-full user=pqgossltest")
if err != nil {
t.Fatal(err)
}
}
// Test sslmode=require sslrootcert=rootCertPath
func TestSSLRequireWithRootCert(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
bogusRootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "bogus_root.crt")
bogusRootCert := "sslrootcert=" + bogusRootCertPath + " "
// Not OK according to the bogus CA
_, err := openSSLConn(t, bogusRootCert+"host=postgres sslmode=require user=pqgossltest")
if err == nil {
t.Fatal("expected error")
}
_, ok := err.(x509.UnknownAuthorityError)
if !ok {
t.Fatalf("expected x509.UnknownAuthorityError, got %s, %#+v", err, err)
}
nonExistentCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "non_existent.crt")
nonExistentCert := "sslrootcert=" + nonExistentCertPath + " "
// No match on Common Name, but that's OK because we're not validating anything.
_, err = openSSLConn(t, nonExistentCert+"host=127.0.0.1 sslmode=require user=pqgossltest")
if err != nil {
t.Fatal(err)
}
rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
rootCert := "sslrootcert=" + rootCertPath + " "
// No match on Common Name, but that's OK because we're not validating the CN.
_, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=require user=pqgossltest")
if err != nil {
t.Fatal(err)
}
// Everything OK
_, err = openSSLConn(t, rootCert+"host=postgres sslmode=require user=pqgossltest")
if err != nil {
t.Fatal(err)
}
}
// Test sslmode=verify-ca
func TestSSLVerifyCA(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
// Not OK according to the system CA
_, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest")
if err == nil {
t.Fatal("expected error")
}
_, ok := err.(x509.UnknownAuthorityError)
if !ok {
t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err)
}
rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
rootCert := "sslrootcert=" + rootCertPath + " "
// No match on Common Name, but that's OK
_, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest")
if err != nil {
t.Fatal(err)
}
// Everything OK
_, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest")
if err != nil {
t.Fatal(err)
}
}
func getCertConninfo(t *testing.T, source string) string {
var sslkey string
var sslcert string
certpath := os.Getenv("PQSSLCERTTEST_PATH")
switch source {
case "missingkey":
sslkey = "/tmp/filedoesnotexist"
sslcert = filepath.Join(certpath, "postgresql.crt")
case "missingcert":
sslkey = filepath.Join(certpath, "postgresql.key")
sslcert = "/tmp/filedoesnotexist"
case "certtwice":
sslkey = filepath.Join(certpath, "postgresql.crt")
sslcert = filepath.Join(certpath, "postgresql.crt")
case "valid":
sslkey = filepath.Join(certpath, "postgresql.key")
sslcert = filepath.Join(certpath, "postgresql.crt")
default:
t.Fatalf("invalid source %q", source)
}
return fmt.Sprintf("sslmode=require user=pqgosslcert sslkey=%s sslcert=%s", sslkey, sslcert)
}
// Authenticate over SSL using client certificates
func TestSSLClientCertificates(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
// Should also fail without a valid certificate
db, err := openSSLConn(t, "sslmode=require user=pqgosslcert")
if err == nil {
db.Close()
t.Fatal("expected error")
}
pge, ok := err.(*Error)
if !ok {
t.Fatal("expected pq.Error")
}
if pge.Code.Name() != "invalid_authorization_specification" {
t.Fatalf("unexpected error code %q", pge.Code.Name())
}
// Should work
db, err = openSSLConn(t, getCertConninfo(t, "valid"))
if err != nil {
t.Fatal(err)
}
rows, err := db.Query("SELECT 1")
if err != nil {
t.Fatal(err)
}
rows.Close()
}
// Test errors with ssl certificates
func TestSSLClientCertificatesMissingFiles(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
// Key missing, should fail
_, err := openSSLConn(t, getCertConninfo(t, "missingkey"))
if err == nil {
t.Fatal("expected error")
}
// should be a PathError
_, ok := err.(*os.PathError)
if !ok {
t.Fatalf("expected PathError, got %#+v", err)
}
// Cert missing, should fail
_, err = openSSLConn(t, getCertConninfo(t, "missingcert"))
if err == nil {
t.Fatal("expected error")
}
// should be a PathError
_, ok = err.(*os.PathError)
if !ok {
t.Fatalf("expected PathError, got %#+v", err)
}
	// Cert file passed as the key is world-readable, should fail with a permissions error
_, err = openSSLConn(t, getCertConninfo(t, "certtwice"))
if err == nil {
t.Fatal("expected error")
}
if err != ErrSSLKeyHasWorldPermissions {
t.Fatalf("expected ErrSSLKeyHasWorldPermissions, got %#+v", err)
}
}
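// Illustrative note, not part of the upstream file: the SSL tests are gated
// by the two environment variables checked in maybeSkipSSLTests, so a typical
// invocation (with a placeholder certificate path) might look like:
//
//	PQSSLCERTTEST_PATH=/path/to/certs PQGOSSLTESTS=1 go test github.com/lib/pq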
| [
"\"PQSSLCERTTEST_PATH\"",
"\"PQGOSSLTESTS\"",
"\"PQSSLCERTTEST_PATH\"",
"\"PQSSLCERTTEST_PATH\"",
"\"PQSSLCERTTEST_PATH\"",
"\"PQSSLCERTTEST_PATH\"",
"\"PQSSLCERTTEST_PATH\"",
"\"PQSSLCERTTEST_PATH\""
]
| []
| [
"PQSSLCERTTEST_PATH",
"PQGOSSLTESTS"
]
| [] | ["PQSSLCERTTEST_PATH", "PQGOSSLTESTS"] | go | 2 | 0 | |
command/message.py | import os
import logging
import aprslib
from . import MessageTypes
from .command import Command
logger = logging.getLogger(__name__)
class CommandMessage(Command):
"""
AA5RObot command to send an APRS message.
"""
def __init__(self):
self.command = "message"
self.syntax = "message <callsign> <message>"
self.help = "Send an APRS message to the callsign."
# check if APRS is configured
APRS_CALLSIGN = os.environ.get('APRS_CALLSIGN')
APRS_PASSWORD = os.environ.get('APRS_PASSWORD')
if not APRS_CALLSIGN:
logger.warning('APRS message sending not enabled. APRS_CALLSIGN must be set in environment.')
raise RuntimeError('APRS_CALLSIGN must be set in environment.')
if not APRS_PASSWORD:
logger.warning('APRS message sending not enabled. APRS_PASSWORD must be set in environment.')
raise RuntimeError('APRS_PASSWORD must be set in environment.')
self.APRS_CALLSIGN = APRS_CALLSIGN
self.APRS_PASSWORD = APRS_PASSWORD
# configure aprslib
self.ais = aprslib.IS(self.APRS_CALLSIGN, passwd=self.APRS_PASSWORD, port=14580)
self.ais.connect()
# instance variable to track message IDs
self.message_id = 1
def shutdown(self):
logger.info('Shutting down APRS-IS connection.')
self.ais.close()
def do_command(self, data):
"""
Sends an APRS message to the given callsign.
"""
        split_data = data.split()
        try:
            ssid = split_data[1].upper()
        except IndexError:
            return (MessageTypes.RTM_MESSAGE, "Sorry, I need an SSID to send a message.\nType this command as '{}'.".format(self.syntax))
        # Slicing never raises IndexError; an empty slice simply joins to ''.
        message = ' '.join(split_data[2:])
        if message == '':
            return (MessageTypes.RTM_MESSAGE, "Sorry, I need a message to send to {}.\nType this command as '{}'.".format(ssid, self.syntax))
# check that message length is less than 67 characters (APRS max message length)
if len(message) > 67:
return (MessageTypes.RTM_MESSAGE, "Sorry that message is too long to send via APRS.")
# create APRS-IS packet
aprs_packet = "{}>{},TCPIP::{:<9}:{}{{{}".format(self.APRS_CALLSIGN, self.APRS_CALLSIGN, ssid, message, self.message_id)
# send packet to APRS-IS
logger.info("Sending APRS packet: {}".format(aprs_packet))
self.ais.sendall(aprs_packet)
self.message_id += 1
return (MessageTypes.RTM_MESSAGE, "Sent!")
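# Hypothetical usage sketch (not part of the original module): construct the
# command once with APRS credentials in the environment, pass the raw chat
# text to do_command, and close the APRS-IS connection on shutdown. The
# callsign, passcode, and message below are placeholders.
#
#   os.environ['APRS_CALLSIGN'] = 'N0CALL'      # placeholder callsign
#   os.environ['APRS_PASSWORD'] = '12345'       # placeholder passcode
#   cmd = CommandMessage()                      # connects to APRS-IS
#   kind, reply = cmd.do_command('message N0CALL-7 hello from the bot')
#   cmd.shutdown()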
| []
| []
| [
"APRS_CALLSIGN",
"APRS_PASSWORD"
]
| [] | ["APRS_CALLSIGN", "APRS_PASSWORD"] | python | 2 | 0 |