filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
vendor/github.com/tokuhirom/go-hsperfdata/hsperfdata/repository.go
|
package hsperfdata
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
)
type Repository struct {
dir string
}
func New() (*Repository, error) {
var user string
if runtime.GOOS == "windows" {
user = os.Getenv("USERNAME")
} else {
user = os.Getenv("USER")
}
if user == "" {
return nil, fmt.Errorf("error: environment variable USER (or USERNAME on Windows) not set")
}
return NewUser(user)
}
func NewUser(userName string) (*Repository, error) {
dir := filepath.Join(os.TempDir(), "hsperfdata_"+userName)
return &Repository{dir}, nil
}
func NewDir(dirName string) (*Repository, error) {
dir := dirName
return &Repository{dir}, nil
}
func (repository *Repository) GetFile(pid string) File {
return File{filepath.Join(repository.dir, pid)}
}
func (repository *Repository) GetFiles() ([]File, error) {
files, err := ioutil.ReadDir(repository.dir)
if err != nil {
return nil, err
}
retval := make([]File, len(files))
for i, f := range files {
retval[i] = File{filepath.Join(repository.dir, f.Name())}
}
return retval, nil
}
|
[
"\"USERNAME\"",
"\"USER\""
] |
[] |
[
"USER",
"USERNAME"
] |
[]
|
["USER", "USERNAME"]
|
go
| 2 | 0 | |
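The row above pairs repository.go with the environment-variable names it references ("USER", "USERNAME"). As a minimal, hypothetical sketch of how such constant-argument names could be extracted from a file's content (an illustration of the schema only, not the pipeline that actually produced these columns):

```python
import re

# Hypothetical extractor: collect environment-variable names passed as
# string literals to common accessors in Python and Go source. Literal
# names correspond to the constant-argument style columns; non-literal
# arguments would fall under the variable-argument columns instead.
ENV_PATTERNS = [
    r'os\.environ\.get\(\s*[\'"]([^\'"]+)[\'"]',   # Python: os.environ.get("NAME")
    r'os\.environ\[\s*[\'"]([^\'"]+)[\'"]\s*\]',   # Python: os.environ["NAME"]
    r'os\.Getenv\(\s*"([^"]+)"\s*\)',              # Go: os.Getenv("NAME")
]

def extract_env_names(source: str) -> list:
    """Return the sorted, de-duplicated environment-variable names found."""
    names = set()
    for pattern in ENV_PATTERNS:
        names.update(re.findall(pattern, source))
    return sorted(names)

# On the Go file above this yields ["USER", "USERNAME"], matching the row.
```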
test/test_fasta_stats.py
|
#!/usr/bin/env python3
"""Test the fasta_stats script is behaving"""
# Note this will get discovered and run as a test. This is fine.
import sys, os, re
import unittest
import logging
DATA_DIR = os.path.abspath(os.path.dirname(__file__) + '/sample_fasta')
VERBOSE = os.environ.get('VERBOSE', '0') != '0'
from fasta_stats import read_fasta, fasta_to_histo, histo_to_result, fastaline
class T(unittest.TestCase):
@classmethod
def setUpClass(cls):
#Prevent the logger from printing messages - I like my tests to look pretty.
if VERBOSE:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.CRITICAL)
def load_fasta(self, filename):
with open(os.path.join(DATA_DIR, filename + '.fasta')) as fh:
return list(read_fasta(fh))
def load_histo(self, filename):
with open(os.path.join(DATA_DIR, filename + '.fasta')) as fh:
return fasta_to_histo(read_fasta(fh))
### THE TESTS ###
def test_empty(self):
"""Test loading the empty file.
"""
self.assertEqual(self.load_fasta('empty'), [])
self.assertEqual(self.load_histo('empty'), [])
self.assertEqual(dict(histo_to_result([])),
{ '_headings': [ 'Max read length',
'Reads',
'Total bases',
'N50',
'GC %',
'Mean length'],
'Max read length': -1,
'Reads' : 0,
'Total bases': 0,
'N50': -1,
'GC %': 0.0,
'Mean length': 0.0 } )
self.assertEqual(dict(histo_to_result([], headings=False)),
{ 'Max read length': -1,
'Reads' : 0,
'Total bases': 0,
'N50': -1,
'GC %': 0.0,
'Mean length': 0.0 } )
def test_simplestats(self):
"""Test on the foo3.fasta sample file
"""
res3 = histo_to_result(self.load_histo('foo3'), cutoffs=[0, 6])
self.assertEqual( res3['Reads'], 3 )
self.assertEqual( res3['Total bases'], 20 )
self.assertEqual( res3['Reads >=6'], 1 )
self.assertEqual( res3['Total bases for reads >=6'], 10 )
def test_n50(self):
"""Look at some N50 values for simple files.
"""
# Single sequence
res1 = histo_to_result(self.load_histo('foo1'), cutoffs=[0,1,2])
self.assertEqual( res1['N50'], 1 )
self.assertEqual( res1['N50 for reads >=1'], 1 )
self.assertEqual( res1['N50 for reads >=2'], 1 )
# Two sequences of length 8 and 9
res2 = histo_to_result(self.load_histo('foo2'), cutoffs=[0,9])
self.assertEqual( res2['N50'], 9 )
self.assertEqual( res2['N50 for reads >=9'], 9 )
# Three sequences of lengths 5, 5 and 10
res3 = histo_to_result(self.load_histo('foo3'), cutoffs=[0, 6])
self.assertEqual( res3['N50'], 10 ) # I think??
self.assertEqual( res3['N50 for reads >=6'], 10 )
# Or indeed 5, 5, 9 or 5, 5, 11
res4 = histo_to_result(self.load_histo('foo4'), cutoffs=[0,5,6])
self.assertEqual( res4['N50'], 5 )
self.assertEqual( res4['N50 for reads >=5'], 5 )
self.assertEqual( res4['N50 for reads >=6'], 9 )
self.assertEqual( histo_to_result(self.load_histo('foo5'))['N50'], 11 )
def test_10000(self):
"""Look at the file which is a subsample of some real PacBio data.
"""
res = histo_to_result(self.load_histo('m54041_180926_125231.subreads+sub10000'))
# These numbers come from the old Perl script:
# Max subread length,Num subreads >=1,Total bases in subreads >=1,N50 for subreads >=1,GC subreads >=1,Mean length for subreads >=1
# 10765, 10000, 10989199, 1306, 50.1, 1098.9
self.assertEqual( res['Max read length'], 10765 )
self.assertEqual( res['Reads'], 10000 )
self.assertEqual( res['Total bases'], 10989199 )
self.assertEqual( res['N50'], 1306 )
self.assertEqual( "{:.1f}".format(res['GC %']), "50.1" )
self.assertEqual( "{:.1f}".format(res['Mean length']), "1098.9" )
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"VERBOSE"
] |
[]
|
["VERBOSE"]
|
python
| 1 | 0 | |
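The N50 assertions in the test file above (including the hesitant "I think??" for foo3) follow from the standard N50 definition; below is a minimal sketch of that definition, not the project's fasta_stats implementation:

```python
def n50(lengths):
    """Standard N50: the largest read length L such that reads of length >= L
    together cover at least half of the total bases."""
    total = sum(lengths)
    running = 0
    for length in sorted(lengths, reverse=True):
        running += length
        if 2 * running >= total:
            return length
    return -1  # empty input; the tests above use -1 for an empty histogram

# Worked checks against the expectations in test_n50:
assert n50([8, 9]) == 9        # foo2: two sequences of length 8 and 9
assert n50([5, 5, 10]) == 10   # foo3: the 10-mer alone covers half of the 20 bases
assert n50([5, 5, 9]) == 5     # foo4: the 9-mer alone covers only 9 of 19 bases
```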
python/uw/like2/pipeline/check_converge.py
|
"""
Run after a successful UWpipeline/job_task.
Summarizes the execution, mostly zipping the files generated by the
multiple jobs and submitting follow-up streams.
Diagnostic plots are now done by the summary task.
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/pipeline/check_converge.py,v 1.41 2018/01/27 15:38:26 burnett Exp $
"""
import os, sys, glob, zipfile, logging, datetime, argparse, subprocess
import numpy as np
import pandas as pd
from uw.like2 import (tools, maps, seeds,)
from uw.like2.pipeline import (pipe, stream, stagedict, check_ts, )
from uw.utilities import healpix_map
def streamInfo(stream_id ,path='.'):
si = stream.SubStreamStats(stream_id, path)
return si.times
def main(args):
"""
"""
def fixpath(s):
if os.path.exists(s): return s
r= s.replace('/a/wain025/g.glast.u55','/afs/slac/g/glast/groups')
assert os.path.exists(r), 'paths do not exist: %s or %s' % (s,r)
return r
pointlike_dir=fixpath(args.pointlike_dir) # = os.environ.get('POINTLIKE_DIR', '.')
skymodel =fixpath(args.skymodel) # = os.environ.get('SKYMODEL_SUBDIR', sys.argv[1] )
stream_id = args.stream
stagelist = args.stage
if hasattr(stagelist, '__iter__'): stagelist=stagelist[0] #handle local or from uwpipeline.py
#piperaise Exception('killing this sequence!')
absskymodel = os.path.join(pointlike_dir, skymodel)
def make_zip(fname, ext='pickle', select=None):
"""fname : string
name of a folder containing files to be zipped
"""
if os.path.exists(fname+'.zip') and \
os.path.getmtime(fname) < os.path.getmtime(fname+'.zip'):
print ('Not updating %s' % fname+'.zip')
return
if select is not None:
ff = glob.glob(os.path.join(absskymodel, select))
else:
ff = glob.glob(os.path.join(absskymodel, fname, '*.'+ext))
if len(ff)==0:
print ('no files found to zip in folder %s' %fname)
return
if len(ff)!=1728 and fname=='pickle' and not stagelist.startswith('addseeds'):
raise Exception('Stage {}: Found {} pickle files, expected 1728'.format(stagelist, len(ff)))
print ('found %d *.%s in folder %s ...' % ( len(ff),ext, fname,) ,)
with zipfile.ZipFile(os.path.join(absskymodel, fname+'.zip'), 'w') as z:
for filename in ff:
z.write( filename, os.path.join(fname,os.path.split(filename)[-1]))
print (' zipped into file %s.zip' %fname)
def create_stream(newstage, job_list=None):
if job_list is None:
job_list = stagedict.stagenames[newstage].get('job_list', 'job_list')
print ('Starting stage {} with job_list {}'.format( newstage, job_list))
ps = stream.PipelineStream()
ps(newstage, job_list=job_list, test=False)
next = args.next
print ('next: {}'.format(next))
if not args.test:
tee = tools.OutputTee(os.path.join(absskymodel, 'summary_log.txt'))
if os.path.exists(str(stream_id)):
print ('Abort since detected file with stream name')
raise Exception('Abort since detected file with stream name')
if stream_id!=-1:
streamInfo(stream_id, absskymodel)
os.chdir(absskymodel) # useful for diagnostics below
current = str(datetime.datetime.today())[:16]
print ('\n%s stage %s stream %s model %s ' % (current, stagelist, stream_id, absskymodel))
if os.path.exists('failed_rois.txt'):
failed = sorted(map(int, open('failed_rois.txt').read().split()))
print ('failed rois %s' % failed)
raise Exception('failed rois %s' % failed)
t = stagelist.split(':',1)
if len(t)==2:
stage, nextstage = t
else: stage,nextstage = t[0], None
ss = stage.split('_')
stage = ss[0]
stage_args = ss[1:] if len(ss)>1 else ['none']
next_stage = stagedict.stagenames[stage].get('next', None)
# always update the pickle for the ROIs, if changed
make_zip('pickle')
if stage=='update' or stage=='betafix':
logto = open(os.path.join(absskymodel,'converge.txt'), 'a')
qq=pipe.check_converge(absskymodel, tol=12, log=logto)
r = pipe.roirec(absskymodel)
q = pipe.check_converge(absskymodel, tol=12 , add_neighbors=False)
open('update_roi_list.txt', 'w').write('\n'.join(map(str, sorted(qq))))
if stage_args[0]!='only' and stage_args[0]!='associations':
if len(q)>1:
if len(qq)> 200:
create_stream('update')
else:
create_stream('update', job_list='$SKYMODEL_SUBDIR/update_roi_list.txt')
else:
model_name = os.getcwd().split('/')[-1]
create_stream('finish' if model_name.find('month')<0 else 'finish_month')
elif stage=='sedinfo':
make_zip('sedinfo')
make_zip('sedfig','png')
elif stage=='create' or stage=='create_reloc':
# always do one more stream after initial
if nextstage is None:
create_stream('update_full') # always start an update
elif stage=='diffuse':
make_zip('galfit_plots', 'png')
make_zip('galfits_all')
elif stage=='isodiffuse':
make_zip('isofit_plots', 'png')
make_zip('isofits')
elif stage=='limb':
make_zip('limb')
#elif stage=='finish' or stage=='counts':
elif stage=='tables':
if len(stage_args)>0:
names = 'ts kde'.split() if stage_args==['none'] else stage_args
if 'ts' in names:
if not os.path.exists('hptables_ts_kde_512.fits'):
healpix_map.assemble_tables(names)
modelname = os.getcwd().split('/')[-1]
check_ts.pipe_make_seeds( modelname, 'seeds_ts.txt')
if next_stage is not None:
create_stream(next_stage)
elif 'hard' in names:
healpix_map.assemble_tables('hard')
modelname = os.getcwd().split('/')[-1]
check_ts.pipe_make_seeds( modelname, 'seeds_hard.txt')
if next_stage is not None:
create_stream(next_stage)
elif 'mspsens' in names:
healpix_map.assemble_tables(names, nside=256) # assume nside
elif 'all' in names:
tsmin=16
print ('Performing analysis of tables_all, with tsmin={}'.format(tsmin))
mm = maps.MultiMap()
print (mm.summary())
mm.write_fits()
seeds.create_seedfiles(mm, seed_folder='seeds', tsmin=tsmin)
else:
raise Exception( 'Unexpected table name: {}'.format(names))
elif stage=="sourcefinding":
nside=256
maps.nside=nside
tsmin=12
print ('Performing analysis of tables_all, with tsmin={}'.format(tsmin))
mm = maps.MultiMap(nside=nside)
print (mm.summary())
mm.write_fits()
seeds.create_seedfiles(mm, seed_folder='seeds', tsmin=tsmin, nside=nside)
elif stage=='ptables':
names = ['tsp']
healpix_map.assemble_tables(names)
elif stage=='pulsar':
healpix_map.assemble_tables(['pts'])
elif stage=='seedcheck':
key = stage_args[0]
print ('Processing seedcheck with key %s' % key)
make_zip('seedcheck' , select=None if key=='none' else 'seedcheck_%s/*' %key)
if key=='pgw': # processing a month
seedcheck.SeedCheck().all_plots();
create_stream('update_seeds')
elif stage=='sourcefinding':
raise Exception('place holder -- will call code for inclusive template ts maps')
else: # catch fluxcorr, any others like
# also perhaps start another stage
if os.path.exists(stage):
make_zip(stage)
if next_stage is not None:
create_stream(next_stage)
else:
print ('stage %s not recognized for summary'%stage )
if not args.test:
if nextstage:
create_stream(nextstage)
tee.close()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Run after a set of pipeline jobs, check status, accumulate info')
parser.add_argument('--stage', default=os.environ.get('stage', '?'), help='the stage identifier(s)')
parser.add_argument('--pointlike_dir', default=os.environ.get('POINTLIKE_DIR', '.'),
help='top level folder with pointlike')
parser.add_argument('--skymodel', default= os.environ.get('SKYMODEL_SUBDIR', '.'),
help='folder, from pointlike_dir, to the skymodel. Default $SKYMODEL_SUBDIR, set by pipeline')
parser.add_argument('--stream', default = os.environ.get('PIPELINE_STREAM', '0'),
help='stream number')
parser.add_argument('--test', action='store_false', help='test mode') ######################
args = parser.parse_args()
main(args)
|
[] |
[] |
[
"POINTLIKE_DIR",
"stage",
"PIPELINE_STREAM",
"SKYMODEL_SUBDIR"
] |
[]
|
["POINTLIKE_DIR", "stage", "PIPELINE_STREAM", "SKYMODEL_SUBDIR"]
|
python
| 4 | 0 | |
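The stage-string handling in main() above splits an optional ":nextstage" suffix and underscore-separated arguments; a small worked example of that parsing (the same logic re-stated for illustration with a hypothetical stage value, not project code):

```python
# Illustrative re-statement of the parsing in check_converge.main().
stagelist = 'update_only:finish'        # hypothetical value of args.stage

t = stagelist.split(':', 1)
stage, nextstage = (t[0], t[1]) if len(t) == 2 else (t[0], None)

ss = stage.split('_')
stage = ss[0]
stage_args = ss[1:] if len(ss) > 1 else ['none']

assert (stage, stage_args, nextstage) == ('update', ['only'], 'finish')
```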
var/spack/repos/builtin/packages/tau/package.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
import fnmatch
import glob
import platform
import sys
from llnl.util.filesystem import join_path
class Tau(Package):
"""A portable profiling and tracing toolkit for performance
analysis of parallel programs written in Fortran, C, C++, UPC,
Java, Python.
"""
homepage = "http://www.cs.uoregon.edu/research/tau"
url = "https://www.cs.uoregon.edu/research/tau/tau_releases/tau-2.28.1.tar.gz"
git = "https://github.com/UO-OACISS/tau2"
version('develop', branch='master')
version('2.29', sha256='146be769a23c869a7935e8fa5ba79f40ba36b9057a96dda3be6730fc9ca86086')
version('2.28.2', sha256='64e129a482056755012b91dae2fb4f728dbf3adbab53d49187eca952891c5457')
version('2.28.1', sha256='b262e5c9977471e9f5a8d729b3db743012df9b0ab8244da2842039f8a3b98b34')
version('2.28', sha256='68c6f13ae748d12c921456e494006796ca2b0efebdeef76ee7c898c81592883e')
version('2.27.2p1', sha256='3256771fb71c2b05932b44d0650e6eadc712f1bdedf4c0fb2781db3b266225dd')
version('2.27.2', sha256='d319a4588ad303b71082254f4f40aa76f6183a01b5bc4bd987f76e1a6026efa1')
version('2.27.1', sha256='315babab4da25dd08633ad8dbf33d93db77f57d240bcbd3527ed5b8710cb9d8f')
version('2.27', sha256='d48fdca49cda2d9f25a0cf5dbd961201c8a2b1f025bcbb121d96ad43f211f1a7')
version('2.26.3', sha256='bd785ed47f20e6b8b2a1d99ce383d292f70b1fb9e2eaab21f5eaf8e64b28e990')
version('2.26.2', sha256='92ca68db51fd5bd026187e70b397bcd1db9bfb07008d7e8bc935411a97978834')
version('2.26.1', sha256='d084ff87e5f9fe640a3fc48aa5c8c52f586e7b739787f2bb9a4249005e459896')
version('2.26', sha256='458228646a13a228841d4133f24af14cc182f4978eb15ef6244d71735abe8d16')
version('2.25', sha256='ab8a8c15a075af69aa23b4790b4e2d9dffc3b880fc1ff806c21535ab69b6a088')
version('2.24.1', sha256='bc27052c36377e4b8fc0bbb4afaa57eaa8bcb3f5e5066e576b0f40d341c28a0e')
version('2.24', sha256='5d28e8b26561c7cd7d0029b56ec0f95fc26803ac0b100c98e00af0b02e7f55e2')
version('2.23.1', sha256='31a4d0019cec6ef57459a9cd18a220f0130838a5f1a0b5ea7879853f5a38cf88')
# Disable some default dependencies on Darwin/OSX
darwin_default = False
if sys.platform != 'darwin':
darwin_default = True
variant('scorep', default=False, description='Activates SCOREP support')
variant('openmp', default=False, description='Use OpenMP threads')
variant('pthreads', default=True, description='Use POSIX threads')
variant('mpi', default=False, description='Specify use of TAU MPI wrapper library')
variant('phase', default=False, description='Generate phase based profiles')
variant('papi', default=darwin_default, description='Activates Performance API')
variant('binutils', default=True, description='Activates support of BFD GNU Binutils')
variant('libdwarf', default=darwin_default, description='Activates support of libdwarf')
variant('libelf', default=darwin_default, description='Activates support of libelf')
variant('libunwind', default=darwin_default, description='Activates support of libunwind')
variant('otf2', default=True, description='Activates support of Open Trace Format (OTF)')
variant('pdt', default=True, description='Use PDT for source code instrumentation')
variant('comm', default=False, description=' Generate profiles with MPI communicator info')
variant('python', default=False, description='Activates Python support')
variant('likwid', default=False, description='Activates LIKWID support')
variant('ompt', default=False, description='Activates OMPT instrumentation')
variant('opari', default=False, description='Activates Opari2 instrumentation')
variant('shmem', default=False, description='Activates SHMEM support')
variant('gasnet', default=False, description='Activates GASNET support')
variant('cuda', default=False, description='Activates CUDA support')
variant('fortran', default=darwin_default, description='Activates Fortran support')
variant('io', default=True, description='Activates POSIX I/O support')
variant('adios2', default=False, description='Activates ADIOS2 output support')
variant('sqlite', default=False, description='Activates SQLite3 output support')
# Support cross compiling.
# This is a _reasonable_ subset of the full set of TAU
# architectures supported:
variant('craycnl', default=False, description='Build for Cray compute nodes')
variant('bgq', default=False, description='Build for IBM BlueGene/Q compute nodes')
variant('ppc64le', default=False, description='Build for IBM Power LE nodes')
variant('x86_64', default=False, description='Force build for x86 Linux instead of auto-detect')
depends_on('zlib', type='link')
depends_on('pdt', when='+pdt') # Required for TAU instrumentation
depends_on('scorep', when='+scorep')
depends_on('[email protected]:', when='+otf2')
depends_on('likwid', when='+likwid')
depends_on('papi', when='+papi')
depends_on('libdwarf', when='+libdwarf')
depends_on('libelf', when='+libdwarf')
# TAU requires the ELF header support, libiberty and demangle.
depends_on('binutils@:2.33.1+libiberty+headers~nls', when='+binutils')
depends_on('[email protected]:', when='+python')
depends_on('libunwind', when='+libunwind')
depends_on('mpi', when='+mpi', type=('build', 'run', 'link'))
depends_on('cuda', when='+cuda')
depends_on('gasnet', when='+gasnet')
depends_on('adios2', when='+adios2')
depends_on('sqlite', when='+sqlite')
depends_on('hwloc')
# Elf only required from 2.28.1 on
conflicts('+libelf', when='@:2.28.0')
conflicts('+libdwarf', when='@:2.28.0')
# ADIOS2, SQLite only available from 2.29.1 on
conflicts('+adios2', when='@:2.29.1')
conflicts('+sqlite', when='@:2.29.1')
def set_compiler_options(self, spec):
useropt = ["-O2 -g", self.rpath_args]
##########
# Selecting a compiler with TAU configure is quite tricky:
# 1 - compilers are mapped to a given set of strings
# (and spack cc, cxx, etc. wrappers are not among them)
# 2 - absolute paths are not allowed
# 3 - the usual environment variables seem not to be checked
# ('CC', 'CXX' and 'FC')
# 4 - if no -cc=<compiler> -cxx=<compiler> is passed tau is built with
# system compiler silently
# (regardless of what %<compiler> is used in the spec)
#
# In the following we give TAU what it expects and put compilers into
# PATH
compiler_path = os.path.dirname(self.compiler.cc)
os.environ['PATH'] = ':'.join([compiler_path, os.environ['PATH']])
compiler_options = ['-c++=%s' % os.path.basename(self.compiler.cxx),
'-cc=%s' % os.path.basename(self.compiler.cc)]
if '+fortran' in spec and self.compiler.fc:
compiler_options.append('-fortran=%s' % self.compiler.fc_names[0])
##########
# Construct the string of custom compiler flags and append it to
# compiler related options
useropt = ' '.join(useropt)
useropt = "-useropt=%s" % useropt
compiler_options.append(useropt)
return compiler_options
def setup_build_environment(self, env):
env.prepend_path('LIBRARY_PATH', self.spec['zlib'].prefix.lib)
def install(self, spec, prefix):
# TAU isn't happy with directories that have '@' in the path. Sigh.
change_sed_delimiter('@', ';', 'configure')
change_sed_delimiter('@', ';', 'utils/FixMakefile')
change_sed_delimiter('@', ';', 'utils/FixMakefile.sed.default')
# TAU configure, despite the name, seems to be a manually
# written script (nothing related to autotools). As such it has
# a few #peculiarities# that make this build quite hackish.
options = ["-prefix=%s" % prefix]
if '+craycnl' in spec:
options.append('-arch=craycnl')
if '+bgq' in spec:
options.append('-arch=bgq')
if '+ppc64le' in spec:
options.append('-arch=ibm64linux')
if '+x86_64' in spec:
options.append('-arch=x86_64')
if ('platform=cray' in self.spec) and ('+x86_64' not in spec):
options.append('-arch=craycnl')
if '+pdt' in spec:
options.append("-pdt=%s" % spec['pdt'].prefix)
if '+scorep' in spec:
options.append("-scorep=%s" % spec['scorep'].prefix)
if '+pthreads' in spec:
options.append('-pthread')
if '+likwid' in spec:
options.append("-likwid=%s" % spec['likwid'].prefix)
if '+papi' in spec:
options.append("-papi=%s" % spec['papi'].prefix)
if '+openmp' in spec:
options.append('-openmp')
if '+opari' in spec:
options.append('-opari')
if '+ompt' in spec:
options.append('-ompt')
if '+io' in spec:
options.append('-iowrapper')
if '+binutils' in spec:
options.append("-bfd=%s" % spec['binutils'].prefix)
if '+libdwarf' in spec:
options.append("-dwarf=%s" % spec['libdwarf'].prefix)
if '+libelf' in spec:
options.append("-elf=%s" % spec['libelf'].prefix)
if '+libunwind' in spec:
options.append("-unwind=%s" % spec['libunwind'].prefix)
if '+otf2' in spec:
options.append("-otf=%s" % spec['otf2'].prefix)
if '+mpi' in spec:
env['CC'] = spec['mpi'].mpicc
env['CXX'] = spec['mpi'].mpicxx
env['F77'] = spec['mpi'].mpif77
env['FC'] = spec['mpi'].mpifc
options.append('-mpi')
if '+comm' in spec:
options.append('-PROFILECOMMUNICATORS')
if '+shmem' in spec:
options.append('-shmem')
if '+gasnet' in spec:
options.append('-gasnet=%s' % spec['gasnet'].prefix)
if '+cuda' in spec:
options.append("-cuda=%s" % spec['cuda'].prefix)
if '+adios2' in spec:
options.append("-adios=%s" % spec['adios2'].prefix)
if '+sqlite' in spec:
options.append("-sqlite3=%s" % spec['sqlite'].prefix)
if '+phase' in spec:
options.append('-PROFILEPHASE')
if '+python' in spec:
options.append('-python')
# find Python.h (i.e. include/python2.7/Python.h)
include_path = spec['python'].prefix.include
found = False
for root, dirs, files in os.walk(spec['python'].prefix.include):
for filename in fnmatch.filter(files, 'Python.h'):
include_path = root
break
found = True
if found:
break
options.append("-pythoninc=%s" % include_path)
# find libpython*.* (i.e. lib/python2.7/libpython2.7.so)
lib_path = spec['python'].prefix.lib
found = False
file_to_find = 'libpython*.so'
if (platform.system() == "Darwin"):
file_to_find = 'libpython*.dylib'
for root, dirs, files in os.walk(spec['python'].prefix.lib):
for filename in fnmatch.filter(files, file_to_find):
lib_path = root
break
found = True
if found:
break
options.append("-pythonlib=%s" % lib_path)
compiler_specific_options = self.set_compiler_options(spec)
options.extend(compiler_specific_options)
configure(*options)
make("install")
# Link arch-specific directories into prefix since there is
# only one arch per prefix the way spack installs.
self.link_tau_arch_dirs()
# TAU may capture Spack's internal compiler wrapper. Replace
# it with the correct compiler.
self.fix_tau_compilers()
def link_tau_arch_dirs(self):
for subdir in os.listdir(self.prefix):
for d in ('bin', 'lib'):
src = join_path(self.prefix, subdir, d)
dest = join_path(self.prefix, d)
if os.path.isdir(src) and not os.path.exists(dest):
os.symlink(join_path(subdir, d), dest)
def fix_tau_compilers(self):
filter_file('FULL_CC=' + spack_cc, 'FULL_CC=' + self.compiler.cc,
self.prefix + '/include/Makefile', backup=False,
string=True)
filter_file('FULL_CXX=' + spack_cxx, 'FULL_CXX=' +
self.compiler.cxx, self.prefix + '/include/Makefile',
backup=False, string=True)
for makefile in os.listdir(self.prefix.lib):
if makefile.startswith('Makefile.tau'):
filter_file('FULL_CC=' + spack_cc, 'FULL_CC=' +
self.compiler.cc, self.prefix.lib + "/" +
makefile, backup=False, string=True)
filter_file('FULL_CXX=' + spack_cxx, 'FULL_CXX=' +
self.compiler.cxx, self.prefix.lib +
"/" + makefile, backup=False, string=True)
def setup_run_environment(self, env):
pattern = join_path(self.prefix.lib, 'Makefile.*')
files = glob.glob(pattern)
# This function is called both at install time to set up
# the build environment and after install to generate the associated
# module file. In the former case there is no `self.prefix.lib`
# directory to inspect. The conditional below will set `TAU_MAKEFILE`
# in the latter case.
if files:
env.set('TAU_MAKEFILE', files[0])
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
examples/id/generate/generateASnowflakeId.go
|
package example
import (
"fmt"
"os"
"go.m3o.com/id"
)
// Generate a unique ID. Defaults to uuid.
func GenerateAsnowflakeId() {
idService := id.NewIdService(os.Getenv("M3O_API_TOKEN"))
rsp, err := idService.Generate(&id.GenerateRequest{
Type: "snowflake",
})
fmt.Println(rsp, err)
}
|
[
"\"M3O_API_TOKEN\""
] |
[] |
[
"M3O_API_TOKEN"
] |
[]
|
["M3O_API_TOKEN"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# cloneinsta directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, 'cloneinsta'))
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/fubanna/celery.py
|
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fubanna.settings')
App = Celery('fubanna')
App.config_from_object('django.conf:settings', namespace='CELERY')
App.autodiscover_tasks()
@App.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
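The fubanna/celery.py row above wires Celery into Django via config_from_object and autodiscover_tasks; a hedged usage sketch of queuing the debug_task it defines, assuming a worker is running and the project's CELERY_* settings point at a reachable broker:

```python
# Illustrative only: queue the debug_task defined in src/fubanna/celery.py.
# Assumes a worker has been started, e.g. `celery -A fubanna worker`,
# and that the CELERY_* Django settings reference a running broker.
from fubanna.celery import debug_task

result = debug_task.delay()        # enqueue asynchronously
print(result.id)                   # AsyncResult id; the worker prints the request
```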
run_explorer.py
|
import argparse
import logging
import os
from pathlib import Path
# TODO: Change to using kivy.config?
os.environ["KIVY_NO_ARGS"] = '1'
#os.environ['KIVY_NO_CONSOLELOG'] = '0'
from tonguetwister.gui.director_cast_explorer import DirectorCastExplorer
from tonguetwister.lib.logger import setup_logger
def main(base_dir, filename):
setup_logger(logging.DEBUG)
gui = DirectorCastExplorer(base_dir, filename)
gui.run()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--base-dir', type=str, default=str(Path.home()),
help='The initial directory shown in the load file dialog')
parser.add_argument('--filepath', type=str, default=None,
help='A director data filename to load on startup, e.g. link/to/folder/data.cst')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args.base_dir, args.filepath)
|
[] |
[] |
[
"KIVY_NO_ARGS",
"KIVY_NO_CONSOLELOG"
] |
[]
|
["KIVY_NO_ARGS", "KIVY_NO_CONSOLELOG"]
|
python
| 2 | 0 | |
vace_wpe.py
|
"""
author: Joon-Young Yang (E-mail: [email protected])
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_custom.torch_utils import shape, to_gpu, to_arr
from torch_custom.custom_layers import CustomModel, update_dict
from torch_custom.stft_helper import StftHelper
import torch_custom.spectral_ops as spo
from torch_custom.spectral_ops import MelFeatureExtractor
from torch_custom.wpe_th_utils import wpe_mb_torch_ri
# class VACEWPE(nn.Module):
class VACEWPE(CustomModel):
def __init__(self, stft_opts, lpsnet=None, vacenet=None, mfcc_opts={}):
super(VACEWPE, self).__init__()
assert lpsnet is not None and isinstance(lpsnet, nn.Module)
assert vacenet is not None and isinstance(vacenet, nn.Module)
assert len(stft_opts) >= 5
self.stft_helper = StftHelper(**stft_opts)
self.lpsnet = lpsnet
self.vacenet = vacenet
self.weights = vacenet.weights
self.weights_list = vacenet.weights_list
self.weights_name = vacenet.weights_name
## Loss functions for training this model
self.loss_mse = nn.MSELoss(reduction='mean')
self.loss_mae = nn.L1Loss(reduction='mean')
## MFCC loss
if len(mfcc_opts):
self.melfeatext = MelFeatureExtractor(**mfcc_opts)
def train(self):
self.vacenet.train()
def eval(self):
self.vacenet.eval()
self.lpsnet.eval()
# def to(self, device):
# self.vacenet.to(device)
# self.lpsnet.to(device)
@staticmethod
def parse_batch(data, target, device):
data, target = to_gpu(data, device), to_gpu(target, device)
return data, target
def forward(self, sig_x, delay=3, taps=10, drop=0.0):
""" sig_x is batched single-channel time-domain waveforms
shape: (B, T) == (batch, time)
"""
## Convert the time-domain signal to the STFT coefficients
nb, nt = sig_x.size() # (B,t)
stft_x = self.stft_helper.stft(sig_x) # (B,F,T,2)
## Compute early PSD using the LPSNet
lps_x = spo.stft2lps(stft_x) # (B,F,T)
psd_x = self.lpsnet(lps_x, drop=drop).exp() # (B,F,T)
## Compute virtual signal using the VACENet
stft_v = self.vacenet(stft_x, drop=drop) # (B,F,T,2)
## Stack the pair of actual and virtual signals
stft_xv = torch.stack((stft_x, stft_v), dim=1) # (B,C=2,F,T,2)
## Batch-mode WPE
## >> STFT and PSD must be in shape (B,C,F,T,2) and (B,F,T), respectively.
nfreq, nfrm = psd_x.size(1), psd_x.size(2)
stft_wpe = wpe_mb_torch_ri(
stft_xv, psd_x, taps=taps, delay=delay) # (B,C=2,F,T,2)
## Inverse STFT
stft_wpe_x, stft_wpe_v = stft_wpe[:,0], stft_wpe[:,1] # (B,F,T,2)
sig_wpe_x = self.stft_helper.istft(stft_wpe_x, length=nt) # (B,t)
return sig_wpe_x, stft_wpe_x, lps_x, stft_v
def dereverb(self, sig_x, delay=3, taps=10):
sig_wpe_x = self.forward(sig_x, delay, taps)[0]
return to_arr(sig_wpe_x).squeeze()
return self.forward(sig_x, delay, taps)[0]
def get_loss(self, sig_x, sig_early, delay, taps,
alpha, beta, gamma, drop=0.0, summarize=False):
""" Both "sig_x" and "sig_early" are batched time-domain waveforms """
sig_wpe_x, stft_wpe_x, lps_x, stft_v = \
self.forward(sig_x, delay, taps, drop=drop) # (B,t)
# stft_wpe_x = self.stft_helper.stft(sig_wpe_x) # (B,F,T,2)
stft_early = self.stft_helper.stft(sig_early) # (B,F,T,2)
lms_wpe_x = spo.stft2lms(stft_wpe_x) # (B,F,T)
lms_early = spo.stft2lms(stft_early) # (B,F,T)
mse_stft_r_wpe = self.loss_mse(stft_wpe_x[..., 0], stft_early[..., 0])
mse_stft_i_wpe = self.loss_mse(stft_wpe_x[..., 1], stft_early[..., 1])
mse_lms_wpe = self.loss_mse(lms_wpe_x, lms_early)
mae_wav_wpe = self.loss_mae(sig_wpe_x, sig_early)
raw_loss = alpha*(mse_stft_r_wpe+mse_stft_i_wpe) \
+ beta*mse_lms_wpe + gamma*mae_wav_wpe
if not summarize:
return raw_loss
else:
loss_dict = {
"raw_loss":raw_loss.item(),
"mse_stft_r_wpe":mse_stft_r_wpe.item(),
"mse_stft_i_wpe":mse_stft_i_wpe.item(),
"mse_lms_wpe":mse_lms_wpe.item(),
"mae_wav_wpe":mae_wav_wpe.item(),
}
return raw_loss, loss_dict, (
0.5*lps_x[-1], # lms_x
spo.stft2lms(stft_v[-1]), # lms_v
lms_wpe_x[-1], # lms_wpe_x
sig_x[-1], # sig_x
self.stft_helper.istft(stft_v[-1], length=sig_x.size(1)), # sig_v
sig_wpe_x[-1]) # sig_wpe_x
def get_loss_mfcc(self, sig_x, sig_early, delay, taps,
alpha, beta, gamma, delta=0.0, power_scale=True,
drop=0.0, summarize=False):
""" Both "sig_x" and "sig_early" are batched time-domain waveforms """
sig_wpe_x, stft_wpe_x, lps_x, stft_v = \
self.forward(sig_x, delay, taps, drop=drop) # (B,t)
# stft_wpe_x = self.stft_helper.stft(sig_wpe_x) # (B,F,T,2)
stft_early = self.stft_helper.stft(sig_early) # (B,F,T,2)
lms_wpe_x = spo.stft2lms(stft_wpe_x) # (B,F,T)
lms_early = spo.stft2lms(stft_early) # (B,F,T)
mfcc_wpe_x = self.melfeatext.mfcc(stft_wpe_x, power_scale=power_scale)
mfcc_early = self.melfeatext.mfcc(stft_early, power_scale=power_scale)
mse_stft_r_wpe = self.loss_mse(stft_wpe_x[..., 0], stft_early[..., 0])
mse_stft_i_wpe = self.loss_mse(stft_wpe_x[..., 1], stft_early[..., 1])
mse_lms_wpe = self.loss_mse(lms_wpe_x, lms_early)
mae_wav_wpe = self.loss_mae(sig_wpe_x, sig_early)
mae_mfcc_wpe = self.loss_mae(mfcc_wpe_x, mfcc_early)
raw_loss = alpha*(mse_stft_r_wpe+mse_stft_i_wpe) \
+ beta*mse_lms_wpe + gamma*mae_wav_wpe + delta*mae_mfcc_wpe
if not summarize:
return raw_loss
else:
loss_dict = {
"raw_loss":raw_loss.item(),
"mse_stft_r_wpe":mse_stft_r_wpe.item(),
"mse_stft_i_wpe":mse_stft_i_wpe.item(),
"mse_lms_wpe":mse_lms_wpe.item(),
"mae_wav_wpe":mae_wav_wpe.item(),
"mae_mfcc_wpe":mae_mfcc_wpe.item(),
}
return raw_loss, loss_dict, (
0.5*lps_x[-1], # lms_x
spo.stft2lms(stft_v[-1]), # lms_v
lms_wpe_x[-1], # lms_wpe_x
sig_x[-1], # sig_x
self.stft_helper.istft(stft_v[-1], length=sig_x.size(1)), # sig_v
sig_wpe_x[-1]) # sig_wpe_x
if __name__=="__main__":
import os
# os.environ["CUDA_VISIBLE_DEVICES"]="0"
from gcunet4c_4M4390 import VACENet
from bldnn_4M62 import LstmDnnNet as LPSEstimator
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print('device = {}'.format(device))
# device = "cpu"
## STFT options
stft_opts_torch = dict(
n_fft=1024, hop_length=256, win_length=1024, win_type='hanning',
symmetric=True)
fft_bins = stft_opts_torch['n_fft']//2 + 1
mfcc_opts_torch = dict(
fs=16000, nfft=1024, lowfreq=20., maxfreq=7600.,
nlinfilt=0, nlogfilt=40, nceps=40,
lifter_type='sinusoidal', lift=-22.0) # only useful during fine-tuning
## Input tensors
sig_x = torch.randn(6, 44800).to(device) # assume a mini-batch of waveforms
sig_early = torch.randn(6, 44800).to(device)
## VACENet
vacenet = VACENet(
input_dim=fft_bins, stft_opts=stft_opts_torch, scope='vace_unet')
vacenet.to(device)
print('VACENet size', vacenet.size)
print(len(tuple(vacenet.trainable_parameters())))
print(len(tuple(vacenet.parameters())))
## LPSNet
fft_bins = stft_opts_torch['n_fft']//2 + 1
lpsnet = LPSEstimator(
input_dim=fft_bins, stft_opts=stft_opts_torch, scope='ldnn_lpseir_ns')
lpsnet.to(device)
print('LPSNet size', lpsnet.size)
print(len(tuple(lpsnet.trainable_parameters())))
print(len(tuple(lpsnet.parameters()))); #exit()
## Freeze the LPSNet
# lpsnet.check_trainable_parameters()
lpsnet.freeze()
# lpsnet.check_trainable_parameters()
# lpsnet.unfreeze()
# lpsnet.check_trainable_parameters()
lpsnet.eval()
## VACE-WPE
vace_wpe = VACEWPE(
stft_opts=stft_opts_torch,
lpsnet=lpsnet, vacenet=vacenet,
mfcc_opts=mfcc_opts_torch)
vace_wpe.to(device)
print('VACE-WPE size', vace_wpe.size)
vace_wpe.check_trainable_parameters()
print(len(tuple(vace_wpe.trainable_parameters())))
print(len(tuple(vace_wpe.parameters())))
for name in vace_wpe.weights_name: print(name)
kernel_l2_norm, kernel_l2_norm_dict = vace_wpe.weights_l2_norm()
print('weight_l2_norm = %.5f' % kernel_l2_norm.item())
## Compute loss
loss_scales = dict(alpha=1.0, beta=0.1, gamma=5.0, delta=0.2, power_scale=True)
loss = vace_wpe.get_loss_mfcc(
sig_x, sig_early, delay=3, taps=5,
**loss_scales ,drop=0.0, summarize=False)
## L2-regularization
total_loss = loss + (1e-5)*kernel_l2_norm
## Compute gradients
total_loss.backward()
print('Succeeded!!')
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
vendor/github.com/containers/buildah/pkg/cli/common.go
|
package cli
// the cli package contains urfave/cli related structs that help make up
// the command line for buildah commands. it resides here so other projects
// that vendor in this code can use them too.
import (
"fmt"
"os"
"runtime"
"strings"
"github.com/containers/buildah"
"github.com/containers/buildah/pkg/completion"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/common/pkg/auth"
commonComp "github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
// LayerResults represents the results of the layer flags
type LayerResults struct {
ForceRm bool
Layers bool
}
// UserNSResults represents the results for the UserNS flags
type UserNSResults struct {
UserNS string
UserNSUIDMap []string
UserNSGIDMap []string
UserNSUIDMapUser string
UserNSGIDMapGroup string
}
// NameSpaceResults represents the results for Namespace flags
type NameSpaceResults struct {
IPC string
Network string
CNIConfigDir string
CNIPlugInPath string
PID string
UTS string
}
// BudResults represents the results for Bud flags
type BudResults struct {
Annotation []string
Arch string
Authfile string
BuildArg []string
CacheFrom string
CertDir string
Compress bool
Creds string
DisableCompression bool
DisableContentTrust bool
File []string
Format string
Iidfile string
Label []string
Logfile string
Loglevel int
NoCache bool
Timestamp int64
OS string
Platform string
Pull bool
PullAlways bool
PullNever bool
Quiet bool
Rm bool
Runtime string
RuntimeFlags []string
SignaturePolicy string
SignBy string
Squash bool
Tag []string
Target string
TLSVerify bool
Jobs int
LogRusage bool
}
// FromAndBudResults represents the results for common flags
// in bud and from
type FromAndBudResults struct {
AddHost []string
BlobCache string
CapAdd []string
CapDrop []string
CgroupParent string
CPUPeriod uint64
CPUQuota int64
CPUSetCPUs string
CPUSetMems string
CPUShares uint64
DecryptionKeys []string
Devices []string
DNSSearch []string
DNSServers []string
DNSOptions []string
HTTPProxy bool
Isolation string
Memory string
MemorySwap string
OverrideArch string
OverrideOS string
SecurityOpt []string
ShmSize string
Ulimit []string
Volumes []string
}
// GetUserNSFlags returns the common flags for usernamespace
func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet {
usernsFlags := pflag.FlagSet{}
usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'")
usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerUID:hostUID:length` UID mapping to use in user namespace")
usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerGID:hostGID:length` GID mapping to use in user namespace")
usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping")
usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping")
return usernsFlags
}
// GetUserNSFlagsCompletions returns the FlagCompletions for the userns flags
func GetUserNSFlagsCompletions() commonComp.FlagCompletions {
flagCompletion := commonComp.FlagCompletions{}
flagCompletion["userns"] = completion.AutocompleteNamespaceFlag
flagCompletion["userns-uid-map"] = commonComp.AutocompleteNone
flagCompletion["userns-gid-map"] = commonComp.AutocompleteNone
flagCompletion["userns-uid-map-user"] = commonComp.AutocompleteSubuidName
flagCompletion["userns-gid-map-group"] = commonComp.AutocompleteSubgidName
return flagCompletion
}
// GetNameSpaceFlags returns the common flags for a namespace menu
func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'private', `path` of IPC namespace to join, or 'host'")
fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'private', 'none', 'ns:path' of network namespace to join, or 'host'")
fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", util.DefaultCNIConfigDir, "`directory` of CNI configuration files")
fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", util.DefaultCNIPluginPath, "`path` of CNI network plugins")
fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "private, `path` of PID namespace to join, or 'host'")
fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "private, :`path` of UTS namespace to join, or 'host'")
return fs
}
// GetNameSpaceFlagsCompletions returns the FlagCompletions for the namespace flags
func GetNameSpaceFlagsCompletions() commonComp.FlagCompletions {
flagCompletion := commonComp.FlagCompletions{}
flagCompletion[string(specs.IPCNamespace)] = completion.AutocompleteNamespaceFlag
flagCompletion[string(specs.NetworkNamespace)] = completion.AutocompleteNamespaceFlag
flagCompletion["cni-config-dir"] = commonComp.AutocompleteDefault
flagCompletion["cni-plugin-path"] = commonComp.AutocompleteDefault
flagCompletion[string(specs.PIDNamespace)] = completion.AutocompleteNamespaceFlag
flagCompletion[string(specs.UTSNamespace)] = completion.AutocompleteNamespaceFlag
return flagCompletion
}
// GetLayerFlags returns the common flags for layers
func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.BoolVar(&flags.ForceRm, "force-rm", false, "Always remove intermediate containers after a build, even if the build is unsuccessful.")
fs.BoolVar(&flags.Layers, "layers", UseLayers(), fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override."))
return fs
}
// Note: GetLayerFlagsCompletion is not needed since GetLayerFlags only contains bool flags
// GetBudFlags returns common bud flags
func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringVar(&flags.Arch, "arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host")
fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])")
fs.StringVar(&flags.Authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file.")
fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
fs.StringVar(&flags.CacheFrom, "cache-from", "", "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.")
fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
fs.BoolVar(&flags.Compress, "compress", false, "This is legacy option, which has no effect on the image")
fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default")
fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "This is a Docker specific option and is a NOOP")
fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile")
fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.")
fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to")
fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
fs.StringArrayVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step")
if err := fs.MarkHidden("log-rusage"); err != nil {
panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err))
}
fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.StringVar(&flags.OS, "os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
fs.StringVar(&flags.Platform, "platform", parse.DefaultPlatform(), "set the OS/ARCH to the provided value instead of the current operating system and architecture of the host (for example `linux/arm`)")
fs.BoolVar(&flags.Pull, "pull", true, "pull the image from the registry if newer or not present in store, if false, only pull the image if not present")
fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store")
fs.BoolVar(&flags.PullNever, "pull-never", false, "do not pull the image, use the image present in store if available")
fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress")
fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build")
// "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/bud.go.
fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
fs.StringVar(&flags.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
if err := fs.MarkHidden("signature-policy"); err != nil {
panic(fmt.Sprintf("error marking the signature-policy flag as hidden: %v", err))
}
fs.BoolVar(&flags.Squash, "squash", false, "squash newly built layers into a single new layer")
fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
return fs
}
// GetBudFlagsCompletions returns the FlagCompletions for the common bud flags
func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion := commonComp.FlagCompletions{}
flagCompletion["arch"] = commonComp.AutocompleteNone
flagCompletion["annotation"] = commonComp.AutocompleteNone
flagCompletion["authfile"] = commonComp.AutocompleteDefault
flagCompletion["build-arg"] = commonComp.AutocompleteNone
flagCompletion["cache-from"] = commonComp.AutocompleteNone
flagCompletion["cert-dir"] = commonComp.AutocompleteDefault
flagCompletion["creds"] = commonComp.AutocompleteNone
flagCompletion["file"] = commonComp.AutocompleteDefault
flagCompletion["format"] = commonComp.AutocompleteNone
flagCompletion["iidfile"] = commonComp.AutocompleteDefault
flagCompletion["jobs"] = commonComp.AutocompleteNone
flagCompletion["label"] = commonComp.AutocompleteNone
flagCompletion["logfile"] = commonComp.AutocompleteDefault
flagCompletion["loglevel"] = commonComp.AutocompleteDefault
flagCompletion["os"] = commonComp.AutocompleteNone
flagCompletion["platform"] = commonComp.AutocompleteNone
flagCompletion["runtime-flag"] = commonComp.AutocompleteNone
flagCompletion["sign-by"] = commonComp.AutocompleteNone
flagCompletion["signature-policy"] = commonComp.AutocompleteNone
flagCompletion["tag"] = commonComp.AutocompleteNone
flagCompletion["target"] = commonComp.AutocompleteNone
flagCompletion["timestamp"] = commonComp.AutocompleteNone
return flagCompletion
}
// GetFromAndBudFlags returns from and bud flags
func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) (pflag.FlagSet, error) {
fs := pflag.FlagSet{}
defaultContainerConfig, err := config.Default()
if err != nil {
return fs, errors.Wrapf(err, "failed to get container config")
}
fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])")
fs.StringVar(&flags.BlobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing")
if err := fs.MarkHidden("blob-cache"); err != nil {
panic(fmt.Sprintf("error marking net flag as hidden: %v", err))
}
fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])")
fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the specified capability when running (default [])")
fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container")
fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period")
fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota")
fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
fs.StringSliceVar(&flags.DecryptionKeys, "decryption-key", nil, "key needed to decrypt the image")
fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices, "Additional devices to be used within containers (default [])")
fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches, "Set custom DNS search domains")
fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers, "Set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.")
fs.StringSliceVar(&flags.DNSOptions, "dns-option", defaultContainerConfig.Containers.DNSOptions, "Set custom DNS options")
fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass through HTTP Proxy environment variables")
fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
fs.StringVar(&flags.OverrideOS, "override-os", runtime.GOOS, "prefer `OS` instead of the running OS when pulling images")
if err := fs.MarkHidden("override-os"); err != nil {
panic(fmt.Sprintf("error marking override-os as hidden: %v", err))
}
fs.StringVar(&flags.OverrideArch, "override-arch", runtime.GOARCH, "prefer `ARCH` instead of the architecture of the machine when pulling images")
if err := fs.MarkHidden("override-arch"); err != nil {
panic(fmt.Sprintf("error marking override-arch as hidden: %v", err))
}
fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])")
fs.StringVar(&flags.ShmSize, "shm-size", defaultContainerConfig.Containers.ShmSize, "size of '/dev/shm'. The format is `<number><unit>`.")
fs.StringSliceVar(&flags.Ulimit, "ulimit", defaultContainerConfig.Containers.DefaultUlimits, "ulimit options")
fs.StringArrayVarP(&flags.Volumes, "volume", "v", defaultContainerConfig.Containers.Volumes, "bind mount a volume into the container")
// Add in the usernamespace and namespaceflags
usernsFlags := GetUserNSFlags(usernsResults)
namespaceFlags := GetNameSpaceFlags(namespaceResults)
fs.AddFlagSet(&usernsFlags)
fs.AddFlagSet(&namespaceFlags)
return fs, nil
}
// GetFromAndBudFlagsCompletions returns the FlagCompletions for the from and bud flags
func GetFromAndBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion := commonComp.FlagCompletions{}
flagCompletion["add-host"] = commonComp.AutocompleteNone
flagCompletion["blob-cache"] = commonComp.AutocompleteNone
flagCompletion["cap-add"] = commonComp.AutocompleteCapabilities
flagCompletion["cap-drop"] = commonComp.AutocompleteCapabilities
flagCompletion["cgroup-parent"] = commonComp.AutocompleteDefault // FIXME: This would be a path right?!
flagCompletion["cpu-period"] = commonComp.AutocompleteNone
flagCompletion["cpu-quota"] = commonComp.AutocompleteNone
flagCompletion["cpu-shares"] = commonComp.AutocompleteNone
flagCompletion["cpuset-cpus"] = commonComp.AutocompleteNone
flagCompletion["cpuset-mems"] = commonComp.AutocompleteNone
flagCompletion["decryption-key"] = commonComp.AutocompleteNone
flagCompletion["device"] = commonComp.AutocompleteDefault
flagCompletion["dns-search"] = commonComp.AutocompleteNone
flagCompletion["dns"] = commonComp.AutocompleteNone
flagCompletion["dns-option"] = commonComp.AutocompleteNone
flagCompletion["isolation"] = commonComp.AutocompleteNone
flagCompletion["memory"] = commonComp.AutocompleteNone
flagCompletion["memory-swap"] = commonComp.AutocompleteNone
flagCompletion["security-opt"] = commonComp.AutocompleteNone
flagCompletion["shm-size"] = commonComp.AutocompleteNone
flagCompletion["ulimit"] = commonComp.AutocompleteNone
flagCompletion["volume"] = commonComp.AutocompleteDefault
// Add in the usernamespace and namespace flag completions
userNsComp := GetUserNSFlagsCompletions()
for name, comp := range userNsComp {
flagCompletion[name] = comp
}
namespaceComp := GetNameSpaceFlagsCompletions()
for name, comp := range namespaceComp {
flagCompletion[name] = comp
}
return flagCompletion
}
// UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true"
// otherwise it returns false
func UseLayers() bool {
layers := os.Getenv("BUILDAH_LAYERS")
if strings.ToLower(layers) == "true" || layers == "1" {
return true
}
return false
}
// DefaultFormat returns the default image format
func DefaultFormat() string {
format := os.Getenv("BUILDAH_FORMAT")
if format != "" {
return format
}
return buildah.OCI
}
// DefaultIsolation returns the default isolation type
func DefaultIsolation() string {
isolation := os.Getenv("BUILDAH_ISOLATION")
if isolation != "" {
return isolation
}
return buildah.OCI
}
// DefaultHistory returns the default add-history setting
func DefaultHistory() bool {
history := os.Getenv("BUILDAH_HISTORY")
if strings.ToLower(history) == "true" || history == "1" {
return true
}
return false
}
func VerifyFlagsArgsOrder(args []string) error {
for _, arg := range args {
if strings.HasPrefix(arg, "-") {
return errors.Errorf("No options (%s) can be specified after the image or container name", arg)
}
}
return nil
}
// AliasFlags handles backwards compatibility with old flag names
func AliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
switch name {
case "net":
name = "network"
}
return pflag.NormalizedName(name)
}
|
[
"\"BUILDAH_LAYERS\"",
"\"BUILDAH_FORMAT\"",
"\"BUILDAH_ISOLATION\"",
"\"BUILDAH_HISTORY\""
] |
[] |
[
"BUILDAH_ISOLATION",
"BUILDAH_HISTORY",
"BUILDAH_FORMAT",
"BUILDAH_LAYERS"
] |
[]
|
["BUILDAH_ISOLATION", "BUILDAH_HISTORY", "BUILDAH_FORMAT", "BUILDAH_LAYERS"]
|
go
| 4 | 0 | |
mezzanine/project_template/fabfile.py
|
import os
import re
import sys
from contextlib import contextmanager
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from importlib import import_module
from posixpath import join
from mezzanine.utils.conf import real_project_name
from fabric.api import (
abort,
env,
cd,
prefix,
sudo as _sudo,
run as _run,
hide,
task,
local,
)
from fabric.context_managers import settings as fab_settings
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.contrib.project import rsync_project
from fabric.colors import yellow, green, blue, red
from fabric.decorators import hosts
################
# Config setup #
################
if not hasattr(env, "proj_app"):
env.proj_app = real_project_name("{{ project_name }}")
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", "fab-script.py"):
# Ensure we import settings from the current dir
try:
conf = import_module("%s.settings" % env.proj_app).FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print("Aborting, no hosts defined.")
exit()
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])
env.proj_name = conf.get("PROJECT_NAME", env.proj_app)
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s/.virtualenvs" % env.user)
env.venv_path = join(env.venv_home, env.proj_name)
env.proj_path = "/home/%s/mezzanine/%s" % (env.user, env.proj_name)
env.manage = "%s/bin/python %s/manage.py" % (env.venv_path, env.proj_path)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_regex = "|".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.vcs_tools = ["git", "hg"]
env.deploy_tool = conf.get("DEPLOY_TOOL", "rsync")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.num_workers = conf.get("NUM_WORKERS", "multiprocessing.cpu_count() * 2 + 1")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
if (
not env.secret_key
and os.environ.get("DJANGO_SETTINGS_MODULE", "") != "docs_settings"
):
print("Aborting, no SECRET_KEY setting defined.")
exit()
# Remote git repos need to be "bare" and reside separated from the project
if env.deploy_tool == "git":
env.repo_path = "/home/%s/git/%s.git" % (env.user, env.proj_name)
else:
env.repo_path = env.proj_path
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, only if its
# contents have changed, in which case the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf.template",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
"reload_command": "nginx -t && service nginx restart",
},
"supervisor": {
"local_path": "deploy/supervisor.conf.template",
"remote_path": "/etc/supervisor/conf.d/%(proj_name)s.conf",
"reload_command": "supervisorctl update gunicorn_%(proj_name)s",
},
"cron": {
"local_path": "deploy/crontab.template",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn.conf.py.template",
"remote_path": "%(proj_path)s/gunicorn.conf.py",
},
"settings": {
"local_path": "deploy/local_settings.py.template",
"remote_path": "%(proj_path)s/%(proj_app)s/local_settings.py",
},
}
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_path):
yield
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
new_reqs = get_reqs()
if old_reqs == new_reqs:
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
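    """Prints the given output with a blank line before and after it."""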
print()
print(output)
print()
def print_command(command):
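    """Prints a colorized version of a shell command before it is run."""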
_print(blue("$ ", bold=True) + yellow(command, bold=True) + red(" ->", bold=True))
@task
def run(command, show=True, *args, **kwargs):
"""
    Runs a shell command on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command, *args, **kwargs)
@task
def sudo(command, show=True, *args, **kwargs):
"""
Runs a command as sudo on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command, *args, **kwargs)
def log_call(func):
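    """Decorator that prints the decorated task's name as a header before running it."""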
@wraps(func)
    def logged(*args, **kwargs):
header = "-" * len(func.__name__)
_print(green("\n".join([header, func.__name__, header]), bold=True))
        return func(*args, **kwargs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
    Uploads a template only if it has changed, and if so, reloads the
related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
def rsync_upload():
"""
Uploads the project with rsync excluding some files and folders.
"""
excludes = [
"*.pyc",
"*.pyo",
"*.db",
".DS_Store",
".coverage",
"local_settings.py",
"/static",
"/.git",
"/.hg",
]
local_dir = "'%s%s'" % (os.getcwd(), os.sep)
return rsync_project(
remote_dir=env.proj_path, local_dir=local_dir, exclude=excludes
)
def vcs_upload():
"""
Uploads the project with the selected VCS tool.
"""
if env.deploy_tool == "git":
remote_path = "ssh://%s@%s%s" % (env.user, env.host_string, env.repo_path)
if not exists(env.repo_path):
run("mkdir -p %s" % env.repo_path)
with cd(env.repo_path):
run("git init --bare")
local("git push -f %s master" % remote_path)
with cd(env.repo_path):
run("GIT_WORK_TREE=%s git checkout -f master" % env.proj_path)
run("GIT_WORK_TREE=%s git reset --hard" % env.proj_path)
elif env.deploy_tool == "hg":
remote_path = "ssh://%s@%s/%s" % (env.user, env.host_string, env.repo_path)
with cd(env.repo_path):
if not exists("%s/.hg" % env.repo_path):
run("hg init")
print(env.repo_path)
with fab_settings(warn_only=True):
push = local("hg push -f %s" % remote_path)
if push.return_code == 255:
abort()
run("hg update")
def db_pass():
"""
Prompts for the database password if unknown.
"""
if not env.db_pass:
env.db_pass = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
"""
Installs one or more system packages via apt.
"""
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
with virtualenv():
return run("pip install %s" % packages)
def postgres(command):
"""
Runs the given command as the postgres user.
"""
show = not command.startswith("psql")
return sudo(command, show=show, user="postgres")
@task
def psql(sql, show=True):
"""
Runs SQL against the project's database.
"""
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
"""
Backs up the project database.
"""
tmp_file = "/tmp/%s" % filename
# We dump to /tmp because user "postgres" can't write to other user folders
# We cd to / because user "postgres" might not have read permissions
# elsewhere.
with cd("/"):
postgres("pg_dump -Fc %s > %s" % (env.proj_name, tmp_file))
run("cp %s ." % tmp_file)
sudo("rm -f %s" % tmp_file)
@task
def restore(filename):
"""
Restores the project database from a previous backup.
"""
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
"""
Runs Python code in the project's virtual environment, with Django loaded.
"""
setup = (
"import os;"
"os.environ['DJANGO_SETTINGS_MODULE']='%s.settings';"
"import django;"
"django.setup();" % env.proj_app
)
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`")) # noqa
with project():
if show:
print_command(code)
result = run(full_code, show=False)
return result
def static():
"""
Returns the live STATIC_ROOT directory.
"""
return python(
"from django.conf import settings;" "print(settings.STATIC_ROOT)", show=False
).split("\n")[-1]
@task
def manage(command):
"""
Runs a Django management command.
"""
return run("%s %s" % (env.manage, command))
###########################
# Security best practices #
###########################
@task
@log_call
@hosts(["root@%s" % host for host in env.hosts])
def secure(new_user=env.user):
"""
Minimal security steps for brand new servers.
Installs system updates, creates new user (with sudo privileges) for future
usage, and disables root login via SSH.
"""
run("apt-get update -q")
run("apt-get upgrade -y -q")
run("adduser --gecos '' %s" % new_user)
run("usermod -G sudo %s" % new_user)
run("sed -i 's:RootLogin yes:RootLogin no:' /etc/ssh/sshd_config")
run("service ssh restart")
print(
green(
"Security steps completed. Log in to the server as '%s' from "
"now on." % new_user,
bold=True,
)
)
#########################
# Install and configure #
#########################
@task
@log_call
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
# Install system requirements
sudo("apt-get update -y -q")
apt(
"nginx libjpeg-dev python-dev python-setuptools git-core "
"postgresql libpq-dev memcached supervisor python-pip"
)
apt("gcc rsync")
run("mkdir -p /home/%s/logs" % env.user)
# Install Python requirements
sudo("pip install -U pip virtualenv virtualenvwrapper mercurial")
# Set up virtualenv
run("mkdir -p %s" % env.venv_home)
run("echo 'export WORKON_HOME=%s' >> /home/%s/.bashrc" % (env.venv_home, env.user))
run(
"echo 'source /usr/local/bin/virtualenvwrapper.sh' >> "
"/home/%s/.bashrc" % env.user
)
print(
green(
"Successfully set up git, mercurial, pip, virtualenv, "
"supervisor, memcached.",
bold=True,
)
)
@task
@log_call
def create():
"""
Creates the environment needed to host the project.
The environment consists of: system locales, virtualenv, database, project
files, SSL certificate, and project-specific Python requirements.
"""
# Generate project locale
locale = env.locale.replace("UTF-8", "utf8")
with hide("stdout"):
if locale not in run("locale -a"):
sudo("locale-gen %s" % env.locale)
sudo("update-locale %s" % env.locale)
sudo("service postgresql restart")
run("exit")
# Create project path
run("mkdir -p %s" % env.proj_path)
# Set up virtual env
run("mkdir -p %s" % env.venv_home)
with cd(env.venv_home):
if exists(env.proj_name):
if confirm(
"Virtualenv already exists in host server: %s"
"\nWould you like to replace it?" % env.proj_name
):
run("rm -rf %s" % env.proj_name)
else:
abort()
run("virtualenv %s" % env.proj_name)
# Upload project files
if env.deploy_tool in env.vcs_tools:
vcs_upload()
else:
rsync_upload()
# Create DB and DB user
pw = db_pass()
    user_sql_args = (env.proj_name, pw.replace("'", "''"))
user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
psql(user_sql, show=False)
shadowed = "*" * len(pw)
print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
psql(
"CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
"LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;"
% (env.proj_name, env.proj_name, env.locale, env.locale)
)
# Set up SSL certificate
if not env.ssl_disabled:
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
(crt_local,) = glob(join("deploy", "*.crt"))
(key_local,) = glob(join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.domains[0])
sudo(
"openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts
)
else:
upload_template(crt_local, crt_file, use_sudo=True)
upload_template(key_local, key_file, use_sudo=True)
# Install project-specific requirements
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
pip("gunicorn setproctitle psycopg2 " "django-compressor python-memcached")
# Bootstrap the DB
manage("createdb --noinput --nodata")
python(
"from django.conf import settings;"
"from django.contrib.sites.models import Site;"
"Site.objects.filter(id=settings.SITE_ID).update(domain='%s');"
% env.domains[0]
)
for domain in env.domains:
python(
"from django.contrib.sites.models import Site;"
"Site.objects.get_or_create(domain='%s');" % domain
)
if env.admin_pass:
pw = env.admin_pass
user_py = (
"from django.contrib.auth import get_user_model;"
"User = get_user_model();"
"u, _ = User.objects.get_or_create(username='admin');"
"u.is_staff = u.is_superuser = True;"
"u.set_password('%s');"
"u.save();" % pw
)
python(user_py, show=False)
shadowed = "*" * len(pw)
print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
return True
@task
@log_call
def remove():
"""
Blow away the current project.
"""
if exists(env.venv_path):
run("rm -rf %s" % env.venv_path)
if exists(env.proj_path):
run("rm -rf %s" % env.proj_path)
for template in get_templates().values():
remote_path = template["remote_path"]
if exists(remote_path):
sudo("rm %s" % remote_path)
if exists(env.repo_path):
run("rm -rf %s" % env.repo_path)
sudo("supervisorctl update")
psql("DROP DATABASE IF EXISTS %s;" % env.proj_name)
psql("DROP USER IF EXISTS %s;" % env.proj_name)
##############
# Deployment #
##############
@task
@log_call
def restart():
"""
Restart gunicorn worker processes for the project.
If the processes are not running, they will be started.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
if exists(pid_path):
run("kill -HUP `cat %s`" % pid_path)
else:
sudo("supervisorctl update")
@task
@log_call
def deploy():
"""
Deploy latest version of the project.
Backup current version of the project, push latest version of the project
via version control or rsync, install new requirements, sync and migrate
the database, collect any new static assets, and restart gunicorn's worker
processes for the project.
"""
if not exists(env.proj_path):
if confirm(
"Project does not exist in host server: %s"
"\nWould you like to create it?" % env.proj_name
):
create()
else:
abort()
# Backup current version of the project
with cd(env.proj_path):
backup("last.db")
if env.deploy_tool in env.vcs_tools:
with cd(env.repo_path):
if env.deploy_tool == "git":
run("git rev-parse HEAD > %s/last.commit" % env.proj_path)
elif env.deploy_tool == "hg":
run("hg id -i > last.commit")
with project():
static_dir = static()
if exists(static_dir):
run("tar -cf static.tar --exclude='*.thumbnails' %s" % static_dir)
else:
with cd(join(env.proj_path, "..")):
            excludes = ["*.pyc", "*.pyo", "*.thumbnails"]
exclude_arg = " ".join("--exclude='%s'" % e for e in excludes)
run("tar -cf {0}.tar {1} {0}".format(env.proj_name, exclude_arg))
# Deploy latest version of the project
with update_changed_requirements():
if env.deploy_tool in env.vcs_tools:
vcs_upload()
else:
rsync_upload()
with project():
manage("collectstatic -v 0 --noinput")
manage("migrate --noinput")
for name in get_templates():
upload_template_and_reload(name)
restart()
return True
@task
@log_call
def rollback():
"""
Reverts project state to the last deploy.
When a deploy is performed, the current state of the project is
backed up. This includes the project files, the database, and all static
files. Calling rollback will revert all of these to their state prior to
the last deploy.
"""
with update_changed_requirements():
if env.deploy_tool in env.vcs_tools:
with cd(env.repo_path):
if env.deploy_tool == "git":
run(
"GIT_WORK_TREE={0} git checkout -f "
"`cat {0}/last.commit`".format(env.proj_path)
)
elif env.deploy_tool == "hg":
run("hg update -C `cat last.commit`")
with project():
with cd(join(static(), "..")):
run("tar -xf %s/static.tar" % env.proj_path)
else:
with cd(env.proj_path.rsplit("/", 1)[0]):
run("rm -rf %s" % env.proj_name)
run("tar -xf %s.tar" % env.proj_name)
with cd(env.proj_path):
restore("last.db")
restart()
@task
@log_call
def all():
"""
Installs everything required on a new system and deploy.
From the base software, up to the deployed project.
"""
install()
if create():
deploy()
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
voipms/api/__init__.py
|
import os
import json
import requests
from voipms.base.exceptions import VoipException
class Client(object):
def __init__(self, username=None, password=None):
self.username = username or os.environ.get('VOIPMS_ACCOUNT_USER')
self.password = password or os.environ.get('VOIPMS_API_TOKEN')
self.api_base = "https://voip.ms/api/v1/rest.php"
if not self.username or not self.password:
raise VoipException("Credentials are required to create a Client")
self.auth = (self.username, self.password)
self._accounts = None
self._call_detail_records = None
self._dids = None
self._general = None
self._voicemail = None
def request(self, method, auth=None, params={}):
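        """Calls the given voip.ms API method and returns the decoded JSON response, raising VoipException on a non-success status."""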
auth = auth or self.auth
params["api_username"] = auth[0]
params["api_password"] = auth[1]
params["method"] = method
params["content_type"] = "json"
response = requests.get(self.api_base, params=params)
data = json.loads(response.text)
if data['status'] and data['status'] != 'success':
err_code = data['status']
raise VoipException(err_code)
return data
@property
def accounts(self):
if self._accounts is None:
from voipms.api.accounts import Accounts
self._accounts = Accounts(self)
return self._accounts
@property
def call_detail_records(self):
if self._call_detail_records is None:
from voipms.api.call_detail_records import CallDetailRecords
self._call_detail_records = CallDetailRecords(self)
return self._call_detail_records
@property
def dids(self):
if self._dids is None:
from voipms.api.dids import DIDs
self._dids = DIDs(self)
return self._dids
@property
def general(self):
if self._general is None:
from voipms.api.general import General
self._general = General(self)
return self._general
@property
def voicemail(self):
if self._voicemail is None:
from voipms.api.voicemail import Voicemail
self._voicemail = Voicemail(self)
return self._voicemail
@property
def balance(self):
return self.general.balance
@property
def ip(self):
return self.general.ip
@property
def transaction_history(self):
return self.general.transaction_history
@property
def countries(self):
return self.general.countries
@property
def languages(self):
return self.general.languages
@property
def subaccount(self):
return self.accounts.subaccount
@property
def registration_status(self):
return self.accounts.registration_status
@property
def billing(self):
return self.call_detail_records.billing
@property
def records(self):
return self.call_detail_records.records
@property
def rates(self):
return self.call_detail_records.rates
@property
def termination_rates(self):
return self.call_detail_records.termination_rates
@property
def search(self):
return self.dids.search
@property
def sms(self):
return self.dids.sms
@property
def messages(self):
return self.voicemail.messages
|
[] |
[] |
[
"VOIPMS_ACCOUNT_USER",
"VOIPMS_API_TOKEN"
] |
[]
|
["VOIPMS_ACCOUNT_USER", "VOIPMS_API_TOKEN"]
|
python
| 2 | 0 | |
comment-app/app.go
|
package main
import (
"database/sql"
"log"
"net/http"
"os"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/joho/godotenv"
"github.com/julienschmidt/httprouter"
_ "github.com/lib/pq"
httpDelivery "github.com/stanleynguyen/git-comment/comment-app/delivery/http"
"github.com/stanleynguyen/git-comment/comment-app/repository/ghcli"
"github.com/stanleynguyen/git-comment/comment-app/repository/persistence"
"github.com/stanleynguyen/git-comment/comment-app/usecase/comment"
)
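// startInDev loads configuration from a local .env file, runs database
// migrations, and starts the HTTP server on the configured PORT.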
func startInDev() {
err := godotenv.Load(".env")
if err != nil {
log.Fatal(err)
}
db, err := bootStrapDB()
if err != nil {
log.Fatal(err)
}
router := bootstrapApplication(db)
log.Printf("Listening on %s", os.Getenv("PORT"))
log.Fatal(http.ListenAndServe(":"+os.Getenv("PORT"), router))
}
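// startInProd bootstraps the database and starts the HTTP server using
// environment variables already present in the process.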
func startInProd() {
db, err := bootStrapDB()
if err != nil {
log.Fatal(err)
}
router := bootstrapApplication(db)
log.Printf("Listening on %s", os.Getenv("PORT"))
log.Fatal(http.ListenAndServe(":"+os.Getenv("PORT"), router))
}
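// startInTest bootstraps the application, signals on the provided channel once
// setup is complete, and serves on localhost only.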
func startInTest(bootstrappingDone chan<- bool) {
db, err := bootStrapDB()
if err != nil {
log.Fatal(err)
}
router := bootstrapApplication(db)
log.Printf("Listening on %s", os.Getenv("PORT"))
	bootstrappingDone <- true
log.Fatal(http.ListenAndServe("localhost:"+os.Getenv("PORT"), router))
}
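// bootStrapDB opens the Postgres connection described by the DB environment
// variable and applies any pending file-based migrations.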
func bootStrapDB() (*sql.DB, error) {
db, err := sql.Open("postgres", os.Getenv("DB"))
if err != nil {
return nil, err
}
driver, err := postgres.WithInstance(db, &postgres.Config{})
if err != nil {
return nil, err
}
m, err := migrate.NewWithDatabaseInstance(
"file://./migrations",
"postgres", driver)
if err != nil {
return nil, err
}
err = m.Up()
if err != nil && err != migrate.ErrNoChange {
return nil, err
}
return db, nil
}
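// bootstrapApplication wires the persistence and GitHub client repositories
// into the comment usecase and returns the configured HTTP router.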
func bootstrapApplication(db *sql.DB) http.Handler {
postgresRepo := persistence.NewPostgresRepo(db)
ghCli := ghcli.NewBasicGithubClient()
cu := comment.NewCommentUsecase(postgresRepo, ghCli)
router := httprouter.New()
httpHandler := httpDelivery.Handler{Router: router}
httpHandler.InitCommentsHandler(cu)
return router
}
|
[
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"DB\""
] |
[] |
[
"PORT",
"DB"
] |
[]
|
["PORT", "DB"]
|
go
| 2 | 0 | |
pkg/kn/commands/service/migrate.go
|
// Copyright © 2019 The Knative Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service
import (
"fmt"
"os"
"github.com/spf13/cobra"
apiv1 "k8s.io/api/core/v1"
api_errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc" // from https://github.com/kubernetes/client-go/issues/345
"k8s.io/client-go/tools/clientcmd"
"knative.dev/client/pkg/kn/commands"
"knative.dev/client/pkg/serving/v1alpha1"
v1alpha12 "knative.dev/client/pkg/serving/v1alpha1"
serving_v1alpha1_api "knative.dev/serving/pkg/apis/serving/v1alpha1"
)
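// NewServiceMigrateCommand returns the "kn migrate" command, which copies
// Knative services and their revisions from a source cluster/namespace to a
// destination cluster/namespace.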
func NewServiceMigrateCommand(p *commands.KnParams) *cobra.Command {
var migrateFlags MigrateFlags
serviceMigrateCommand := &cobra.Command{
Use: "migrate",
Short: "Migrate Knative services from source cluster to destination cluster",
Example: `
# Migrate Knative services from source cluster to destination cluster by export KUBECONFIG and KUBECONFIG_DESTINATION as environment variables
kn migrate --namespace default --destination-namespace default
# Migrate Knative services from source cluster to destination cluster by set kubeconfig as parameters
kn migrate --namespace default --destination-namespace default --kubeconfig $HOME/.kube/config/source-cluster-config.yml --destination-kubeconfig $HOME/.kube/config/destination-cluster-config.yml
# Migrate Knative services from source cluster to destination cluster and force replace the service if exists in destination cluster
kn migrate --namespace default --destination-namespace default --force
# Migrate Knative services from source cluster to destination cluster and delete the service in source cluster
kn migrate --namespace default --destination-namespace default --force --delete`,
RunE: func(cmd *cobra.Command, args []string) (err error) {
namespaceS := ""
namespaceD := ""
if migrateFlags.SourceNamespace == "" {
return fmt.Errorf("cannot get source cluster namespace, please use --namespace to set")
} else {
namespaceS = migrateFlags.SourceNamespace
}
p.GetClientConfig()
if migrateFlags.DestinationNamespace == "" {
return fmt.Errorf("cannot get destination cluster namespace, please use --destination-namespace to set")
} else {
namespaceD = migrateFlags.DestinationNamespace
}
kubeconfigS := p.KubeCfgPath
if kubeconfigS == "" {
kubeconfigS = os.Getenv("KUBECONFIG")
}
if kubeconfigS == "" {
return fmt.Errorf("cannot get source cluster kube config, please use --kubeconfig or export environment variable KUBECONFIG to set")
}
kubeconfigD := migrateFlags.DestinationKubeconfig
if kubeconfigD == "" {
kubeconfigD = os.Getenv("KUBECONFIG_DESTINATION")
}
if kubeconfigD == "" {
return fmt.Errorf("cannot get destination cluster kube config, please use --destination-kubeconfig or export environment variable KUBECONFIG_DESTINATION to set")
}
// For source
p.KubeCfgPath = kubeconfigS
clientS, err := p.NewServingClient(namespaceS)
if err != nil {
return err
}
err = printServiceWithRevisions(clientS, namespaceS, "source")
if err != nil {
return err
}
dp := commands.KnParams{
KubeCfgPath: kubeconfigD,
}
// For destination
dp.Initialize()
clientD, err := dp.NewServingClient(namespaceD)
if err != nil {
return err
}
fmt.Println("[Before migration in destination cluster]")
err = printServiceWithRevisions(clientD, namespaceD, "destination")
if err != nil {
return err
}
fmt.Println("\nNow migrate all Knative resources: \nFrom the source namespace ", namespaceS, "of cluster", p.KubeCfgPath)
fmt.Println("To the destination namespace", namespaceD, "of cluster", kubeconfigD)
			cfg_d, err := clientcmd.BuildConfigFromFlags("", dp.KubeCfgPath)
			if err != nil {
				return err
			}
			clientset, err := clientset.NewForConfig(cfg_d)
if err != nil {
return err
}
namespaceExists, err := namespaceExists(*clientset, namespaceD)
if err != nil {
return err
}
if !namespaceExists {
fmt.Println("Create namespace", namespaceD, "in destination cluster")
nsSpec := &apiv1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespaceD}}
_, err = clientset.CoreV1().Namespaces().Create(nsSpec)
if err != nil {
return err
}
} else {
fmt.Println("Namespace", namespaceD, "already exists in destination cluster")
}
if err != nil {
return err
}
servicesS, err := clientS.ListServices()
if err != nil {
return err
}
for i := 0; i < len(servicesS.Items); i++ {
serviceS := servicesS.Items[i]
serviceExists, err := serviceExists(clientD, serviceS.Name)
if err != nil {
return err
}
if serviceExists {
if !migrateFlags.ForceReplace {
fmt.Println("\n[Error] Cannot migrate service", serviceS.Name, "in namespace", namespaceS,
"because the service already exists and no --force option was given")
os.Exit(1)
}
fmt.Println("Deleting service", serviceS.Name, "from the destination cluster and recreate as replacement")
err = clientD.DeleteService(serviceS.Name)
if err != nil {
return err
}
}
err = clientD.CreateService(constructMigratedService(serviceS, namespaceD))
if err != nil {
return err
}
fmt.Println("Migrated service", serviceS.Name, "Successfully")
serviceD, err := clientD.GetService(serviceS.Name)
if err != nil {
return err
}
config, err := clientD.GetConfiguration(serviceD.Name)
if err != nil {
return err
}
configUuid := config.UID
				revisionsS, err := clientS.ListRevisions(v1alpha12.WithService(serviceS.Name))
				if err != nil {
					return err
				}
for i := 0; i < len(revisionsS.Items); i++ {
revisionS := revisionsS.Items[i]
if revisionS.Name != serviceS.Status.LatestReadyRevisionName {
err := clientD.CreateRevision(constructRevision(revisionS, configUuid, namespaceD))
if err != nil {
return err
}
fmt.Println("Migrated revision", revisionS.Name, "successfully")
} else {
retries := 0
for {
revision, err := clientD.GetRevision(revisionS.Name)
if err != nil {
return err
}
sourceRevisionGeneration := revisionS.ObjectMeta.Labels["serving.knative.dev/configurationGeneration"]
revision.ObjectMeta.Labels["serving.knative.dev/configurationGeneration"] = sourceRevisionGeneration
err = clientD.UpdateRevision(revision)
if err != nil {
// Retry to update when a resource version conflict exists
if api_errors.IsConflict(err) && retries < MaxUpdateRetries {
retries++
continue
}
return err
}
fmt.Println("Replace revision", revisionS.Name, "to generation", sourceRevisionGeneration, "successfully")
break
}
}
}
fmt.Println("")
}
fmt.Println("[After migration in destination cluster]")
err = printServiceWithRevisions(clientD, namespaceD, "destination")
if err != nil {
return err
}
if cmd.Flag("delete").Value.String() == "false" {
fmt.Println("Migrate without --delete option, skip deleting Knative resource in source cluster")
} else {
fmt.Println("Migrate with --delete option, deleting all Knative resource in source cluster")
servicesS, err := clientS.ListServices()
if err != nil {
return err
}
for i := 0; i < len(servicesS.Items); i++ {
serviceS := servicesS.Items[i]
err = clientS.DeleteService(serviceS.Name)
if err != nil {
return err
}
fmt.Println("Deleted service", serviceS.Name, "in source cluster", namespaceS, "namespace")
}
}
return nil
},
}
migrateFlags.addFlags(serviceMigrateCommand)
return serviceMigrateCommand
}
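// namespaceExists reports whether the given namespace exists in the cluster
// targeted by the client.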
func namespaceExists(client clientset.Clientset, namespace string) (bool, error) {
_, err := client.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
if api_errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}
// Get service list with revisions
func printServiceWithRevisions(client v1alpha1.KnServingClient, namespace, clustername string) error {
services, err := client.ListServices()
if err != nil {
return err
}
fmt.Println("There are", len(services.Items), "service(s) in", clustername, "cluster", namespace, "namespace:")
for i := 0; i < len(services.Items); i++ {
service := services.Items[i]
fmt.Printf("%-25s%-30s%-20s\n", "Name", "Current Revision", "Ready")
fmt.Printf("%-25s%-30s%-20s\n", service.Name, service.Status.LatestReadyRevisionName, fmt.Sprint(service.Status.IsReady()))
revisionsS, err := client.ListRevisions(v1alpha12.WithService(service.Name))
if err != nil {
return err
}
for i := 0; i < len(revisionsS.Items); i++ {
revisionS := revisionsS.Items[i]
fmt.Println(" |- Revision", revisionS.Name, "( Generation: "+fmt.Sprint(revisionS.Labels["serving.knative.dev/configurationGeneration"]), ", Ready:", revisionS.Status.IsReady(), ")")
}
fmt.Println("")
}
return nil
}
// Create service struct from provided options
func constructMigratedService(originalService serving_v1alpha1_api.Service, namespace string) *serving_v1alpha1_api.Service {
service := serving_v1alpha1_api.Service{
ObjectMeta: originalService.ObjectMeta,
}
service.ObjectMeta.Namespace = namespace
service.Spec = originalService.Spec
service.Spec.Template.ObjectMeta.Name = originalService.Status.LatestCreatedRevisionName
service.ObjectMeta.ResourceVersion = ""
return &service
}
// Create revision struct from provided options
func constructRevision(originalRevision serving_v1alpha1_api.Revision, configUuid types.UID, namespace string) *serving_v1alpha1_api.Revision {
revision := serving_v1alpha1_api.Revision{
ObjectMeta: originalRevision.ObjectMeta,
}
revision.ObjectMeta.Namespace = namespace
revision.ObjectMeta.ResourceVersion = ""
revision.ObjectMeta.OwnerReferences[0].UID = configUuid
revision.ObjectMeta.Labels["serving.knative.dev/configurationGeneration"] = originalRevision.ObjectMeta.Labels["serving.knative.dev/configurationGeneration"]
revision.Spec = originalRevision.Spec
return &revision
}
|
[
"\"KUBECONFIG\"",
"\"KUBECONFIG_DESTINATION\""
] |
[] |
[
"KUBECONFIG_DESTINATION",
"KUBECONFIG"
] |
[]
|
["KUBECONFIG_DESTINATION", "KUBECONFIG"]
|
go
| 2 | 0 | |
src/project/settings.py
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from pathlib import Path
from django.urls import reverse_lazy
import dj_database_url
from dynaconf import settings as _settings
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = Path(__file__).parent.parent.resolve()
PROJECT_DIR = BASE_DIR / "project"
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = _settings.SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = _settings.DEBUG
ALLOWED_HOSTS = _settings.ALLOWED_HOSTS
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# 'django_filters',
# our apps
"apps.main",
"apps.booking",
"apps.user",
]
AUTH_USER_MODEL = 'user.Account'
# AUTHENTICATION_BACKENDS = (
# 'django.contrib.auth.backends.ModelBackend',
# )
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [BASE_DIR / "project" / "templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
db_url = _settings.DATABASE_URL
if _settings.ENV_FOR_DYNACONF == "heroku":
db_url = os.getenv("DATABASE_URL")
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
"default": dj_database_url.parse(db_url, conn_max_age=600)
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_DIR = BASE_DIR / "static"
STATIC_DIR.mkdir(exist_ok=True)
STATIC_ROOT = STATIC_DIR.as_posix()
STATIC_URL = "/static/"
STATICFILES_DIRS = [PROJECT_DIR / "static"]
LOGIN_URL = reverse_lazy("login")
LOGIN_REDIRECT_URL = reverse_lazy("main")
LOGOUT_URL = reverse_lazy("main")
PASSWORD_HASHERS = [
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
|
[] |
[] |
[
"DATABASE_URL"
] |
[]
|
["DATABASE_URL"]
|
python
| 1 | 0 | |
tests/pytorch/test_distributed_training.py
|
"""
Tests core functionality of naming workers when there are multiple processes.
See https://pytorch.org/tutorials/intermediate/ddp_tutorial.html to decide
how we want to support DistributedDataParallel with limited user configuration.
The key methods are
torch.distributed.get_rank() - when manually spawning processes
"""
# Standard Library
import os
import shutil
# Third Party
import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import multiprocessing
from torch.multiprocessing import Process
# First Party
import smdebug.pytorch as smd
from smdebug.trials import create_trial
out_dir = "/tmp/run"
class Net(nn.Module):
"""Returns f(x) = sigmoid(w*x + b)"""
def __init__(self):
super().__init__()
self.add_module("fc", nn.Linear(1, 1))
def forward(self, x):
x = self.fc(x)
x = F.sigmoid(x)
return x
def dataset(batch_size=4):
"""Return a dataset of (data, target)."""
data = torch.rand(batch_size, 1)
target = F.sigmoid(2 * data + 1)
return data, target
def train(model, device, optimizer, num_steps=10):
"""Runs the training loop."""
model.train()
for i in range(num_steps):
batch_size = 4
data = torch.rand(batch_size, 1)
target = F.sigmoid(2 * data + 1)
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.mse_loss(output, target)
loss.backward()
optimizer.step()
def run(rank, size, include_workers="one", num_epochs=10, batch_size=128, num_batches=10):
"""Distributed function to be implemented later."""
torch.manual_seed(1234)
device = torch.device("cpu")
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=1)
shutil.rmtree(out_dir, ignore_errors=True)
hook = smd.Hook(
out_dir=out_dir,
save_config=smd.SaveConfig(save_steps=[0, 1, 5]),
save_all=True,
include_workers=include_workers,
)
hook.register_module(model)
for epoch in range(num_epochs):
epoch_loss = 0.0
for _ in range(num_batches):
optimizer.zero_grad()
data, target = dataset(batch_size)
output = model(data)
loss = F.mse_loss(output, target)
epoch_loss += loss.item()
loss.backward()
average_gradients(model)
optimizer.step()
# print(f"Rank {dist.get_rank()}, epoch {epoch}: {epoch_loss / num_batches}")
assert hook._get_worker_name() == f"worker_{dist.get_rank()}"
# Race condition here where both workers attempt to move
# /tmp/{out_dir}/END_OF_JOB.ts to {out_dir}/END_OF_JOB.ts
try:
hook._cleanup()
except FileNotFoundError:
pass
def average_gradients(model):
"""Gradient averaging."""
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
param.grad.data /= size
def init_processes(rank, size, include_workers, fn, backend="gloo"):
"""Initialize the distributed environment."""
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"
dist.init_process_group(backend, rank=rank, world_size=size)
fn(rank, size, include_workers)
def _run_net_distributed(include_workers="one"):
"""Runs a single linear layer on 2 processes."""
# torch.distributed is empty on Mac on Torch <= 1.2
if not hasattr(dist, "is_initialized"):
return
multiprocessing.set_start_method("spawn", force=True)
size = 2
processes = []
for rank in range(size):
p = Process(target=init_processes, args=(rank, size, include_workers, run))
p.start()
processes.append(p)
for p in processes:
p.join()
# WARNING: assert statements do not cause test failure inside subprocesses
# https://stackoverflow.com/questions/13400546/py-test-how-to-automatically-detect-an-exception-in-a-child-process
assert all([not p.exitcode for p in processes]), f"Some processes failed. processes={processes}"
out_dir = "/tmp/run"
trial = create_trial(path=out_dir)
return trial
@pytest.mark.slow # 0:05 to run
def test_run_net_single_process():
"""Runs a single linear layer."""
device = torch.device("cpu")
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01)
shutil.rmtree(out_dir, ignore_errors=True)
hook = smd.Hook(
out_dir=out_dir, save_config=smd.SaveConfig(save_steps=[0, 1, 5]), save_all=True
)
hook.register_module(model)
train(model=model, device=device, optimizer=optimizer)
hook._cleanup()
assert hook._get_worker_name() == "worker_0"
trial = create_trial(path=out_dir)
assert len(trial.workers()) == 1, f"trial.workers() = {trial.workers()}"
assert len(trial.steps()) == 3, f"trial.steps() = {trial.steps()}"
shutil.rmtree(out_dir, ignore_errors=True)
@pytest.mark.slow # 0:07 to run
def test_run_net_distributed_save_all_workers():
trial = _run_net_distributed(include_workers="all")
assert len(trial.workers()) == 2, f"trial.workers() = {trial.workers()}"
assert len(trial.steps()) == 3, f"trial.steps() = {trial.steps()}"
@pytest.mark.slow # 0:07 to run
def test_run_net_distributed_save_one_worker():
trial = _run_net_distributed(include_workers="one")
assert len(trial.workers()) == 1, f"trial.workers() = {trial.workers()}"
assert len(trial.steps()) == 3, f"trial.steps() = {trial.steps()}"
|
[] |
[] |
[
"MASTER_ADDR",
"MASTER_PORT"
] |
[]
|
["MASTER_ADDR", "MASTER_PORT"]
|
python
| 2 | 0 | |
BaseTools/Source/Python/Common/DscClassObject.py
|
## @file
# This file is used to define each component of DSC file
#
# Copyright (c) 2007 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import EdkLogger as EdkLogger
import Database
from String import *
from Parsing import *
from DataType import *
from Identification import *
from Dictionary import *
from CommonDataClass.PlatformClass import *
from CommonDataClass.CommonClass import SkuInfoClass
from BuildToolError import *
from Misc import sdict
import GlobalData
from Table.TableDsc import TableDsc
from Common.LongFilePathSupport import OpenLongFilePath as open
#
# Global variable
#
Section = {TAB_UNKNOWN.upper() : MODEL_UNKNOWN,
TAB_DSC_DEFINES.upper() : MODEL_META_DATA_HEADER,
TAB_BUILD_OPTIONS.upper() : MODEL_META_DATA_BUILD_OPTION,
TAB_SKUIDS.upper() : MODEL_EFI_SKU_ID,
TAB_LIBRARIES.upper() : MODEL_EFI_LIBRARY_INSTANCE,
TAB_LIBRARY_CLASSES.upper() : MODEL_EFI_LIBRARY_CLASS,
TAB_PCDS_FIXED_AT_BUILD_NULL.upper() : MODEL_PCD_FIXED_AT_BUILD,
TAB_PCDS_PATCHABLE_IN_MODULE_NULL.upper() : MODEL_PCD_PATCHABLE_IN_MODULE,
TAB_PCDS_FEATURE_FLAG_NULL.upper() : MODEL_PCD_FEATURE_FLAG,
TAB_PCDS_DYNAMIC_EX_NULL.upper() : MODEL_PCD_DYNAMIC_EX,
TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL.upper() : MODEL_PCD_DYNAMIC_EX_DEFAULT,
TAB_PCDS_DYNAMIC_EX_VPD_NULL.upper() : MODEL_PCD_DYNAMIC_EX_VPD,
TAB_PCDS_DYNAMIC_EX_HII_NULL.upper() : MODEL_PCD_DYNAMIC_EX_HII,
TAB_PCDS_DYNAMIC_NULL.upper() : MODEL_PCD_DYNAMIC,
TAB_PCDS_DYNAMIC_DEFAULT_NULL.upper() : MODEL_PCD_DYNAMIC_DEFAULT,
TAB_PCDS_DYNAMIC_VPD_NULL.upper() : MODEL_PCD_DYNAMIC_VPD,
TAB_PCDS_DYNAMIC_HII_NULL.upper() : MODEL_PCD_DYNAMIC_HII,
TAB_COMPONENTS.upper() : MODEL_META_DATA_COMPONENT,
TAB_USER_EXTENSIONS.upper() : MODEL_META_DATA_USER_EXTENSION
}
## DscObject
#
# This class defined basic Dsc object which is used by inheriting
#
# @param object: Inherited from object class
#
class DscObject(object):
def __init__(self):
object.__init__()
## Dsc
#
# This class defined the structure used in Dsc object
#
# @param DscObject:       Inherited from DscObject class
# @param Filename:        Input value for Filename of Dsc file, default is None
# @param IsMergeAllArches: Input value for IsMergeAllArches
# True is to merge all arches
#                              False is not to merge all arches
# default is False
# @param IsToPlatform: Input value for IsToPlatform
#                              True is to transfer to PlatformObject automatically
#                              False is not to transfer to PlatformObject automatically
# default is False
# @param WorkspaceDir: Input value for current workspace directory, default is None
#
# @var _NullClassIndex: To store value for _NullClassIndex, default is 0
# @var Identification: To store value for Identification, it is a structure as Identification
# @var Defines: To store value for Defines, it is a structure as DscDefines
# @var Contents: To store value for Contents, it is a structure as DscContents
# @var UserExtensions: To store value for UserExtensions
# @var Platform: To store value for Platform, it is a structure as PlatformClass
# @var WorkspaceDir: To store value for WorkspaceDir
# @var KeyList: To store value for KeyList, a list for all Keys used in Dec
#
class Dsc(DscObject):
_NullClassIndex = 0
def __init__(self, Filename=None, IsToDatabase=False, IsToPlatform=False, WorkspaceDir=None, Database=None):
self.Identification = Identification()
self.Platform = PlatformClass()
self.UserExtensions = ''
self.WorkspaceDir = WorkspaceDir
self.IsToDatabase = IsToDatabase
if Database:
self.Cur = Database.Cur
self.TblFile = Database.TblFile
self.TblDsc = Database.TblDsc
self.KeyList = [
TAB_SKUIDS, TAB_LIBRARIES, TAB_LIBRARY_CLASSES, TAB_BUILD_OPTIONS, TAB_PCDS_FIXED_AT_BUILD_NULL, \
TAB_PCDS_PATCHABLE_IN_MODULE_NULL, TAB_PCDS_FEATURE_FLAG_NULL, \
TAB_PCDS_DYNAMIC_DEFAULT_NULL, TAB_PCDS_DYNAMIC_HII_NULL, TAB_PCDS_DYNAMIC_VPD_NULL, \
TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, TAB_PCDS_DYNAMIC_EX_HII_NULL, TAB_PCDS_DYNAMIC_EX_VPD_NULL, \
TAB_COMPONENTS, TAB_DSC_DEFINES
]
self.PcdToken = {}
#
# Upper all KEYs to ignore case sensitive when parsing
#
self.KeyList = map(lambda c: c.upper(), self.KeyList)
#
# Init RecordSet
#
# self.RecordSet = {}
# for Key in self.KeyList:
# self.RecordSet[Section[Key]] = []
#
# Load Dsc file if filename is not None
#
if Filename != None:
self.LoadDscFile(Filename)
#
# Transfer to Platform Object if IsToPlatform is True
#
if IsToPlatform:
self.DscToPlatform()
## Transfer to Platform Object
#
# Transfer all contents of a Dsc file to a standard Platform Object
#
def DscToPlatform(self):
#
# Init global information for the file
#
ContainerFile = self.Identification.FileFullPath
#
# Generate Platform Header
#
self.GenPlatformHeader(ContainerFile)
#
# Generate BuildOptions
#
self.GenBuildOptions(ContainerFile)
#
# Generate SkuInfos
#
self.GenSkuInfos(ContainerFile)
#
# Generate Libraries
#
self.GenLibraries(ContainerFile)
#
# Generate LibraryClasses
#
self.GenLibraryClasses(ContainerFile)
#
# Generate Pcds
#
self.GenPcds(DataType.TAB_PCDS_FIXED_AT_BUILD, ContainerFile)
self.GenPcds(DataType.TAB_PCDS_PATCHABLE_IN_MODULE, ContainerFile)
self.GenFeatureFlagPcds(DataType.TAB_PCDS_FEATURE_FLAG, ContainerFile)
self.GenDynamicDefaultPcds(DataType.TAB_PCDS_DYNAMIC_DEFAULT, ContainerFile)
self.GenDynamicDefaultPcds(DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT, ContainerFile)
self.GenDynamicHiiPcds(DataType.TAB_PCDS_DYNAMIC_HII, ContainerFile)
self.GenDynamicHiiPcds(DataType.TAB_PCDS_DYNAMIC_EX_HII, ContainerFile)
self.GenDynamicVpdPcds(DataType.TAB_PCDS_DYNAMIC_VPD, ContainerFile)
self.GenDynamicVpdPcds(DataType.TAB_PCDS_DYNAMIC_EX_VPD, ContainerFile)
#
# Generate Components
#
self.GenComponents(ContainerFile)
#
# Update to database
#
if self.IsToDatabase:
for Key in self.PcdToken.keys():
SqlCommand = """update %s set Value2 = '%s' where ID = %s""" % (self.TblDsc.Table, ".".join((self.PcdToken[Key][0], self.PcdToken[Key][1])), Key)
self.TblDsc.Exec(SqlCommand)
#End of DscToPlatform
## Get Platform Header
#
# Gen Platform Header of Dsc as <Key> = <Value>
#
# @param ContainerFile: The Dsc file full path
#
def GenPlatformHeader(self, ContainerFile):
EdkLogger.debug(2, "Generate PlatformHeader ...")
#
# Update all defines item in database
#
SqlCommand = """select ID, Value1, Arch, StartLine from %s
where Model = %s
and BelongsToFile = %s
and Enabled > -1""" % (self.TblDsc.Table, MODEL_META_DATA_HEADER, self.FileID)
RecordSet = self.TblDsc.Exec(SqlCommand)
for Record in RecordSet:
ValueList = GetSplitValueList(Record[1], TAB_EQUAL_SPLIT)
if len(ValueList) != 2:
RaiseParserError(Record[1], 'Defines', ContainerFile, '<Key> = <Value>', Record[3])
ID, Value1, Value2, Arch = Record[0], ValueList[0], ValueList[1], Record[2]
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
where ID = %s""" % (self.TblDsc.Table, ConvertToSqlString2(Value1), ConvertToSqlString2(Value2), ID)
self.TblDsc.Exec(SqlCommand)
#
# Get detailed information
#
for Arch in DataType.ARCH_LIST:
PlatformHeader = PlatformHeaderClass()
PlatformHeader.Name = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_PLATFORM_NAME, Arch, self.FileID)[0]
PlatformHeader.Guid = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_PLATFORM_GUID, Arch, self.FileID)[0]
PlatformHeader.Version = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_PLATFORM_VERSION, Arch, self.FileID)[0]
PlatformHeader.FileName = self.Identification.FileName
PlatformHeader.FullPath = self.Identification.FileFullPath
PlatformHeader.DscSpecification = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_DSC_SPECIFICATION, Arch, self.FileID)[0]
PlatformHeader.SkuIdName = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_SKUID_IDENTIFIER, Arch, self.FileID)
PlatformHeader.SupArchList = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES, Arch, self.FileID)
PlatformHeader.BuildTargets = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_BUILD_TARGETS, Arch, self.FileID)
PlatformHeader.OutputDirectory = NormPath(QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_OUTPUT_DIRECTORY, Arch, self.FileID)[0])
PlatformHeader.BuildNumber = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_BUILD_NUMBER, Arch, self.FileID)[0]
PlatformHeader.MakefileName = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_MAKEFILE_NAME, Arch, self.FileID)[0]
PlatformHeader.BsBaseAddress = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_BS_BASE_ADDRESS, Arch, self.FileID)[0]
PlatformHeader.RtBaseAddress = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_RT_BASE_ADDRESS, Arch, self.FileID)[0]
self.Platform.Header[Arch] = PlatformHeader
Fdf = PlatformFlashDefinitionFileClass()
Fdf.FilePath = NormPath(QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_FLASH_DEFINITION, Arch, self.FileID)[0])
self.Platform.FlashDefinitionFile = Fdf
Prebuild = BuildScriptClass()
Prebuild.FilePath = NormPath(QueryDefinesItem(self.TblDsc, TAB_DSC_PREBUILD, Arch, self.FileID)[0])
self.Platform.Prebuild = Prebuild
Postbuild = BuildScriptClass()
Postbuild.FilePath = NormPath(QueryDefinesItem(self.TblDsc, TAB_DSC_POSTBUILD, Arch, self.FileID)[0])
self.Platform.Postbuild = Postbuild
## GenBuildOptions
#
# Gen BuildOptions of Dsc
# [<Family>:]<ToolFlag>=Flag
#
# @param ContainerFile: The Dsc file full path
#
def GenBuildOptions(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_BUILD_OPTIONS)
BuildOptions = {}
#
# Get all include files
#
IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, MODEL_META_DATA_BUILD_OPTION, self.FileID)
#
# Get all BuildOptions
#
RecordSet = QueryDscItem(self.TblDsc, MODEL_META_DATA_BUILD_OPTION, -1, self.FileID)
#
# Go through each arch
#
for Arch in DataType.ARCH_LIST:
for IncludeFile in IncludeFiles:
if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, TAB_BUILD_OPTIONS, '', IncludeFile[2])
for NewItem in open(Filename, 'r').readlines():
if CleanString(NewItem) == '':
continue
(Family, ToolChain, Flag) = GetBuildOption(NewItem, Filename, -1)
MergeArches(BuildOptions, (Family, ToolChain, Flag), Arch)
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
(Family, ToolChain, Flag) = GetBuildOption(Record[0], ContainerFile, Record[2])
MergeArches(BuildOptions, (Family, ToolChain, Flag), Arch)
#
# Update to Database
#
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
where ID = %s""" % (self.TblDsc.Table, ConvertToSqlString2(Family), ConvertToSqlString2(ToolChain), ConvertToSqlString2(Flag), Record[3])
self.TblDsc.Exec(SqlCommand)
for Key in BuildOptions.keys():
BuildOption = BuildOptionClass(Key[0], Key[1], Key[2])
BuildOption.SupArchList = BuildOptions[Key]
self.Platform.BuildOptions.BuildOptionList.append(BuildOption)
## GenSkuInfos
#
# Gen SkuInfos of Dsc
# <Integer>|<UiName>
#
# @param ContainerFile: The Dsc file full path
#
def GenSkuInfos(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_SKUIDS)
#
# SkuIds
# <Integer>|<UiName>
#
self.Platform.SkuInfos.SkuInfoList['DEFAULT'] = '0'
#
# Get all include files
#
IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, MODEL_EFI_SKU_ID, self.FileID)
#
# Get all SkuInfos
#
RecordSet = QueryDscItem(self.TblDsc, MODEL_EFI_SKU_ID, -1, self.FileID)
#
# Go through each arch
#
for Arch in DataType.ARCH_LIST:
for IncludeFile in IncludeFiles:
if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, TAB_SKUIDS, '', IncludeFile[2])
for NewItem in open(Filename, 'r').readlines():
if CleanString(NewItem) == '':
continue
List = GetSplitValueList(NewItem)
if len(List) != 2:
RaiseParserError(NewItem, TAB_SKUIDS, Filename, '<Integer>|<UiName>')
else:
self.Platform.SkuInfos.SkuInfoList[List[1]] = List[0]
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
List = GetSplitValueList(Record[0])
if len(List) != 2:
RaiseParserError(Record[0], TAB_SKUIDS, ContainerFile, '<Integer>|<UiName>')
else:
self.Platform.SkuInfos.SkuInfoList[List[1]] = List[0]
#
# Update to Database
#
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
where ID = %s""" % (self.TblDsc.Table, ConvertToSqlString2(List[0]), ConvertToSqlString2(List[1]), Record[3])
self.TblDsc.Exec(SqlCommand)
## GenLibraries
#
# Gen Libraries of Dsc
# <PathAndFilename>
#
# @param ContainerFile: The Dsc file full path
#
def GenLibraries(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARIES)
Libraries = {}
#
# Get all include files
#
IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, MODEL_EFI_LIBRARY_INSTANCE, self.FileID)
#
# Get all Libraries
#
RecordSet = QueryDscItem(self.TblDsc, MODEL_EFI_LIBRARY_INSTANCE, -1, self.FileID)
#
# Go through each arch
#
for Arch in DataType.ARCH_LIST:
for IncludeFile in IncludeFiles:
if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, TAB_LIBRARIES, '', IncludeFile[2])
if os.path.exists(Filename):
for NewItem in open(Filename, 'r').readlines():
if CleanString(NewItem) == '':
continue
MergeArches(Libraries, NewItem, Arch)
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
MergeArches(Libraries, Record[0], Arch)
for Key in Libraries.keys():
Library = PlatformLibraryClass()
Library.FilePath = NormPath(Key)
Library.SupArchList = Libraries[Key]
self.Platform.Libraries.LibraryList.append(Library)
## GenLibraryClasses
#
# Get LibraryClasses of Dsc
# <LibraryClassKeyWord>|<LibraryInstance>
#
# @param ContainerFile: The Dsc file full path
#
def GenLibraryClasses(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARY_CLASSES)
LibraryClasses = {}
#
# Get all include files
#
IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, MODEL_EFI_LIBRARY_CLASS, self.FileID)
#
# Get all LibraryClasses
#
RecordSet = QueryDscItem(self.TblDsc, MODEL_EFI_LIBRARY_CLASS, -1, self.FileID)
#
# Go through each arch
#
for Arch in DataType.ARCH_LIST:
for IncludeFile in IncludeFiles:
if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, TAB_LIBRARY_CLASSES, '', IncludeFile[2])
for NewItem in open(Filename, 'r').readlines():
if CleanString(NewItem) == '':
continue
MergeArches(LibraryClasses, GetLibraryClass([NewItem, IncludeFile[4]], Filename, self.WorkspaceDir, -1), Arch)
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
(LibClassName, LibClassIns, SupModelList) = GetLibraryClass([Record[0], Record[4]], ContainerFile, self.WorkspaceDir, Record[2])
MergeArches(LibraryClasses, (LibClassName, LibClassIns, SupModelList), Arch)
#
# Update to Database
#
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
where ID = %s""" % (self.TblDsc.Table, ConvertToSqlString2(LibClassName), ConvertToSqlString2(LibClassIns), ConvertToSqlString2(SupModelList), Record[3])
self.TblDsc.Exec(SqlCommand)
for Key in LibraryClasses.keys():
Library = PlatformLibraryClass()
Library.Name = Key[0]
Library.FilePath = NormPath(Key[1])
Library.SupModuleList = GetSplitValueList(Key[2])
Library.SupArchList = LibraryClasses[Key]
self.Platform.LibraryClasses.LibraryList.append(Library)
## Gen Pcds
#
# Gen Pcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<Type>|<MaximumDatumSize>]
#
# @param Type: The type of Pcd
# @param ContainerFile: The file which describes the pcd, used for error report
#
def GenPcds(self, Type='', ContainerFile=''):
Pcds = {}
if Type == DataType.TAB_PCDS_PATCHABLE_IN_MODULE:
Model = MODEL_PCD_PATCHABLE_IN_MODULE
elif Type == DataType.TAB_PCDS_FIXED_AT_BUILD:
Model = MODEL_PCD_FIXED_AT_BUILD
else:
pass
EdkLogger.debug(2, "Generate %s ..." % Type)
#
# Get all include files
#
IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, Model, self.FileID)
#
# Get all Pcds
#
RecordSet = QueryDscItem(self.TblDsc, Model, -1, self.FileID)
#
# Go through each arch
#
for Arch in DataType.ARCH_LIST:
for IncludeFile in IncludeFiles:
if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, Type, '', IncludeFile[2])
for NewItem in open(Filename, 'r').readlines():
if CleanString(NewItem) == '':
continue
(TokenName, TokenGuidCName, Value, DatumType, MaxDatumSize, Type) = GetPcd(NewItem, Type, Filename, -1)
MergeArches(Pcds, (TokenName, TokenGuidCName, Value, DatumType, MaxDatumSize, Type), Arch)
self.PcdToken[IncludeFile[3]] = (TokenGuidCName, TokenName)
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
(TokenName, TokenGuidCName, Value, DatumType, MaxDatumSize, Type) = GetPcd(Record[0], Type, ContainerFile, Record[2])
MergeArches(Pcds, (TokenName, TokenGuidCName, Value, DatumType, MaxDatumSize, Type), Arch)
self.PcdToken[Record[3]] = (TokenGuidCName, TokenName)
for Key in Pcds:
Pcd = PcdClass(Key[0], '', Key[1], Key[3], Key[4], Key[2], Key[5], [], {}, [])
Pcd.SupArchList = Pcds[Key]
self.Platform.DynamicPcdBuildDefinitions.append(Pcd)
## Gen FeatureFlagPcds
#
# Gen FeatureFlagPcds of Dsc file as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
#
# @param Type: The type of Pcd
# @param ContainerFile: The file which describes the pcd, used for error report
#
def GenFeatureFlagPcds(self, Type='', ContainerFile=''):
Pcds = {}
if Type == DataType.TAB_PCDS_FEATURE_FLAG:
Model = MODEL_PCD_FEATURE_FLAG
else:
pass
EdkLogger.debug(2, "Generate %s ..." % Type)
#
# Get all include files
#
IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, Model, self.FileID)
#
# Get all FeatureFlagPcds
#
RecordSet = QueryDscItem(self.TblDsc, Model, -1, self.FileID)
#
# Go through each arch
#
for Arch in DataType.ARCH_LIST:
for IncludeFile in IncludeFiles:
if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, Type, '', IncludeFile[2])
for NewItem in open(Filename, 'r').readlines():
if CleanString(NewItem) == '':
continue
(TokenName, TokenGuidCName, Value, Type) = GetFeatureFlagPcd(NewItem, Type, Filename, -1)
MergeArches(Pcds, (TokenName, TokenGuidCName, Value, Type), Arch)
self.PcdToken[IncludeFile[3]] = (TokenGuidCName, TokenName)
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
(TokenName, TokenGuidCName, Value, Type) = GetFeatureFlagPcd(Record[0], Type, ContainerFile, Record[2])
MergeArches(Pcds, (TokenName, TokenGuidCName, Value, Type), Arch)
self.PcdToken[Record[3]] = (TokenGuidCName, TokenName)
for Key in Pcds:
Pcd = PcdClass(Key[0], '', Key[1], '', '', Key[2], Key[3], [], {}, [])
Pcd.SupArchList = Pcds[Key]
self.Platform.DynamicPcdBuildDefinitions.append(Pcd)
## Gen DynamicDefaultPcds
#
# Gen DynamicDefaultPcds of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<DatumType>[|<MaxDatumSize>]]
#
# @param Type: The type of Pcd
# @param ContainerFile: The file which describes the pcd, used for error report
#
def GenDynamicDefaultPcds(self, Type='', ContainerFile=''):
Pcds = {}
SkuInfoList = {}
if Type == DataType.TAB_PCDS_DYNAMIC_DEFAULT:
Model = MODEL_PCD_DYNAMIC_DEFAULT
elif Type == DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT:
Model = MODEL_PCD_DYNAMIC_EX_DEFAULT
else:
pass
EdkLogger.debug(2, "Generate %s ..." % Type)
#
# Get all include files
#
IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, Model, self.FileID)
#
# Get all DynamicDefaultPcds
#
RecordSet = QueryDscItem(self.TblDsc, Model, -1, self.FileID)
#
# Go through each arch
#
for Arch in DataType.ARCH_LIST:
for IncludeFile in IncludeFiles:
if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, Type, '', IncludeFile[2])
for NewItem in open(Filename, 'r').readlines():
if CleanString(NewItem) == '':
continue
(K1, K2, K3, K4, K5, K6) = GetDynamicDefaultPcd(NewItem, Type, Filename, -1)
MergeArches(Pcds, (K1, K2, K3, K4, K5, K6, IncludeFile[4]), Arch)
self.PcdToken[IncludeFile[3]] = (K2, K1)
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
(K1, K2, K3, K4, K5, K6) = GetDynamicDefaultPcd(Record[0], Type, ContainerFile, Record[2])
MergeArches(Pcds, (K1, K2, K3, K4, K5, K6, Record[4]), Arch)
self.PcdToken[Record[3]] = (K2, K1)
for Key in Pcds:
(Status, SkuInfoList) = self.GenSkuInfoList(Key[6], self.Platform.SkuInfos.SkuInfoList, '', '', '', '', '', Key[2])
if Status == False:
ErrorMsg = "The SKUID '%s' used in section '%s' is not defined in section [SkuIds]" % (SkuInfoList, Type)
EdkLogger.error("DSC File Parser", PARSER_ERROR, ErrorMsg, ContainerFile, RaiseError=EdkLogger.IsRaiseError)
Pcd = PcdClass(Key[0], '', Key[1], Key[3], Key[4], Key[2], Key[5], [], SkuInfoList, [])
Pcd.SupArchList = Pcds[Key]
self.Platform.DynamicPcdBuildDefinitions.append(Pcd)
## Gen DynamicHiiPcds
#
# Gen DynamicHiiPcds of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<String>|<VariableGuidCName>|<VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]
#
# @param Type: The type of Pcd
# @param ContainerFile: The file which describes the pcd, used for error report
#
def GenDynamicHiiPcds(self, Type='', ContainerFile=''):
Pcds = {}
SkuInfoList = {}
if Type == DataType.TAB_PCDS_DYNAMIC_HII:
Model = MODEL_PCD_DYNAMIC_HII
elif Type == DataType.TAB_PCDS_DYNAMIC_EX_HII:
Model = MODEL_PCD_DYNAMIC_EX_HII
else:
pass
EdkLogger.debug(2, "Generate %s ..." % Type)
#
# Get all include files
#
IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, Model, self.FileID)
#
# Get all DynamicHiiPcds
#
RecordSet = QueryDscItem(self.TblDsc, Model, -1, self.FileID)
#
# Go through each arch
#
for Arch in DataType.ARCH_LIST:
for IncludeFile in IncludeFiles:
if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, Type, '', IncludeFile[2])
for NewItem in open(Filename, 'r').readlines():
if CleanString(NewItem) == '':
continue
(K1, K2, K3, K4, K5, K6, K7, K8) = GetDynamicHiiPcd(NewItem, Type, Filename, -1)
MergeArches(Pcds, (K1, K2, K3, K4, K5, K6, K7, K8, IncludeFile[4]), Arch)
self.PcdToken[IncludeFile[3]] = (K2, K1)
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
(K1, K2, K3, K4, K5, K6, K7, K8) = GetDynamicHiiPcd(Record[0], Type, ContainerFile, Record[2])
MergeArches(Pcds, (K1, K2, K3, K4, K5, K6, K7, K8, Record[4]), Arch)
self.PcdToken[Record[3]] = (K2, K1)
for Key in Pcds:
(Status, SkuInfoList) = self.GenSkuInfoList(Key[8], self.Platform.SkuInfos.SkuInfoList, Key[2], Key[3], Key[4], Key[5], '', '')
if Status == False:
ErrorMsg = "The SKUID '%s' used in section '%s' is not defined in section [SkuIds]" % (SkuInfoList, Type)
EdkLogger.error("DSC File Parser", PARSER_ERROR, ErrorMsg, ContainerFile, RaiseError=EdkLogger.IsRaiseError)
Pcd = PcdClass(Key[0], '', Key[1], '', Key[6], Key[5], Key[7], [], SkuInfoList, [])
Pcd.SupArchList = Pcds[Key]
self.Platform.DynamicPcdBuildDefinitions.append(Pcd)
## Gen DynamicVpdPcds
#
# Gen DynamicVpdPcds of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<VpdOffset>[|<MaximumDatumSize>]
#
# @param Type: The type of Pcd
# @param ContainerFile: The file which describes the pcd, used for error report
#
def GenDynamicVpdPcds(self, Type='', ContainerFile=''):
Pcds = {}
SkuInfoList = {}
if Type == DataType.TAB_PCDS_DYNAMIC_VPD:
Model = MODEL_PCD_DYNAMIC_VPD
elif Type == DataType.TAB_PCDS_DYNAMIC_EX_VPD:
Model = MODEL_PCD_DYNAMIC_EX_VPD
else:
pass
EdkLogger.debug(2, "Generate %s ..." % Type)
#
# Get all include files
#
IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, Model, self.FileID)
#
# Get all DynamicVpdPcds
#
RecordSet = QueryDscItem(self.TblDsc, Model, -1, self.FileID)
#
# Go through each arch
#
for Arch in DataType.ARCH_LIST:
for IncludeFile in IncludeFiles:
if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, Type, '', IncludeFile[2])
for NewItem in open(Filename, 'r').readlines():
if CleanString(NewItem) == '':
continue
(K1, K2, K3, K4, K5) = GetDynamicVpdPcd(NewItem, Type, Filename, -1)
MergeArches(Pcds, (K1, K2, K3, K4, K5, IncludeFile[4]), Arch)
self.PcdToken[IncludeFile[3]] = (K2, K1)
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
(K1, K2, K3, K4, K5) = GetDynamicVpdPcd(Record[0], Type, ContainerFile, Record[2])
MergeArches(Pcds, (K1, K2, K3, K4, K5, Record[4]), Arch)
self.PcdToken[Record[3]] = (K2, K1)
for Key in Pcds:
(Status, SkuInfoList) = self.GenSkuInfoList(Key[5], self.Platform.SkuInfos.SkuInfoList, '', '', '', '', Key[2], '')
if Status == False:
ErrorMsg = "The SKUID '%s' used in section '%s' is not defined in section [SkuIds]" % (SkuInfoList, Type)
EdkLogger.error("DSC File Parser", PARSER_ERROR, ErrorMsg, ContainerFile, RaiseError=EdkLogger.IsRaiseError)
Pcd = PcdClass(Key[0], '', Key[1], '', Key[3], '', Key[4], [], SkuInfoList, [])
Pcd.SupArchList = Pcds[Key]
self.Platform.DynamicPcdBuildDefinitions.append(Pcd)
## Get Component
#
# Get Component section defined in Dsc file
#
# @param ContainerFile: The file which describes the Components, used for error report
#
# @retval PlatformModuleClass() An instance of PlatformModuleClass
#
def GenComponents(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_COMPONENTS)
Components = sdict()
#
# Get all include files
#
IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, MODEL_META_DATA_COMPONENT, self.FileID)
#
# Get all Components
#
RecordSet = QueryDscItem(self.TblDsc, MODEL_META_DATA_COMPONENT, -1, self.FileID)
#
# Go through each arch
#
for Arch in DataType.ARCH_LIST:
for IncludeFile in IncludeFiles:
if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, TAB_COMPONENTS, '', IncludeFile[2])
for NewItem in open(Filename, 'r').readlines():
if CleanString(NewItem) == '':
continue
NewItems = []
GetComponents(open(Filename, 'r').read(), TAB_COMPONENTS, NewItems, TAB_COMMENT_SPLIT)
for NewComponent in NewItems:
MergeArches(Components, self.GenComponent(NewComponent, Filename), Arch)
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
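# Gather this component's own overrides (library classes, build options and PCDs), which are stored as sub-records belonging to this component record (Record[3]).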
Lib, Bo, Pcd = [], [], []
SubLibSet = QueryDscItem(self.TblDsc, MODEL_EFI_LIBRARY_CLASS, Record[3], self.FileID)
for SubLib in SubLibSet:
Lib.append(TAB_VALUE_SPLIT.join([SubLib[0], SubLib[4]]))
SubBoSet = QueryDscItem(self.TblDsc, MODEL_META_DATA_BUILD_OPTION, Record[3], self.FileID)
for SubBo in SubBoSet:
Bo.append(SubBo[0])
SubPcdSet1 = QueryDscItem(self.TblDsc, MODEL_PCD_FIXED_AT_BUILD, Record[3], self.FileID)
SubPcdSet2 = QueryDscItem(self.TblDsc, MODEL_PCD_PATCHABLE_IN_MODULE, Record[3], self.FileID)
SubPcdSet3 = QueryDscItem(self.TblDsc, MODEL_PCD_FEATURE_FLAG, Record[3], self.FileID)
SubPcdSet4 = QueryDscItem(self.TblDsc, MODEL_PCD_DYNAMIC_EX_DEFAULT, Record[3], self.FileID)
SubPcdSet5 = QueryDscItem(self.TblDsc, MODEL_PCD_DYNAMIC_DEFAULT, Record[3], self.FileID)
for SubPcd in SubPcdSet1:
Pcd.append([DataType.TAB_PCDS_FIXED_AT_BUILD, SubPcd[0], SubPcd[3]])
for SubPcd in SubPcdSet2:
Pcd.append([DataType.TAB_PCDS_PATCHABLE_IN_MODULE, SubPcd[0], SubPcd[3]])
for SubPcd in SubPcdSet3:
Pcd.append([DataType.TAB_PCDS_FEATURE_FLAG, SubPcd[0], SubPcd[3]])
for SubPcd in SubPcdSet4:
Pcd.append([DataType.TAB_PCDS_DYNAMIC_EX, SubPcd[0], SubPcd[3]])
for SubPcd in SubPcdSet5:
Pcd.append([DataType.TAB_PCDS_DYNAMIC, SubPcd[0], SubPcd[3]])
Item = [Record[0], Lib, Bo, Pcd]
MergeArches(Components, self.GenComponent(Item, ContainerFile), Arch)
for Key in Components.keys():
Key.SupArchList = Components[Key]
self.Platform.Modules.ModuleList.append(Key)
## Get Component
#
# Get Component section defined in Dsc file
#
# @param Item: Contents includes a component block
# @param ContainerFile: The file which describes the library class, used for error report
#
# @retval PlatformModuleClass() An instance of PlatformModuleClass
#
def GenComponent(self, Item, ContainerFile, LineNo= -1):
(InfFilename, ExecFilename) = GetExec(Item[0])
LibraryClasses = Item[1]
BuildOptions = Item[2]
Pcds = Item[3]
Component = PlatformModuleClass()
Component.FilePath = NormPath(InfFilename)
Component.ExecFilePath = NormPath(ExecFilename)
CheckFileType(Component.FilePath, '.Inf', ContainerFile, 'component name', Item[0], LineNo)
CheckFileExist(self.WorkspaceDir, Component.FilePath, ContainerFile, 'component', Item[0], LineNo)
for Lib in LibraryClasses:
List = GetSplitValueList(Lib)
if len(List) != 2:
RaiseParserError(Lib, 'LibraryClasses', ContainerFile, '<ClassName>|<InfFilename>')
LibName = List[0]
LibFile = NormPath(List[1])
if LibName == "" or LibName == "NULL":
LibName = "NULL%d" % self._NullClassIndex
self._NullClassIndex += 1
CheckFileType(List[1], '.Inf', ContainerFile, 'library instance of component ', Lib, LineNo)
CheckFileExist(self.WorkspaceDir, LibFile, ContainerFile, 'library instance of component', Lib, LineNo)
Component.LibraryClasses.LibraryList.append(PlatformLibraryClass(LibName, LibFile))
for BuildOption in BuildOptions:
Key = GetBuildOption(BuildOption, ContainerFile)
Component.ModuleSaBuildOption.BuildOptionList.append(BuildOptionClass(Key[0], Key[1], Key[2]))
for Pcd in Pcds:
Type = Pcd[0]
List = GetSplitValueList(Pcd[1])
PcdId = Pcd[2]
TokenInfo = None
#
# For FeatureFlag
#
if Type == DataType.TAB_PCDS_FEATURE_FLAG:
if len(List) != 2:
RaiseParserError(Pcd[1], 'Components', ContainerFile, '<PcdTokenSpaceGuidCName>.<PcdTokenName>|TRUE/FALSE')
CheckPcdTokenInfo(List[0], 'Components', ContainerFile)
TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
Component.PcdBuildDefinitions.append(PcdClass(TokenInfo[1], '', TokenInfo[0], '', '', List[1], Type, [], {}, []))
#
# For FixedAtBuild or PatchableInModule
#
if Type == DataType.TAB_PCDS_FIXED_AT_BUILD or Type == DataType.TAB_PCDS_PATCHABLE_IN_MODULE:
List.append('')
if len(List) != 3 and len(List) != 4:
RaiseParserError(Pcd[1], 'Components', ContainerFile, '<PcdTokenSpaceGuidCName>.<PcdTokenName>|<Value>[|<MaxDatumSize>]')
CheckPcdTokenInfo(List[0], 'Components', ContainerFile)
TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
Component.PcdBuildDefinitions.append(PcdClass(TokenInfo[1], '', TokenInfo[0], '', List[2], List[1], Type, [], {}, []))
#
# For Dynamic or DynamicEx
#
if Type == DataType.TAB_PCDS_DYNAMIC or Type == DataType.TAB_PCDS_DYNAMIC_EX:
if len(List) != 1:
RaiseParserError(Pcd[1], 'Components', ContainerFile, '<PcdTokenSpaceGuidCName>.<PcdTokenName>')
CheckPcdTokenInfo(List[0], 'Components', ContainerFile)
TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
Component.PcdBuildDefinitions.append(PcdClass(TokenInfo[1], '', TokenInfo[0], '', '', '', Type, [], {}, []))
#
# Add to PcdToken
#
self.PcdToken[PcdId] = (TokenInfo[0], TokenInfo[1])
return Component
#End of GenComponent
## Gen SkuInfoList
#
# Gen SkuInfoList section defined in Dsc file
#
# @param SkuNameList: Input value for SkuNameList
# @param SkuInfo: Input value for SkuInfo
# @param VariableName: Input value for VariableName
# @param VariableGuid: Input value for VariableGuid
# @param VariableOffset: Input value for VariableOffset
# @param HiiDefaultValue: Input value for HiiDefaultValue
# @param VpdOffset: Input value for VpdOffset
# @param DefaultValue: Input value for DefaultValue
#
# @retval (False, SkuName) SkuName is not defined in the [SkuIds] section of the Dsc file
# @retval (True, SkuInfoList) All SKU names are defined in the [SkuIds] section of the Dsc file
#
def GenSkuInfoList(self, SkuNameList, SkuInfo, VariableName='', VariableGuid='', VariableOffset='', HiiDefaultValue='', VpdOffset='', DefaultValue=''):
SkuNameList = GetSplitValueList(SkuNameList)
if SkuNameList == None or SkuNameList == [] or SkuNameList == ['']:
SkuNameList = ['DEFAULT']
SkuInfoList = {}
for Item in SkuNameList:
if Item not in SkuInfo:
return False, Item
Sku = SkuInfoClass(Item, SkuInfo[Item], VariableName, VariableGuid, VariableOffset, HiiDefaultValue, VpdOffset, DefaultValue)
SkuInfoList[Item] = Sku
return True, SkuInfoList
## Parse Include statement
#
# Get include file path
#
# 1. Insert a record into TblFile ???
# 2. Insert a record into TblDsc
# Value1: IncludeFilePath
#
# @param LineValue: The line of the !include statement
def ParseInclude(self, LineValue, StartLine, Table, FileID, Filename, SectionName, Model, Arch):
EdkLogger.debug(EdkLogger.DEBUG_2, "!include statement '%s' found in section %s" % (LineValue, SectionName))
SectionModel = Section[SectionName.upper()]
IncludeFile = CleanString(LineValue[LineValue.upper().find(DataType.TAB_INCLUDE.upper() + ' ') + len(DataType.TAB_INCLUDE + ' ') : ])
Table.Insert(Model, IncludeFile, '', '', Arch, SectionModel, FileID, StartLine, -1, StartLine, -1, 0)
## Parse DEFINE statement
#
# Get DEFINE macros
#
# 1. Insert a record into TblDsc
# Value1: Macro Name
# Value2: Macro Value
#
def ParseDefine(self, LineValue, StartLine, Table, FileID, Filename, SectionName, Model, Arch):
EdkLogger.debug(EdkLogger.DEBUG_2, "DEFINE statement '%s' found in section %s" % (LineValue, SectionName))
SectionModel = Section[SectionName.upper()]
Define = GetSplitValueList(CleanString(LineValue[LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') + len(DataType.TAB_DEFINE + ' ') : ]), TAB_EQUAL_SPLIT, 1)
Table.Insert(Model, Define[0], Define[1], '', Arch, SectionModel, FileID, StartLine, -1, StartLine, -1, 0)
## Parse Defines section
#
# Get one item in defines section
#
# Value1: Item Name
# Value2: Item Value
#
def ParseDefinesSection(self, LineValue, StartLine, Table, FileID, Filename, SectionName, Model, Arch):
EdkLogger.debug(EdkLogger.DEBUG_2, "Parse '%s' found in section %s" % (LineValue, SectionName))
Defines = GetSplitValueList(LineValue, TAB_EQUAL_SPLIT, 1)
if len(Defines) != 2:
RaiseParserError(LineValue, SectionName, Filename, '', StartLine)
self.TblDsc.Insert(Model, Defines[0], Defines[1], '', Arch, -1, FileID, StartLine, -1, StartLine, -1, 0)
## Insert conditional statements
#
# Pop an item from IfDefList
# Insert conditional statements to database
#
# @param Filename: Path of parsing file
# @param IfDefList: A list storing the current conditional statements
# @param EndLine: The end line no
# @param ArchList: Support arch list
#
def InsertConditionalStatement(self, Filename, FileID, BelongsToItem, IfDefList, EndLine, ArchList):
(Value1, Value2, Value3, Model, StartColumn, EndColumn, Enabled) = ('', '', '', -1, -1, -1, 0)
if IfDefList == []:
ErrorMsg = 'Unmatched conditional statement in file %s' % Filename
EdkLogger.error("DSC File Parser", PARSER_ERROR, ErrorMsg, Filename, RaiseError=EdkLogger.IsRaiseError)
else:
#
# Get New Dsc item ID
#
DscID = self.TblDsc.GetCount() + 1
#
# Pop the conditional statements which is closed
#
PreviousIf = IfDefList.pop()
EdkLogger.debug(EdkLogger.DEBUG_5, 'Previous IfDef: ' + str(PreviousIf))
#
# !ifdef and !ifndef
#
if PreviousIf[2] in (MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF, MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF):
Value1 = PreviousIf[0]
Model = PreviousIf[2]
self.TblDsc.Insert(Model, Value1, Value2, Value3, ArchList, BelongsToItem, self.FileID, PreviousIf[1], StartColumn, EndLine, EndColumn, Enabled)
#
# !if and !elseif
#
elif PreviousIf[2] in (MODEL_META_DATA_CONDITIONAL_STATEMENT_IF, Model):
List = PreviousIf[0].split(' ')
Value1, Value2, Value3 = '', '==', '0'
if len(List) == 3:
Value1 = List[0]
Value2 = List[1]
Value3 = List[2]
Value3 = SplitString(Value3)
if len(List) == 1:
Value1 = List[0]
Model = PreviousIf[2]
self.TblDsc.Insert(Model, Value1, Value2, Value3, ArchList, BelongsToItem, self.FileID, PreviousIf[1], StartColumn, EndLine, EndColumn, Enabled)
#
# !else
#
elif PreviousIf[2] in (MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE, Model):
Value1 = PreviousIf[0].strip()
Model = PreviousIf[2]
self.TblDsc.Insert(Model, Value1, Value2, Value3, ArchList, BelongsToItem, self.FileID, PreviousIf[1], StartColumn, EndLine, EndColumn, Enabled)
## Load Dsc file
#
# Load the file if it exists
#
# @param Filename: Input value for filename of Dsc file
#
def LoadDscFile(self, Filename):
#
# Insert a record for file
#
Filename = NormPath(Filename)
self.Identification.FileFullPath = Filename
(self.Identification.FileRelativePath, self.Identification.FileName) = os.path.split(Filename)
self.FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_DSC)
#
# Init DscTable
#
#self.TblDsc.Table = "Dsc%s" % FileID
#self.TblDsc.Create()
#
# Init common data
#
IfDefList, SectionItemList, CurrentSection, ArchList, ThirdList, IncludeFiles = \
[], [], TAB_UNKNOWN, [], [], []
LineNo = 0
#
# Parse file content
#
IsFindBlockComment = False
ReservedLine = ''
for Line in open(Filename, 'r'):
LineNo = LineNo + 1
#
# Remove comment block
#
if Line.find(TAB_COMMENT_EDK_START) > -1:
ReservedLine = GetSplitList(Line, TAB_COMMENT_EDK_START, 1)[0]
IsFindBlockComment = True
if Line.find(TAB_COMMENT_EDK_END) > -1:
Line = ReservedLine + GetSplitList(Line, TAB_COMMENT_EDK_END, 1)[1]
ReservedLine = ''
IsFindBlockComment = False
if IsFindBlockComment:
continue
#
# Remove comments at tail and remove spaces again
#
Line = CleanString(Line)
if Line == '':
continue
#
# Find a new section tab
# First insert previous section items
# And then parse the content of the new section
#
if Line.startswith(TAB_SECTION_START) and Line.endswith(TAB_SECTION_END):
#
# Insert items data of previous section
#
self.InsertSectionItemsIntoDatabase(self.FileID, Filename, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList)
#
# Parse the new section
#
SectionItemList = []
ArchList = []
ThirdList = []
CurrentSection = ''
LineList = GetSplitValueList(Line[len(TAB_SECTION_START):len(Line) - len(TAB_SECTION_END)], TAB_COMMA_SPLIT)
for Item in LineList:
ItemList = GetSplitValueList(Item, TAB_SPLIT)
if CurrentSection == '':
CurrentSection = ItemList[0]
else:
if CurrentSection != ItemList[0]:
EdkLogger.error("Parser", PARSER_ERROR, "Different section names '%s' and '%s' are found in one section definition, this is not allowed." % (CurrentSection, ItemList[0]), File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
if CurrentSection.upper() not in self.KeyList:
RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
CurrentSection = TAB_UNKNOWN
continue
ItemList.append('')
ItemList.append('')
if len(ItemList) > 5:
RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
else:
if ItemList[1] != '' and ItemList[1].upper() not in ARCH_LIST_FULL:
EdkLogger.error("Parser", PARSER_ERROR, "Invalid Arch definition '%s' found" % ItemList[1], File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
ArchList.append(ItemList[1].upper())
ThirdList.append(ItemList[2])
continue
#
# Not in any defined section
#
if CurrentSection == TAB_UNKNOWN:
ErrorMsg = "%s is not in any defined section" % Line
EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
#
# Add a section item
#
SectionItemList.append([Line, LineNo])
# End of parse
#End of For
#
# Insert items data of last section
#
self.InsertSectionItemsIntoDatabase(self.FileID, Filename, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList)
#
# Parse conditional statements
#
self.ParseConditionalStatement()
#
# Replace all DEFINE macros with their actual values
#
#ParseDefineMacro2(self.TblDsc, self.RecordSet, GlobalData.gGlobalDefines)
ParseDefineMacro(self.TblDsc, GlobalData.gGlobalDefines)
## ParseConditionalStatement
#
# Search all conditional statements and disable records that do not match
#
def ParseConditionalStatement(self):
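# Each query below finds conditional blocks whose condition does not hold and marks every record within their line range with Enabled = -1.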
#
# Disable all !if/!elif/!ifdef statements without a matching DEFINE
#
SqlCommand = """select A.StartLine, A.EndLine from %s as A
where A.Model in (%s, %s, %s)
and A.Enabled = 0
and A.BelongsToFile = %s
and A.Value1 not in (select B.Value1 from %s as B
where B.Model = %s
and B.Enabled = 0
and A.StartLine > B.StartLine
and A.Arch = B.Arch
and A.BelongsToItem = B.BelongsToItem
and A.BelongsToFile = B.BelongsToFile) """ % \
(self.TblDsc.Table, \
MODEL_META_DATA_CONDITIONAL_STATEMENT_IF, MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE, MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF, \
self.FileID, \
self.TblDsc.Table, \
MODEL_META_DATA_DEFINE)
RecordSet = self.TblDsc.Exec(SqlCommand)
for Record in RecordSet:
SqlCommand = """Update %s set Enabled = -1 where StartLine >= %s and EndLine <= %s""" % (self.TblDsc.Table, Record[0], Record[1])
self.TblDsc.Exec(SqlCommand)
#
# Disable !ifndef statements that have a matching DEFINE
#
SqlCommand = """select A.StartLine, A.EndLine from %s as A
where A.Model = %s
and A.Enabled = 0
and A.BelongsToFile = %s
and A.Value1 in (select B.Value1 from %s as B
where B.Model = %s
and B.Enabled = 0
and A.StartLine > B.StartLine
and A.Arch = B.Arch
and A.BelongsToItem = B.BelongsToItem
and A.BelongsToFile = B.BelongsToFile)""" % \
(self.TblDsc.Table, \
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF, \
self.FileID, \
self.TblDsc.Table, \
MODEL_META_DATA_DEFINE)
RecordSet = self.TblDsc.Exec(SqlCommand)
for Record in RecordSet:
SqlCommand = """Update %s set Enabled = -1 where StartLine >= %s and EndLine <= %s""" % (self.TblDsc.Table, Record[0], Record[1])
EdkLogger.debug(4, "SqlCommand: %s" % SqlCommand)
self.Cur.execute(SqlCommand)
#
# Disable !if, !elif and !else statements with unmatched values
#
SqlCommand = """select A.Model, A.Value1, A.Value2, A.Value3, A.StartLine, A.EndLine, B.Value2 from %s as A join %s as B
where A.Model in (%s, %s)
and A.Enabled = 0
and A.BelongsToFile = %s
and B.Enabled = 0
and B.Model = %s
and A.Value1 = B.Value1
and A.StartLine > B.StartLine
and A.BelongsToItem = B.BelongsToItem
and A.BelongsToFile = B.BelongsToFile""" % \
(self.TblDsc.Table, self.TblDsc.Table, \
MODEL_META_DATA_CONDITIONAL_STATEMENT_IF, MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE, \
self.FileID, MODEL_META_DATA_DEFINE)
RecordSet = self.TblDsc.Exec(SqlCommand)
DisabledList = []
for Record in RecordSet:
if Record[0] == MODEL_META_DATA_CONDITIONAL_STATEMENT_IF:
if not self.Compare(Record[6], Record[2], Record[3]):
SqlCommand = """Update %s set Enabled = -1 where StartLine >= %s and EndLine <= %s""" % (self.TblDsc.Table, Record[4], Record[5])
self.TblDsc.Exec(SqlCommand)
else:
DisabledList.append(Record[1])
continue
if Record[0] == MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE and Record[1] in DisabledList:
SqlCommand = """Update %s set Enabled = -1 where StartLine >= %s and EndLine <= %s""" % (self.TblDsc.Table, Record[4], Record[5])
self.TblDsc.Exec(SqlCommand)
## Compare
#
# Compare two values
# @param Value1:
# @param CompareType:
# @param Value2:
#
def Compare(self, Value1, CompareType, Value2):
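# Build an expression such as "Value1 == Value2" from the operator string and evaluate it against the local names via eval().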
Command = """Value1 %s Value2""" % CompareType
return eval(Command)
## Insert records into the database for the first time
#
# Insert item data of a section to database
# @param FileID: The ID of belonging file
# @param Filename: The name of belonging file
# @param CurrentSection: The name of the current section
# @param SectionItemList: A list of items of the section
# @param ArchList: A list of arches
# @param ThirdList: A list of third parameters, ModuleType for LibraryClass and SkuId for Dynamic Pcds
# @param IfDefList: A list of all conditional statements
#
def InsertSectionItemsIntoDatabase(self, FileID, Filename, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList):
#
# Insert each item data of a section
#
for Index in range(0, len(ArchList)):
Arch = ArchList[Index]
Third = ThirdList[Index]
if Arch == '':
Arch = TAB_ARCH_COMMON.upper()
Model = Section[CurrentSection.upper()]
#Records = self.RecordSet[Model]
for SectionItem in SectionItemList:
BelongsToItem, EndLine, EndColumn = -1, -1, -1
LineValue, StartLine, EndLine = SectionItem[0], SectionItem[1], SectionItem[1]
EdkLogger.debug(4, "Parsing %s ..." % LineValue)
#
# Parse '!ifdef'
#
if LineValue.upper().find(TAB_IF_DEF.upper()) > -1:
IfDefList.append((LineValue[len(TAB_IF_N_DEF):].strip(), StartLine, MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF))
continue
#
# Parse '!ifndef'
#
if LineValue.upper().find(TAB_IF_N_DEF.upper()) > -1:
IfDefList.append((LineValue[len(TAB_IF_N_DEF):].strip(), StartLine, MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF))
continue
#
# Parse '!endif'
#
if LineValue.upper().find(TAB_END_IF.upper()) > -1:
self.InsertConditionalStatement(Filename, FileID, Model, IfDefList, StartLine, Arch)
continue
#
# Parse '!if'
#
if LineValue.upper().find(TAB_IF.upper()) > -1:
IfDefList.append((LineValue[len(TAB_IF):].strip(), StartLine, MODEL_META_DATA_CONDITIONAL_STATEMENT_IF))
continue
#
# Parse '!elseif'
#
if LineValue.upper().find(TAB_ELSE_IF.upper()) > -1:
self.InsertConditionalStatement(Filename, FileID, Model, IfDefList, StartLine - 1, Arch)
IfDefList.append((LineValue[len(TAB_ELSE_IF):].strip(), StartLine, MODEL_META_DATA_CONDITIONAL_STATEMENT_IF))
continue
#
# Parse '!else'
#
if LineValue.upper().find(TAB_ELSE.upper()) > -1:
Key = IfDefList[-1][0].split(' ' , 1)[0].strip()
self.InsertConditionalStatement(Filename, FileID, Model, IfDefList, StartLine, Arch)
IfDefList.append((Key, StartLine, MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE))
continue
#
# Parse !include statement first
#
if LineValue.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1:
self.ParseInclude(LineValue, StartLine, self.TblDsc, FileID, Filename, CurrentSection, MODEL_META_DATA_INCLUDE, Arch)
continue
#
# And then parse DEFINE statement
#
if LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') > -1:
self.ParseDefine(LineValue, StartLine, self.TblDsc, FileID, Filename, CurrentSection, MODEL_META_DATA_DEFINE, Arch)
continue
#
# At last parse other sections
#
if CurrentSection == TAB_LIBRARY_CLASSES or CurrentSection in TAB_PCD_DYNAMIC_TYPE_LIST or CurrentSection in TAB_PCD_DYNAMIC_EX_TYPE_LIST:
ID = self.TblDsc.Insert(Model, LineValue, Third, '', Arch, -1, FileID, StartLine, -1, StartLine, -1, 0)
#Records.append([LineValue, Arch, StartLine, ID, Third])
continue
elif CurrentSection != TAB_COMPONENTS:
ID = self.TblDsc.Insert(Model, LineValue, '', '', Arch, -1, FileID, StartLine, -1, StartLine, -1, 0)
#Records.append([LineValue, Arch, StartLine, ID, Third])
continue
#
# Parse COMPONENT section
#
if CurrentSection == TAB_COMPONENTS:
Components = []
GetComponent(SectionItemList, Components)
for Component in Components:
EdkLogger.debug(4, "Parsing component %s ..." % Component)
DscItemID = self.TblDsc.Insert(MODEL_META_DATA_COMPONENT, Component[0], '', '', Arch, -1, FileID, StartLine, -1, StartLine, -1, 0)
for Item in Component[1]:
List = GetSplitValueList(Item, MaxSplit=2)
LibName, LibIns = '', ''
if len(List) == 2:
LibName = List[0]
LibIns = List[1]
else:
LibName = List[0]
self.TblDsc.Insert(MODEL_EFI_LIBRARY_CLASS, LibName, LibIns, '', Arch, DscItemID, FileID, StartLine, -1, StartLine, -1, 0)
for Item in Component[2]:
self.TblDsc.Insert(MODEL_META_DATA_BUILD_OPTION, Item, '', '', Arch, DscItemID, FileID, StartLine, -1, StartLine, -1, 0)
for Item in Component[3]:
Model = Section[Item[0].upper()]
self.TblDsc.Insert(Model, Item[1], '', '', Arch, DscItemID, FileID, StartLine, -1, StartLine, -1, 0)
## Show detailed information of Dsc
#
# Print all members and their values of Dsc class
#
def ShowDsc(self):
print TAB_SECTION_START + TAB_INF_DEFINES + TAB_SECTION_END
printDict(self.Defines.DefinesDictionary)
for Key in self.KeyList:
for Arch in DataType.ARCH_LIST_FULL:
Command = "printList(TAB_SECTION_START + '" + \
Key + DataType.TAB_SPLIT + Arch + \
"' + TAB_SECTION_END, self.Contents[arch]." + Key + ')'
eval(Command)
## Show detailed information of Platform
#
# Print all members and their values of Platform class
#
def ShowPlatform(self):
M = self.Platform
for Arch in M.Header.keys():
print '\nArch =', Arch
print 'Filename =', M.Header[Arch].FileName
print 'FullPath =', M.Header[Arch].FullPath
print 'BaseName =', M.Header[Arch].Name
print 'Guid =', M.Header[Arch].Guid
print 'Version =', M.Header[Arch].Version
print 'DscSpecification =', M.Header[Arch].DscSpecification
print 'SkuId =', M.Header[Arch].SkuIdName
print 'SupArchList =', M.Header[Arch].SupArchList
print 'BuildTargets =', M.Header[Arch].BuildTargets
print 'OutputDirectory =', M.Header[Arch].OutputDirectory
print 'BuildNumber =', M.Header[Arch].BuildNumber
print 'MakefileName =', M.Header[Arch].MakefileName
print 'BsBaseAddress =', M.Header[Arch].BsBaseAddress
print 'RtBaseAddress =', M.Header[Arch].RtBaseAddress
print 'Define =', M.Header[Arch].Define
print 'Fdf =', M.FlashDefinitionFile.FilePath
print '\nBuildOptions =', M.BuildOptions, M.BuildOptions.IncludeFiles
for Item in M.BuildOptions.BuildOptionList:
print '\t', 'ToolChainFamily =', Item.ToolChainFamily, 'ToolChain =', Item.ToolChain, 'Option =', Item.Option, 'Arch =', Item.SupArchList
print '\nSkuIds =', M.SkuInfos.SkuInfoList, M.SkuInfos.IncludeFiles
print '\nLibraries =', M.Libraries, M.Libraries.IncludeFiles
for Item in M.Libraries.LibraryList:
print '\t', Item.FilePath, Item.SupArchList, Item.Define
print '\nLibraryClasses =', M.LibraryClasses, M.LibraryClasses.IncludeFiles
for Item in M.LibraryClasses.LibraryList:
print '\t', Item.Name, Item.FilePath, Item.SupModuleList, Item.SupArchList, Item.Define
print '\nPcds =', M.DynamicPcdBuildDefinitions
for Item in M.DynamicPcdBuildDefinitions:
print '\tCname=', Item.CName, 'TSG=', Item.TokenSpaceGuidCName, 'Value=', Item.DefaultValue, 'Token=', Item.Token, 'Type=', Item.ItemType, 'Datum=', Item.DatumType, 'Size=', Item.MaxDatumSize, 'Arch=', Item.SupArchList, Item.SkuInfoList
for Sku in Item.SkuInfoList.values():
print '\t\t', str(Sku)
print '\nComponents =', M.Modules.ModuleList, M.Modules.IncludeFiles
for Item in M.Modules.ModuleList:
print '\t', Item.FilePath, Item.ExecFilePath, Item.SupArchList
for Lib in Item.LibraryClasses.LibraryList:
print '\t\tLib:', Lib.Name, Lib.FilePath
for Bo in Item.ModuleSaBuildOption.BuildOptionList:
print '\t\tBuildOption:', Bo.ToolChainFamily, Bo.ToolChain, Bo.Option
for Pcd in Item.PcdBuildDefinitions:
print '\t\tPcd:', Pcd.CName, Pcd.TokenSpaceGuidCName, Pcd.MaxDatumSize, Pcd.DefaultValue, Pcd.ItemType
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
EdkLogger.Initialize()
EdkLogger.SetLevel(EdkLogger.DEBUG_0)
W = os.getenv('WORKSPACE')
F = os.path.join(W, 'Nt32Pkg/Nt32Pkg.dsc')
Db = Database.Database('Dsc.db')
Db.InitDatabase()
P = Dsc(os.path.normpath(F), True, True, W, Db)
P.ShowPlatform()
Db.Close()
|
[] |
[] |
[
"WORKSPACE"
] |
[]
|
["WORKSPACE"]
|
python
| 1 | 0 | |
crawler.py
|
import os
import sys
import requests
# StationID variable, find via /locations query endpoint.
# See https://github.com/derhuerst/bvg-rest/blob/master/docs/index.md#get-locations
STATIONID = os.getenv('CRAWLER_STATIONID', '900000230003')
API_DEPARTURES_ENDPOINT = f'https://1.bvg.transport.rest/stations/{STATIONID}/departures'
def crawl(output_fd):
'''Try to crawl the API and log S-Bahn departures (productCode=0) to output_fd.'''
try:
r = requests.get(API_DEPARTURES_ENDPOINT)
if r.status_code != 200:
raise requests.HTTPError()
doc = r.json()
for trip in doc:
"""Only log if productCode = 0 (suburban)"""
"""Change this filter accordingly!"""
if trip['line']['productCode'] != 0:
continue
"""Output in format
tripid,departure_timestamp,linename,direction,delay"""
output_fd.write(f'\"{trip["tripId"]}\",{trip["when"]},{trip["line"]["name"]},\"{trip["direction"]}\",{trip["delay"]}\n')
except requests.ConnectionError:
sys.stderr.write('ConnectionError connecting to the API\n')
sys.exit(1)
except requests.HTTPError:
sys.stderr.write(f'Invalid response from API. ({r.status_code})\n')
except ValueError:
sys.stderr.write('API response was not valid JSON.\n')
if __name__ == '__main__':
out = sys.stdout
csvheader = "tripid,departure_timestamp,linename,direction,delay\n"
if len(sys.argv) > 1:
if os.path.exists(sys.argv[1]):
"""No header if file already exists."""
csvheader = ""
out = open(sys.argv[1], 'a')
out.write(csvheader)
crawl(out)
out.close()
|
[] |
[] |
[
"CRAWLER_STATIONID"
] |
[]
|
["CRAWLER_STATIONID"]
|
python
| 1 | 0 | |
apps/app-library/src/main/java/io/fabric8/app/library/support/KubernetesService.java
|
/**
* Copyright 2005-2015 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package io.fabric8.app.library.support;
import io.hawt.git.GitFacade;
import io.hawt.util.Files;
import io.hawt.util.Function;
import io.hawt.util.IOHelper;
import io.hawt.util.MBeanSupport;
import io.hawt.util.Strings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.core.EntityTag;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.Response;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.regex.Pattern;
import static io.hawt.util.Strings.isBlank;
/**
* Helper MBean to expose the <a href="http://kubernetes.io/">Kubernetes</a> REST API
*/
public class KubernetesService extends MBeanSupport implements KubernetesServiceMXBean {
public static final String DEFAULT_DOCKER_HOST = "tcp://localhost:2375";
private static final transient Logger LOG = LoggerFactory.getLogger(KubernetesService.class);
private GitFacade git;
public void init() throws Exception {
if (System.getenv("KUBERNETES_SERVICE_HOST") != null || System.getenv("KUBERNETES_MASTER") != null) {
super.init();
}
}
@Override
public void destroy() throws Exception {
super.destroy();
}
@Override
public String getDockerIp() {
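// Strip the scheme (e.g. "tcp://") and the port from the resolved Docker host, leaving just the host or IP.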
String url = resolveDockerHost();
int idx = url.indexOf("://");
if (idx > 0) {
url = url.substring(idx + 3);
}
idx = url.indexOf(":");
if (idx > 0) {
url = url.substring(0, idx);
}
return url;
}
@Override
protected String getDefaultObjectName() {
return "io.fabric8:type=Kubernetes";
}
@Override
public String getKubernetesAddress() {
// First let's check if it's available as a kubernetes service like it should be...
String address = System.getenv("KUBERNETES_MASTER");
String username = System.getenv("KUBERNETES_USERNAME");
String password = System.getenv("KUBERNETES_PASSWORD");
if (Strings.isNotBlank(username) && Strings.isNotBlank(password)) {
address = address.replaceFirst("://", "://" + username + ":" + password + "@");
}
return address;
}
@Override
public String getHostName() {
String answer = System.getenv("HOSTNAME");
if (Strings.isBlank(answer)) {
try {
answer = InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException e) {
LOG.warn("Could not look up local host name: " + e, e);
}
}
return answer;
}
public GitFacade getGit() {
if (git == null) {
LOG.info("No GitFacade injected! Defaulting to the singleton");
git = GitFacade.getSingleton();
}
return git;
}
public void setGit(GitFacade git) {
this.git = git;
}
@Override
public String iconPath(final String branch, final String kubernetesId) throws Exception {
GitFacade facade = getGit();
return facade.readFile(branch, "/", new Function<File, String>() {
@Override
public String apply(File rootFolder) {
return doFindIconPath(rootFolder, kubernetesId);
}
});
}
@Override
public String appPath(final String branch, final String kubernetesId) throws Exception {
GitFacade facade = getGit();
return facade.readFile(branch, "/", new Function<File, String>() {
@Override
public String apply(File rootFolder) {
File file = findAppFolder(rootFolder, kubernetesId);
if (file != null) {
return relativePath(rootFolder, file);
} else {
return null;
}
}
});
}
public Response findAppsWithETags(final String branch, final Request request) throws Exception {
final GitFacade facade = getGit();
return facade.readFile(branch, "/", new Function<File, Response>() {
@Override
public Response apply(File rootFolder) {
String head = facade.getHEAD();
EntityTag etag = new EntityTag(head);
Response.ResponseBuilder builder = request.evaluatePreconditions(etag);
// Only query the data if it's changed
if (builder == null) {
List<AppDTO> answer = new ArrayList<AppDTO>();
doAddApps(rootFolder, rootFolder, answer);
builder = Response.ok(answer);
builder.tag(etag);
}
return builder.build();
}
});
}
@Override
public List<AppDTO> findApps(final String branch) throws Exception {
GitFacade facade = getGit();
return facade.readFile(branch, "/", new Function<File, List<AppDTO>>() {
@Override
public List<AppDTO> apply(File rootFolder) {
List<AppDTO> answer = new ArrayList<AppDTO>();
doAddApps(rootFolder, rootFolder, answer);
return answer;
}
});
}
protected String doFindIconPath(File rootFolder, String kubernetesId) {
File appFolder = findAppFolder(rootFolder, kubernetesId);
return doFindAppIconPath(rootFolder, appFolder);
}
protected String doFindAppIconPath(File rootFolder, File appFolder) {
if (appFolder != null) {
File[] files = appFolder.listFiles();
if (files != null) {
for (File file : files) {
if (isIconFile(file)) {
return relativePath(rootFolder, file);
}
}
}
}
return null;
}
public static boolean isIconFile(File file) {
String name = file.getName();
return name.startsWith("icon.") &&
(name.endsWith(".svg") || name.endsWith(".png") || name.endsWith(".gif") || name.endsWith(".jpg") || name.endsWith(".jpeg") || name.endsWith(".pdf"));
}
protected static String relativePath(File rootFolder, File file) {
try {
return Files.getRelativePath(rootFolder, file);
} catch (IOException e) {
LOG.warn("failed to get relative folder of " + file.getAbsolutePath() + ". " + e, e);
return null;
}
}
protected File findAppFolder(File fileOrDirectory, String kubernetesId) {
Pattern pattern = createKubernetesIdPattern(kubernetesId);
return findAppFolder(fileOrDirectory, pattern);
}
public static Pattern createKubernetesIdPattern(String kubernetesId) {
String regex = "\"id\"\\s*:\\s*\"" + kubernetesId + "\"";
Pattern answer = Pattern.compile(regex);
if (LOG.isDebugEnabled()) {
LOG.debug("Finding kubernetes id via regex " + answer);
}
return answer;
}
protected void doAddApps(File rootFolder, File fileOrDirectory, List<AppDTO> apps) {
if (fileOrDirectory != null && fileOrDirectory.exists()) {
if (fileOrDirectory.isFile()) {
if (isKubernetesMetadataFile(fileOrDirectory)) {
AppDTO app = null;
try {
app = createAppDto(rootFolder, fileOrDirectory);
} catch (IOException e) {
LOG.warn("Failed to create AppDTO for folder " + fileOrDirectory, e);
}
if (app != null) {
apps.add(app);
}
}
} else if (fileOrDirectory.isDirectory()) {
File[] files = fileOrDirectory.listFiles();
if (files != null) {
for (File file : files) {
doAddApps(rootFolder, file, apps);
}
}
}
}
}
protected AppDTO createAppDto(File rootFolder, File kubeFile) throws IOException {
File appFolder = kubeFile.getParentFile();
String appPath = relativePath(rootFolder, appFolder);
String kubePath = relativePath(rootFolder, kubeFile);
String iconPath = doFindAppIconPath(rootFolder, appFolder);
Properties properties = new Properties();
File propertiesFile = new File(appFolder, "fabric8.properties");
if (propertiesFile.exists() && propertiesFile.isFile()) {
try {
properties.load(new FileInputStream(propertiesFile));
} catch (Exception e) {
LOG.warn("Failed to load fabric8 properties file " + propertiesFile + ". " + e, e);
}
}
String name = properties.getProperty("name", appFolder.getName());
String description = properties.getProperty("description");
String version = properties.getProperty("version");
String groupId = properties.getProperty("groupId");
String artifactId = properties.getProperty("artifactId");
KubernetesNames names = KubernetesNames.loadFile(kubeFile);
return new AppDTO(appPath, iconPath, name, description, kubePath, version, groupId, artifactId, names);
}
protected File findAppFolder(File fileOrDirectory, Pattern pattern) {
if (fileOrDirectory != null && fileOrDirectory.exists()) {
if (fileOrDirectory.isFile()) {
if (isKubernetesMetadataFile(fileOrDirectory)) {
if (fileTextMatchesPattern(fileOrDirectory, pattern)) {
return fileOrDirectory.getParentFile();
}
}
} else if (fileOrDirectory.isDirectory()) {
File[] files = fileOrDirectory.listFiles();
if (files != null) {
for (File file : files) {
File answer = findAppFolder(file, pattern);
if (answer != null) {
return answer;
}
}
}
}
}
return null;
}
public static boolean isKubernetesMetadataFile(File fileOrDirectory) {
String name = fileOrDirectory.getName();
return name.equals("kubernetes.json") || name.equals("kubernetes.yml") || name.equals("kubernetes.yaml");
}
/**
* Returns true if the text of the given file matches the regex
*/
public static boolean fileTextMatchesPattern(File file, Pattern pattern) {
try {
String text = IOHelper.readFully(file);
return pattern.matcher(text).find();
} catch (IOException e) {
LOG.warn("Could not load file " + file.getAbsolutePath() + ". " + e, e);
return false;
}
}
public static String resolveHttpDockerHost() {
String dockerHost = resolveDockerHost();
if (dockerHost.startsWith("tcp:")) {
return "http:" + dockerHost.substring(4);
}
return dockerHost;
}
public static String resolveDockerHost() {
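// Prefer the DOCKER_HOST environment variable, then the docker.host system property, and finally fall back to the default local endpoint.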
String dockerHost = System.getenv("DOCKER_HOST");
if (isBlank(dockerHost)) {
dockerHost = System.getProperty("docker.host");
}
if (!isBlank(dockerHost)) {
return dockerHost;
}
return DEFAULT_DOCKER_HOST;
}
}
|
[
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_MASTER\"",
"\"KUBERNETES_MASTER\"",
"\"KUBERNETES_USERNAME\"",
"\"KUBERNETES_PASSWORD\"",
"\"HOSTNAME\"",
"\"DOCKER_HOST\""
] |
[] |
[
"KUBERNETES_MASTER",
"DOCKER_HOST",
"KUBERNETES_SERVICE_HOST",
"HOSTNAME",
"KUBERNETES_PASSWORD",
"KUBERNETES_USERNAME"
] |
[]
|
["KUBERNETES_MASTER", "DOCKER_HOST", "KUBERNETES_SERVICE_HOST", "HOSTNAME", "KUBERNETES_PASSWORD", "KUBERNETES_USERNAME"]
|
java
| 6 | 0 | |
models/migrations/migrations_test.go
|
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package migrations
import (
"context"
"database/sql"
"fmt"
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"github.com/stretchr/testify/assert"
"github.com/unknwon/com"
"xorm.io/xorm"
"xorm.io/xorm/names"
)
func TestMain(m *testing.M) {
giteaRoot := base.SetupGiteaRoot()
if giteaRoot == "" {
fmt.Println("Environment variable $GITEA_ROOT not set")
os.Exit(1)
}
giteaBinary := "gitea"
if runtime.GOOS == "windows" {
giteaBinary += ".exe"
}
setting.AppPath = path.Join(giteaRoot, giteaBinary)
if _, err := os.Stat(setting.AppPath); err != nil {
fmt.Printf("Could not find gitea binary at %s\n", setting.AppPath)
os.Exit(1)
}
giteaConf := os.Getenv("GITEA_CONF")
if giteaConf == "" {
giteaConf = path.Join(filepath.Dir(setting.AppPath), "integrations/sqlite.ini")
fmt.Printf("Environment variable $GITEA_CONF not set - defaulting to %s\n", giteaConf)
}
if !path.IsAbs(giteaConf) {
setting.CustomConf = path.Join(giteaRoot, giteaConf)
} else {
setting.CustomConf = giteaConf
}
tmpDataPath, err := os.MkdirTemp("", "data")
if err != nil {
fmt.Printf("Unable to create temporary data path %v\n", err)
os.Exit(1)
}
setting.AppDataPath = tmpDataPath
setting.SetCustomPathAndConf("", "", "")
setting.LoadForTest()
git.CheckLFSVersion()
setting.InitDBConfig()
setting.NewLogServices(true)
exitStatus := m.Run()
if err := removeAllWithRetry(setting.RepoRootPath); err != nil {
fmt.Fprintf(os.Stderr, "os.RemoveAll: %v\n", err)
}
if err := removeAllWithRetry(tmpDataPath); err != nil {
fmt.Fprintf(os.Stderr, "os.RemoveAll: %v\n", err)
}
os.Exit(exitStatus)
}
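// removeAllWithRetry retries os.RemoveAll up to 20 times, sleeping 100ms between attempts, to cope with transient removal failures.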
func removeAllWithRetry(dir string) error {
var err error
for i := 0; i < 20; i++ {
err = os.RemoveAll(dir)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return err
}
func newXORMEngine() (*xorm.Engine, error) {
if err := db.InitEngine(context.Background()); err != nil {
return nil, err
}
x := unittest.GetXORMEngine()
return x, nil
}
func deleteDB() error {
switch {
case setting.Database.UseSQLite3:
if err := util.Remove(setting.Database.Path); err != nil {
return err
}
return os.MkdirAll(path.Dir(setting.Database.Path), os.ModePerm)
case setting.Database.UseMySQL:
db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s)/",
setting.Database.User, setting.Database.Passwd, setting.Database.Host))
if err != nil {
return err
}
defer db.Close()
if _, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s", setting.Database.Name)); err != nil {
return err
}
if _, err = db.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", setting.Database.Name)); err != nil {
return err
}
return nil
case setting.Database.UsePostgreSQL:
db, err := sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/?sslmode=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.SSLMode))
if err != nil {
return err
}
defer db.Close()
if _, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s", setting.Database.Name)); err != nil {
return err
}
if _, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s", setting.Database.Name)); err != nil {
return err
}
db.Close()
// Check if we need to setup a specific schema
if len(setting.Database.Schema) != 0 {
db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.Name, setting.Database.SSLMode))
if err != nil {
return err
}
defer db.Close()
schrows, err := db.Query(fmt.Sprintf("SELECT 1 FROM information_schema.schemata WHERE schema_name = '%s'", setting.Database.Schema))
if err != nil {
return err
}
defer schrows.Close()
if !schrows.Next() {
// Create and setup a DB schema
_, err = db.Exec(fmt.Sprintf("CREATE SCHEMA %s", setting.Database.Schema))
if err != nil {
return err
}
}
// Make the user's default search path the created schema; this will affect new connections
_, err = db.Exec(fmt.Sprintf(`ALTER USER "%s" SET search_path = %s`, setting.Database.User, setting.Database.Schema))
if err != nil {
return err
}
return nil
}
case setting.Database.UseMSSQL:
host, port := setting.ParseMSSQLHostPort(setting.Database.Host)
db, err := sql.Open("mssql", fmt.Sprintf("server=%s; port=%s; database=%s; user id=%s; password=%s;",
host, port, "master", setting.Database.User, setting.Database.Passwd))
if err != nil {
return err
}
defer db.Close()
if _, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS [%s]", setting.Database.Name)); err != nil {
return err
}
if _, err = db.Exec(fmt.Sprintf("CREATE DATABASE [%s]", setting.Database.Name)); err != nil {
return err
}
}
return nil
}
// prepareTestEnv prepares the test environment and resets the database. The skip parameter should usually be 0.
// Provide models to be sync'd with the database - in particular any models you expect fixtures to be loaded from.
//
// fixtures in `models/migrations/fixtures/<TestName>` will be loaded automatically
func prepareTestEnv(t *testing.T, skip int, syncModels ...interface{}) (*xorm.Engine, func()) {
t.Helper()
ourSkip := 2
ourSkip += skip
deferFn := PrintCurrentTest(t, ourSkip)
assert.NoError(t, os.RemoveAll(setting.RepoRootPath))
assert.NoError(t, com.CopyDir(path.Join(filepath.Dir(setting.AppPath), "integrations/gitea-repositories-meta"),
setting.RepoRootPath))
ownerDirs, err := os.ReadDir(setting.RepoRootPath)
if err != nil {
assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
}
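// Ensure every seeded repository has the bare-repo directory skeleton git expects (objects/pack, objects/info, refs/heads, refs/tag).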
for _, ownerDir := range ownerDirs {
if !ownerDir.Type().IsDir() {
continue
}
repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
if err != nil {
assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
}
for _, repoDir := range repoDirs {
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0o755)
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0o755)
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0o755)
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0o755)
}
}
if err := deleteDB(); err != nil {
t.Errorf("unable to reset database: %v", err)
return nil, deferFn
}
x, err := newXORMEngine()
assert.NoError(t, err)
if x != nil {
oldDefer := deferFn
deferFn = func() {
oldDefer()
if err := x.Close(); err != nil {
t.Errorf("error during close: %v", err)
}
if err := deleteDB(); err != nil {
t.Errorf("unable to reset database: %v", err)
}
}
}
if err != nil {
return x, deferFn
}
if len(syncModels) > 0 {
if err := x.Sync2(syncModels...); err != nil {
t.Errorf("error during sync: %v", err)
return x, deferFn
}
}
fixturesDir := filepath.Join(filepath.Dir(setting.AppPath), "models", "migrations", "fixtures", t.Name())
if _, err := os.Stat(fixturesDir); err == nil {
t.Logf("initializing fixtures from: %s", fixturesDir)
if err := unittest.InitFixtures(
unittest.FixturesOptions{
Dir: fixturesDir,
}, x); err != nil {
t.Errorf("error whilst initializing fixtures from %s: %v", fixturesDir, err)
return x, deferFn
}
if err := unittest.LoadFixtures(x); err != nil {
t.Errorf("error whilst loading fixtures from %s: %v", fixturesDir, err)
return x, deferFn
}
} else if !os.IsNotExist(err) {
t.Errorf("unexpected error whilst checking for existence of fixtures: %v", err)
} else {
t.Logf("no fixtures found in: %s", fixturesDir)
}
return x, deferFn
}
func Test_dropTableColumns(t *testing.T) {
x, deferable := prepareTestEnv(t, 0)
if x == nil || t.Failed() {
defer deferable()
return
}
defer deferable()
type DropTest struct {
ID int64 `xorm:"pk autoincr"`
FirstColumn string
ToDropColumn string `xorm:"unique"`
AnotherColumn int64
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}
columns := []string{
"first_column",
"to_drop_column",
"another_column",
"created_unix",
"updated_unix",
}
for i := range columns {
x.SetMapper(names.GonicMapper{})
if err := x.Sync2(new(DropTest)); err != nil {
t.Errorf("unable to create DropTest table: %v", err)
return
}
sess := x.NewSession()
if err := sess.Begin(); err != nil {
sess.Close()
t.Errorf("unable to begin transaction: %v", err)
return
}
if err := dropTableColumns(sess, "drop_test", columns[i:]...); err != nil {
sess.Close()
t.Errorf("Unable to drop columns[%d:]: %s from drop_test: %v", i, columns[i:], err)
return
}
if err := sess.Commit(); err != nil {
sess.Close()
t.Errorf("unable to commit transaction: %v", err)
return
}
sess.Close()
if err := x.DropTables(new(DropTest)); err != nil {
t.Errorf("unable to drop table: %v", err)
return
}
for j := range columns[i+1:] {
x.SetMapper(names.GonicMapper{})
if err := x.Sync2(new(DropTest)); err != nil {
t.Errorf("unable to create DropTest table: %v", err)
return
}
dropcols := append([]string{columns[i]}, columns[j+i+1:]...)
sess := x.NewSession()
if err := sess.Begin(); err != nil {
sess.Close()
t.Errorf("unable to begin transaction: %v", err)
return
}
if err := dropTableColumns(sess, "drop_test", dropcols...); err != nil {
sess.Close()
t.Errorf("Unable to drop columns: %s from drop_test: %v", dropcols, err)
return
}
if err := sess.Commit(); err != nil {
sess.Close()
t.Errorf("unable to commit transaction: %v", err)
return
}
sess.Close()
if err := x.DropTables(new(DropTest)); err != nil {
t.Errorf("unable to drop table: %v", err)
return
}
}
}
}
|
[
"\"GITEA_CONF\""
] |
[] |
[
"GITEA_CONF"
] |
[]
|
["GITEA_CONF"]
|
go
| 1 | 0 | |
moni-moni/server/server/apps/users/authentication/views.py
|
from .serializers import (
ResetPasswordEmailRequestSerializer,
RegisterSerializer,
LoginSerializer,
LogoutSerializer,
RegisterSerializer,
SetNewPasswordSerializer,
ResetPasswordEmailRequestSerializer,
RefreshTokenSerializer
)
from rest_framework import status
from rest_framework import generics, permissions
from rest_framework.response import Response
from rest_framework import generics, status, permissions
from rest_framework_simplejwt.views import TokenRefreshView
from django.utils.encoding import force_str
from django.utils.http import urlsafe_base64_decode
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.http import HttpResponsePermanentRedirect
from django.utils.encoding import (
force_str,
DjangoUnicodeDecodeError,
)
from .utils import TokenGenerator, Email
from server.apps.users.models import CustomUser as User
import os
class CustomRedirect(HttpResponsePermanentRedirect):
allowed_schemes = [os.environ.get("APP_SCHEME"), "http", "https"]
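    # Illustrative: if APP_SCHEME were set to e.g. "myapp", redirects to
    # "myapp://..." deep links would be allowed in addition to http/https.
    # The scheme value here is an example only, not part of the original code.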
class RegisterAPI(generics.GenericAPIView):
serializer_class = RegisterSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
token = user.tokens()
send_email = Email.from_user(request, user)
send_email.start()
return Response(token)
class LoginAPI(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
tokens = user.tokens()
access_token = {"token": tokens["access"]}
response = Response(access_token, status=status.HTTP_200_OK)
response.set_cookie('x-refresh-token', tokens["refresh"])
return response
class LogoutAPI(generics.GenericAPIView):
serializer_class = LogoutSerializer
permission_classes = [
permissions.IsAuthenticated,
]
def post(self, request):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class ActivateAccountView(generics.GenericAPIView):
def get(self, request, uidb64, token):
try:
uid = force_str(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except Exception as identifier:
user = None
generate_token = TokenGenerator()
if user is not None and generate_token.check_token(user, token):
user.is_verified = True
user.save()
return Response(
{"msg": "Account activated successfully.", "status": status.HTTP_200_OK}
)
return Response(
{
"msg": "Account activation failed.",
"status": status.HTTP_401_UNAUTHORIZED,
}
)
class RequestPasswordResetEmail(generics.GenericAPIView):
serializer_class = ResetPasswordEmailRequestSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
email = request.data.get("email", "")
if User.objects.filter(email=email).exists():
user = User.objects.get(email=email)
send_email = Email.password_reset(request, user)
send_email.start()
return Response(
{"success": "We have sent you a link to reset your password"},
status=status.HTTP_200_OK,
)
class PasswordTokenCheckAPI(generics.GenericAPIView):
serializer_class = SetNewPasswordSerializer
def get(self, request, uidb64, token):
redirect_url = request.GET.get("redirect_url")
try:
id = force_str(urlsafe_base64_decode(uidb64))
user = User.objects.get(id=id)
generate_token = TokenGenerator()
            # Redirect with token_valid=False when the user or token is invalid.
            if user is None or not generate_token.check_token(user, token):
                if redirect_url and len(redirect_url) > 3:
return CustomRedirect(redirect_url + "?token_valid=False")
else:
return CustomRedirect(
os.environ.get("FRONTEND_URL") + "?token_valid=False"
)
if redirect_url and len(redirect_url) > 3:
return CustomRedirect(
redirect_url
+ "?token_valid=True&message=Credentials Valid&uidb64="
+ uidb64
+ "&token="
+ token
)
else:
return CustomRedirect(
os.environ.get("FRONTEND_URL") + "?token_valid=False"
)
except DjangoUnicodeDecodeError as identifier:
try:
if not PasswordResetTokenGenerator().check_token(user):
return CustomRedirect(redirect_url + "?token_valid=False")
except UnboundLocalError as e:
return Response(
{"error": "Token is not valid, please request a new one"},
status=status.HTTP_400_BAD_REQUEST,
)
class SetNewPasswordAPIView(generics.GenericAPIView):
serializer_class = SetNewPasswordSerializer
def patch(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
return Response(
{"success": True, "message": "Password reset success"},
status=status.HTTP_200_OK,
)
class RefreshTokenView(TokenRefreshView):
serializer_class = RefreshTokenSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data, context={'refresh': request.COOKIES.get("x-refresh-token")})
serializer.is_valid(raise_exception=True)
access_token = {"token": serializer.validated_data["access"]}
response = Response(access_token, status=status.HTTP_200_OK)
response.set_cookie('x-refresh-token', serializer.validated_data["refresh"])
return response
|
[] |
[] |
[
"APP_SCHEME",
"FRONTEND_URL"
] |
[]
|
["APP_SCHEME", "FRONTEND_URL"]
|
python
| 2 | 0 | |
go/demo/test/cpu_test.go
|
package test
import (
"fmt"
"log"
"os"
"runtime"
"testing"
"time"
"github.com/shirou/gopsutil/cpu"
"github.com/stretchr/testify/assert"
)
func testCPUPercent(percpu bool) {
numcpu := runtime.NumCPU()
testCount := 3
if runtime.GOOS != "windows" {
testCount = 100
v, err := cpu.Percent(time.Millisecond, percpu)
if err != nil {
log.Fatalf("error %v", err)
}
// Skip CircleCI which CPU num is different
if os.Getenv("CIRCLECI") != "true" {
if (percpu && len(v) != numcpu) || (!percpu && len(v) != 1) {
log.Fatalf("wrong number of entries from CPUPercent: %v", v)
}
}
}
for i := 0; i < testCount; i++ {
duration := time.Duration(10) * time.Microsecond
v, err := cpu.Percent(duration, percpu)
if err != nil {
log.Fatalf("error %v", err)
}
for _, percent := range v {
			// Check for slightly greater than 100% to account for any rounding issues.
if percent < 0.0 || percent > 100.0001*float64(numcpu) {
log.Fatalf("CPUPercent value is invalid: %f", percent)
}
			log.Printf("CPUPercent value is: %f", percent)
}
}
}
func TestCPUPercent(t *testing.T) {
testCPUPercent(false)
}
func TestCPUPercentPerCpu(t *testing.T) {
testCPUPercent(true)
}
func TestCpu_times(t *testing.T) {
v, err := cpu.Times(false)
if err != nil {
t.Errorf("error %v", err)
}
if len(v) == 0 {
t.Error("could not get CPUs ", err)
}
empty := cpu.TimesStat{}
for _, vv := range v {
if vv == empty {
t.Errorf("could not get CPU User: %v", vv)
}
log.Printf("TimesStat value is: %v", vv)
}
// test sum of per cpu stats is within margin of error for cpu total stats
cpuTotal, err := cpu.Times(false)
if err != nil {
t.Errorf("error %v", err)
}
if len(cpuTotal) == 0 {
t.Error("could not get CPUs ", err)
}
perCPU, err := cpu.Times(true)
if err != nil {
t.Errorf("error %v", err)
}
if len(perCPU) == 0 {
t.Error("could not get CPUs ", err)
}
var perCPUUserTimeSum float64
var perCPUSystemTimeSum float64
var perCPUIdleTimeSum float64
for _, pc := range perCPU {
perCPUUserTimeSum += pc.User
perCPUSystemTimeSum += pc.System
perCPUIdleTimeSum += pc.Idle
}
margin := 2.0
assert.InEpsilon(t, cpuTotal[0].User, perCPUUserTimeSum, margin)
assert.InEpsilon(t, cpuTotal[0].System, perCPUSystemTimeSum, margin)
assert.InEpsilon(t, cpuTotal[0].Idle, perCPUIdleTimeSum, margin)
log.Println(cpuTotal[0].User, perCPUUserTimeSum, margin)
log.Println(cpuTotal[0].System, perCPUSystemTimeSum, margin)
log.Println(cpuTotal[0].Idle, perCPUIdleTimeSum, margin)
result := float32(cpuTotal[0].User+cpuTotal[0].System) / float32(cpuTotal[0].User+cpuTotal[0].System+cpuTotal[0].Idle)
log.Println(result)
}
func TestCpu_counts(t *testing.T) {
numcpu := runtime.NumCPU()
v, err := cpu.Counts(true)
if err != nil {
t.Errorf("error %v", err)
}
if v == 0 {
t.Errorf("could not get CPU counts: %v", v)
}
log.Printf("cpuCount value is : %v-%v", v, numcpu)
}
func TestCPUTimeStat_String(t *testing.T) {
v := cpu.TimesStat{
CPU: "cpu0",
User: 100.1,
System: 200.1,
Idle: 300.1,
}
e := `{"cpu":"cpu0","user":100.1,"system":200.1,"idle":300.1,"nice":0.0,"iowait":0.0,"irq":0.0,"softirq":0.0,"steal":0.0,"guest":0.0,"guestNice":0.0}`
if e != fmt.Sprintf("%v", v) {
t.Errorf("CPUTimesStat string is invalid: %v", v)
}
log.Printf("cpu TimesStat value is : %v", v)
}
func TestCpuInfo(t *testing.T) {
v, err := cpu.Info()
if err != nil {
t.Errorf("error %v", err)
}
if len(v) == 0 {
t.Errorf("could not get CPU Info")
}
for _, vv := range v {
if vv.ModelName == "" {
t.Errorf("could not get CPU Info: %v", vv)
}
log.Printf("cpu Info value is : %+v", vv)
}
}
|
[
"\"CIRCLECI\""
] |
[] |
[
"CIRCLECI"
] |
[]
|
["CIRCLECI"]
|
go
| 1 | 0 | |
cmd/microservice/main.go
|
package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"time"
"example-helm-go-microservice/pkg/pet"
"github.com/jackc/pgx/v4"
)
func main() {
// Open up our database connection.
config, err := pgx.ParseConfig("postgres://host:5432/database?sslmode=disable")
if err != nil {
log.Fatal(err)
}
config.Host = os.Getenv("POSTGRES_HOST")
config.Database = os.Getenv("POSTGRES_DATABASE")
config.User = os.Getenv("POSTGRES_USER")
config.Password = "mysecretpassword"
config.LogLevel = pgx.LogLevelTrace
conn, err := pgx.ConnectConfig(context.Background(), config)
if err != nil {
log.Fatal(err)
}
//defer the close till after the main function has finished executing
defer conn.Close(context.Background())
	// Simple connectivity check against the database.
	var greeting string
	if err := conn.QueryRow(context.Background(), "select 1").Scan(&greeting); err != nil {
		log.Println(err)
	}
	fmt.Println(greeting)
// os interrupt
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt)
// server
server := http.Server{
Addr: "0.0.0.0:8080",
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 120 * time.Second,
}
http.HandleFunc("/", pet.Handler)
go func() {
log.Printf("listening on http://%s", server.Addr)
		log.Printf("SERVICE: %s", os.Getenv("SERVICE"))
if err := server.ListenAndServe(); err != nil {
log.Fatal(err)
}
}()
<-stop
err = server.Shutdown(context.Background())
if err != nil {
log.Println(err)
}
}
|
[
"\"POSTGRES_HOST\"",
"\"POSTGRES_DATABASE\"",
"\"POSTGRES_USER\"",
"\"SERVICE\""
] |
[] |
[
"POSTGRES_DATABASE",
"POSTGRES_HOST",
"POSTGRES_USER",
"SERVICE"
] |
[]
|
["POSTGRES_DATABASE", "POSTGRES_HOST", "POSTGRES_USER", "SERVICE"]
|
go
| 4 | 0 | |
rasterio/env.py
|
"""Rasterio's GDAL/AWS environment"""
import attr
from functools import wraps, total_ordering
from inspect import getfullargspec as getargspec
import logging
import os
import re
import threading
import warnings
import rasterio._loading
with rasterio._loading.add_gdal_dll_directories():
from rasterio._env import (
GDALEnv, get_gdal_config, set_gdal_config,
GDALDataFinder, PROJDataFinder, set_proj_data_search_path)
from rasterio.errors import (
EnvError, GDALVersionError, RasterioDeprecationWarning)
from rasterio.session import Session, DummySession
class ThreadEnv(threading.local):
def __init__(self):
self._env = None # Initialises in each thread
# When the outermost 'rasterio.Env()' executes '__enter__' it
# probes the GDAL environment to see if any of the supplied
# config options already exist, the assumption being that they
# were set with 'osgeo.gdal.SetConfigOption()' or possibly
# 'rasterio.env.set_gdal_config()'. The discovered options are
# reinstated when the outermost Rasterio environment exits.
# Without this check any environment options that are present in
# the GDAL environment and are also passed to 'rasterio.Env()'
# will be unset when 'rasterio.Env()' tears down, regardless of
# their value. For example:
#
        #   from osgeo import gdal
        #   import rasterio
        #
        #   gdal.SetConfigOption('key', 'value')
        #
        #   with rasterio.Env(key='something'):
        #       pass
#
# The config option 'key' would be unset when 'Env()' exits.
# A more comprehensive solution would also leverage
# https://trac.osgeo.org/gdal/changeset/37273 but this gets
# Rasterio + older versions of GDAL halfway there. One major
# assumption is that environment variables are not set directly
# with 'osgeo.gdal.SetConfigOption()' OR
# 'rasterio.env.set_gdal_config()' inside of a 'rasterio.Env()'.
self._discovered_options = None
local = ThreadEnv()
log = logging.getLogger(__name__)
class Env:
"""Abstraction for GDAL and AWS configuration
The GDAL library is stateful: it has a registry of format drivers,
an error stack, and dozens of configuration options.
Rasterio's approach to working with GDAL is to wrap all the state
up using a Python context manager (see PEP 343,
https://www.python.org/dev/peps/pep-0343/). When the context is
entered GDAL drivers are registered, error handlers are
configured, and configuration options are set. When the context
is exited, drivers are removed from the registry and other
configurations are removed.
Example
-------
.. code-block:: python
with rasterio.Env(GDAL_CACHEMAX=128000000) as env:
# All drivers are registered, GDAL's raster block cache
# size is set to 128 MB.
# Commence processing...
...
# End of processing.
# At this point, configuration options are set to their
        # previous (possibly unset) values.
A boto3 session or boto3 session constructor arguments
`aws_access_key_id`, `aws_secret_access_key`, `aws_session_token`
may be passed to Env's constructor. In the latter case, a session
will be created as soon as needed. AWS credentials are configured
for GDAL as needed.
"""
@classmethod
def default_options(cls):
"""Default configuration options
Parameters
----------
None
Returns
-------
dict
"""
return {
'GTIFF_IMPLICIT_JPEG_OVR': False,
"RASTERIO_ENV": True
}
def __init__(self, session=None, aws_unsigned=False, profile_name=None,
session_class=Session.aws_or_dummy, **options):
"""Create a new GDAL/AWS environment.
Note: this class is a context manager. GDAL isn't configured
until the context is entered via `with rasterio.Env():`
Parameters
----------
session : optional
A Session object.
aws_unsigned : bool, optional
Do not sign cloud requests.
profile_name : str, optional
A shared credentials profile name, as per boto3.
session_class : Session, optional
A sub-class of Session.
**options : optional
A mapping of GDAL configuration options, e.g.,
`CPL_DEBUG=True, CHECK_WITH_INVERT_PROJ=False`.
Returns
-------
Env
Notes
-----
We raise EnvError if the GDAL config options
AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY are given. AWS
credentials are handled exclusively by boto3.
Examples
--------
>>> with Env(CPL_DEBUG=True, CPL_CURL_VERBOSE=True):
... with rasterio.open("https://example.com/a.tif") as src:
... print(src.profile)
For access to secured cloud resources, a Rasterio Session or a
foreign session object may be passed to the constructor.
>>> import boto3
>>> from rasterio.session import AWSSession
>>> boto3_session = boto3.Session(...)
>>> with Env(AWSSession(boto3_session)):
... with rasterio.open("s3://mybucket/a.tif") as src:
... print(src.profile)
"""
aws_access_key_id = options.pop('aws_access_key_id', None)
# Before 1.0, Rasterio only supported AWS. We will special
# case AWS in 1.0.x. TODO: warn deprecation in 1.1.
if aws_access_key_id:
warnings.warn(
"Passing abstract session keyword arguments is deprecated. "
"Pass a Rasterio AWSSession object instead.",
RasterioDeprecationWarning
)
aws_secret_access_key = options.pop('aws_secret_access_key', None)
aws_session_token = options.pop('aws_session_token', None)
region_name = options.pop('region_name', None)
if ('AWS_ACCESS_KEY_ID' in options or
'AWS_SECRET_ACCESS_KEY' in options):
raise EnvError(
"GDAL's AWS config options can not be directly set. "
"AWS credentials are handled exclusively by boto3.")
if session:
# Passing a session via keyword argument is the canonical
# way to configure access to secured cloud resources.
if not isinstance(session, Session):
warnings.warn(
"Passing a boto3 session is deprecated. Pass a Rasterio "
"AWSSession object instead.",
RasterioDeprecationWarning
)
session = Session.aws_or_dummy(session=session)
self.session = session
elif aws_access_key_id or profile_name or aws_unsigned:
self.session = Session.aws_or_dummy(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
profile_name=profile_name,
aws_unsigned=aws_unsigned)
elif 'AWS_ACCESS_KEY_ID' in os.environ and 'AWS_SECRET_ACCESS_KEY' in os.environ:
self.session = Session.from_environ()
else:
self.session = DummySession()
self.options = options.copy()
self.context_options = {}
@classmethod
def from_defaults(cls, *args, **kwargs):
"""Create an environment with default config options
Parameters
----------
args : optional
Positional arguments for Env()
kwargs : optional
Keyword arguments for Env()
Returns
-------
Env
Notes
-----
The items in kwargs will be overlaid on the default values.
"""
options = Env.default_options()
options.update(**kwargs)
return Env(*args, **options)
def credentialize(self):
"""Get credentials and configure GDAL
Note well: this method is a no-op if the GDAL environment
already has credentials, unless session is not None.
Returns
-------
None
"""
cred_opts = self.session.get_credential_options()
self.options.update(**cred_opts)
setenv(**cred_opts)
def drivers(self):
"""Return a mapping of registered drivers."""
return local._env.drivers()
def _dump_open_datasets(self):
"""Writes descriptions of open datasets to stderr
For debugging and testing purposes.
"""
return local._env._dump_open_datasets()
def __enter__(self):
log.debug("Entering env context: %r", self)
if local._env is None:
log.debug("Starting outermost env")
self._has_parent_env = False
# See note directly above where _discovered_options is globally
# defined. This MUST happen before calling 'defenv()'.
local._discovered_options = {}
# Don't want to reinstate the "RASTERIO_ENV" option.
probe_env = {k for k in self.options.keys() if k != "RASTERIO_ENV"}
for key in probe_env:
val = get_gdal_config(key, normalize=False)
if val is not None:
local._discovered_options[key] = val
defenv(**self.options)
self.context_options = {}
else:
self._has_parent_env = True
self.context_options = getenv()
setenv(**self.options)
self.credentialize()
log.debug("Entered env context: %r", self)
return self
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
log.debug("Exiting env context: %r", self)
delenv()
if self._has_parent_env:
defenv()
setenv(**self.context_options)
else:
log.debug("Exiting outermost env")
# See note directly above where _discovered_options is globally
# defined.
while local._discovered_options:
key, val = local._discovered_options.popitem()
set_gdal_config(key, val, normalize=False)
local._discovered_options = None
log.debug("Exited env context: %r", self)
def defenv(**options):
"""Create a default environment if necessary."""
if local._env:
log.debug("GDAL environment exists: %r", local._env)
else:
log.debug("No GDAL environment exists")
local._env = GDALEnv()
local._env.update_config_options(**options)
log.debug(
"New GDAL environment %r created", local._env)
local._env.start()
def getenv():
"""Get a mapping of current options."""
if not local._env:
raise EnvError("No GDAL environment exists")
else:
log.debug("Got a copy of environment %r options", local._env)
return local._env.options.copy()
def hasenv():
return bool(local._env)
def setenv(**options):
"""Set options in the existing environment."""
if not local._env:
raise EnvError("No GDAL environment exists")
else:
local._env.update_config_options(**options)
def hascreds():
warnings.warn("Please use Env.session.hascreds() instead", RasterioDeprecationWarning)
return local._env is not None and all(key in local._env.get_config_options() for key in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'])
def delenv():
"""Delete options in the existing environment."""
if not local._env:
raise EnvError("No GDAL environment exists")
else:
local._env.clear_config_options()
log.debug("Cleared existing %r options", local._env)
local._env.stop()
local._env = None
class NullContextManager:
def __init__(self):
pass
def __enter__(self):
return self
def __exit__(self, *args):
pass
def env_ctx_if_needed():
"""Return an Env if one does not exist
Returns
-------
Env or a do-nothing context manager
"""
if local._env:
return NullContextManager()
else:
return Env.from_defaults()
def ensure_env(f):
"""A decorator that ensures an env exists before a function
calls any GDAL C functions."""
@wraps(f)
def wrapper(*args, **kwds):
if local._env:
return f(*args, **kwds)
else:
with Env.from_defaults():
return f(*args, **kwds)
return wrapper
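    # Illustrative usage sketch (not part of the original module); ``open_raster``
    # is a hypothetical function used only to show the decorator:
    #
    #     @ensure_env
    #     def open_raster(path):
    #         ...  # GDAL C functions may safely be called here; an Env exists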
def ensure_env_credentialled(f):
"""DEPRECATED alias for ensure_env_with_credentials"""
warnings.warn("Please use ensure_env_with_credentials instead", RasterioDeprecationWarning)
return ensure_env_with_credentials(f)
def ensure_env_with_credentials(f):
"""Ensures a config environment exists and is credentialized
Parameters
----------
f : function
A function.
Returns
-------
A function wrapper.
Notes
-----
The function wrapper checks the first argument of f and
credentializes the environment if the first argument is a URI with
scheme "s3".
"""
@wraps(f)
def wrapper(*args, **kwds):
if local._env:
env_ctor = Env
else:
env_ctor = Env.from_defaults
if isinstance(args[0], str):
session_cls = Session.cls_from_path(args[0])
if local._env and session_cls.hascreds(getenv()):
session_cls = DummySession
session = session_cls()
else:
session = DummySession()
with env_ctor(session=session):
return f(*args, **kwds)
return wrapper
@attr.s(slots=True)
@total_ordering
class GDALVersion:
"""Convenience class for obtaining GDAL major and minor version components
and comparing between versions. This is highly simplistic and assumes a
very normal numbering scheme for versions and ignores everything except
the major and minor components."""
major = attr.ib(default=0, validator=attr.validators.instance_of(int))
minor = attr.ib(default=0, validator=attr.validators.instance_of(int))
    def __eq__(self, other):
        return (self.major, self.minor) == (other.major, other.minor)
    def __lt__(self, other):
        return (self.major, self.minor) < (other.major, other.minor)
def __repr__(self):
return "GDALVersion(major={0}, minor={1})".format(self.major, self.minor)
def __str__(self):
return "{0}.{1}".format(self.major, self.minor)
@classmethod
def parse(cls, input):
"""
Parses input tuple or string to GDALVersion. If input is a GDALVersion
instance, it is returned.
Parameters
----------
input: tuple of (major, minor), string, or instance of GDALVersion
Returns
-------
GDALVersion instance
"""
if isinstance(input, cls):
return input
if isinstance(input, tuple):
return cls(*input)
elif isinstance(input, str):
# Extract major and minor version components.
# alpha, beta, rc suffixes ignored
match = re.search(r'^\d+\.\d+', input)
if not match:
raise ValueError(
"value does not appear to be a valid GDAL version "
"number: {}".format(input))
major, minor = (int(c) for c in match.group().split('.'))
return cls(major=major, minor=minor)
raise TypeError("GDALVersion can only be parsed from a string or tuple")
@classmethod
def runtime(cls):
"""Return GDALVersion of current GDAL runtime"""
from rasterio._base import gdal_version # to avoid circular import
return cls.parse(gdal_version())
def at_least(self, other):
other = self.__class__.parse(other)
return self >= other
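    # Illustrative comparisons (example values only, not from the original module):
    #
    #     GDALVersion.parse("3.4.1") == GDALVersion(3, 4)       # True: patch part ignored
    #     GDALVersion(3, 4).at_least("2.2")                     # True
    #     GDALVersion.parse((2, 2)) < GDALVersion.parse("3.0")  # True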
def require_gdal_version(version, param=None, values=None, is_max_version=False,
reason=''):
"""A decorator that ensures the called function or parameters are supported
by the runtime version of GDAL. Raises GDALVersionError if conditions
are not met.
Examples:
\b
@require_gdal_version('2.2')
def some_func():
calling `some_func` with a runtime version of GDAL that is < 2.2 raises a
    GDALVersionError.
\b
@require_gdal_version('2.2', param='foo')
def some_func(foo='bar'):
calling `some_func` with parameter `foo` of any value on GDAL < 2.2 raises
a GDALVersionError.
\b
@require_gdal_version('2.2', param='foo', values=('bar',))
def some_func(foo=None):
calling `some_func` with parameter `foo` and value `bar` on GDAL < 2.2
raises a GDALVersionError.
Parameters
------------
version: tuple, string, or GDALVersion
param: string (optional, default: None)
If `values` are absent, then all use of this parameter with a value
other than default value requires at least GDAL `version`.
values: tuple, list, or set (optional, default: None)
contains values that require at least GDAL `version`. `param`
is required for `values`.
is_max_version: bool (optional, default: False)
if `True` indicates that the version provided is the maximum version
allowed, instead of requiring at least that version.
    reason: string (optional, default: '')
        custom error message presented to user in addition to message about
        GDAL version. Use this to provide an explanation of what changed
        and, if necessary, additional context to the user.
Returns
---------
wrapped function
"""
if values is not None:
if param is None:
raise ValueError(
'require_gdal_version: param must be provided with values')
if not isinstance(values, (tuple, list, set)):
raise ValueError(
'require_gdal_version: values must be a tuple, list, or set')
version = GDALVersion.parse(version)
runtime = GDALVersion.runtime()
inequality = '>=' if runtime < version else '<='
reason = '\n{0}'.format(reason) if reason else reason
def decorator(f):
@wraps(f)
def wrapper(*args, **kwds):
if ((runtime < version and not is_max_version) or
(is_max_version and runtime > version)):
if param is None:
raise GDALVersionError(
"GDAL version must be {0} {1}{2}".format(
inequality, str(version), reason))
# normalize args and kwds to dict
argspec = getargspec(f)
full_kwds = kwds.copy()
if argspec.args:
full_kwds.update(dict(zip(argspec.args[:len(args)], args)))
if argspec.defaults:
defaults = dict(zip(
reversed(argspec.args), reversed(argspec.defaults)))
else:
defaults = {}
if param in full_kwds:
if values is None:
if param not in defaults or (
full_kwds[param] != defaults[param]):
raise GDALVersionError(
'usage of parameter "{0}" requires '
'GDAL {1} {2}{3}'.format(param, inequality,
version, reason))
elif full_kwds[param] in values:
raise GDALVersionError(
'parameter "{0}={1}" requires '
'GDAL {2} {3}{4}'.format(
param, full_kwds[param], inequality, version, reason))
return f(*args, **kwds)
return wrapper
return decorator
# Patch the environment if needed, such as in the installed wheel case.
if 'GDAL_DATA' not in os.environ:
path = GDALDataFinder().search_wheel()
if path:
log.debug("GDAL data found in package: path=%r.", path)
set_gdal_config("GDAL_DATA", path)
# See https://github.com/mapbox/rasterio/issues/1631.
elif GDALDataFinder().find_file("header.dxf"):
log.debug("GDAL data files are available at built-in paths.")
else:
path = GDALDataFinder().search()
if path:
set_gdal_config("GDAL_DATA", path)
log.debug("GDAL data found in other locations: path=%r.", path)
if "PROJ_LIB" in os.environ:
path = os.environ["PROJ_LIB"]
set_proj_data_search_path(path)
elif PROJDataFinder().search_wheel():
path = PROJDataFinder().search_wheel()
log.debug("PROJ data found in package: path=%r.", path)
set_proj_data_search_path(path)
# See https://github.com/mapbox/rasterio/issues/1631.
elif PROJDataFinder().has_data():
log.debug("PROJ data files are available at built-in paths.")
else:
path = PROJDataFinder().search()
if path:
log.debug("PROJ data found in other locations: path=%r.", path)
set_proj_data_search_path(path)
|
[] |
[] |
[
"PROJ_LIB"
] |
[]
|
["PROJ_LIB"]
|
python
| 1 | 0 | |
setup.py
|
#!/usr/bin/env python3
import codecs
import os
import pathlib
from typing import Any, List, Dict
from setuptools import setup # type: ignore
from setuptools import find_packages
def is_travis_deploy() -> bool:
if os.getenv("DEPLOY_SDIST", "") or os.getenv("DEPLOY_WHEEL", ""):
return is_tagged_commit()
else:
return False
def is_tagged_commit() -> bool:
if "TRAVIS_TAG" in os.environ:
if os.environ["TRAVIS_TAG"]:
return True
return False
def strip_links_from_required(l_required: List[str]) -> List[str]:
"""
>>> required = ['lib_regexp @ git+https://github.com/bitranox/lib_regexp.git', 'test']
>>> assert strip_links_from_required(required) == ['lib_regexp', 'test']
"""
l_req_stripped: List[str] = list()
for req in l_required:
req_stripped = req.split("@")[0].strip()
l_req_stripped.append(req_stripped)
return l_req_stripped
# will be overwritten with long_description if exists !
long_description = "a more pythonic way to access the windows registry as winreg"
path_readme = pathlib.Path(__file__).parent / "README.rst"
if path_readme.exists():
# noinspection PyBroadException
try:
readme_content = codecs.open(str(path_readme), encoding="utf-8").read()
long_description = readme_content
except Exception:
pass
def get_requirements_from_file(requirements_filename: str) -> List[str]:
"""
>>> assert len(get_requirements_from_file('requirements.txt')) > 0
"""
l_requirements = list()
try:
with open(
str(pathlib.Path(__file__).parent / requirements_filename), mode="r"
) as requirements_file:
for line in requirements_file:
line_data = get_line_data(line)
if line_data:
l_requirements.append(line_data)
except FileNotFoundError:
pass
return l_requirements
def get_line_data(line: str) -> str:
line = line.strip()
if "#" in line:
line = line.split("#", 1)[0].strip()
return line
tests_require = get_requirements_from_file("requirements_test.txt")
install_requires = get_requirements_from_file("requirements.txt")
setup_requires = list(set(tests_require + install_requires))
# for deploy on pypi we must not rely on imports from github
if is_travis_deploy() and is_tagged_commit():
setup_requires = strip_links_from_required(setup_requires)
tests_require = strip_links_from_required(tests_require)
install_requires = strip_links_from_required(install_requires)
setup_kwargs: Dict[str, Any] = dict()
setup_kwargs["name"] = "lib_registry"
setup_kwargs["version"] = "v2.0.7"
setup_kwargs["url"] = "https://github.com/bitranox/lib_registry"
setup_kwargs["packages"] = find_packages()
setup_kwargs["package_data"] = {"lib_registry": ["py.typed", "*.pyi", "__init__.pyi"]}
setup_kwargs[
"description"
] = "a more pythonic way to access the windows registry as winreg"
setup_kwargs["long_description"] = long_description
setup_kwargs["long_description_content_type"] = "text/x-rst"
setup_kwargs["author"] = "Robert Nowotny"
setup_kwargs["author_email"] = "[email protected]"
setup_kwargs["classifiers"] = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
]
setup_kwargs["entry_points"] = {
"console_scripts": ["lib_registry = lib_registry.lib_registry_cli:cli_main"]
}
# minimally needs to run tests - no project requirements here
setup_kwargs["tests_require"] = tests_require
# specify what a project minimally needs to run correctly
setup_kwargs["install_requires"] = install_requires
# minimally needed to run the setup script; dependencies also need to be put here for "setup.py install test"
# dependencies must not be put here for pip install
setup_kwargs["setup_requires"] = setup_requires
setup_kwargs["python_requires"] = ">=3.6.0"
setup_kwargs["zip_safe"] = False
if __name__ == "__main__":
setup(**setup_kwargs)
|
[] |
[] |
[
"DEPLOY_WHEEL",
"DEPLOY_SDIST",
"TRAVIS_TAG"
] |
[]
|
["DEPLOY_WHEEL", "DEPLOY_SDIST", "TRAVIS_TAG"]
|
python
| 3 | 0 | |
Discord/discordParse.py
|
import discord
import twitch_check
from discord.ext import commands
import asyncio
import os
from dotenv import load_dotenv
load_dotenv()
# Command parser DO NOT TOUCH
async def parse_command(message):
parse = message.content.split(' ')
    if len(parse) > 1:
await commandList[parse[0]](message, parse[1])
else:
await commandList[parse[0]](message)
# --------- COMMANDS --------- #
# To add basic text commands follow this model:
# async def commandName(message):
# await message.channel.send("Bot Message")
async def hello(message):
await message.channel.send("Hello!")
async def is_streaming(message):
    if twitch_check.is_streaming(os.getenv('CHANNEL')):
await message.channel.send('YUP')
else:
await message.channel.send('NOPE')
# --------- COMMAND LIST --------- #
# After creating your command, you must add it to
# the command list with this format:
# "!commandName" : commandName
# DO NOT forget to add the , !
commandList = {
"!hello" : hello,
"!isStreaming" : is_streaming
}
|
[] |
[] |
[
"CHANNEL"
] |
[]
|
["CHANNEL"]
|
python
| 1 | 0 | |
service/auth/cognito.go
|
package auth
import (
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
cognitoidp "github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
)
// Create and then return an AWS session
func awsSession() *session.Session {
awsProfile := os.Getenv("AWS_PROFILE")
awsRegion := os.Getenv("AWS_REGION")
session, _ := session.NewSession(&aws.Config{
Region: aws.String(awsRegion),
Credentials: credentials.NewSharedCredentials("", awsProfile),
})
return session
}
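// Illustrative environment (example values only, not part of the original file):
// with AWS_PROFILE=default and AWS_REGION=us-east-1 exported, awsSession()
// builds a session from the matching profile in the shared AWS credentials file.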
// Cognito client
var CognitoClient *cognitoidp.CognitoIdentityProvider = cognitoidp.New(awsSession())
// TODO: Remove this after refactoring `func ResendConfirmationCode(username string) (string, error)`
// Cognito client
var client *cognitoidp.CognitoIdentityProvider = cognitoidp.New(awsSession())
// Extracts `error code` and `error message` from the `awserr`
func CognitoErrorDetails(err error) (string, string) {
return err.(awserr.Error).Code(), err.(awserr.Error).Message()
}
|
[
"\"AWS_PROFILE\"",
"\"AWS_REGION\""
] |
[] |
[
"AWS_PROFILE",
"AWS_REGION"
] |
[]
|
["AWS_PROFILE", "AWS_REGION"]
|
go
| 2 | 0 | |
expand.go
|
package main
// NOTE: all of the below is copied, with a few changes, from
// https://golang.org/src/os/env.go (go1.13). In essence, I want
// to replicate os.ExpandEnv(), but only targeting variables of
// the form:
//
// ${VAR_NAME}, or ${varName}, etc.
//
// ... excepting also the case of:
//
// $${VAR_NAME} (which becomes ${VAR_NAME} in the output).
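//
// Illustrative behaviour (example mapping and strings only, not part of the
// original file): with mapping("NAME") == "world",
//
//	Expand("hello ${NAME}", mapping)    // -> "hello world"
//	Expand("escaped $${NAME}", mapping) // -> "escaped ${NAME}"
//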
// Expand replaces ${var} in the string based on the mapping function.
// For example, Expand(s, os.Getenv) is (mostly) equivalent to
// os.ExpandEnv(s).
func Expand(s string, mapping func(string) string) string {
var buf []byte
// ${} is all ASCII, so bytes are fine for this operation.
i := 0
for j := 0; j < len(s); j++ {
if j+1 < len(s) && s[j:j+2] == "${" {
if buf == nil {
buf = make([]byte, 0, 2*len(s))
}
if j-1 >= 0 && s[j-1] == '$' {
buf = append(buf, s[i:j]...)
j++
i = j
continue
}
buf = append(buf, s[i:j]...)
name, w := getShellName(s[j+1:])
if name == "" && w > 0 {
// Encountered invalid syntax; eat the
// characters.
} else {
buf = append(buf, mapping(name)...)
}
j += w
i = j + 1
}
}
if buf == nil {
return s
}
return string(buf) + s[i:]
}
// isShellSpecialVar reports whether the character identifies a special
// shell variable such as $*.
func isShellSpecialVar(c uint8) bool {
switch c {
case '*', '#', '$', '@', '!', '?', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
return true
}
return false
}
// isAlphaNum reports whether the byte is an ASCII letter, number, or underscore
func isAlphaNum(c uint8) bool {
return c == '_' || '0' <= c && c <= '9' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
}
// getShellName returns the name that begins the string and the number of bytes
// consumed to extract it. Since the name is enclosed in {}, it's part of a ${}
// expansion and two more bytes are needed than the length of the name. If the
// internal syntax is un-env-iable (get it?), then just "eat" the variable.
func getShellName(s string) (string, int) {
if len(s) > 2 && isShellSpecialVar(s[1]) && s[2] == '}' {
return s[1:2], 3
}
// Scan to closing brace
for i := 1; i < len(s); i++ {
if s[i] == '}' {
if i == 1 {
return "", 2 // Bad syntax; eat "${}"
}
return s[1:i], i + 1
}
}
return "", 1 // Bad syntax; eat "${"
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
app/django_first/django_first/asgi.py
|
"""
ASGI config for django_first project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_first.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
executor/celery_p2rank.py
|
#!/usr/bin/env python3
import os
import celery.signals
import run_p2rank_task
prankweb = celery.Celery("prankweb")
if "CELERY_BROKER_URL" in os.environ:
prankweb.conf.update({
"broker_url":
os.environ["CELERY_BROKER_URL"]
})
elif "CELERY_BROKER_PATH" in os.environ:
prankweb.conf.update({
"broker_url": "filesystem://",
"broker_transport_options": {
"data_folder_in":
os.environ["CELERY_BROKER_PATH"] + "/queue/",
"data_folder_out":
os.environ["CELERY_BROKER_PATH"] + "/queue/",
"data_folder_processed":
os.environ["CELERY_BROKER_PATH"] + "/processed/"
},
})
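# Illustrative broker settings (example values only, not part of the original
# configuration):
#
#   CELERY_BROKER_URL=redis://localhost:6379/0   -> connect to a broker by URL, or
#   CELERY_BROKER_PATH=/data/prankweb/broker     -> use the filesystem transport above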
@celery.signals.setup_logging.connect
def setup_celery_logging(**kwargs):
# We do nothing here to disable logging.
...
# https://github.com/celery/celery/issues/2509
prankweb.log.setup()
@prankweb.task(name="prediction")
def celery_run_prediction(directory: str):
if os.path.isdir(directory):
run_p2rank_task.execute_directory_task(directory, keep_working=False)
else:
print(f"Given directory does not exist {directory}")
|
[] |
[] |
[
"CELERY_BROKER_PATH",
"CELERY_BROKER_URL"
] |
[]
|
["CELERY_BROKER_PATH", "CELERY_BROKER_URL"]
|
python
| 2 | 0 | |
fhir/resources/STU3/tests/test_contract.py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Contract
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import contract
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class ContractTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("Contract", js["resourceType"])
return contract.Contract(js)
def testContract1(self):
inst = self.instantiate_from("pcd-example-notOrg.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract1(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract1(inst2)
def implContract1(self, inst):
self.assertEqual(
force_bytes(inst.friendly[0].contentAttachment.title),
force_bytes("The terms of the consent in friendly consumer speak."),
)
self.assertEqual(force_bytes(inst.id), force_bytes("pcd-example-notOrg"))
self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
self.assertEqual(inst.issued.as_json(), "2015-11-18")
self.assertEqual(
force_bytes(inst.legal[0].contentAttachment.title),
force_bytes("The terms of the consent in lawyer speak."),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].code), force_bytes("Opt-In")
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].display),
force_bytes("Default Authorization with exceptions."),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].system),
force_bytes("http://www.infoway-inforoute.ca.org/Consent-subtype-codes"),
)
self.assertEqual(
force_bytes(inst.term[0].text),
force_bytes(
"Withhold this order and any results or related objects from any provider."
),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].code), force_bytes("withhold-from")
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].display),
force_bytes("Withhold all data from specified actor entity."),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].system),
force_bytes("http://example.org/fhir/consent-term-type-codes"),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("57016-8"))
self.assertEqual(
force_bytes(inst.type.coding[0].system), force_bytes("http://loinc.org")
)
def testContract2(self):
inst = self.instantiate_from("contract-example-42cfr-part2.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract2(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract2(inst2)
def implContract2(self, inst):
self.assertEqual(
force_bytes(inst.agent[0].role[0].coding[0].code), force_bytes("IR")
)
self.assertEqual(
force_bytes(inst.agent[0].role[0].coding[0].display),
force_bytes("Recipient"),
)
self.assertEqual(
force_bytes(inst.agent[0].role[0].coding[0].system),
force_bytes("http://org.mdhhs.fhir.consent-actor-type"),
)
self.assertEqual(
force_bytes(inst.agent[0].role[0].text),
force_bytes("Recipient of restricted health information"),
)
self.assertEqual(
force_bytes(inst.agent[1].role[0].coding[0].code), force_bytes("IS")
)
self.assertEqual(
force_bytes(inst.agent[1].role[0].coding[0].display), force_bytes("Sender")
)
self.assertEqual(
force_bytes(inst.agent[1].role[0].coding[0].system),
force_bytes("http://org.mdhhs.fhir.consent-actor-type"),
)
self.assertEqual(
force_bytes(inst.agent[1].role[0].text),
force_bytes("Sender of restricted health information"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("C-2121"))
self.assertEqual(inst.issued.date, FHIRDate("2031-11-01T21:18:27-04:00").date)
self.assertEqual(inst.issued.as_json(), "2031-11-01T21:18:27-04:00")
self.assertEqual(
force_bytes(inst.legal[0].contentAttachment.contentType),
force_bytes("application/pdf"),
)
self.assertEqual(
force_bytes(inst.legal[0].contentAttachment.language), force_bytes("en-US")
)
self.assertEqual(
force_bytes(inst.legal[0].contentAttachment.title),
force_bytes("MDHHS-5515 Consent To Share Your Health Information"),
)
self.assertEqual(
force_bytes(inst.legal[0].contentAttachment.url),
force_bytes("http://org.mihin.ecms/ConsentDirective-2121"),
)
self.assertEqual(
inst.meta.lastUpdated.date, FHIRDate("2016-07-19T18:18:42.108-04:00").date
)
self.assertEqual(
inst.meta.lastUpdated.as_json(), "2016-07-19T18:18:42.108-04:00"
)
self.assertEqual(force_bytes(inst.meta.versionId), force_bytes("1"))
self.assertEqual(force_bytes(inst.securityLabel[0].code), force_bytes("R"))
self.assertEqual(
force_bytes(inst.securityLabel[0].display), force_bytes("Restricted")
)
self.assertEqual(
force_bytes(inst.securityLabel[0].system),
force_bytes("http://hl7.org/fhir/v3/Confidentiality"),
)
self.assertEqual(force_bytes(inst.securityLabel[1].code), force_bytes("ETH"))
self.assertEqual(
force_bytes(inst.securityLabel[1].display),
force_bytes("substance abuse information sensitivity"),
)
self.assertEqual(
force_bytes(inst.securityLabel[1].system),
force_bytes("http://hl7.org/fhir/v3/ActCode"),
)
self.assertEqual(
force_bytes(inst.securityLabel[2].code), force_bytes("42CFRPart2")
)
self.assertEqual(
force_bytes(inst.securityLabel[2].system),
force_bytes("http://hl7.org/fhir/v3/ActCode"),
)
self.assertEqual(force_bytes(inst.securityLabel[3].code), force_bytes("TREAT"))
self.assertEqual(
force_bytes(inst.securityLabel[3].display), force_bytes("treatment")
)
self.assertEqual(
force_bytes(inst.securityLabel[3].system),
force_bytes("http://hl7.org/fhir/v3/ActReason"),
)
self.assertEqual(force_bytes(inst.securityLabel[4].code), force_bytes("HPAYMT"))
self.assertEqual(
force_bytes(inst.securityLabel[4].display),
force_bytes("healthcare payment"),
)
self.assertEqual(
force_bytes(inst.securityLabel[4].system),
force_bytes("http://hl7.org/fhir/v3/ActReason"),
)
self.assertEqual(
force_bytes(inst.securityLabel[5].code), force_bytes("HOPERAT")
)
self.assertEqual(
force_bytes(inst.securityLabel[5].display),
force_bytes("healthcare operations"),
)
self.assertEqual(
force_bytes(inst.securityLabel[5].system),
force_bytes("http://hl7.org/fhir/v3/ActReason"),
)
self.assertEqual(
force_bytes(inst.securityLabel[6].code), force_bytes("PERSISTLABEL")
)
self.assertEqual(
force_bytes(inst.securityLabel[6].display),
force_bytes("persist security label"),
)
self.assertEqual(
force_bytes(inst.securityLabel[6].system),
force_bytes("http://hl7.org/fhir/v3/ActCode"),
)
self.assertEqual(
force_bytes(inst.securityLabel[7].code), force_bytes("PRIVMARK")
)
self.assertEqual(
force_bytes(inst.securityLabel[7].display), force_bytes("privacy mark")
)
self.assertEqual(
force_bytes(inst.securityLabel[7].system),
force_bytes("http://hl7.org/fhir/v3/ActCode"),
)
self.assertEqual(
force_bytes(inst.securityLabel[8].code), force_bytes("NORDSCLCD")
)
self.assertEqual(
force_bytes(inst.securityLabel[8].display),
force_bytes("no redisclosure without consent directive"),
)
self.assertEqual(
force_bytes(inst.securityLabel[8].system),
force_bytes("http://hl7.org/fhir/v3/ActCode"),
)
self.assertEqual(
force_bytes(inst.signer[0].signature[0].type[0].code),
force_bytes("1.2.840.10065.1.12.1.1"),
)
self.assertEqual(
force_bytes(inst.signer[0].signature[0].type[0].system),
force_bytes("urn:iso-astm:E1762-95:2013"),
)
self.assertEqual(
inst.signer[0].signature[0].when.date,
FHIRDate("2017-02-08T10:57:34+01:00").date,
)
self.assertEqual(
inst.signer[0].signature[0].when.as_json(), "2017-02-08T10:57:34+01:00"
)
self.assertEqual(force_bytes(inst.signer[0].type.code), force_bytes("SELF"))
self.assertEqual(
force_bytes(inst.signer[0].type.system),
force_bytes("http://org.mdhhs.fhir.consent-signer-type"),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].code), force_bytes("MDHHS-5515")
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].display),
force_bytes(
"Michigan MDHHS-5515 Consent to Share Behavioral Health Information for Care Coordination Purposes"
),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].system),
force_bytes("http://hl7.org/fhir/consentcategorycodes"),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("OPTIN"))
self.assertEqual(
force_bytes(inst.type.coding[0].system),
force_bytes("http://org.mdhhs.fhir.consentdirective-type"),
)
self.assertEqual(
force_bytes(inst.type.text), force_bytes("Opt-in consent directive")
)
def testContract3(self):
inst = self.instantiate_from("pcd-example-notLabs.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract3(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract3(inst2)
def implContract3(self, inst):
self.assertEqual(
force_bytes(inst.friendly[0].contentAttachment.title),
force_bytes("The terms of the consent in friendly consumer speak."),
)
self.assertEqual(force_bytes(inst.id), force_bytes("pcd-example-notLabs"))
self.assertEqual(inst.issued.date, FHIRDate("2014-08-17").date)
self.assertEqual(inst.issued.as_json(), "2014-08-17")
self.assertEqual(
force_bytes(inst.legal[0].contentAttachment.title),
force_bytes("The terms of the consent in lawyer speak."),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].code), force_bytes("Opt-In")
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].display),
force_bytes("Default Authorization with exceptions."),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].system),
force_bytes("http://www.infoway-inforoute.ca.org/Consent-subtype-codes"),
)
self.assertEqual(
force_bytes(inst.term[0].subType.coding[0].code),
force_bytes("ProcedureRequest"),
)
self.assertEqual(
force_bytes(inst.term[0].subType.coding[0].system),
force_bytes("http://hl7.org/fhir/resource-types"),
)
self.assertEqual(
force_bytes(inst.term[0].text),
force_bytes("Withhold orders from any provider."),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].code),
force_bytes("withhold-object-type"),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].system),
force_bytes("http://example.org/fhir/consent-term-type-codes"),
)
self.assertEqual(
force_bytes(inst.term[1].subType.coding[0].code),
force_bytes("DiagnosticReport"),
)
self.assertEqual(
force_bytes(inst.term[1].subType.coding[0].system),
force_bytes("http://hl7.org/fhir/resource-types"),
)
self.assertEqual(
force_bytes(inst.term[1].text),
force_bytes("Withhold order results from any provider."),
)
self.assertEqual(
force_bytes(inst.term[1].type.coding[0].code),
force_bytes("withhold-object-type"),
)
self.assertEqual(
force_bytes(inst.term[1].type.coding[0].system),
force_bytes("http://example.org/fhir/consent-term-type-codes"),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("57016-8"))
self.assertEqual(
force_bytes(inst.type.coding[0].system), force_bytes("http://loinc.org")
)
def testContract4(self):
inst = self.instantiate_from("pcd-example-notThem.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract4(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract4(inst2)
def implContract4(self, inst):
self.assertEqual(
force_bytes(inst.friendly[0].contentAttachment.title),
force_bytes("The terms of the consent in friendly consumer speak."),
)
self.assertEqual(force_bytes(inst.id), force_bytes("pcd-example-notThem"))
self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
self.assertEqual(inst.issued.as_json(), "2015-11-18")
self.assertEqual(
force_bytes(inst.legal[0].contentAttachment.title),
force_bytes("The terms of the consent in lawyer speak."),
)
self.assertEqual(
force_bytes(inst.signer[0].signature[0].type[0].code),
force_bytes("1.2.840.10065.1.12.1.1"),
)
self.assertEqual(
force_bytes(inst.signer[0].signature[0].type[0].system),
force_bytes("urn:iso-astm:E1762-95:2013"),
)
self.assertEqual(
inst.signer[0].signature[0].when.date,
FHIRDate("2013-06-08T10:57:34-07:00").date,
)
self.assertEqual(
inst.signer[0].signature[0].when.as_json(), "2013-06-08T10:57:34-07:00"
)
self.assertEqual(force_bytes(inst.signer[0].type.code), force_bytes("COVPTY"))
self.assertEqual(
force_bytes(inst.signer[0].type.system),
force_bytes("http://www.hl7.org/fhir/contractsignertypecodes"),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].code), force_bytes("Opt-In")
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].display),
force_bytes("Default Authorization with exceptions."),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].system),
force_bytes("http://www.infoway-inforoute.ca.org/Consent-subtype-codes"),
)
self.assertEqual(
force_bytes(inst.term[0].text),
force_bytes(
"Withhold this order and any results or related objects from specified nurse provider."
),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].code), force_bytes("withhold-from")
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].display),
force_bytes("Withhold all data from specified actor entity."),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].system),
force_bytes("http://example.org/fhir/consent-term-type-codes"),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("57016-8"))
self.assertEqual(
force_bytes(inst.type.coding[0].system), force_bytes("http://loinc.org")
)
def testContract5(self):
inst = self.instantiate_from("pcd-example-notAuthor.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract5(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract5(inst2)
def implContract5(self, inst):
self.assertEqual(
force_bytes(inst.friendly[0].contentAttachment.title),
force_bytes("The terms of the consent in friendly consumer speak."),
)
self.assertEqual(force_bytes(inst.id), force_bytes("pcd-example-notAuthor"))
self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
self.assertEqual(inst.issued.as_json(), "2015-11-18")
self.assertEqual(
force_bytes(inst.legal[0].contentAttachment.title),
force_bytes("The terms of the consent in lawyer speak."),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].code), force_bytes("Opt-In")
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].display),
force_bytes("Default Authorization with exceptions."),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].system),
force_bytes("http://www.infoway-inforoute.ca.org/Consent-subtype-codes"),
)
self.assertEqual(
force_bytes(inst.term[0].text),
force_bytes("Withhold all data authored by Good Health provider."),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].code),
force_bytes("withhold-authored-by"),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].display),
force_bytes("Withhold all data authored by specified actor entity."),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].system),
force_bytes("http://example.org/fhir/consent-term-type-codes"),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("57016-8"))
self.assertEqual(
force_bytes(inst.type.coding[0].system), force_bytes("http://loinc.org")
)
def testContract6(self):
inst = self.instantiate_from("contract-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract6(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract6(inst2)
def implContract6(self, inst):
self.assertEqual(force_bytes(inst.id), force_bytes("C-123"))
self.assertEqual(
force_bytes(inst.identifier.system),
force_bytes("http://happyvalley.com/contract"),
)
self.assertEqual(force_bytes(inst.identifier.value), force_bytes("12347"))
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable rendering of the contract</div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testContract7(self):
inst = self.instantiate_from("pcd-example-notThis.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract7(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract7(inst2)
def implContract7(self, inst):
self.assertEqual(
force_bytes(inst.friendly[0].contentAttachment.title),
force_bytes("The terms of the consent in friendly consumer speak."),
)
self.assertEqual(force_bytes(inst.id), force_bytes("pcd-example-notThis"))
self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
self.assertEqual(inst.issued.as_json(), "2015-11-18")
self.assertEqual(
force_bytes(inst.legal[0].contentAttachment.title),
force_bytes("The terms of the consent in lawyer speak."),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].code), force_bytes("Opt-In")
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].display),
force_bytes("Default Authorization with exceptions."),
)
self.assertEqual(
force_bytes(inst.subType[0].coding[0].system),
force_bytes("http://www.infoway-inforoute.ca.org/Consent-subtype-codes"),
)
self.assertEqual(
force_bytes(inst.term[0].text),
force_bytes(
"Withhold this order and any results or related objects from any provider."
),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].code),
force_bytes("withhold-identified-object-and-related"),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].display),
force_bytes(
"Withhold the identified object and any other resources that are related to this object."
),
)
self.assertEqual(
force_bytes(inst.term[0].type.coding[0].system),
force_bytes("http://example.org/fhir/consent-term-type-codes"),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("57016-8"))
self.assertEqual(
force_bytes(inst.type.coding[0].system), force_bytes("http://loinc.org")
)
|
[] |
[] |
[
"FHIR_UNITTEST_DATADIR"
] |
[]
|
["FHIR_UNITTEST_DATADIR"]
|
python
| 1 | 0 | |
tasks.py
|
import os
import time
from redis import Redis
redis_conn = Redis(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'])
from rq.decorators import job
@job('default', connection=redis_conn)
def generate_report(input_param):
time.sleep(5)
return {'input_param': input_param}
@job('default', connection=redis_conn)
def download_data(input_param):
time.sleep(5)
return {'download_data': input_param}
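# Illustrative usage sketch (not part of the original file): with an RQ worker
# listening on the "default" queue, callers could enqueue these jobs roughly as
#
#     job = generate_report.delay("2024-01")
#     job.get_status()  # "queued", then "finished" once a worker has run it
#     job.result        # {'input_param': '2024-01'}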
|
[] |
[] |
[
"REDIS_PORT",
"REDIS_HOST"
] |
[]
|
["REDIS_PORT", "REDIS_HOST"]
|
python
| 2 | 0 | |
TrackingService/src/main/java/service/TrackingServiceAllInOneTest.java
|
package service;
import com.github.jasync.sql.db.Configuration;
import com.github.jasync.sql.db.Connection;
import com.github.jasync.sql.db.ConnectionPoolConfigurationBuilder;
import com.github.jasync.sql.db.QueryResult;
import com.github.jasync.sql.db.general.ArrayRowData;
import com.github.jasync.sql.db.pool.ConnectionPool;
import com.github.jasync.sql.db.pool.PoolConfiguration;
import com.github.jasync.sql.db.postgresql.PostgreSQLConnection;
import com.github.jasync.sql.db.postgresql.PostgreSQLConnectionBuilder;
import com.github.jasync.sql.db.postgresql.pool.PostgreSQLConnectionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import util.Settings;
import java.util.Arrays;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import io.javalin.Javalin;
public class TrackingServiceAllInOneTest {
private static Logger logger = LoggerFactory.getLogger(TrackingServiceAllInOneTest.class);
public static void main(String[] args) throws ExecutionException, InterruptedException {
logger.error("starting");
logger.warn("starting warn");
logger.info("starting info");
logger.debug("starting debug");
logger.trace("starting trace");
Settings settings = Settings.getSettings(System.getenv("TRACKING_SERVICE_SETTINGS_LOCATION"));
// Configuration configuration =
// new Configuration(
// "username",
// "host.com",
// 5324,
// "password",
// "schema"
// );
// PoolConfiguration poolConfiguration = new PoolConfiguration(
// 100, // maxObjects
// TimeUnit.MINUTES.toMillis(15), // maxIdle
// 10_000, // maxQueueSize
// TimeUnit.SECONDS.toMillis(30) // validationInterval
// );
ConnectionPoolConfigurationBuilder config = new ConnectionPoolConfigurationBuilder();
config.setUsername(settings.getUsername());
config.setHost(settings.getUrl());
config.setPort(Integer.parseInt(settings.getPort()));
config.setPassword(settings.getPassword());
config.setDatabase(settings.getDatabase());
config.setMaxActiveConnections(100);
config.setMaxIdleTime(TimeUnit.MINUTES.toMillis(15));
config.setMaxPendingQueries(10000);
config.setConnectionValidationInterval(TimeUnit.SECONDS.toMillis(30));
// ConnectionPool<PostgreSQLConnection> connection = new ConnectionPool<>(
// new PostgreSQLConnectionFactory(configuration), poolConfiguration);
// ConnectionPool<PostgreSQLConnection> connection = PostgreSQLConnectionBuilder.createConnectionPool(config);
// connection.connect().get();
// CompletableFuture<QueryResult> future = connection.sendPreparedStatement("select * from table limit 2");
// QueryResult queryResult = future.get();
// System.out.println(Arrays.toString(((ArrayRowData) (queryResult.getRows().get(0))).getColumns()));
// System.out.println(Arrays.toString(((ArrayRowData) (queryResult.getRows().get(1))).getColumns()));
// for PostgreSQL use PostgreSQLConnectionBuilder instead of MySQLConnectionBuilder
Connection connection = PostgreSQLConnectionBuilder.createConnectionPool(config);
Javalin app = Javalin.create()
.events(event -> {
event.serverStarting(() -> {
connection.connect().get();
logger.info("Database connection established");
});
event.serverStopping(() -> {
logger.info("Javalin stopping...");
connection.disconnect().get();
logger.info("Database connection closed");
});
})
.start(7000);
app.get("/:accountId", (ctx) -> {
String accountId = ctx.pathParam("accountId");
final CompletableFuture<QueryResult> queryResultCompletableFuture = connection.sendPreparedStatement("SELECT accountName, isActive FROM Account WHERE accountId="+accountId);
ctx.result(
queryResultCompletableFuture
// .thenApply((t) -> "got result: " + t.getRows().get(0).get(0))
.thenApply((t) -> "got result: " + Arrays.toString(((ArrayRowData) (t.getRows().get(0))).getColumns()))
);
});
}
}
|
[
"\"TRACKING_SERVICE_SETTINGS_LOCATION\""
] |
[] |
[
"TRACKING_SERVICE_SETTINGS_LOCATION"
] |
[]
|
["TRACKING_SERVICE_SETTINGS_LOCATION"]
|
java
| 1 | 0 | |
main.go
|
package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"sync/atomic"
"syscall"
"time"
)
// ***** Structs For Web Service *******
// This is a very simple checkip and geoip information service
type key int
// ***** END STRUCTS *********
const (
requestIDKey key = 0
)
var (
config Config
healthy int32
body []byte
)
func main() {
config = LoadConfiguration(os.Getenv("LINKFN_CONFIG"))
listenAddr := config.Host + ":" + config.Port
logger := log.New(os.Stdout, "http: ", log.LstdFlags)
nextRequestID := func() string {
return fmt.Sprintf("%d", time.Now().UnixNano())
}
server := &http.Server{
Addr: listenAddr,
Handler: tracing(nextRequestID)(logging(logger)(routes())),
ErrorLog: logger,
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 15 * time.Second,
}
// Listen for CTRL+C or kill and start shutting down the app without
// disconnecting people by not taking any new requests. ("Graceful Shutdown")
done := make(chan bool)
quit := make(chan os.Signal, 1)
signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
go func() {
<-quit
logger.Println("Server is shutting down...")
atomic.StoreInt32(&healthy, 0)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
server.SetKeepAlivesEnabled(false)
if err := server.Shutdown(ctx); err != nil {
logger.Fatalf("Could not gracefully shutdown the server: %v\n", err)
}
close(done)
}()
logger.Println("Server is ready to handle requests at", listenAddr)
atomic.StoreInt32(&healthy, 1)
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
logger.Fatalf("Could not listen on %s: %v\n", listenAddr, err)
}
<-done
logger.Println("Server stopped")
}
// routes -
// Setup all your routes simple mux router
// Put new handler routes here
func routes() *http.ServeMux {
router := http.NewServeMux()
router.HandleFunc("/", checkHandler)
router.HandleFunc("/health", healthHandler)
router.HandleFunc("/ping", pingHandler)
return router
}
// ****** HANDLERS HERE ********
// checkHandler -
// executes the url checking and parsing the json payload
func checkHandler(w http.ResponseWriter, r *http.Request) {
var lk Link
if r.Body == nil {
http.Error(w, "Please send a request body", 400)
return
}
temp, _ := ioutil.ReadAll(r.Body)
err := json.Unmarshal(temp, &lk)
if err != nil {
http.Error(w, err.Error(), 400)
return
}
data := runChecker(lk.EndPoint)
output, err := json.Marshal(data)
if err != nil {
http.Error(w, err.Error(), 500)
return
}
if data.StatusCode == 404 {
fmt.Println("Got a 404, this is where I'd send a email!")
// triggerMessage("Hello from LinkFN!\n"+"Link Checked: "+data.Link+"\n"+string(output), data.Link)
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.Write(output)
}
// pingHandler -
// Simple health check.
func pingHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, "{\"status\":\"pong!\"}")
}
// forceTextHandler -
// Prevent Content-Type sniffing
func forceTextHandler(w http.ResponseWriter, r *http.Request) {
// https://stackoverflow.com/questions/18337630/what-is-x-content-type-options-nosniff
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, "{\"status\":\"ok\"}")
}
// healthHandler -
// Report server status
func healthHandler(w http.ResponseWriter, r *http.Request) {
if atomic.LoadInt32(&healthy) == 0 {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintln(w, "{\"status\":\"bad\"}")
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, "{\"status\":\"ok\"}")
}
// ****** END HANDLERS HERE *******
// ****** START FUNC's ******
// logging just a simple logging handler
// this generates a basic access log entry
func logging(logger *log.Logger) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
requestID, ok := r.Context().Value(requestIDKey).(string)
if !ok {
requestID = "unknown"
}
logger.Println(requestID, r.Method, r.URL.Path, r.RemoteAddr, r.UserAgent())
}()
next.ServeHTTP(w, r)
})
}
}
// tracing for debugging an access log entry to a given request
func tracing(nextRequestID func() string) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestID := r.Header.Get("X-Request-Id")
if requestID == "" {
requestID = nextRequestID()
}
ctx := context.WithValue(r.Context(), requestIDKey, requestID)
w.Header().Set("X-Request-Id", requestID)
next.ServeHTTP(w, r.WithContext(ctx))
})
}
}
|
[
"\"LINKFN_CONFIG\""
] |
[] |
[
"LINKFN_CONFIG"
] |
[]
|
["LINKFN_CONFIG"]
|
go
| 1 | 0 | |
python/ray/worker.py
|
from contextlib import contextmanager
import colorama
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
from six.moves import queue
import sys
import threading
import time
import traceback
# Ray modules
import ray.cloudpickle as pickle
import ray.gcs_utils
import ray.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.services as services
import ray
import setproctitle
import ray.signature
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
from ray import import_thread
from ray import profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray.function_manager import FunctionActorManager
from ray.ray_logging import setup_logger
from ray.utils import _random_string, check_oversized_pickle
from ray.util.inspect import is_cython
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
SPILL_WORKER_MODE = 3
RESTORE_WORKER_MODE = 4
ERROR_KEY_PREFIX = b"Error:"
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
class Worker:
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
connected (bool): True if Ray has been started and False otherwise.
node (ray.node.Node): The node this worker is attached to.
mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
WORKER_MODE.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
"""
def __init__(self):
"""Initialize a Worker object."""
self.node = None
self.mode = None
self.cached_functions_to_run = []
self.actor_init_error = None
self.actors = {}
# When the worker is constructed. Record the original value of the
# CUDA_VISIBLE_DEVICES environment variable.
self.original_gpu_ids = ray.utils.get_cuda_visible_devices()
self.memory_monitor = memory_monitor.MemoryMonitor()
# A dictionary that maps from driver id to SerializationContext
# TODO: clean up the SerializationContext once the job finished.
self.serialization_context_map = {}
self.function_actor_manager = FunctionActorManager(self)
# This event is checked regularly by all of the threads so that they
# know when to exit.
self.threads_stopped = threading.Event()
# Index of the current session. This number will
# increment every time when `ray.shutdown` is called.
self._session_index = 0
@property
def connected(self):
return self.node is not None
@property
def node_ip_address(self):
self.check_connected()
return self.node.node_ip_address
@property
def load_code_from_local(self):
self.check_connected()
return self.node.load_code_from_local
@property
def current_job_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_current_job_id()
return JobID.nil()
@property
def actor_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_actor_id()
return ActorID.nil()
@property
def current_task_id(self):
return self.core_worker.get_current_task_id()
@property
def current_node_id(self):
return self.core_worker.get_current_node_id()
@property
def placement_group_id(self):
return self.core_worker.get_placement_group_id()
@property
def should_capture_child_tasks_in_placement_group(self):
return self.core_worker.should_capture_child_tasks_in_placement_group()
@property
def current_session_and_job(self):
"""Get the current session index and job id as pair."""
assert isinstance(self._session_index, int)
assert isinstance(self.current_job_id, ray.JobID)
return self._session_index, self.current_job_id
def mark_actor_init_failed(self, error):
"""Called to mark this actor as failed during initialization."""
self.actor_init_error = error
def reraise_actor_init_error(self):
"""Raises any previous actor initialization error."""
if self.actor_init_error is not None:
raise self.actor_init_error
def get_serialization_context(self, job_id=None):
"""Get the SerializationContext of the job that this worker is processing.
Args:
job_id: The ID of the job that indicates which job to get
the serialization context for.
Returns:
The serialization context of the given job.
"""
# This function needs to be protected by a lock, because it will be
        # called by `register_class_for_serialization`, as well as the import
# thread, from different threads. Also, this function will recursively
# call itself, so we use RLock here.
if job_id is None:
job_id = self.current_job_id
with self.lock:
if job_id not in self.serialization_context_map:
self.serialization_context_map[
job_id] = serialization.SerializationContext(self)
return self.serialization_context_map[job_id]
def check_connected(self):
"""Check if the worker is connected.
Raises:
Exception: An exception is raised if the worker is not connected.
"""
if not self.connected:
raise RaySystemError("Ray has not been started yet. You can "
"start Ray with 'ray.init()'.")
def set_mode(self, mode):
"""Set the mode of the worker.
The mode SCRIPT_MODE should be used if this Worker is a driver that is
being run as a Python script or interactively in a shell. It will print
information about task failures.
The mode WORKER_MODE should be used if this Worker is not a driver. It
will not print information about tasks.
The mode LOCAL_MODE should be used if this Worker is a driver and if
you want to run the driver in a manner equivalent to serial Python for
debugging purposes. It will not send remote function calls to the
scheduler and will instead execute them in a blocking fashion.
Args:
mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
"""
self.mode = mode
def put_object(self, value, object_ref=None, pin_object=True):
"""Put value in the local object store with object reference `object_ref`.
This assumes that the value for `object_ref` has not yet been placed in
the local object store. If the plasma store is full, the worker will
automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
retry will delay for an exponentially doubling amount of time,
        starting with DEFAULT_PUT_OBJECT_DELAY. After this, an exception
will be raised.
Args:
value: The value to put in the object store.
object_ref (ObjectRef): The object ref of the value to be
put. If None, one will be generated.
pin_object: If set, the object will be pinned at the raylet.
Returns:
ObjectRef: The object ref the object was put under.
Raises:
ray.exceptions.ObjectStoreFullError: This is raised if the attempt
to store the object fails because the object store is full even
after multiple retries.
"""
# Make sure that the value is not an object ref.
if isinstance(value, ObjectRef):
raise TypeError(
"Calling 'put' on an ray.ObjectRef is not allowed "
"(similarly, returning an ray.ObjectRef from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ray.ObjectRef in a list and "
"call 'put' on it (or return it).")
if self.mode == LOCAL_MODE:
assert object_ref is None, ("Local Mode does not support "
"inserting with an ObjectRef")
serialized_value = self.get_serialization_context().serialize(value)
# This *must* be the first place that we construct this python
# ObjectRef because an entry with 0 local references is created when
# the object is Put() in the core worker, expecting that this python
# reference will be created. If another reference is created and
# removed before this one, it will corrupt the state in the
# reference counter.
return ray.ObjectRef(
self.core_worker.put_serialized_object(
serialized_value, object_ref=object_ref,
pin_object=pin_object))
def deserialize_objects(self, data_metadata_pairs, object_refs):
context = self.get_serialization_context()
return context.deserialize_objects(data_metadata_pairs, object_refs)
def get_objects(self, object_refs, timeout=None):
"""Get the values in the object store associated with the IDs.
Return the values from the local object store for object_refs. This
will block until all the values for object_refs have been written to
the local object store.
Args:
object_refs (List[object_ref.ObjectRef]): A list of the object refs
whose values should be retrieved.
            timeout (float): The maximum amount of time in
seconds to wait before returning.
"""
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError(
f"Attempting to call `get` on the value {object_ref}, "
"which is not an ray.ObjectRef.")
timeout_ms = int(timeout * 1000) if timeout else -1
data_metadata_pairs = self.core_worker.get_objects(
object_refs, self.current_task_id, timeout_ms)
return self.deserialize_objects(data_metadata_pairs, object_refs)
def run_function_on_all_workers(self, function,
run_on_other_drivers=False):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
run_on_other_drivers: The boolean that indicates whether we want to
run this function on other drivers. One case is we may need to
share objects across drivers.
"""
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.sha1(pickled_function).digest()
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
if not function_exported:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_pickle(pickled_function, function.__name__,
"function", self)
# Run the function on all workers.
self.redis_client.hset(
key,
mapping={
"job_id": self.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
"run_on_other_drivers": str(run_on_other_drivers),
})
self.redis_client.rpush("Exports", key)
# TODO(rkn): If the worker fails after it calls setnx and before it
# successfully completes the hset and rpush, then the program will
# most likely hang. This could be fixed by making these three
# operations into a transaction (or by implementing a custom
# command that does all three things).
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks."""
def sigterm_handler(signum, frame):
shutdown(True)
sys.exit(1)
ray.utils.set_sigterm_handler(sigterm_handler)
self.core_worker.run_task_loop()
sys.exit(0)
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
"""
worker = global_worker
worker.check_connected()
# TODO(ilr) Handle inserting resources in local mode
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = []
for resource, assignment in all_resource_ids.items():
# Handle both normal and placement group GPU resources.
if resource == "GPU" or resource.startswith("GPU_group_"):
for resource_id, _ in assignment:
assigned_ids.append(resource_id)
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
# Give all GPUs in local_mode.
if global_worker.mode == LOCAL_MODE:
max_gpus = global_worker.node.get_resource_spec().num_gpus
assigned_ids = global_worker.original_gpu_ids[:max_gpus]
return assigned_ids
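# Illustrative sketch, not part of the original module: a task that reserves a
# GPU can read its assigned device IDs like this (the function name is assumed).
#
#   @ray.remote(num_gpus=1)
#   def train_step():
#       return ray.get_gpu_ids()
#
#   ray.get(train_step.remote())   # e.g. [0]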
def get_resource_ids():
"""Get the IDs of the resources that are available to the worker.
Returns:
A dictionary mapping the name of a resource to a list of pairs, where
each pair consists of the ID of a resource and the fraction of that
resource reserved for this worker.
"""
worker = global_worker
worker.check_connected()
if _mode() == LOCAL_MODE:
raise RuntimeError("ray.get_resource_ids() currently does not work in "
"local_mode.")
return global_worker.core_worker.resource_ids()
def get_dashboard_url():
"""Get the URL to access the Ray dashboard.
Note that the URL does not specify which node the dashboard is on.
Returns:
The URL of the dashboard as a string.
"""
worker = global_worker
worker.check_connected()
return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
def init(
address=None,
*,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
local_mode=False,
ignore_reinit_error=False,
include_dashboard=None,
dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
job_config=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
log_to_driver=True,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction=False,
_redis_max_memory=None,
_plasma_directory=None,
_node_ip_address=ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory=None,
_memory=None,
_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
_java_worker_options=None,
_code_search_path=None,
_temp_dir=None,
_load_code_from_local=False,
_lru_evict=False,
_metrics_export_port=None,
_system_config=None):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init().
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
            tells the driver to detect the cluster, removing the need to
specify a specific node address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port: The port to bind the dashboard server to. Defaults to
8265.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Limit the amount of memory the
driver can use in the object store for creating objects.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_load_code_from_local: Whether code should be loaded from a local
module or from the GCS.
_java_worker_options: Overwrite the options to start Java workers.
_code_search_path (list): Java classpath or python import path.
_lru_evict (bool): If True, when an object store is full, it will evict
objects in LRU order to make more space and when under memory
pressure, ray.ObjectLostError may be thrown. If False, then
reference counting will be used to decide which objects are safe
to evict and when under memory pressure, ray.ObjectStoreFullError
may be thrown.
_metrics_export_port(int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
logger.debug("Automatically increasing RLIMIT_NOFILE to max "
"value of {}".format(hard))
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft))
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if "RAY_ADDRESS" in os.environ:
if address is None or address == "auto":
address = os.environ["RAY_ADDRESS"]
else:
raise RuntimeError(
"Cannot use both the RAY_ADDRESS environment variable and "
"the address argument of ray.init simultaneously. If you "
"use RAY_ADDRESS to connect to a specific Ray cluster, "
"please call ray.init() or ray.init(address=\"auto\") on the "
"driver.")
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(
f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info(
"Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
load_code_from_local=_load_code_from_local,
java_worker_options=_java_worker_options,
code_search_path=_code_search_path,
start_initial_python_workers_for_first_job=True,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if _system_config is not None and len(_system_config) != 0:
raise ValueError("When connecting to an existing cluster, "
"_system_config must not be provided.")
if _lru_evict:
raise ValueError("When connecting to an existing cluster, "
"_lru_evict must not be provided.")
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
load_code_from_local=_load_code_from_local,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
job_config=job_config)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
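# Illustrative sketch, not part of the original module: as described in the
# docstring above, the RAY_ADDRESS environment variable can stand in for the
# address argument (the address value below is made up).
#
#   os.environ["RAY_ADDRESS"] = "123.45.67.89:6379"
#   ray.init()                   # picks up RAY_ADDRESS and connects there
#   # or equivalently: ray.init(address="auto")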
# Functions to run as callback after a successful ray init.
_post_init_hooks = []
def shutdown(_exiting_interpreter=False):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to cleanup state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
Args:
_exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages.
"""
if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
# This is a duration to sleep before shutting down everything in order
# to make sure that log messages finish printing.
time.sleep(0.5)
disconnect(_exiting_interpreter)
# We need to destruct the core worker here because after this function,
# we will tear down any processes spawned by ray.init() and the background
# IO thread in the core worker doesn't currently handle that gracefully.
if hasattr(global_worker, "core_worker"):
del global_worker.core_worker
# Disconnect global state from GCS.
ray.state.state.disconnect()
# Shut down the Ray processes.
global _global_node
if _global_node is not None:
_global_node.kill_all_processes(check_alive=False, allow_graceful=True)
_global_node = None
# TODO(rkn): Instead of manually resetting some of the worker fields, we
# should simply set "global_worker" to equal "None" or something like that.
global_worker.set_mode(None)
atexit.register(shutdown, True)
# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
sys.exit(signum)
try:
ray.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
logger.warning("Failed to set SIGTERM handler, processes might"
"not be cleaned up properly on exit.")
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to GCS worker table.
if global_worker.mode == SCRIPT_MODE:
error_message = "".join(traceback.format_tb(tb))
worker_id = global_worker.worker_id
worker_type = ray.gcs_utils.DRIVER
worker_info = {"exception": error_message}
ray.state.state.add_worker(worker_id, worker_type, worker_info)
# Call the normal excepthook.
normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
# The last time we raised a TaskError in this process. We use this value to
# suppress redundant error messages pushed from the workers.
last_task_error_raise_time = 0
# The max amount of seconds to wait before printing out an uncaught error.
UNCAUGHT_ERROR_GRACE_PERIOD = 5
def print_logs(redis_client, threads_stopped, job_id):
"""Prints log messages from workers on all of the nodes.
Args:
redis_client: A client to the primary Redis shard.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
job_id (JobID): The id of the driver's job
"""
pubsub_client = redis_client.pubsub(ignore_subscribe_messages=True)
pubsub_client.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL)
localhost = services.get_node_ip_address()
try:
# Keep track of the number of consecutive log messages that have been
# received with no break in between. If this number grows continually,
# then the worker is probably not able to process the log messages as
# rapidly as they are coming in.
num_consecutive_messages_received = 0
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = pubsub_client.get_message()
if msg is None:
num_consecutive_messages_received = 0
threads_stopped.wait(timeout=0.01)
continue
num_consecutive_messages_received += 1
if (num_consecutive_messages_received % 100 == 0
and num_consecutive_messages_received > 0):
logger.warning(
"The driver may not be able to keep up with the "
"stdout/stderr of the workers. To avoid forwarding logs "
"to the driver, use 'ray.init(log_to_driver=False)'.")
data = json.loads(ray.utils.decode(msg["data"]))
# Don't show logs from other drivers.
if data["job"] and ray.utils.binary_to_hex(
job_id.binary()) != data["job"]:
continue
print_file = sys.stderr if data["is_err"] else sys.stdout
def color_for(data):
if data["pid"] == "raylet":
return colorama.Fore.YELLOW
else:
return colorama.Fore.CYAN
if data["ip"] == localhost:
for line in data["lines"]:
print(
"{}{}(pid={}){} {}".format(
colorama.Style.DIM, color_for(data), data["pid"],
colorama.Style.RESET_ALL, line),
file=print_file)
else:
for line in data["lines"]:
print(
"{}{}(pid={}, ip={}){} {}".format(
colorama.Style.DIM, color_for(data), data["pid"],
data["ip"], colorama.Style.RESET_ALL, line),
file=print_file)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"print_logs: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
pubsub_client.close()
def print_error_messages_raylet(task_error_queue, threads_stopped):
"""Prints message received in the given output queue.
This checks periodically if any un-raised errors occurred in the
background.
Args:
task_error_queue (queue.Queue): A queue used to receive errors from the
thread that listens to Redis.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
try:
error, t = task_error_queue.get(block=False)
except queue.Empty:
threads_stopped.wait(timeout=0.01)
continue
# Delay errors a little bit of time to attempt to suppress redundant
# messages originating from the worker.
while t + UNCAUGHT_ERROR_GRACE_PERIOD > time.time():
threads_stopped.wait(timeout=1)
if threads_stopped.is_set():
break
if t < last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD:
logger.debug(f"Suppressing error from worker: {error}")
else:
logger.error(f"Possible unhandled error from worker: {error}")
def listen_error_messages_raylet(worker, task_error_queue, threads_stopped):
"""Listen to error messages in the background on the driver.
This runs in a separate thread on the driver and pushes (error, time)
tuples to the output queue.
Args:
worker: The worker class that this thread belongs to.
task_error_queue (queue.Queue): A queue used to communicate with the
thread that prints the errors found by this thread.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
worker.error_message_pubsub_client = worker.redis_client.pubsub(
ignore_subscribe_messages=True)
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
# Really we should just subscribe to the errors for this specific job.
# However, currently all errors seem to be published on the same channel.
error_pubsub_channel = ray.gcs_utils.RAY_ERROR_PUBSUB_PATTERN
worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
try:
# Get the errors that occurred before the call to subscribe.
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = worker.error_message_pubsub_client.get_message()
if msg is None:
threads_stopped.wait(timeout=0.01)
continue
pubsub_msg = ray.gcs_utils.PubSubMessage.FromString(msg["data"])
error_data = ray.gcs_utils.ErrorTableData.FromString(
pubsub_msg.data)
job_id = error_data.job_id
if job_id not in [
worker.current_job_id.binary(),
JobID.nil().binary(),
]:
continue
error_message = error_data.error_message
if (error_data.type == ray_constants.TASK_PUSH_ERROR):
# Delay it a bit to see if we can suppress it
task_error_queue.put((error_message, time.time()))
else:
logger.warning(error_message)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"listen_error_messages_raylet: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
worker.error_message_pubsub_client.close()
def is_initialized():
"""Check if ray.init has been called yet.
Returns:
True if ray.init has already been called and false otherwise.
"""
return ray.worker.global_worker.connected
def connect(node,
mode=WORKER_MODE,
log_to_driver=False,
worker=global_worker,
driver_object_store_memory=None,
job_id=None,
job_config=None):
"""Connect this worker to the raylet, to Plasma, and to Redis.
Args:
node (ray.node.Node): The node to connect.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
LOCAL_MODE.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
worker: The ray.Worker instance.
driver_object_store_memory: Limit the amount of memory the driver can
use in the object store when creating objects.
job_id: The ID of job. If it's None, then we will generate one.
job_config (ray.job_config.JobConfig): The job configuration.
"""
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
# Enable nice stack traces on SIGSEGV etc.
try:
if not faulthandler.is_enabled():
faulthandler.enable(all_threads=False)
except io.UnsupportedOperation:
pass # ignore
# Create a Redis client to primary.
# The Redis client can safely be shared between threads. However,
# that is not true of Redis pubsub clients. See the documentation at
# https://github.com/andymccurdy/redis-py#thread-safety.
worker.redis_client = node.create_redis_client()
# Initialize some fields.
if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
# We should not specify the job_id if it's `WORKER_MODE`.
assert job_id is None
job_id = JobID.nil()
# TODO(qwang): Rename this to `worker_id_str` or type to `WorkerID`
worker.worker_id = _random_string()
else:
# This is the code path of driver mode.
if job_id is None:
# TODO(qwang): use `GcsClient::GenerateJobId()` here.
job_id = JobID.from_int(
int(worker.redis_client.incr("JobCounter")))
# When tasks are executed on remote workers in the context of multiple
# drivers, the current job ID is used to keep track of which job is
# responsible for the task so that error messages will be propagated to
# the correct driver.
worker.worker_id = ray.utils.compute_driver_id_from_job(
job_id).binary()
if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
if mode is SPILL_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
elif mode is RESTORE_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
setproctitle.setproctitle(process_name)
if not isinstance(job_id, JobID):
raise TypeError("The type of given job id must be JobID.")
# All workers start out as non-actors. A worker can be turned into an actor
# after it is created.
worker.node = node
worker.set_mode(mode)
# For driver's check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray._private.services.check_version_info(worker.redis_client)
except Exception as e:
if mode == SCRIPT_MODE:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray.utils.push_error_to_driver_through_redis(
worker.redis_client,
ray_constants.VERSION_MISMATCH_PUSH_ERROR,
traceback_str,
job_id=None)
worker.lock = threading.RLock()
driver_name = ""
log_stdout_file_path = ""
log_stderr_file_path = ""
if mode == SCRIPT_MODE:
import __main__ as main
driver_name = (main.__file__
if hasattr(main, "__file__") else "INTERACTIVE MODE")
elif not LOCAL_MODE:
raise ValueError(
"Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
redis_address, redis_port = node.redis_address.split(":")
gcs_options = ray._raylet.GcsClientOptions(
redis_address,
int(redis_port),
node.redis_password,
)
if job_config is None:
job_config = ray.job_config.JobConfig()
serialized_job_config = job_config.serialize()
worker.core_worker = ray._raylet.CoreWorker(
mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
gcs_options, node.get_logs_dir_path(), node.node_ip_address,
node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
driver_name, log_stdout_file_path, log_stderr_file_path,
serialized_job_config, node.metrics_agent_port)
# Create an object for interfacing with the global state.
    # Note, global state should be initialized after `CoreWorker`, because it
    # will use glog, which is initialized in `CoreWorker`.
ray.state.state._initialize_global_state(
node.redis_address, redis_password=node.redis_password)
if driver_object_store_memory is not None:
worker.core_worker.set_object_store_client_options(
f"ray_driver_{os.getpid()}", driver_object_store_memory)
# Start the import thread
worker.import_thread = import_thread.ImportThread(worker, mode,
worker.threads_stopped)
worker.import_thread.start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
# trying to properly shutdown the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
q = queue.Queue()
worker.listener_thread = threading.Thread(
target=listen_error_messages_raylet,
name="ray_listen_error_messages",
args=(worker, q, worker.threads_stopped))
worker.printer_thread = threading.Thread(
target=print_error_messages_raylet,
name="ray_print_error_messages",
args=(q, worker.threads_stopped))
worker.listener_thread.daemon = True
worker.listener_thread.start()
worker.printer_thread.daemon = True
worker.printer_thread.start()
if log_to_driver:
worker.logger_thread = threading.Thread(
target=print_logs,
name="ray_print_logs",
args=(worker.redis_client, worker.threads_stopped, job_id))
worker.logger_thread.daemon = True
worker.logger_thread.start()
if mode == SCRIPT_MODE:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the clusters
# are the same.
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory))
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory))
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
worker.cached_functions_to_run = None
def disconnect(exiting_interpreter=False):
"""Disconnect this worker from the raylet and object store."""
# Reset the list of cached remote functions and actors so that if more
# remote functions or actors are defined and then connect is called again,
# the remote functions will be exported. This is mostly relevant for the
# tests.
worker = global_worker
if worker.connected:
# Shutdown all of the threads that we've started. TODO(rkn): This
# should be handled cleanly in the worker object's destructor and not
# in this disconnect method.
worker.threads_stopped.set()
if hasattr(worker, "import_thread"):
worker.import_thread.join_import_thread()
if hasattr(worker, "listener_thread"):
worker.listener_thread.join()
if hasattr(worker, "printer_thread"):
worker.printer_thread.join()
if hasattr(worker, "logger_thread"):
worker.logger_thread.join()
worker.threads_stopped.clear()
worker._session_index += 1
worker.node = None # Disconnect the worker from the node.
worker.cached_functions_to_run = []
worker.serialization_context_map.clear()
try:
ray_actor = ray.actor
except AttributeError:
ray_actor = None # This can occur during program termination
if ray_actor is not None:
ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(title)
try:
yield
finally:
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(next_title)
def show_in_dashboard(message, key="", dtype="text"):
"""Display message in dashboard.
Display message for the current task or actor in the dashboard.
For example, this can be used to display the status of a long-running
computation.
Args:
message (str): Message to be displayed.
key (str): The key name for the message. Multiple message under
different keys will be displayed at the same time. Messages
under the same key will be overridden.
        dtype (str): The type of message for rendering. One of the
following: text, html.
"""
worker = global_worker
worker.check_connected()
acceptable_dtypes = {"text", "html"}
assert dtype in acceptable_dtypes, (
f"dtype accepts only: {acceptable_dtypes}")
message_wrapped = {"message": message, "dtype": dtype}
message_encoded = json.dumps(message_wrapped).encode()
worker.core_worker.set_webui_display(key.encode(), message_encoded)
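# Illustrative sketch, not part of the original module: a long-running actor
# method could publish its progress to the dashboard (class and key names are
# assumptions, and ray.show_in_dashboard is assumed to be the exported name).
#
#   @ray.remote
#   class Trainer:
#       def train(self, epochs):
#           for epoch in range(epochs):
#               ray.show_in_dashboard(f"epoch {epoch}/{epochs}", key="progress")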
# Global variable to make sure we only send out the warning once.
blocking_get_inside_async_warned = False
def get(object_refs, *, timeout=None):
"""Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ref is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_refs is a list, then the objects
corresponding to each object in the list will be returned.
This method will issue a warning if it's running inside async context,
you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
a list of object refs, you can use ``await asyncio.gather(*object_refs)``.
Args:
object_refs: Object ref of the object to get or a list of object refs
to get.
timeout (Optional[float]): The maximum amount of time in seconds to
wait before returning.
Returns:
A Python object or a list of Python objects.
Raises:
GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
the get takes longer than timeout to return.
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception.
"""
worker = global_worker
worker.check_connected()
if hasattr(
worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio():
global blocking_get_inside_async_warned
if not blocking_get_inside_async_warned:
logger.warning("Using blocking ray.get inside async actor. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.gather if you want to "
"yield execution to the event loop instead.")
blocking_get_inside_async_warned = True
with profiling.profile("ray.get"):
is_individual_id = isinstance(object_refs, ray.ObjectRef)
if is_individual_id:
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise ValueError("'object_refs' must either be an object ref "
"or a list of object refs.")
global last_task_error_raise_time
# TODO(ujvl): Consider how to allow user to retrieve the ready objects.
values = worker.get_objects(object_refs, timeout=timeout)
for i, value in enumerate(values):
if isinstance(value, RayError):
last_task_error_raise_time = time.time()
if isinstance(value, ray.exceptions.ObjectLostError):
worker.core_worker.dump_object_store_memory_usage()
if isinstance(value, RayTaskError):
raise value.as_instanceof_cause()
else:
raise value
if is_individual_id:
values = values[0]
return values
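# Illustrative sketch, not part of the original module: blocking on a remote
# result, with an optional timeout (the task function is an assumption).
#
#   @ray.remote
#   def square(x):
#       return x * x
#
#   ref = square.remote(4)
#   ray.get(ref)               # -> 16, blocks until the task has finished
#   ray.get(ref, timeout=10)   # GetTimeoutError if not ready within 10 seconds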
def put(value):
"""Store an object in the object store.
The object may not be evicted while a reference to the returned ID exists.
Args:
value: The Python object to be stored.
Returns:
The object ref assigned to this value.
"""
worker = global_worker
worker.check_connected()
with profiling.profile("ray.put"):
try:
object_ref = worker.put_object(value, pin_object=True)
except ObjectStoreFullError:
logger.info(
"Put failed since the value was either too large or the "
"store was full of pinned objects.")
raise
return object_ref
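# Illustrative sketch, not part of the original module: putting a large object
# once and passing the reference to many tasks avoids re-serializing it per
# call (`consume` is an assumed remote function).
#
#   big_list = list(range(10**6))
#   ref = ray.put(big_list)
#   results = ray.get([consume.remote(ref) for _ in range(8)])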
# Global variable to make sure we only send out the warning once.
blocking_wait_inside_async_warned = False
def wait(object_refs, *, num_returns=1, timeout=None):
"""Return a list of IDs that are ready and a list of IDs that are not.
If timeout is set, the function returns either when the requested number of
IDs are ready or when the timeout is reached, whichever occurs first. If it
is not set, the function simply waits until that number of objects is ready
and returns that exact number of object refs.
This method returns two lists. The first list consists of object refs that
correspond to objects that are available in the object store. The second
list corresponds to the rest of the object refs (which may or may not be
ready).
Ordering of the input list of object refs is preserved. That is, if A
precedes B in the input list, and both are in the ready list, then A will
precede B in the ready list. This also holds true if A and B are both in
the remaining list.
This method will issue a warning if it's running inside an async context.
Instead of ``ray.wait(object_refs)``, you can use
``await asyncio.wait(object_refs)``.
Args:
object_refs (List[ObjectRef]): List of object refs for objects that may
or may not be ready. Note that these IDs must be unique.
num_returns (int): The number of object refs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
Returns:
A list of object refs that are ready and a list of the remaining object
IDs.
"""
worker = global_worker
worker.check_connected()
if hasattr(worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio(
) and timeout != 0:
global blocking_wait_inside_async_warned
if not blocking_wait_inside_async_warned:
logger.debug("Using blocking ray.wait inside async method. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.wait. ")
blocking_wait_inside_async_warned = True
if isinstance(object_refs, ObjectRef):
raise TypeError(
"wait() expected a list of ray.ObjectRef, got a single "
"ray.ObjectRef")
if not isinstance(object_refs, list):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got {type(object_refs)}")
if timeout is not None and timeout < 0:
raise ValueError("The 'timeout' argument must be nonnegative. "
f"Received {timeout}")
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got list containing {type(object_ref)}")
worker.check_connected()
# TODO(swang): Check main thread.
with profiling.profile("ray.wait"):
# TODO(rkn): This is a temporary workaround for
# https://github.com/ray-project/ray/issues/997. However, it should be
# fixed in Arrow instead of here.
if len(object_refs) == 0:
return [], []
if len(object_refs) != len(set(object_refs)):
raise ValueError("Wait requires a list of unique object refs.")
if num_returns <= 0:
raise ValueError(
"Invalid number of objects to return %d." % num_returns)
if num_returns > len(object_refs):
raise ValueError("num_returns cannot be greater than the number "
"of objects provided to ray.wait.")
timeout = timeout if timeout is not None else 10**6
timeout_milliseconds = int(timeout * 1000)
ready_ids, remaining_ids = worker.core_worker.wait(
object_refs,
num_returns,
timeout_milliseconds,
worker.current_task_id,
)
return ready_ids, remaining_ids
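# Illustrative usage sketch (not part of the original module; `slow_task` is a
# hypothetical @ray.remote function) -- waiting for the first two results:
#
#   refs = [slow_task.remote(i) for i in range(5)]
#   ready, not_ready = ray.wait(refs, num_returns=2, timeout=10.0)
#   results = ray.get(ready)   # fetch only the objects that are already ready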
def get_actor(name):
"""Get a handle to a detached actor.
Gets a handle to a detached actor with the given name. The actor must
have been created with Actor.options(name="name").remote().
Returns:
ActorHandle to the actor.
Raises:
ValueError if the named actor does not exist.
"""
if not name:
raise ValueError("Please supply a non-empty value to get_actor")
worker = global_worker
worker.check_connected()
handle = worker.core_worker.get_named_actor_handle(name)
return handle
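# Illustrative usage sketch (not part of the original module; `Counter` is a
# hypothetical actor class) -- looking up a named detached actor:
#
#   Counter.options(name="global_counter").remote()   # created in one process
#   counter = ray.get_actor("global_counter")         # retrieved in another
#   ray.get(counter.increment.remote())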
def kill(actor, *, no_restart=True):
"""Kill an actor forcefully.
This will interrupt any running tasks on the actor, causing them to fail
immediately. Any atexit handlers installed in the actor will still be run.
If you want to kill the actor but let pending tasks finish,
you can call ``actor.__ray_terminate__.remote()`` instead to queue a
termination task.
If the actor is a detached actor, subsequent calls to get its handle via
ray.get_actor will fail.
Args:
actor (ActorHandle): Handle to the actor to kill.
no_restart (bool): Whether or not this actor should be restarted if
it's a restartable actor.
"""
worker = global_worker
worker.check_connected()
if not isinstance(actor, ray.actor.ActorHandle):
raise ValueError("ray.kill() only supported for actors. "
"Got: {}.".format(type(actor)))
worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
def cancel(object_ref, *, force=False, recursive=True):
"""Cancels a task according to the following conditions.
If the specified task is pending execution, it will not be executed. If
the task is currently executing, the behavior depends on the ``force``
flag. When ``force=False``, a KeyboardInterrupt will be raised in Python
and when ``force=True``, the executing task will immediately exit. If
the task is already finished, nothing will happen.
Only non-actor tasks can be canceled. Canceled tasks will not be
retried (max_retries will not be respected).
Calling ray.get on a canceled task will raise a TaskCancelledError or a
WorkerCrashedError if ``force=True``.
Args:
object_ref (ObjectRef): ObjectRef returned by the task
that should be canceled.
force (boolean): Whether to force-kill a running task by killing
the worker that is running the task.
recursive (boolean): Whether to try to cancel tasks submitted by the
task specified.
Raises:
TypeError: This is also raised for actor tasks.
"""
worker = ray.worker.global_worker
worker.check_connected()
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"ray.cancel() only supported for non-actor object refs. "
f"Got: {type(object_ref)}.")
return worker.core_worker.cancel_task(object_ref, force, recursive)
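# Illustrative usage sketch (not part of the original module; `long_running_task`
# is a hypothetical @ray.remote function) -- cancelling and observing the error:
#
#   ref = long_running_task.remote()
#   ray.cancel(ref)                    # force=False raises KeyboardInterrupt in the task
#   try:
#       ray.get(ref)
#   except ray.exceptions.TaskCancelledError:
#       pass                           # expected once the task has been cancelled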
def _mode(worker=global_worker):
"""This is a wrapper around worker.mode.
We use this wrapper so that in the remote decorator, we can call _mode()
instead of worker.mode. The difference is that when we attempt to
serialize remote functions, we don't attempt to serialize the worker
object, which cannot be serialized.
"""
return worker.mode
def make_decorator(num_returns=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None,
accelerator_type=None,
max_calls=None,
max_retries=None,
max_restarts=None,
max_task_retries=None,
worker=None):
def decorator(function_or_class):
if (inspect.isfunction(function_or_class)
or is_cython(function_or_class)):
# Set the remote function default resources.
if max_restarts is not None:
raise ValueError("The keyword 'max_restarts' is not "
"allowed for remote functions.")
if max_task_retries is not None:
raise ValueError("The keyword 'max_task_retries' is not "
"allowed for remote functions.")
if num_returns is not None and (not isinstance(num_returns, int)
or num_returns < 0):
raise ValueError(
"The keyword 'num_returns' only accepts 0 or a"
" positive integer")
if max_retries is not None and (not isinstance(max_retries, int)
or max_retries < -1):
raise ValueError(
"The keyword 'max_retries' only accepts 0, -1 or a"
" positive integer")
if max_calls is not None and (not isinstance(max_calls, int)
or max_calls < 0):
raise ValueError(
"The keyword 'max_calls' only accepts 0 or a positive"
" integer")
return ray.remote_function.RemoteFunction(
Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
memory, object_store_memory, resources, accelerator_type,
num_returns, max_calls, max_retries)
if inspect.isclass(function_or_class):
if num_returns is not None:
raise TypeError("The keyword 'num_returns' is not "
"allowed for actors.")
if max_calls is not None:
raise TypeError("The keyword 'max_calls' is not "
"allowed for actors.")
if max_restarts is not None and (not isinstance(max_restarts, int)
or max_restarts < -1):
raise ValueError(
"The keyword 'max_restarts' only accepts -1, 0 or a"
" positive integer")
if max_task_retries is not None and (not isinstance(
max_task_retries, int) or max_task_retries < -1):
raise ValueError(
"The keyword 'max_task_retries' only accepts -1, 0 or a"
" positive integer")
return ray.actor.make_actor(function_or_class, num_cpus, num_gpus,
memory, object_store_memory, resources,
accelerator_type, max_restarts,
max_task_retries)
raise TypeError("The @ray.remote decorator must be applied to "
"either a function or to a class.")
return decorator
def remote(*args, **kwargs):
"""Defines a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo:
def method(self):
return 1
It can also be used with specific keyword arguments as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Remote task and actor objects returned by @ray.remote can also be
dynamically modified with the same arguments as above using
``.options()`` as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
g = f.options(num_gpus=2, max_calls=None)
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Bar = Foo.options(num_cpus=1, resources=None)
Running remote actors will be terminated when the actor handle to them
in Python is deleted, which will cause them to complete any outstanding
work and then shut down. If you want to kill them immediately, you can
also call ``ray.kill(actor)``.
Args:
num_returns (int): This is only for *remote functions*. It specifies
the number of object refs returned by
the remote function invocation.
num_cpus (float): The quantity of CPU cores to reserve
for this task or for the lifetime of the actor.
num_gpus (int): The quantity of GPUs to reserve
for this task or for the lifetime of the actor.
resources (Dict[str, float]): The quantity of various custom resources
to reserve for this task or for the lifetime of the actor.
This is a dictionary mapping strings (resource names) to floats.
accelerator_type: If specified, requires that the task or actor run
on a node with the specified type of accelerator.
See `ray.accelerators` for accelerator types.
max_calls (int): Only for *remote functions*. This specifies the
maximum number of times that a given worker can execute
the given remote function before it must exit
(this can be used to address memory leaks in third-party
libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow).
By default this is infinite.
max_restarts (int): Only for *actors*. This specifies the maximum
number of times that the actor should be restarted when it dies
unexpectedly. The minimum valid value is 0 (default),
which indicates that the actor doesn't need to be restarted.
A value of -1 indicates that an actor should be restarted
indefinitely.
max_task_retries (int): Only for *actors*. How many times to
retry an actor task if the task fails due to a system error,
e.g., the actor has died. If set to -1, the system will
retry the failed task until the task succeeds, or the actor
has reached its max_restarts limit. If set to `n > 0`, the
system will retry the failed task up to n times, after which the
task will throw a `RayActorError` exception upon :obj:`ray.get`.
Note that Python exceptions are not considered system errors
and will not trigger retries.
max_retries (int): Only for *remote functions*. This specifies
the maximum number of times that the remote function
should be rerun when the worker process executing it
crashes unexpectedly. The minimum valid value is 0,
the default is 4, and a value of -1 indicates
infinite retries.
override_environment_variables (Dict[str, str]): This specifies
environment variables to override for the actor or task. The
overrides are propagated to all child actors and tasks. This
is a dictionary mapping variable names to their values. Existing
variables can be overridden, new ones can be created, and an
existing variable can be unset by setting it to an empty string.
streamlit_script_path (str): Only for *actors*. The path to a
Streamlit script to run for this actor.
"""
worker = global_worker
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
"the arguments 'num_returns', 'num_cpus', 'num_gpus', "
"'memory', 'object_store_memory', 'resources', "
"'max_calls', or 'max_restarts', like "
"'@ray.remote(num_returns=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in [
"num_returns", "num_cpus", "num_gpus", "memory",
"object_store_memory", "resources", "accelerator_type",
"max_calls", "max_restarts", "max_task_retries", "max_retries",
"streamlit_script_path"
], error_string
num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise TypeError("The 'resources' keyword argument must be a "
f"dictionary, but received type {type(resources)}.")
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
accelerator_type = kwargs.get("accelerator_type")
# Handle other arguments.
num_returns = kwargs.get("num_returns")
max_calls = kwargs.get("max_calls")
max_restarts = kwargs.get("max_restarts")
max_task_retries = kwargs.get("max_task_retries")
memory = kwargs.get("memory")
object_store_memory = kwargs.get("object_store_memory")
max_retries = kwargs.get("max_retries")
return make_decorator(
num_returns=num_returns,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources,
accelerator_type=accelerator_type,
max_calls=max_calls,
max_restarts=max_restarts,
max_task_retries=max_task_retries,
max_retries=max_retries,
worker=worker)
|
[] |
[] |
[
"RAY_ADDRESS"
] |
[]
|
["RAY_ADDRESS"]
|
python
| 1 | 0 | |
web_project/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project11.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
broker/pubsub/default.go
|
package broker
import (
"context"
"strings"
"cloud.google.com/go/pubsub"
"github.com/cockroachdb/errors"
"github.com/rs/zerolog/log"
"golang.org/x/sync/errgroup"
)
const (
DefaultName = "mkit.broker.default"
)
type pubsubBroker struct {
client *pubsub.Client
options Options
subs []*pubsubSubscriber
pubs []*pubsubPublisher
}
type pubsubPublisher struct {
options PublishOptions
topic *pubsub.Topic
}
func (p *pubsubPublisher) Topic() string {
return p.topic.String()
}
// Stop should be called once
func (p *pubsubPublisher) stop() {
log.Info().Str("component", "pubsub").Msgf("Stopping Publisher: %s", p.Topic())
// It blocks until all items have been flushed.
p.topic.Stop()
log.Info().Str("component", "pubsub").Msgf("Stopped Publisher Gracefully: %s", p.Topic())
}
func (p *pubsubPublisher) Publish(ctx context.Context, msg *pubsub.Message) (err error) {
pr := p.topic.Publish(ctx, msg)
if !p.options.Async {
if _, err = pr.Get(ctx); err != nil {
log.Error().Err(err).Msgf("Unable to publish to topic: %s", p.topic.String())
}
}
return
}
type pubsubSubscriber struct {
options SubscribeOptions
sub *pubsub.Subscription
hdlr Handler
done chan struct{}
}
func (s *pubsubSubscriber) start(ctx context.Context) (err error) {
defer close(s.done)
log.Info().Str("component", "pubsub").Msgf("Subscribing to: %s", s.sub)
// If ctx is done, Receive returns nil after all of the outstanding calls to `s.hdlr` have returned
// and all messages have been acknowledged or have expired.
if err = s.sub.Receive(ctx, s.hdlr); err == nil {
log.Info().Str("component", "pubsub").Msgf("Stopped Subscriber Gracefully: %s", s.sub)
}
return
}
func (b *pubsubBroker) NewPublisher(topic string, opts ...PublishOption) (Publisher, error) {
t := b.client.Topic(topic)
options := PublishOptions{
Async: false,
}
for _, o := range opts {
o(&options)
}
if exists, err := t.Exists(context.Background()); err != nil {
return nil, err
} else if !exists {
err = errors.Errorf("Doesn't exist Topic: %s", t)
return nil, err
}
if options.PublishSettings.DelayThreshold != 0 {
t.PublishSettings.DelayThreshold = options.PublishSettings.DelayThreshold
}
if options.PublishSettings.CountThreshold != 0 {
t.PublishSettings.CountThreshold = options.PublishSettings.CountThreshold
}
if options.PublishSettings.ByteThreshold != 0 {
t.PublishSettings.ByteThreshold = options.PublishSettings.ByteThreshold
}
if options.PublishSettings.NumGoroutines != 0 {
t.PublishSettings.NumGoroutines = options.PublishSettings.NumGoroutines
}
if options.PublishSettings.Timeout != 0 {
t.PublishSettings.Timeout = options.PublishSettings.Timeout
}
if options.PublishSettings.BufferedByteLimit != 0 {
t.PublishSettings.BufferedByteLimit = options.PublishSettings.BufferedByteLimit
}
pub := &pubsubPublisher{
topic: t,
}
// keep track of pubs
b.pubs = append(b.pubs, pub)
return pub, nil
}
// AddSubscriber registers a handler for the given subscription against the google pubsub api
func (b *pubsubBroker) AddSubscriber(subscription string, hdlr Handler, opts ...SubscribeOption) error {
options := SubscribeOptions{}
for _, o := range opts {
o(&options)
}
sub := b.client.Subscription(subscription)
if exists, err := sub.Exists(context.Background()); err != nil {
return err
} else if !exists {
return errors.Errorf("Subscription %s doesn't exists", sub)
}
if options.ReceiveSettings.MaxOutstandingBytes != 0 {
sub.ReceiveSettings.MaxOutstandingBytes = options.ReceiveSettings.MaxOutstandingBytes
}
if options.ReceiveSettings.MaxOutstandingMessages != 0 {
sub.ReceiveSettings.MaxOutstandingMessages = options.ReceiveSettings.MaxOutstandingMessages
}
if options.ReceiveSettings.NumGoroutines != 0 {
sub.ReceiveSettings.NumGoroutines = options.ReceiveSettings.NumGoroutines
}
if options.ReceiveSettings.MaxExtension != 0 {
sub.ReceiveSettings.MaxExtension = options.ReceiveSettings.MaxExtension
}
if options.ReceiveSettings.MaxExtensionPeriod != 0 {
sub.ReceiveSettings.MaxExtensionPeriod = options.ReceiveSettings.MaxExtensionPeriod
}
if options.ReceiveSettings.Synchronous {
sub.ReceiveSettings.Synchronous = options.ReceiveSettings.Synchronous
}
middleware := hdlr
if rHdlr := options.RecoveryHandler; rHdlr != nil {
middleware = func(ctx context.Context, msg *pubsub.Message) {
defer func() {
if r := recover(); r != nil {
rHdlr(ctx, msg, r)
}
}()
hdlr(ctx, msg)
}
}
subscriber := &pubsubSubscriber{
options: options,
done: make(chan struct{}),
sub: sub,
hdlr: middleware,
}
// keep track of subs
b.subs = append(b.subs, subscriber)
return nil
}
// Start is blocking; run it as a background process.
func (b *pubsubBroker) Start() (err error) {
ctx := b.options.Context
g, egCtx := errgroup.WithContext(ctx)
// start subscribers in the background.
// when context cancelled, they exit without error.
for _, sub := range b.subs {
sub := sub // capture the loop variable so each goroutine gets its own subscriber (pre-Go 1.22 semantics)
g.Go(func() error {
return sub.start(egCtx)
})
}
g.Go(func() (err error) {
// listen for the interrupt signal
<-ctx.Done()
// log situation
switch ctx.Err() {
case context.DeadlineExceeded:
log.Debug().Str("component", "pubsub").Msg("Context timeout exceeded")
case context.Canceled:
log.Debug().Str("component", "pubsub").Msg("Context cancelled by interrupt signal")
}
// wait for all subs to stop
for _, sub := range b.subs {
log.Info().Str("component", "pubsub").Msgf("Stopping Subscriber: %s", sub.sub)
<-sub.done
}
// then stop all pubs
for _, pub := range b.pubs {
pub.stop()
}
// then disconnect the client.
log.Info().Str("component", "pubsub").Msgf("Closing pubsub client...")
err = b.client.Close()
// Hint: when using the pubsub emulator, you may receive this error, which you can safely ignore.
// Live pubsub server will throw this error.
if err != nil && strings.Contains(err.Error(), "the client connection is closing") {
err = nil
}
return
})
// Wait for all tasks to be finished or return if error occur at any task.
return g.Wait()
}
// newBroker creates a new google pubsub broker
func newBroker(ctx context.Context, opts ...Option) Broker {
// Default Options
options := Options{
Name: DefaultName,
Context: ctx,
}
for _, o := range opts {
o(&options)
}
// retrieve project id
prjID := options.ProjectID
// if `GOOGLE_CLOUD_PROJECT` is present, it will overwrite programmatically set projectID
//if envPrjID := os.Getenv("GOOGLE_CLOUD_PROJECT"); len(envPrjID) > 0 {
// prjID = envPrjID
//}
// create pubsub client
c, err := pubsub.NewClient(ctx, prjID, options.ClientOptions...)
if err != nil {
panic(err.Error())
}
return &pubsubBroker{
client: c,
options: options,
}
}
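// Illustrative wiring sketch (not part of the original file; it assumes it runs
// from inside this package, or via an exported wrapper around newBroker, and
// that Option is a function taking *Options with a ProjectID field, as used above):
//
//	ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
//	defer cancel()
//	b := newBroker(ctx, func(o *Options) { o.ProjectID = "my-project" })
//	_ = b.AddSubscriber("my-subscription", func(ctx context.Context, m *pubsub.Message) {
//		log.Info().Msgf("got: %s", m.Data)
//		m.Ack()
//	})
//	pub, _ := b.NewPublisher("my-topic")
//	_ = pub.Publish(ctx, &pubsub.Message{Data: []byte("hello")})
//	_ = b.Start() // blocks until ctx is cancelled, then drains subs and pubs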
|
[
"\"GOOGLE_CLOUD_PROJECT\""
] |
[] |
[
"GOOGLE_CLOUD_PROJECT"
] |
[]
|
["GOOGLE_CLOUD_PROJECT"]
|
go
| 1 | 0 | |
eval_new_val.py
|
import os
import torch
import numpy as np
from PIL import Image
import torch.nn as nn
from torch.utils import data
from network import *
from dataset.zurich_night_dataset import zurich_night_DataSet
from dataset.acdc_dataset import acdc_dataset
from configs.test_2_config import get_arguments
from tqdm import tqdm
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
device = torch.device("cuda")
args = get_arguments()
if not os.path.exists(args.save):
os.makedirs(args.save)
if args.model == 'PSPNet':
model = PSPNet(num_classes=args.num_classes)
if args.model == 'DeepLab':
model = Deeplab(num_classes=args.num_classes)
if args.model == 'RefineNet':
model = RefineNet(num_classes=args.num_classes, imagenet=False)
saved_state_dict = torch.load(args.restore_from)
model_dict = model.state_dict()
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
model_dict.update(saved_state_dict)
model.load_state_dict(saved_state_dict)
lightnet = LightNet()
saved_state_dict = torch.load(args.restore_from_light)
model_dict = lightnet.state_dict()
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
model_dict.update(saved_state_dict)
lightnet.load_state_dict(saved_state_dict)
model = model.to(device)
lightnet = lightnet.to(device)
model.eval()
lightnet.eval()
testloader = data.DataLoader(zurich_night_DataSet(args.data_dir, args.data_list, set=args.set))
interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
weights = torch.log(torch.FloatTensor(
[0.36869696, 0.06084986, 0.22824049, 0.00655399, 0.00877272, 0.01227341, 0.00207795, 0.0055127, 0.15928651,
0.01157818, 0.04018982, 0.01218957, 0.00135122, 0.06994545, 0.00267456, 0.00235192, 0.00232904, 0.00098658,
0.00413907])).cuda()
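# The hard-coded list above holds (presumably) per-class pixel frequencies; their
# logs are standardised and rescaled by args.std on the next line so that rarer
# classes receive a larger multiplier when the logits are re-weighted below.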
weights = (torch.mean(weights) - weights) / torch.std(weights) * args.std + 1.0
for index, batch in tqdm(enumerate(testloader)):
if index % 10 == 0:
print('%d processed' % index)
image, name = batch
image = image.to(device)
with torch.no_grad():
r = lightnet(image)
enhancement = image + r
if args.model == 'RefineNet':
output2 = model(enhancement)
else:
_, output2 = model(enhancement)
weights_prob = weights.expand(output2.size()[0], output2.size()[3], output2.size()[2], 19)
weights_prob = weights_prob.transpose(1, 3)
output2 = output2 * weights_prob
output = interp(output2).cpu().data[0].numpy()
output = output.transpose(1,2,0)
# print(output.shape)
# torch.save(output, 'out.pt')
# print(name)
name = name[0].split('/')[-1].replace('.png','.pt')
# print(name)
flname = os.path.join(args.save, name)
torch.save(output, flname)
# print('done')
# output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
# output_col = colorize_mask(output)
# output = Image.fromarray(output)
# ###### get the enhanced image
# # enhancement = enhancement.cpu().data[0].numpy().transpose(1,2,0)
# # enhancement = enhancement*mean_std[1]+mean_std[0]
# # enhancement = (enhancement-enhancement.min())/(enhancement.max()-enhancement.min())
# # enhancement = enhancement[:, :, ::-1]*255 # change to BGR
# # enhancement = Image.fromarray(enhancement.astype(np.uint8))
# ###### get the light
# # light = r.cpu().data[0].numpy().transpose(1,2,0)
# # light = (light-light.min())/(light.max()-light.min())
# # light = light[:, :, ::-1]*255 # change to BGR
# # light = Image.fromarray(light.astype(np.uint8))
# name = name[0].split('/')[-1]
# output.save('%s/%s' % (args.save, name))
# output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
# # enhancement.save('%s/%s_enhancement.png' % (args.save, name.split('.')[0]))
# # light.save('%s/%s_light.png' % (args.save, name.split('.')[0]))
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
pkg/settings/setting.go
|
package settings
import (
"encoding/json"
"fmt"
"os"
"regexp"
"strconv"
"strings"
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
authsettings "github.com/rancher/rancher/pkg/auth/settings"
fleetconst "github.com/rancher/rancher/pkg/fleet"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
)
var (
releasePattern = regexp.MustCompile("^v[0-9]")
settings = map[string]Setting{}
provider Provider
InjectDefaults string
AgentImage = NewSetting("agent-image", "rancher/rancher-agent:master-head")
AgentRolloutTimeout = NewSetting("agent-rollout-timeout", "300s")
AgentRolloutWait = NewSetting("agent-rollout-wait", "true")
AuthImage = NewSetting("auth-image", v32.ToolsSystemImages.AuthSystemImages.KubeAPIAuth)
AuthTokenMaxTTLMinutes = NewSetting("auth-token-max-ttl-minutes", "0") // never expire
AuthorizationCacheTTLSeconds = NewSetting("authorization-cache-ttl-seconds", "10")
AuthorizationDenyCacheTTLSeconds = NewSetting("authorization-deny-cache-ttl-seconds", "10")
AzureGroupCacheSize = NewSetting("azure-group-cache-size", "10000")
CACerts = NewSetting("cacerts", "")
CLIURLDarwin = NewSetting("cli-url-darwin", "https://releases.rancher.com/cli/v1.0.0-alpha8/rancher-darwin-amd64-v1.0.0-alpha8.tar.gz")
CLIURLLinux = NewSetting("cli-url-linux", "https://releases.rancher.com/cli/v1.0.0-alpha8/rancher-linux-amd64-v1.0.0-alpha8.tar.gz")
CLIURLWindows = NewSetting("cli-url-windows", "https://releases.rancher.com/cli/v1.0.0-alpha8/rancher-windows-386-v1.0.0-alpha8.zip")
ClusterControllerStartCount = NewSetting("cluster-controller-start-count", "50")
EngineInstallURL = NewSetting("engine-install-url", "https://releases.rancher.com/install-docker/20.10.sh")
EngineISOURL = NewSetting("engine-iso-url", "https://releases.rancher.com/os/latest/rancheros-vmware.iso")
EngineNewestVersion = NewSetting("engine-newest-version", "v17.12.0")
EngineSupportedRange = NewSetting("engine-supported-range", "~v1.11.2 || ~v1.12.0 || ~v1.13.0 || ~v17.03.0 || ~v17.06.0 || ~v17.09.0 || ~v18.06.0 || ~v18.09.0 || ~v19.03.0 || ~v20.10.0 ")
FirstLogin = NewSetting("first-login", "true")
GlobalRegistryEnabled = NewSetting("global-registry-enabled", "false")
GithubProxyAPIURL = NewSetting("github-proxy-api-url", "https://api.github.com")
HelmVersion = NewSetting("helm-version", "dev")
HelmMaxHistory = NewSetting("helm-max-history", "10")
IngressIPDomain = NewSetting("ingress-ip-domain", "sslip.io")
InstallUUID = NewSetting("install-uuid", "")
InternalServerURL = NewSetting("internal-server-url", "")
InternalCACerts = NewSetting("internal-cacerts", "")
IsRKE = NewSetting("is-rke", "")
JailerTimeout = NewSetting("jailer-timeout", "60")
KubeconfigGenerateToken = NewSetting("kubeconfig-generate-token", "true")
KubeconfigTokenTTLMinutes = NewSetting("kubeconfig-token-ttl-minutes", "960") // 16 hours
KubernetesVersion = NewSetting("k8s-version", "")
KubernetesVersionToServiceOptions = NewSetting("k8s-version-to-service-options", "")
KubernetesVersionToSystemImages = NewSetting("k8s-version-to-images", "")
KubernetesVersionsCurrent = NewSetting("k8s-versions-current", "")
KubernetesVersionsDeprecated = NewSetting("k8s-versions-deprecated", "")
KDMBranch = NewSetting("kdm-branch", "dev-v2.6")
MachineVersion = NewSetting("machine-version", "dev")
Namespace = NewSetting("namespace", os.Getenv("CATTLE_NAMESPACE"))
PasswordMinLength = NewSetting("password-min-length", "12")
PeerServices = NewSetting("peer-service", os.Getenv("CATTLE_PEER_SERVICE"))
RDNSServerBaseURL = NewSetting("rdns-base-url", "https://api.lb.rancher.cloud/v1")
RkeVersion = NewSetting("rke-version", "")
RkeMetadataConfig = NewSetting("rke-metadata-config", getMetadataConfig())
ServerImage = NewSetting("server-image", "rancher/rancher")
ServerURL = NewSetting("server-url", "")
ServerVersion = NewSetting("server-version", "dev")
SystemAgentVersion = NewSetting("system-agent-version", "")
WinsAgentVersion = NewSetting("wins-agent-version", "")
SystemAgentInstallScript = NewSetting("system-agent-install-script", "https://raw.githubusercontent.com/rancher/system-agent/main/install.sh")
WindowsRke2InstallScript = NewSetting("windows-rke2-install-script", "https://raw.githubusercontent.com/rancher/wins/main/install.ps1")
SystemAgentInstallerImage = NewSetting("system-agent-installer-image", "rancher/system-agent-installer-")
SystemAgentUpgradeImage = NewSetting("system-agent-upgrade-image", "")
SystemDefaultRegistry = NewSetting("system-default-registry", "")
SystemNamespaces = NewSetting("system-namespaces", "kube-system,kube-public,cattle-system,cattle-alerting,cattle-logging,cattle-pipeline,cattle-prometheus,ingress-nginx,cattle-global-data,cattle-istio,kube-node-lease,cert-manager,cattle-global-nt,security-scan,cattle-fleet-system,cattle-fleet-local-system,calico-system,tigera-operator,cattle-impersonation-system,rancher-operator-system")
SystemUpgradeControllerChartVersion = NewSetting("system-upgrade-controller-chart-version", "")
TelemetryOpt = NewSetting("telemetry-opt", "")
TLSMinVersion = NewSetting("tls-min-version", "1.2")
TLSCiphers = NewSetting("tls-ciphers", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305")
UIBanners = NewSetting("ui-banners", "{}")
UIBrand = NewSetting("ui-brand", "")
UIDefaultLanding = NewSetting("ui-default-landing", "vue")
UIFeedBackForm = NewSetting("ui-feedback-form", "")
UIIndex = NewSetting("ui-index", "https://releases.rancher.com/ui/latest2/index.html")
UIPath = NewSetting("ui-path", "/usr/share/rancher/ui")
UIDashboardIndex = NewSetting("ui-dashboard-index", "https://releases.rancher.com/dashboard/latest/index.html")
UIDashboardPath = NewSetting("ui-dashboard-path", "/usr/share/rancher/ui-dashboard")
UIPreferred = NewSetting("ui-preferred", "vue")
UIOfflinePreferred = NewSetting("ui-offline-preferred", "dynamic")
UIIssues = NewSetting("ui-issues", "")
UIPL = NewSetting("ui-pl", "rancher")
UICommunityLinks = NewSetting("ui-community-links", "true")
UIKubernetesSupportedVersions = NewSetting("ui-k8s-supported-versions-range", ">= 1.11.0 <=1.14.x")
UIKubernetesDefaultVersion = NewSetting("ui-k8s-default-version-range", "<=1.14.x")
WhitelistDomain = NewSetting("whitelist-domain", "forums.rancher.com")
WhitelistEnvironmentVars = NewSetting("whitelist-envvars", "HTTP_PROXY,HTTPS_PROXY,NO_PROXY")
AuthUserInfoResyncCron = NewSetting("auth-user-info-resync-cron", "0 0 * * *")
AuthUserSessionTTLMinutes = NewSetting("auth-user-session-ttl-minutes", "960") // 16 hours
AuthUserInfoMaxAgeSeconds = NewSetting("auth-user-info-max-age-seconds", "3600") // 1 hour
APIUIVersion = NewSetting("api-ui-version", "1.1.6") // Please update the CATTLE_API_UI_VERSION in package/Dockerfile when updating the version here.
RotateCertsIfExpiringInDays = NewSetting("rotate-certs-if-expiring-in-days", "7") // 7 days
ClusterTemplateEnforcement = NewSetting("cluster-template-enforcement", "false")
InitialDockerRootDir = NewSetting("initial-docker-root-dir", "/var/lib/docker")
SystemCatalog = NewSetting("system-catalog", "external") // Options are 'external' or 'bundled'
ChartDefaultBranch = NewSetting("chart-default-branch", "dev-v2.6")
PartnerChartDefaultBranch = NewSetting("partner-chart-default-branch", "main")
RKE2ChartDefaultBranch = NewSetting("rke2-chart-default-branch", "main")
FleetDefaultWorkspaceName = NewSetting("fleet-default-workspace-name", fleetconst.ClustersDefaultNamespace) // fleetWorkspaceName to assign to clusters with none
ShellImage = NewSetting("shell-image", "rancher/shell:v0.1.14")
IgnoreNodeName = NewSetting("ignore-node-name", "") // nodes to ignore when syncing v1.node to v3.node
NoDefaultAdmin = NewSetting("no-default-admin", "")
RestrictedDefaultAdmin = NewSetting("restricted-default-admin", "false") // When bootstrapping the admin for the first time, give them the global role restricted-admin
AKSUpstreamRefresh = NewSetting("aks-refresh", "300")
EKSUpstreamRefreshCron = NewSetting("eks-refresh-cron", "*/5 * * * *") // EKSUpstreamRefreshCron is deprecated and will be replaced by EKSUpstreamRefresh
EKSUpstreamRefresh = NewSetting("eks-refresh", "300")
GKEUpstreamRefresh = NewSetting("gke-refresh", "300")
HideLocalCluster = NewSetting("hide-local-cluster", "false")
MachineProvisionImage = NewSetting("machine-provision-image", "rancher/machine:v0.15.0-rancher73")
SystemFeatureChartRefreshSeconds = NewSetting("system-feature-chart-refresh-seconds", "900")
FleetMinVersion = NewSetting("fleet-min-version", "")
RancherWebhookMinVersion = NewSetting("rancher-webhook-min-version", "")
)
func FullShellImage() string {
return PrefixPrivateRegistry(ShellImage.Get())
}
func PrefixPrivateRegistry(image string) string {
private := SystemDefaultRegistry.Get()
if private == "" {
return image
}
return private + "/" + image
}
func IsRelease() bool {
return !strings.Contains(ServerVersion.Get(), "head") && releasePattern.MatchString(ServerVersion.Get())
}
func init() {
// setup auth setting
authsettings.AuthUserInfoResyncCron = AuthUserInfoResyncCron
authsettings.AuthUserSessionTTLMinutes = AuthUserSessionTTLMinutes
authsettings.AuthUserInfoMaxAgeSeconds = AuthUserInfoMaxAgeSeconds
authsettings.FirstLogin = FirstLogin
if InjectDefaults == "" {
return
}
defaults := map[string]string{}
if err := json.Unmarshal([]byte(InjectDefaults), &defaults); err != nil {
return
}
for name, defaultValue := range defaults {
value, ok := settings[name]
if !ok {
continue
}
value.Default = defaultValue
settings[name] = value
}
}
type Provider interface {
Get(name string) string
Set(name, value string) error
SetIfUnset(name, value string) error
SetAll(settings map[string]Setting) error
}
type Setting struct {
Name string
Default string
ReadOnly bool
}
func (s Setting) SetIfUnset(value string) error {
if provider == nil {
return s.Set(value)
}
return provider.SetIfUnset(s.Name, value)
}
func (s Setting) Set(value string) error {
if provider == nil {
s, ok := settings[s.Name]
if ok {
s.Default = value
settings[s.Name] = s
}
} else {
return provider.Set(s.Name, value)
}
return nil
}
func (s Setting) Get() string {
if provider == nil {
s := settings[s.Name]
return s.Default
}
return provider.Get(s.Name)
}
func (s Setting) GetInt() int {
v := s.Get()
i, err := strconv.Atoi(v)
if err == nil {
return i
}
logrus.Errorf("failed to parse setting %s=%s as int: %v", s.Name, v, err)
i, err = strconv.Atoi(s.Default)
if err != nil {
return 0
}
return i
}
func SetProvider(p Provider) error {
if err := p.SetAll(settings); err != nil {
return err
}
provider = p
return nil
}
func NewSetting(name, def string) Setting {
s := Setting{
Name: name,
Default: def,
}
settings[s.Name] = s
return s
}
func GetEnvKey(key string) string {
return "CATTLE_" + strings.ToUpper(strings.Replace(key, "-", "_", -1))
}
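// Illustrative note (not part of the original file): GetEnvKey("install-uuid")
// returns "CATTLE_INSTALL_UUID"; this is how DefaultAgentSettingsAsEnvVars below
// derives the environment variable name for each default agent setting.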
func getMetadataConfig() string {
branch := KDMBranch.Get()
data := map[string]interface{}{
"url": fmt.Sprintf("https://releases.rancher.com/kontainer-driver-metadata/%s/data.json", branch),
"refresh-interval-minutes": "1440",
}
ans, err := json.Marshal(data)
if err != nil {
logrus.Errorf("error getting metadata config %v", err)
return ""
}
return string(ans)
}
// GetSettingByID returns a setting that is stored with the given id
func GetSettingByID(id string) string {
if provider == nil {
s := settings[id]
return s.Default
}
return provider.Get(id)
}
func DefaultAgentSettings() []Setting {
return []Setting{
ServerVersion,
InstallUUID,
IngressIPDomain,
}
}
func DefaultAgentSettingsAsEnvVars() []v1.EnvVar {
defaultAgentSettings := DefaultAgentSettings()
envVars := make([]v1.EnvVar, 0, len(defaultAgentSettings))
for _, s := range defaultAgentSettings {
envVars = append(envVars, v1.EnvVar{
Name: GetEnvKey(s.Name),
Value: s.Get(),
})
}
return envVars
}
|
[
"\"CATTLE_NAMESPACE\"",
"\"CATTLE_PEER_SERVICE\""
] |
[] |
[
"CATTLE_NAMESPACE",
"CATTLE_PEER_SERVICE"
] |
[]
|
["CATTLE_NAMESPACE", "CATTLE_PEER_SERVICE"]
|
go
| 2 | 0 | |
liesl/streams/__init__.py
|
"""
localhost
---------
"""
import socket, os
def get_localhostname():
if os.environ.get("DOC", False) == True:
return socket.gethostname()
else:
return "sphinx-doc"
def get_ip_adress():
if os.environ.get("DOC", False) == True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
return ip
except Exception:
pass
return "123.4.567.890"
localhostname: str #: the name of the local machine
localhostname = get_localhostname()
localhost: str #: the localhost
localhost = "127.0.0.1"
localip: str #: the local ip address
localip = get_ip_adress()
|
[] |
[] |
[
"DOC"
] |
[]
|
["DOC"]
|
python
| 1 | 0 | |
src/main/java/me/coley/recaf/Recaf.java
|
package me.coley.recaf;
import io.github.soc.directories.BaseDirectories;
import me.coley.recaf.command.impl.Initializer;
import me.coley.recaf.control.Controller;
import me.coley.recaf.control.headless.HeadlessController;
import me.coley.recaf.plugin.PluginsManager;
import me.coley.recaf.plugin.api.EntryLoaderProviderPlugin;
import me.coley.recaf.util.Log;
import me.coley.recaf.util.Natives;
import me.coley.recaf.util.OSUtil;
import me.coley.recaf.util.VMUtil;
import me.coley.recaf.util.self.SelfDependencyPatcher;
import me.coley.recaf.util.self.SelfUpdater;
import me.coley.recaf.workspace.InstrumentationResource;
import me.coley.recaf.workspace.Workspace;
import org.objectweb.asm.Opcodes;
import picocli.CommandLine;
import java.lang.instrument.Instrumentation;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collection;
import java.util.Locale;
import static me.coley.recaf.util.Log.*;
/**
* Entry point & version constant.
*
* @author Matt
*/
public class Recaf {
public static final String VERSION = "2.19.6";
public static final String DOC_URL = "https://col-e.github.io/Recaf-documentation/";
public static final int ASM_VERSION = Opcodes.ASM9;
private static Controller currentController;
private static Workspace currentWorkspace;
private static boolean initialized;
private static boolean headless;
private static Path configDir;
/**
* Start Recaf.
*
* @param args
* Optional args.
*/
public static void main(String[] args) {
Natives.loadAttach().ifPresent(t -> error(t, "Failed to load attach library."));
init();
launch(args);
}
private static void agent(String args, Instrumentation inst) {
InstrumentationResource.instrumentation = inst;
if (Recaf.class.getClassLoader() == ClassLoader.getSystemClassLoader()) {
warn("Recaf was attached and loaded into system class loader," +
" that is not a good thing!");
}
init();
// Log that we are an agent
info("Starting as agent...");
// Add instrument launch arg
if(args == null || args.trim().isEmpty())
args = "--instrument";
else if(!args.contains("--instrument"))
args = args + ",--instrument";
// Set instance
// Start Recaf
launch(args.split("[=,]"));
}
/**
* Run pre-launch initialization tasks.
*/
private static void init() {
if (!initialized) {
// Bypass JDK restrictions.
VMUtil.patch();
// Patch in dependencies
SelfDependencyPatcher.patch();
// Fix title bar not displaying in GTK systems
System.setProperty("jdk.gtk.version", "2");
// Fix for this dumb "feature" - https://mattryall.net/blog/the-infamous-turkish-locale-bug
Locale.setDefault(Locale.US);
// Show version & start
info("Recaf-{}", VERSION);
initialized = true;
}
}
/**
* Launch Recaf
*/
private static void launch(String[] args) {
// Setup initializer, this loads command line arguments
Initializer initializer = new Initializer();
CommandLine commandLine = new CommandLine(initializer);
commandLine.execute(args);
if (commandLine.getUnmatchedArguments().size() > 0)
return;
headless = initializer.cli;
loadPlugins();
// Do version check
SelfUpdater.setController(initializer.getController());
SelfUpdater.setArgs(args);
SelfUpdater.checkForUpdates();
// Start the initializer's controller, starting Recaf
initializer.startController();
}
/**
* Load plugins.
*/
private static void loadPlugins() {
try {
PluginsManager manager = PluginsManager.getInstance();
manager.load();
// Check for loaders, set the current loader to the first one found
Collection<EntryLoaderProviderPlugin> loaders = manager.ofType(EntryLoaderProviderPlugin.class);
if (!loaders.isEmpty())
manager.setEntryLoader(loaders.iterator().next().create());
} catch(NoClassDefFoundError noDef) {
Log.error("An error occurred loading the plugins, failed class lookup: " +
noDef.getMessage() + "\n - Is the plugin outdated?");
} catch(Throwable t) {
Log.error(t, "An error occurred loading the plugins");
}
}
/**
* @param currentWorkspace
* New workspace.
*/
public static void setCurrentWorkspace(Workspace currentWorkspace) {
Recaf.currentWorkspace = currentWorkspace;
}
/**
* Try not to use this too often. It would be best to be passed an instance of the workspace
* so things do not become statically dependent.
*
* @return Current workspace.
*/
public static Workspace getCurrentWorkspace() {
return currentWorkspace;
}
/**
* @param controller
* New controller.
*/
public static void setController(Controller controller) {
if (currentController != null)
throw new IllegalStateException("Controller already set!");
headless = controller instanceof HeadlessController;
currentController = controller;
}
/**
* @return Recaf controller.
*/
public static Controller getController() {
return currentController;
}
/**
* @return {@code true} when Recaf is running in headless mode.
*/
public static boolean isHeadless() {
return headless;
}
/**
* @return Recaf's storage directory.
*/
public static Path getDirectory() {
Path configDir = Recaf.configDir;
if (configDir == null) {
try {
configDir = Recaf.configDir = Paths.get(BaseDirectories.get().configDir)
.resolve("Recaf");
} catch (Throwable t) {
// BaseDirectories library has a powershell problem...
// This should only affect windows
if (OSUtil.getOSType() == OSUtil.WINDOWS) {
configDir = Paths.get(System.getenv("APPDATA"), "Recaf");
} else {
throw new IllegalStateException("Failed to initialize Recaf directory");
}
}
}
return configDir;
}
/**
* @param subfolder
* Subfolder name.
*
* @return Subfolder in Recaf's storage directory.
*/
public static Path getDirectory(String subfolder) {
return getDirectory().resolve(subfolder);
}
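// Illustrative note (not part of the original class): getDirectory("plugins")
// resolves to <config dir>/Recaf/plugins -- e.g. %APPDATA%\Recaf\plugins when
// the Windows fallback above is used.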
}
|
[
"\"APPDATA\""
] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
java
| 1 | 0 | |
opr/opr.go
|
// Copyright (c) of parts are held by the various contributors (see the CLA)
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
package opr
import (
"context"
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"os"
"strconv"
"sync"
"github.com/FactomProject/btcutil/base58"
"github.com/FactomProject/factom"
"github.com/golang/protobuf/proto"
lxr "github.com/pegnet/LXRHash"
"github.com/pegnet/pegnet/common"
"github.com/pegnet/pegnet/opr/oprencoding"
"github.com/pegnet/pegnet/polling"
log "github.com/sirupsen/logrus"
"github.com/zpatrick/go-config"
)
// TODO: Do not make this a global.
// currently the OPR does the asset polling, this is bit backwards.
// We should poll the asset prices, and set the OPR. Not create the OPR
// and have it find it's own prices.
var PollingDataSource *polling.DataSources
var pollingDataSourceInitializer sync.Once
func InitDataSource(config *config.Config) {
pollingDataSourceInitializer.Do(func() {
if PollingDataSource == nil { // This can be inited from unit tests
PollingDataSource = polling.NewDataSources(config)
}
})
}
// OraclePriceRecord is the data used and created by miners
type OraclePriceRecord struct {
// These fields are not part of the OPR, but track values associated with the OPR.
Protocol string `json:"-"` // The Protocol we are running on (PegNet)
Network string `json:"-"` // The network we are running on (TestNet vs MainNet)
Difficulty uint64 `json:"-"` // The difficulty of the given nonce
Grade float64 `json:"-"` // The grade when OPR records are compared
OPRHash []byte `json:"-"` // The hash of the OPR record (used by PegNet Mining)
OPRChainID string `json:"-"` // [base58] Chain ID of the chain used by the Oracle Miners
CoinbasePEGAddress string `json:"-"` // [base58] PEG Address to pay PEG
// This can be attached to an OPR, which indicates how low we should expect a mined
// opr to be. Any OPRs mined below this are not worth submitting to the network.
MinimumDifficulty uint64 `json:"-"`
// Factom Entry data
EntryHash []byte `json:"-"` // Entry to record this record
Nonce []byte `json:"-"` // Nonce used with OPR
SelfReportedDifficulty []byte `json:"-"` // Miners self report their difficulty
Version uint8 `json:"-"`
// These values define the context of the OPR, and they go into the PegNet OPR record, and are mined.
CoinbaseAddress string `json:"coinbase"` // [base58] PEG Address to pay PEG
Dbht int32 `json:"dbht"` // The Directory Block Height of the OPR.
WinPreviousOPR []string `json:"winners"` // First 8 bytes of the Entry Hashes of the previous winners
FactomDigitalID string `json:"minerid"` // [unicode] Digital Identity of the miner
// The Oracle values of the OPR, they are the meat of the OPR record, and are mined.
Assets OraclePriceRecordAssetList `json:"assets"`
}
func NewOraclePriceRecord() *OraclePriceRecord {
o := new(OraclePriceRecord)
o.Assets = make(OraclePriceRecordAssetList)
return o
}
// CloneEntryData will clone the OPR data needed to make a factom entry.
// This needs to be done because I need to marshal this into my factom entry.
func (c *OraclePriceRecord) CloneEntryData() *OraclePriceRecord {
n := NewOraclePriceRecord()
n.OPRChainID = c.OPRChainID
n.Dbht = c.Dbht
n.Version = c.Version
n.WinPreviousOPR = make([]string, len(c.WinPreviousOPR), len(c.WinPreviousOPR))
copy(n.WinPreviousOPR[:], c.WinPreviousOPR[:])
n.CoinbaseAddress = c.CoinbaseAddress
n.CoinbasePEGAddress = c.CoinbasePEGAddress
n.FactomDigitalID = c.FactomDigitalID
n.Assets = make(OraclePriceRecordAssetList)
for k, v := range c.Assets {
n.Assets[k] = v
}
return n
}
// LX holds an instance of lxrhash
var LX lxr.LXRHash
var lxInitializer sync.Once
// The init function for LX is expensive. So we should explicitly call the init if we intend
// to use it. Make the init call idempotent
func InitLX() {
lxInitializer.Do(func() {
// This code will only be executed ONCE, no matter how often you call it
LX.Verbose(true)
if size, err := strconv.Atoi(os.Getenv("LXRBITSIZE")); err == nil && size >= 8 && size <= 30 {
LX.Init(0xfafaececfafaecec, uint64(size), 256, 5)
} else {
LX.Init(0xfafaececfafaecec, 30, 256, 5)
}
})
}
// OPRChainID is the calculated chain id of the records chain
var OPRChainID string
// Token is a combination of currency Code and Value
type Token struct {
Code string
Value float64
}
// Validate performs sanity checks of the structure and values of the OPR.
// It does not validate the winners of the previous block.
func (opr *OraclePriceRecord) Validate(c *config.Config, dbht int64) bool {
net, _ := common.LoadConfigNetwork(c)
if !common.NetworkActive(net, dbht) {
return false
}
// Validate there are no 0's
for k, v := range opr.Assets {
if v == 0 && k != "PEG" { // PEG is exception until we get a value for it
return false
}
}
// Only enforce on version 2 and forward
if err := common.ValidIdentity(opr.FactomDigitalID); opr.Version == 2 && err != nil {
return false
}
// Only enforce on version 2 and forward, checking valid FCT address
if opr.Version == 2 && !ValidFCTAddress(opr.CoinbaseAddress) {
return false
}
if int64(opr.Dbht) != dbht {
return false // DBHeight is not reported correctly
}
if opr.Version != common.OPRVersion(net, int64(dbht)) {
return false // We only support this version
}
// Validate all the Assets exists
switch opr.Version {
case 1:
if len(opr.WinPreviousOPR) != 10 {
return false
}
return opr.Assets.ContainsExactly(common.AssetsV1)
case 2:
// It can contain 10 winners when it is a transition record
return opr.Assets.ContainsExactly(common.AssetsV2)
default:
return false
}
}
// ValidFCTAddress will be removed in the grading module refactor. This is just temporary to get this
// functionality, and be easily unit testable.
func ValidFCTAddress(addr string) bool {
return len(addr) > 2 && addr[:2] == "FA" && factom.IsValidAddress(addr)
}
// GetTokens creates an iterateable slice of Tokens containing all the currency values
func (opr *OraclePriceRecord) GetTokens() (tokens []Token) {
return opr.Assets.List(opr.Version)
}
// GetHash returns the LXHash over the OPR's json representation
func (opr *OraclePriceRecord) GetHash() []byte {
if len(opr.OPRHash) > 0 {
return opr.OPRHash
}
// SafeMarshal handles the PNT/PEG issue
data, err := opr.SafeMarshal()
common.CheckAndPanic(err)
sha := sha256.Sum256(data)
opr.OPRHash = sha[:]
return opr.OPRHash
}
// ComputeDifficulty gets the difficulty by taking the hash of the OPRHash
// appended by the nonce. The difficulty is the highest 8 bytes of the hash
// taken as uint64 in Big Endian
func (opr *OraclePriceRecord) ComputeDifficulty(nonce []byte) (difficulty uint64) {
no := append(opr.OPRHash, nonce...)
h := LX.Hash(no)
// The high eight bytes of the hash(hash(entry.Content) + nonce) is the difficulty.
// Because we don't have a difficulty bar, we can define difficulty as the greatest
// value, rather than the minimum value. Our bar is the greatest difficulty found
// within a 10 minute period. We compute difficulty as Big Endian.
for i := uint64(0); i < 8; i++ {
difficulty = difficulty<<8 + uint64(h[i])
}
return difficulty
}
func ComputeDifficulty(oprhash, nonce []byte) (difficulty uint64) {
no := append(oprhash, nonce...)
h := LX.Hash(no)
// The high eight bytes of the hash(hash(entry.Content) + nonce) is the difficulty.
// Because we don't have a difficulty bar, we can define difficulty as the greatest
// value, rather than the minimum value. Our bar is the greatest difficulty found
// within a 10 minute period. We compute difficulty as Big Endian.
for i := uint64(0); i < 8; i++ {
difficulty = difficulty<<8 + uint64(h[i])
}
return difficulty
}
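// Illustrative sketch (not part of the original file): a naive mining loop on
// top of ComputeDifficulty; the nonce scheme and attempt count are assumptions
// made for the example only.
//
//	var best uint64
//	var bestNonce []byte
//	for i := 0; i < attempts; i++ {
//		nonce := make([]byte, 8)
//		binary.BigEndian.PutUint64(nonce, uint64(i))
//		if d := ComputeDifficulty(opr.GetHash(), nonce); d > best {
//			best, bestNonce = d, nonce
//		}
//	}
//	// keep bestNonce only if best exceeds opr.MinimumDifficulty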
// ShortString returns a human readable string with select data
func (opr *OraclePriceRecord) ShortString() string {
str := fmt.Sprintf("DID %30x OPRHash %30x Nonce %33x Difficulty %15x Grade %20f",
opr.FactomDigitalID,
opr.OPRHash,
opr.Nonce,
opr.Difficulty,
opr.Grade)
return str
}
// String returns a human readable string for the Oracle Record
func (opr *OraclePriceRecord) String() (str string) {
str = fmt.Sprintf("Nonce %x\n", opr.Nonce)
str = str + fmt.Sprintf("%32s %v\n", "Difficulty", opr.Difficulty)
str = str + fmt.Sprintf("%32s %v\n", "Directory Block Height", opr.Dbht)
str = str + fmt.Sprintf("%32s %v\n", "WinningPreviousOPRs", "")
for i, v := range opr.WinPreviousOPR {
str = str + fmt.Sprintf("%32s %2d, %s\n", "", i+1, v)
}
str = str + fmt.Sprintf("%32s %s\n", "Coinbase PEG", opr.CoinbasePEGAddress)
// Make a display string out of the Digital Identity.
str = str + fmt.Sprintf("%32s %v\n", "FactomDigitalID", opr.FactomDigitalID)
for _, asset := range opr.Assets.List(opr.Version) {
str = str + fmt.Sprintf("%32s %v\n", "PEG", asset)
}
str = str + fmt.Sprintf("\nWinners\n\n")
// If there were previous winners, we need to make sure this miner is running
// the software to detect them, and that we agree with their conclusions.
for i, v := range opr.WinPreviousOPR {
str = str + fmt.Sprintf(" %2d\t%16s\n",
i,
v,
)
}
return str
}
// LogFieldsShort returns a set of common fields to be included in logrus
func (opr *OraclePriceRecord) LogFieldsShort() log.Fields {
return log.Fields{
"did": opr.FactomDigitalID,
"opr_hash": hex.EncodeToString(opr.OPRHash),
"nonce": hex.EncodeToString(opr.Nonce),
"difficulty": opr.Difficulty,
"grade": opr.Grade,
}
}
// SetPegValues assigns currency polling values to the OPR
func (opr *OraclePriceRecord) SetPegValues(assets polling.PegAssets) {
// TODO: Remove when version 2 is activated
switch common.OPRVersion(opr.Network, int64(opr.Dbht)) {
case 1:
for asset, v := range assets {
opr.Assets.SetValue(asset, v.Value)
}
case 2:
for asset, v := range assets {
// Skip XPT and XPD
if asset == "XPT" || asset == "XPD" {
continue
}
opr.Assets.SetValue(asset, v.Value)
}
}
}
// NewOpr collects all the information unique to this miner and its configuration, and also
// goes and gets the oracle data. Also collects the winners from the prior block and
// puts their entry hashes (base58) into this OPR
func NewOpr(ctx context.Context, minerNumber int, dbht int32, c *config.Config, alert chan *OPRs) (opr *OraclePriceRecord, err error) {
opr = NewOraclePriceRecord()
// Get the Identity Chain Specification
if did, err := c.String("Miner.IdentityChain"); err != nil {
return nil, errors.New("config file has no Miner.IdentityChain specified")
} else {
if minerNumber > 0 {
did = fmt.Sprintf("%sminer%03d", did, minerNumber)
}
opr.FactomDigitalID = did
}
// Get the protocol chain to be used for pegnetMining records
protocol, err1 := c.String("Miner.Protocol")
network, err2 := common.LoadConfigNetwork(c)
opr.Network = network
opr.Protocol = protocol
if err1 != nil {
return nil, errors.New("config file has no Miner.Protocol specified")
}
if err2 != nil {
return nil, errors.New("config file has no Miner.Network specified")
}
opr.OPRChainID = base58.Encode(common.ComputeChainIDFromStrings([]string{protocol, network, common.OPRChainTag}))
opr.Dbht = dbht
opr.Version = common.OPRVersion(opr.Network, int64(opr.Dbht))
// If this is a test network, then give multiple miners their own tPEG address
// because that is way more useful for debugging than giving all miners the same
// PEG address. Otherwise, give all miners the same PEG address because most
// users really doing mining will be happy sending rewards to a single
// address.
if network == common.TestNetwork && minerNumber != 0 {
fct := common.DebugFCTaddresses[minerNumber][1]
opr.CoinbaseAddress = fct
} else {
if str, err := c.String("Miner.CoinbaseAddress"); err != nil {
return nil, errors.New("config file has no Coinbase PEG Address")
} else {
opr.CoinbaseAddress = str
}
}
opr.CoinbasePEGAddress, err = common.ConvertFCTtoPegNetAsset(network, "PEG", opr.CoinbaseAddress)
if err != nil {
log.Errorf("invalid fct address in config file: %v", err)
}
var winners *OPRs
select {
case winners = <-alert: // Wait for winner
case <-ctx.Done(): // If we get cancelled
return nil, context.Canceled
}
if winners.Error != nil {
return nil, winners.Error
}
// For the transition, we need to support a 10 winner opr.
// The winner's should be correct from our grader, so we will accept it
if len(winners.ToBePaid) > 0 {
opr.WinPreviousOPR = make([]string, len(winners.ToBePaid), len(winners.ToBePaid))
for i, w := range winners.ToBePaid {
opr.WinPreviousOPR[i] = hex.EncodeToString(w.EntryHash[:8])
}
} else {
// If there are no previous winners, this is a bootstrap record
min := 0
switch common.OPRVersion(network, int64(dbht)) {
case 1:
min = 10
case 2:
min = 25
}
opr.WinPreviousOPR = make([]string, min, min)
}
if len(winners.AllOPRs) > 0 {
cutoff, _ := c.Int(common.ConfigSubmissionCutOff)
if cutoff > 0 { // <= 0 disables it
// This will calculate a minimum difficulty floor for our target cutoff.
opr.MinimumDifficulty = CalculateMinimumDifficultyFromOPRs(winners.AllOPRs, cutoff)
}
}
err = opr.GetOPRecord(c)
if err != nil {
return nil, err
}
if !opr.Validate(c, int64(dbht)) {
// TODO: Remove this custom error handle once the network is live.
// This is just to give a better error when are waiting for activation.
if !common.NetworkActive(opr.Network, int64(dbht)) {
return nil, fmt.Errorf("Waiting for activation height")
}
return nil, fmt.Errorf("opr invalid")
}
return opr, nil
}
// GetOPRecord initializes the OPR with polling data and factom entry
func (opr *OraclePriceRecord) GetOPRecord(c *config.Config) error {
InitDataSource(c) // Kinda odd to have this here.
//get asset values
Peg, err := PollingDataSource.PullAllPEGAssets(opr.Version)
if err != nil {
return err
}
opr.SetPegValues(Peg)
data, err := opr.SafeMarshal()
if err != nil {
panic(err)
}
sha := sha256.Sum256(data)
opr.OPRHash = sha[:]
return nil
}
// CreateOPREntry will create the entry from the EXISTING data.
// It will not set any fields like in `GetOPRecord`
func (opr *OraclePriceRecord) CreateOPREntry(nonce []byte, difficulty uint64) (*factom.Entry, error) {
var err error
e := new(factom.Entry)
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, difficulty)
e.ChainID = hex.EncodeToString(base58.Decode(opr.OPRChainID))
e.ExtIDs = [][]byte{nonce, buf, {opr.Version}}
e.Content, err = opr.SafeMarshal()
if err != nil {
return nil, err
}
return e, nil
}
// SafeMarshal will marshal the json depending on the opr version
func (opr *OraclePriceRecord) SafeMarshal() ([]byte, error) {
// our opr version must be set before entering this
if opr.Version == 0 {
return nil, fmt.Errorf("opr version is 0")
}
// This function relies on the assets, so check up front
if opr.Assets == nil {
return nil, fmt.Errorf("assets is nil, cannot marshal")
}
// When we marshal a version 1 opr, we need to change PEG -> PNT
// No opr in the code should ever have 'PNT'. We only use PNT in the marshal
// function, nowhere else.
if _, ok := opr.Assets["PNT"]; ok {
return nil, fmt.Errorf("this opr has asset 'PNT', it should have 'PEG'")
}
// Version 1 we json marshal and
// do the swap of PEG -> PNT
if opr.Version == 1 {
opr.Assets["PNT"] = opr.Assets["PEG"]
delete(opr.Assets, "PEG")
// This is a known key that will be removed by the marshal json function. It indicates
// to the marshaler that it was called from a safe path. This is not the cleanest method,
// but overriding the json function while still using the default would require odd
// structure nesting and a lot of code changes.
opr.Assets["version"] = uint64(opr.Version)
data, err := json.Marshal(opr)
delete(opr.Assets, "version") // Should be deleted by the json.Marshal, but that can error out
// Revert the swap
opr.Assets["PEG"] = opr.Assets["PNT"]
delete(opr.Assets, "PNT")
return data, err
} else if opr.Version == 2 {
prices := make([]uint64, len(opr.Assets))
for i, asset := range common.AssetsV2 {
prices[i] = opr.Assets[asset]
}
// Decode winners into byte slice
var err error
winners := make([][]byte, len(opr.WinPreviousOPR))
for i, winner := range opr.WinPreviousOPR {
winners[i], err = hex.DecodeString(winner)
if err != nil {
return nil, err
}
}
// Version 2 uses Protobufs for encoding
pOpr := &oprencoding.ProtoOPR{
Address: opr.CoinbaseAddress,
ID: opr.FactomDigitalID,
Height: opr.Dbht,
Assets: prices,
Winners: winners,
}
return proto.Marshal(pOpr)
}
return nil, fmt.Errorf("opr version %d not supported", opr.Version)
}
// SafeUnmarshal will unmarshal the json depending on the opr version
func (opr *OraclePriceRecord) SafeUnmarshal(data []byte) error {
// our opr version must be set before entering this
if opr.Version == 0 {
return fmt.Errorf("opr version is 0")
}
// If version 1, we need to json unmarshal and swap PNT and PEG
if opr.Version == 1 {
err := json.Unmarshal(data, opr)
if err != nil {
return err
}
if v, ok := opr.Assets["PNT"]; ok {
opr.Assets["PEG"] = v
delete(opr.Assets, "PNT")
} else {
return fmt.Errorf("exp version 1 to have 'PNT', but it did not")
}
return nil
} else if opr.Version == 2 {
protoOPR := oprencoding.ProtoOPR{}
err := proto.Unmarshal(data, &protoOPR)
if err != nil {
return err
}
opr.Assets = make(OraclePriceRecordAssetList)
// Populate the original opr
opr.CoinbaseAddress = protoOPR.Address
opr.FactomDigitalID = protoOPR.ID
opr.Dbht = protoOPR.Height
if len(protoOPR.Assets) != len(common.AssetsV2) {
return fmt.Errorf("found %d assets, expected %d", len(protoOPR.Assets), len(common.AssetsV2))
}
// Hard coded list of assets
for i, asset := range common.AssetsV2 {
opr.Assets[asset] = protoOPR.Assets[i]
}
// Decode winners
opr.WinPreviousOPR = make([]string, len(protoOPR.Winners), len(protoOPR.Winners))
for i, winner := range protoOPR.Winners {
opr.WinPreviousOPR[i] = hex.EncodeToString(winner)
}
return nil
}
return fmt.Errorf("opr version %d not supported", opr.Version)
}
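// Illustrative sketch (not part of the original source): a round trip through
// SafeMarshal and SafeUnmarshal for a version 2 record. The height, miner ID,
// address and prices below are made-up placeholders, not real network data.
func exampleSafeMarshalRoundTrip() error {
	src := &OraclePriceRecord{
		Version:         2,
		Dbht:            249000,
		FactomDigitalID: "example-miner",
		CoinbaseAddress: "FA2exampleAddressOnlyForIllustration",
		Assets:          make(OraclePriceRecordAssetList),
	}
	for _, asset := range common.AssetsV2 {
		src.Assets[asset] = 1 // placeholder price
	}
	data, err := src.SafeMarshal()
	if err != nil {
		return err
	}
	dst := &OraclePriceRecord{Version: 2}
	return dst.SafeUnmarshal(data)
}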
|
[
"\"LXRBITSIZE\""
] |
[] |
[
"LXRBITSIZE"
] |
[]
|
["LXRBITSIZE"]
|
go
| 1 | 0 | |
src/edeposit/amqp/aleph_link_export/settings.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
Module containing all necessary global variables for the package.
Module also has the ability to read user-defined data from two paths:
- ``$HOME/_SETTINGS_PATH``
- ``/etc/_SETTINGS_PATH``
See :attr:`_SETTINGS_PATH` for details.
Note:
If the first path is found, the other is ignored.
Example of the configuration file (``$HOME/edeposit/aleph_export.json``)::
{
"REQUEST_FN": "/home/whatever/req.xml"
}
Attributes
----------
"""
# Imports =====================================================================
import os
import json
import os.path
# Module configuration ========================================================
#: Path to the dir with zeo.conf and zeo_client.conf.
_BASE_PATH = "/home/aleph_export"
#: Path to the XML file, where the requests will be stored.
REQUEST_FN = os.path.join(_BASE_PATH, "edep2aleph/requests.xml")
#: Path to the file, where the Aleph will put the XML responses.
RESPONSE_FN = os.path.join(_BASE_PATH, "aleph2edep/responses.xml")
#: Path to the internal database file, which is used to store records before
#: they are serialized to XML.
DATABASE_FN = os.path.join(_BASE_PATH, "request_datase.shelve") #:
#: Path to the file, where the logs will be stored.
LOG_FN = os.path.join(_BASE_PATH, "link_export.log")
#: Don't change this! Key for the database.
DATABASE_KEY = "request_database"
#: Link to the export XSD
EXPORT_XSD_LINK = "http://edeposit-aplikace.nkp.cz/link_export_notification.xsd"
#: Logging enabled or not?
LOGGING_ENABLED = True
# User configuration reader (don't edit this) =================================
_ALLOWED = [str, unicode, int, float, long, bool] #: Allowed types.
_SETTINGS_PATH = "edeposit/aleph_export.json" #: Path to the file.
def _get_all_constants():
"""
Get list of all uppercase, non-private globals (doesn't start with ``_``).
Returns:
list: Uppercase names defined in `globals()` (variables from this \
module).
"""
return [
key for key in globals().keys()
if all([
not key.startswith("_"), # publicly accessible
key.upper() == key, # uppercase
type(globals()[key]) in _ALLOWED # and with type from _ALLOWED
])
]
def _substitute_globals(config_dict):
"""
Set global variables to values defined in `config_dict`.
Args:
config_dict (dict): dict with data, which are used to set `globals`.
Note:
`config_dict` has to be a dictionary, or it is ignored. Also, all
variables that are not already in globals, are not of a type defined in
:attr:`_ALLOWED` (str, int, ..), or start with ``_`` are silently
ignored.
"""
constants = _get_all_constants()
if type(config_dict) != dict:
return
for key, val in config_dict.iteritems():
if key in constants and type(val) in _ALLOWED:
globals()[key] = val
def _read_from_paths():
"""
Try to read data from configuration paths ($HOME/_SETTINGS_PATH,
/etc/_SETTINGS_PATH).
"""
home = os.environ.get("HOME", "/")
home_path = os.path.join(home, _SETTINGS_PATH)
etc_path = os.path.join("/etc", _SETTINGS_PATH)
read_path = None
if home and os.path.exists(home_path):
read_path = home_path
elif os.path.exists(etc_path):
read_path = etc_path
if read_path:
with open(read_path) as f:
_substitute_globals(
json.loads(f.read())
)
_read_from_paths()
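# Illustrative sketch (the paths and values below are examples, not project
# defaults): a user config at ``$HOME/edeposit/aleph_export.json`` containing
#
#     {"LOGGING_ENABLED": false, "LOG_FN": "/tmp/link_export.log"}
#
# is picked up by the ``_read_from_paths()`` call above and overrides the
# module-level constants of the same name. Keys that are unknown, private
# (``_`` prefixed) or of a type outside ``_ALLOWED`` are silently ignored by
# ``_substitute_globals()``.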
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
tests/conftest.py
|
"""Fixtures for tests."""
import os
import shutil
from typing import Generator
from unittest.mock import patch
import pytest
from _pytest.config import Config
from adaptavist import Adaptavist
from . import AdaptavistMock, read_global_config, system_test_preconditions
pytest_plugins = ("pytester", )
def pytest_configure(config: Config):
"""Configure pytest."""
config.addinivalue_line("markers", "system: mark test as system tests. Select system tests with '-m system'")
@pytest.fixture(scope="session", autouse=True)
def create_test_plan(request):
"""Creates a test plan. All system test will link the test cycle with this test plan."""
if system_test_preconditions() and request.config.option.markexpr != "not system": # This should only be used if test is a system test
config = read_global_config()
atm = Adaptavist(config["jira_server"], config["jira_username"], config["jira_password"])
test_plan = atm.create_test_plan(config["project_key"], "pytest_adaptavist_system_test")
os.environ["TEST_PLAN_KEY"] = test_plan
@pytest.fixture
def adaptavist(pytester: pytest.Pytester) -> Generator[Adaptavist, None, None]:
"""Establish connection to Adaptavist."""
pytester.copy_example("config/global_config.json")
pytester.mkdir("config")
shutil.move("global_config.json", "config/global_config.json")
config = read_global_config()
atm = Adaptavist(config["jira_server"], config["jira_username"], config["jira_password"])
yield atm
@pytest.fixture(name="test_run")
def create_test_run(adaptavist: Adaptavist) -> Generator[str, None, None]:
"""Create a new test run."""
config = read_global_config()
test_run = adaptavist.create_test_run(config["project_key"], "pytest_system_tests")
if test_run:
os.environ["TEST_RUN_KEY"] = test_run
yield test_run
del os.environ["TEST_RUN_KEY"]
@pytest.fixture
def configure(pytester: pytest.Pytester):
"""Configure environment for unittests."""
pytester.mkdir("config")
with open("config/global_config.json", "w", encoding="utf8") as file:
file.write('{"jira_server": "https://jira.test", "project_key": "TEST", "test_run_key":"TEST-C1"}')
@pytest.fixture
def valid_user() -> Generator[None, None, None]:
"""Mark user as always valid."""
with patch("pytest_adaptavist.atm_user_is_valid", return_value=True):
yield
@pytest.fixture
def adaptavist_mock(valid_user: None) -> Generator[AdaptavistMock, None, None]:
"""Patch adaptavist to prevent real I/O."""
with patch("adaptavist.Adaptavist.get_test_result", return_value={"scriptResults": [{"status": "Pass", "index": "0"}], "status": "Pass"}), \
patch("adaptavist.Adaptavist.get_test_run", return_value={"items": [{"testCaseKey": "TEST-T121"},
{"testCaseKey": "TEST-T123"},
{"testCaseKey": "TEST-T124"}
]}), \
patch("adaptavist.Adaptavist.get_test_cases", return_value=[{"key": "TEST-T123"}]), \
patch("adaptavist.Adaptavist.get_test_run_by_name", return_value={"key": "TEST_RUN_TEST"}), \
patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "TEST-T123", "priority": "Normal"}), \
patch("adaptavist.Adaptavist.edit_test_case", return_value=True), \
patch("adaptavist.Adaptavist._delete"), \
patch("adaptavist.Adaptavist._get"), \
patch("adaptavist.Adaptavist._post"), \
patch("adaptavist.Adaptavist._put"), \
patch("adaptavist.Adaptavist.create_test_result") as ctr, \
patch("adaptavist.Adaptavist.edit_test_result_status") as etrs, \
patch("adaptavist.Adaptavist.edit_test_script_status") as etss:
yield ctr, etrs, etss
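# Illustrative usage sketch (not part of the original test suite): a unit test
# consuming the ``adaptavist_mock`` fixture above. The unpacked names mirror
# the mocks yielded by the fixture; the assertion is a made-up example.
def _example_adaptavist_mock_usage(adaptavist_mock: AdaptavistMock) -> None:
    create_test_result, edit_test_result_status, edit_test_script_status = adaptavist_mock
    # With the Adaptavist client patched, code under test records its calls on
    # these MagicMock objects instead of talking to a real Jira/Adaptavist.
    assert not create_test_result.called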
|
[] |
[] |
[
"TEST_RUN_KEY",
"TEST_PLAN_KEY"
] |
[]
|
["TEST_RUN_KEY", "TEST_PLAN_KEY"]
|
python
| 2 | 0 | |
vendor/github.com/gogo/protobuf/test/casttype/combos/marshaler/casttypepb_test.go
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: combos/marshaler/casttype.proto
/*
Package casttype is a generated protocol buffer package.
It is generated from these files:
combos/marshaler/casttype.proto
It has these top-level messages:
Castaway
Wilson
*/
package casttype
import testing "testing"
import math_rand "math/rand"
import time "time"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb"
import fmt "fmt"
import go_parser "go/parser"
import proto "github.com/gogo/protobuf/proto"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
func TestCastawayProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCastaway(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Castaway{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestCastawayMarshalTo(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCastaway(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Castaway{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func BenchmarkCastawayProtoMarshal(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
pops := make([]*Castaway, 10000)
for i := 0; i < 10000; i++ {
pops[i] = NewPopulatedCastaway(popr, false)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
dAtA, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000])
if err != nil {
panic(err)
}
total += len(dAtA)
}
b.SetBytes(int64(total / b.N))
}
func BenchmarkCastawayProtoUnmarshal(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
datas := make([][]byte, 10000)
for i := 0; i < 10000; i++ {
dAtA, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedCastaway(popr, false))
if err != nil {
panic(err)
}
datas[i] = dAtA
}
msg := &Castaway{}
b.ResetTimer()
for i := 0; i < b.N; i++ {
total += len(datas[i%10000])
if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil {
panic(err)
}
}
b.SetBytes(int64(total / b.N))
}
func TestWilsonProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedWilson(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Wilson{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestWilsonMarshalTo(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedWilson(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Wilson{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func BenchmarkWilsonProtoMarshal(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
pops := make([]*Wilson, 10000)
for i := 0; i < 10000; i++ {
pops[i] = NewPopulatedWilson(popr, false)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
dAtA, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000])
if err != nil {
panic(err)
}
total += len(dAtA)
}
b.SetBytes(int64(total / b.N))
}
func BenchmarkWilsonProtoUnmarshal(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
datas := make([][]byte, 10000)
for i := 0; i < 10000; i++ {
dAtA, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedWilson(popr, false))
if err != nil {
panic(err)
}
datas[i] = dAtA
}
msg := &Wilson{}
b.ResetTimer()
for i := 0; i < b.N; i++ {
total += len(datas[i%10000])
if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil {
panic(err)
}
}
b.SetBytes(int64(total / b.N))
}
func TestCastawayJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCastaway(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Castaway{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestWilsonJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedWilson(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Wilson{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestCastawayProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCastaway(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &Castaway{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestCastawayProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCastaway(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &Castaway{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestWilsonProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedWilson(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &Wilson{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestWilsonProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedWilson(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &Wilson{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestCasttypeDescription(t *testing.T) {
CasttypeDescription()
}
func TestCastawayVerboseEqual(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedCastaway(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
panic(err)
}
msg := &Castaway{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
panic(err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
}
}
func TestWilsonVerboseEqual(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedWilson(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
panic(err)
}
msg := &Wilson{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
panic(err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
}
}
func TestCastawayFace(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedCastaway(popr, true)
msg := p.TestProto()
if !p.Equal(msg) {
t.Fatalf("%#v !Face Equal %#v", msg, p)
}
}
func TestWilsonFace(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedWilson(popr, true)
msg := p.TestProto()
if !p.Equal(msg) {
t.Fatalf("%#v !Face Equal %#v", msg, p)
}
}
func TestCastawayGoString(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedCastaway(popr, false)
s1 := p.GoString()
s2 := fmt.Sprintf("%#v", p)
if s1 != s2 {
t.Fatalf("GoString want %v got %v", s1, s2)
}
_, err := go_parser.ParseExpr(s1)
if err != nil {
panic(err)
}
}
func TestWilsonGoString(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedWilson(popr, false)
s1 := p.GoString()
s2 := fmt.Sprintf("%#v", p)
if s1 != s2 {
t.Fatalf("GoString want %v got %v", s1, s2)
}
_, err := go_parser.ParseExpr(s1)
if err != nil {
panic(err)
}
}
func TestCastawaySize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCastaway(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}
func BenchmarkCastawaySize(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
pops := make([]*Castaway, 1000)
for i := 0; i < 1000; i++ {
pops[i] = NewPopulatedCastaway(popr, false)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
total += pops[i%1000].Size()
}
b.SetBytes(int64(total / b.N))
}
func TestWilsonSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedWilson(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}
func BenchmarkWilsonSize(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
pops := make([]*Wilson, 1000)
for i := 0; i < 1000; i++ {
pops[i] = NewPopulatedWilson(popr, false)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
total += pops[i%1000].Size()
}
b.SetBytes(int64(total / b.N))
}
func TestCastawayStringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedCastaway(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
func TestWilsonStringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedWilson(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
//These tests are generated by github.com/gogo/protobuf/plugin/testgen
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
main.go
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"flag"
"net/http"
_ "net/http/pprof"
"os"
"strings"
"time"
landscaper "github.com/gardener/landscaper/apis/core/v1alpha1"
appRepov1 "github.com/gardener/potter-controller/api/external/apprepository/v1alpha1"
"github.com/gardener/potter-controller/pkg/synchronize"
"github.com/go-logr/logr"
kappcrtl "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1"
uberzap "go.uber.org/zap"
"go.uber.org/zap/zapcore"
apicorev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
// +kubebuilder:scaffold:imports
hubv1 "github.com/gardener/potter-controller/api/v1"
"github.com/gardener/potter-controller/pkg/admission"
"github.com/gardener/potter-controller/pkg/avcheck"
"github.com/gardener/potter-controller/pkg/controllersdi"
"github.com/gardener/potter-controller/pkg/util"
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
const (
pprofListenAddr = "0.0.0.0:6060"
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = appRepov1.AddToScheme(scheme)
_ = kappcrtl.AddToScheme(scheme)
_ = hubv1.AddToScheme(scheme)
_ = landscaper.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
func main() {
setupLog.V(util.LogLevelWarning).Info("Setup step: start main")
var metricsAddr string
var enableLeaderElection bool
var runsLocally bool
var skipAdmissionHook bool
var skipReconcile bool
var appRepoKubeconfig string
var hubControllerKubeconfig string
var landscaperEnabled bool
var extendedLogEnabled bool
var tokenReviewEnabled bool
var tokenIssuer string
var reconcileIntervalMinutes int64
var restartKappIntervalMinutes int64
var auditLog bool
var logLevel string
var configTypesStringList string
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&appRepoKubeconfig, "apprepo-kubeconfig", "", "Kubeconfig of the cluster with the appRepo resource")
flag.StringVar(&hubControllerKubeconfig, "hubcontroller-kubeconfig", "", "Kubeconfig of the local hub controller cluster")
flag.BoolVar(&extendedLogEnabled, "extended-log-enabled", false, "Flag to enable additional logs")
flag.BoolVar(&landscaperEnabled, "landscaper-enabled", false, "Flag to enable clusterbom handling via landscaper")
flag.BoolVar(&tokenReviewEnabled, "tokenreview-enabled", false, "Flag to enable token reviewing for the admission webhook")
flag.StringVar(&tokenIssuer, "token-issuer", "", "Issuer for validation of webhook jwt tokens")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.BoolVar(&runsLocally, "runs-locally", false, "Flag to distinguish between local and productive run. Default value is false (productive).")
flag.BoolVar(&skipAdmissionHook, "skip-admission-hook", false, "Flag to run without the admission hook. Defaults to false.")
flag.BoolVar(&skipReconcile, "skip-reconcile", false, "Flag to run without the reconcile loop")
flag.Int64Var(&reconcileIntervalMinutes, "reconcile-interval-minutes", 60, "Reconcile interval in minutes")
flag.Int64Var(&restartKappIntervalMinutes, "restart-kapp-interval-minutes", 0, "Restart kapp-controller interval in minutes")
flag.StringVar(&logLevel, "loglevel", util.LogLevelStringInfo, "log level debug/info/warning/error")
flag.StringVar(&configTypesStringList, "configtypes", util.ConfigTypeHelm, "supported config types")
flag.BoolVar(&auditLog, "audit-log", false, "Flag to enable audit logging (requires additional container). Default false")
flag.Parse()
zapcoreLogLevel := zapcore.InfoLevel
if logLevel == util.LogLevelStringDebug {
zapcoreLogLevel = zapcore.DebugLevel
} else if logLevel == util.LogLevelStringWarning {
zapcoreLogLevel = zapcore.WarnLevel
} else if logLevel == util.LogLevelStringError {
zapcoreLogLevel = zapcore.ErrorLevel
}
ctrl.SetLogger(zap.New(func(o *zap.Options) {
o.Development = false
o.Encoder = createLogEncoder()
o.Level = zapcoreLogLevel
}))
setupLog.V(util.LogLevelWarning).Info("Starting hub controller")
config := ctrl.GetConfigOrDie()
appRepoClient := getAppRepoClient(appRepoKubeconfig)
uncachedClient := getUncachedClient(config)
hubControllerClient := getHubControllerClient(hubControllerKubeconfig, runsLocally)
mgr := createManager(config, metricsAddr, enableLeaderElection)
eventBroadcaster, eventRecorder := setupEventRecording(config)
defer eventBroadcaster.Shutdown()
avCheckConfig := parseAVCheckConfig()
blockObject := setupBlockObject(avCheckConfig, runsLocally)
cbReconciler := setupClusterBomReconciler(mgr, uncachedClient, hubControllerClient, blockObject, auditLog, avCheckConfig)
defer cbReconciler.Close()
cbStateReconciler := setupClusterBomStateReconciler(mgr, uncachedClient, blockObject, avCheckConfig)
setupInstallationStateReconciler(mgr, uncachedClient, blockObject)
deploymentReconciler := setupDeploymentReconciler(mgr, appRepoClient, uncachedClient, blockObject, eventRecorder,
reconcileIntervalMinutes)
configTypes := strings.Split(configTypesStringList, ",")
admissionHookConfig := admission.AdmissionHookConfig{
UncachedClient: uncachedClient,
HubControllerClient: hubControllerClient,
ConfigTypes: configTypes,
ExtendedLogEnabled: extendedLogEnabled,
LandscaperEnabled: landscaperEnabled,
RunsLocally: runsLocally,
TokenIssuer: tokenIssuer,
TokenReviewEnabled: tokenReviewEnabled,
}
startAdmissionHook(&admissionHookConfig, skipAdmissionHook)
startReconciler(mgr, uncachedClient, hubControllerClient, reconcileIntervalMinutes, restartKappIntervalMinutes, skipReconcile, runsLocally)
startAvailabilityCheck(avCheckConfig, mgr, []avcheck.Controller{
cbReconciler,
cbStateReconciler,
deploymentReconciler,
})
go func() {
err := http.ListenAndServe(pprofListenAddr, nil)
if err != nil {
setupLog.Error(err, "failed starting serving pprof")
}
}()
// +kubebuilder:scaffold:builder
setupLog.V(util.LogLevelWarning).Info("Starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "Problem running manager")
shutDown(setupLog)
os.Exit(1)
}
shutDown(setupLog)
}
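// Illustrative invocation sketch (the kubeconfig paths and flag values are
// assumptions for a local development run; the flag names are the ones
// registered in main above):
//
//	go run main.go --runs-locally --skip-admission-hook \
//	  --apprepo-kubeconfig=$HOME/.kube/apprepo.yaml \
//	  --hubcontroller-kubeconfig=$HOME/.kube/hub.yaml \
//	  --loglevel=debug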
func createManager(config *rest.Config, metricsAddr string, enableLeaderElection bool) manager.Manager {
setupLog.V(util.LogLevelDebug).Info("Creating manager")
mgr, err := ctrl.NewManager(config, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
LeaderElection: enableLeaderElection,
Port: 9443,
})
if err != nil {
setupLog.Error(err, "Unable to create manager")
os.Exit(1)
}
return mgr
}
func setupEventRecording(config *rest.Config) (record.EventBroadcaster, record.EventRecorder) {
setupLog.V(util.LogLevelDebug).Info("Setup event recording")
var coreClient = typedcorev1.NewForConfigOrDie(config)
var eventSink record.EventSink = &typedcorev1.EventSinkImpl{
Interface: coreClient.Events(""),
}
var eventSource = apicorev1.EventSource{
Component: "DeploymentController",
}
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(eventSink)
eventRecorder := eventBroadcaster.NewRecorder(scheme, eventSource)
return eventBroadcaster, eventRecorder
}
func setupBlockObject(avCheckConfig *avcheck.Configuration, syncDisabled bool) *synchronize.BlockObject {
setupLog.V(util.LogLevelDebug).Info("Setup block object")
var excludedBoms []types.NamespacedName
if avCheckConfig != nil {
excludedBoms = append(excludedBoms, types.NamespacedName{
Namespace: avCheckConfig.Namespace,
Name: avCheckConfig.BomName,
})
}
return synchronize.NewBlockObject(excludedBoms, syncDisabled)
}
func setupClusterBomReconciler(mgr manager.Manager, uncachedClient, hubControllerClient synchronize.UncachedClient,
blockObject *synchronize.BlockObject, auditLog bool, avCheckConfig *avcheck.Configuration) *controllersdi.ClusterBomReconciler {
setupLog.V(util.LogLevelDebug).Info("Setup clusterbom reconciler")
cbReconciler, err := controllersdi.NewClusterBomReconciler(
mgr.GetClient(),
ctrl.Log.WithName("controllers").WithName("ClusterBomReconciler"),
mgr.GetScheme(),
auditLog,
blockObject,
avcheck.NewAVCheck(),
uncachedClient,
hubControllerClient,
avCheckConfig)
if err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ClusterBomReconciler")
os.Exit(1)
}
if err = cbReconciler.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ClusterBomReconciler")
os.Exit(1)
}
return cbReconciler
}
func setupClusterBomStateReconciler(mgr manager.Manager, uncachedClient synchronize.UncachedClient,
blockObject *synchronize.BlockObject, avCheckConfig *avcheck.Configuration) avcheck.Controller {
setupLog.V(util.LogLevelDebug).Info("Setup clusterbom state reconciler")
cbStateReconciler := &controllersdi.ClusterBomStateReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("ClusterBomStateReconciler"),
Scheme: mgr.GetScheme(),
Cleaner: controllersdi.ClusterBomCleaner{
LastCheck: 0,
Succeeded: false,
},
BlockObject: blockObject,
AVCheck: avcheck.NewAVCheck(),
AvCheckConfig: avCheckConfig,
UncachedClient: uncachedClient,
}
if err := cbStateReconciler.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ClusterBomStateReconciler")
os.Exit(1)
}
return cbStateReconciler
}
func setupInstallationStateReconciler(mgr manager.Manager, uncachedClient synchronize.UncachedClient,
blockObject *synchronize.BlockObject) {
setupLog.V(util.LogLevelDebug).Info("Setup installation state reconciler")
installationStateReconciler := &controllersdi.InstallationStateReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("InstallationStateReconciler"),
Scheme: mgr.GetScheme(),
BlockObject: blockObject,
UncachedClient: uncachedClient,
}
if err := installationStateReconciler.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "InstallationStateReconciler")
os.Exit(1)
}
}
func setupDeploymentReconciler(mgr manager.Manager, appRepoClient client.Client, uncachedClient synchronize.UncachedClient,
blockObject *synchronize.BlockObject, eventRecorder record.EventRecorder, reconcileIntervalMinutes int64) avcheck.Controller {
setupLog.V(util.LogLevelDebug).Info("Setup deployment controller")
logger := ctrl.Log.WithName("controllers").WithName("DeploymentReconciler")
crAndSecretClient := mgr.GetClient()
deployerFactory := controllersdi.NewDeploymentFactory(crAndSecretClient, uncachedClient, appRepoClient, blockObject, reconcileIntervalMinutes)
deploymentReconciler := controllersdi.NewDeploymentReconciler(deployerFactory, crAndSecretClient, logger, mgr.GetScheme(),
util.NewThreadCounterMap(logger), blockObject, avcheck.NewAVCheck(), uncachedClient, eventRecorder)
if err := deploymentReconciler.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "DeploymentReconciler")
os.Exit(1)
}
return deploymentReconciler
}
func startAdmissionHook(admissionHookConfig *admission.AdmissionHookConfig, skipAdmissionHook bool) {
if !skipAdmissionHook {
setupLog.V(util.LogLevelWarning).Info("Starting admission hook")
go admission.StartAdmissionServer(admissionHookConfig)
setupLog.V(util.LogLevelWarning).Info("Admission hook started")
}
}
func startReconciler(mgr manager.Manager, uncachedClient, hubControllerClient synchronize.UncachedClient,
reconcileIntervalMinutes, restartKappIntervalMinutes int64, skipReconcile, runsLocally bool) {
if !skipReconcile {
setupLog.V(util.LogLevelDebug).Info("Starting reconciler")
reconcileController := controllersdi.ReconcileController{
Client: mgr.GetClient(),
UncachedClient: uncachedClient,
HubControllerClient: hubControllerClient,
Log: ctrl.Log.WithName("controllers").WithName("ReconcileController"),
Scheme: mgr.GetScheme(),
Clock: &controllersdi.RealReconcileClock{},
UniqueID: string(uuid.NewUUID()),
ConfigMapKey: types.NamespacedName{Name: "reconcilemap", Namespace: util.GetPodNamespace()},
SyncDisabled: runsLocally,
}
reconcileInterval := time.Duration(reconcileIntervalMinutes) * time.Minute
restartKappInterval := time.Duration(restartKappIntervalMinutes) * time.Minute
go reconcileController.Reconcile(reconcileInterval, restartKappInterval)
}
}
func startAvailabilityCheck(avCheckConfig *avcheck.Configuration, mgr manager.Manager, monitoredControllers []avcheck.Controller) {
if avCheckConfig != nil {
setupLog.V(util.LogLevelWarning).Info("Starting availability check")
actor := avcheck.Actor{
K8sClient: mgr.GetClient(),
Log: ctrl.Log.WithName("Availability Check Actor"),
InitialBom: buildAVCheckBom(avCheckConfig),
ChangeInterval: avCheckConfig.ChangeInterval,
}
go actor.Start()
go avcheck.StartServer(avCheckConfig, monitoredControllers)
} else {
go avcheck.StartServerForBasicCheck()
}
}
// shutDown waits a moment to allow the controllers and the admission hook to finish their current work.
func shutDown(log logr.Logger) {
log.V(util.LogLevelWarning).Info("Start of shutdown interval")
time.Sleep(25 * time.Second)
log.V(util.LogLevelWarning).Info("End of shutdown interval")
}
func getAppRepoClient(appRepoKubeconfig string) client.Client {
setupLog.V(util.LogLevelDebug).Info("Creating AppRepository client")
apprepoConfig, err := clientcmd.BuildConfigFromFlags("", appRepoKubeconfig)
if err != nil {
setupLog.Error(err, "Unable to read AppRepository kubeconfig. Is the CLI flag set?")
os.Exit(1)
}
appRepoClient, err := client.New(apprepoConfig, client.Options{
Scheme: scheme,
})
if err != nil {
setupLog.Error(err, "Unable to create AppRepository client. Is the CLI flag set?")
os.Exit(1)
}
return appRepoClient
}
func getUncachedClient(config *rest.Config) synchronize.UncachedClient {
setupLog.V(util.LogLevelDebug).Info("Creating uncached client")
uncachedClient, err := synchronize.NewUncachedClient(config, client.Options{Scheme: scheme})
if err != nil {
setupLog.Error(err, "Unable to create uncached client")
os.Exit(1)
}
return uncachedClient
}
func getHubControllerClient(hubControllerKubeconfig string, runsLocally bool) synchronize.UncachedClient {
var hubControllerConfig *rest.Config
var err error
setupLog.V(util.LogLevelDebug).Info("Creating hub controller client")
if runsLocally {
hubControllerConfig, err = clientcmd.BuildConfigFromFlags("", hubControllerKubeconfig)
} else {
hubControllerConfig, err = rest.InClusterConfig()
}
if err != nil {
setupLog.Error(err, "Unable to get config for hub controller client")
os.Exit(1)
}
hubControllerClient, err := synchronize.NewUncachedClient(hubControllerConfig, client.Options{
Scheme: scheme,
})
if err != nil {
setupLog.Error(err, "Unable to create hub controller client")
os.Exit(1)
}
return hubControllerClient
}
func createLogEncoder() zapcore.Encoder {
encodeTime := func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
enc.AppendString(t.Format(time.RFC3339))
}
encoderCfg := uberzap.NewProductionEncoderConfig()
encoderCfg.TimeKey = "time"
encoderCfg.EncodeTime = encodeTime
return zapcore.NewJSONEncoder(encoderCfg)
}
func parseAVCheckConfig() *avcheck.Configuration {
setupLog.V(util.LogLevelDebug).Info("Reading config for availability check")
avCheckConfigJSON := os.Getenv("AVAILABILITY_CHECK")
if avCheckConfigJSON == "" {
return nil
}
var config avcheck.Configuration
err := json.Unmarshal([]byte(avCheckConfigJSON), &config)
if err != nil {
setupLog.Error(err, "cannot unmarshal availability check configJSON")
os.Exit(1)
}
err = config.Validate()
if err != nil {
setupLog.Error(err, "invalid availability check configJSON")
os.Exit(1)
}
return &config
}
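// Illustrative sketch of the expected environment variable (the JSON key
// names are assumptions; the authoritative set is defined by the
// avcheck.Configuration struct and its JSON tags):
//
//	AVAILABILITY_CHECK='{"namespace":"av-check","bomName":"av-check-bom","changeInterval":"5m"}'
//
// Leaving the variable unset or empty disables the full availability check;
// only the basic check server is started in that case.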
func buildAVCheckBom(config *avcheck.Configuration) *hubv1.ClusterBom {
bom, err := avcheck.BuildBom(config.Namespace, config.BomName, config.SecretRef, config.InstallNamespace,
config.TarballURL, config.CatalogDefinition)
if err != nil {
setupLog.Error(err, "cannot build availability check bom")
os.Exit(1)
}
return bom
}
func getAllResourceNames(bom *hubv1.ClusterBom) []*types.NamespacedName {
resourceNames := []*types.NamespacedName{
{
Namespace: bom.GetNamespace(),
Name: bom.GetName(),
},
}
for _, applicationConfig := range bom.Spec.ApplicationConfigs {
resourceName := types.NamespacedName{
Namespace: bom.GetNamespace(),
Name: bom.GetName() + "-" + applicationConfig.ID,
}
resourceNames = append(resourceNames, &resourceName)
}
return resourceNames
}
|
[
"\"AVAILABILITY_CHECK\""
] |
[] |
[
"AVAILABILITY_CHECK"
] |
[]
|
["AVAILABILITY_CHECK"]
|
go
| 1 | 0 | |
vendor/github.com/Azure/azure-sdk-for-go/tools/profileBuilder/program.go
|
// +build go1.9
// Copyright 2017 Microsoft Corporation and contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// profileBuilder creates a series of packages filled entirely with alias types
// and functions supporting those alias types by directing traffic to the
// functions supporting the original types. This is useful for associating a series
// of packages in separate API Versions for easier/safer use.
//
// The Azure-SDK-for-Go team intends to use this tool to generate profiles
// that we will publish in this repository for general use. However, if you have
// your own list of Services at given API Versions, this tool may prove
// useful to you as well.
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/marstr/collection"
goalias "github.com/marstr/goalias/model"
"github.com/marstr/randname"
)
var (
profileName string
outputLocation string
inputRoot string
inputList io.Reader
packageStrategy collection.Enumerable
outputLog *log.Logger
errLog *log.Logger
)
// WellKnownStrategy is an Enumerable which lists all known strategies for choosing packages for a profile.
type WellKnownStrategy string
// This block declares the definitive list of WellKnownStrategies
const (
WellKnownStrategyList WellKnownStrategy = "list"
WellKnownStrategyLatest WellKnownStrategy = "latest"
WellKnownStrategyPreview WellKnownStrategy = "preview"
)
const armPathModifier = "mgmt"
// If not the empty string, this string should be stamped into files generated by the profileBuilder.
// Note: This variable should be set by passing the argument "-X main.version=`{your value}`" to the Go linker. example: `go build -ldflags "-X main.version=f43d726b6e3f1e3eb7cbdba3982f0253000d5dc5"`
var version string
func main() {
var packages collection.Enumerator
type alias struct {
*goalias.AliasPackage
TargetPath string
}
// Find the names of all of the packages for inclusion in this profile.
packages = packageStrategy.Enumerate(nil).Select(func(x interface{}) interface{} {
if cast, ok := x.(string); ok {
return cast
}
return nil
})
// Parse the packages that were selected for inclusion in this profile.
packages = packages.SelectMany(func(x interface{}) collection.Enumerator {
results := make(chan interface{})
go func() {
defer close(results)
cast, ok := x.(string)
if !ok {
return
}
files := token.NewFileSet()
parsed, err := parser.ParseDir(files, cast, nil, 0)
if err != nil {
errLog.Printf("Couldn't open %q because: %v", cast, err)
return
}
for _, entry := range parsed {
results <- entry
}
}()
return results
})
// Generate the alias package from the originally parsed one.
packages = packages.ParallelSelect(func(x interface{}) interface{} {
var err error
var subject *goalias.AliasPackage
cast, ok := x.(*ast.Package)
if !ok {
return nil
}
var bundle alias
for filename := range cast.Files {
bundle.TargetPath = filepath.Dir(filename)
bundle.TargetPath = trimGoPath(bundle.TargetPath)
subject, err = goalias.NewAliasPackage(cast, bundle.TargetPath)
if err != nil {
errLog.Print(err)
return nil
}
bundle.TargetPath, err = getAliasPath(bundle.TargetPath, profileName)
if err != nil {
errLog.Print(err)
return nil
}
break
}
bundle.AliasPackage = subject
return &bundle
})
packages = packages.Where(func(x interface{}) bool {
return x != nil
})
// Update the "UserAgent" function in the generated profile, if it is present.
packages = packages.Select(func(x interface{}) interface{} {
cast := x.(*alias)
var userAgent *ast.FuncDecl
// Grab all functions in the alias package named "UserAgent"
userAgentCandidates := collection.Where(collection.AsEnumerable(cast.Files["models.go"].Decls), func(x interface{}) bool {
cast, ok := x.(*ast.FuncDecl)
return ok && cast.Name.Name == "UserAgent"
})
// There should really only be one of them, otherwise bailout because we don't understand the world anymore.
candidate, err := collection.Single(userAgentCandidates)
if err != nil {
return x
}
userAgent, ok := candidate.(*ast.FuncDecl)
if !ok {
return x
}
// Grab the expression being returned.
retResults := &userAgent.Body.List[0].(*ast.ReturnStmt).Results[0]
// Append a string literal to the result
updated := &ast.BinaryExpr{
Op: token.ADD,
X: *retResults,
Y: &ast.BasicLit{
Value: fmt.Sprintf("\" profiles/%s\"", profileName),
},
}
*retResults = updated
return x
})
// Add the MSFT Copyright Header, then write the alias package to disk.
products := packages.ParallelSelect(func(x interface{}) interface{} {
cast, ok := x.(*alias)
if !ok {
return false
}
files := token.NewFileSet()
outputPath := filepath.Join(outputLocation, cast.TargetPath, "models.go")
outputPath = strings.Replace(outputPath, `\`, `/`, -1)
err := os.MkdirAll(path.Dir(outputPath), os.ModePerm|os.ModeDir)
if err != nil {
errLog.Print("error creating directory:", err)
return false
}
outputFile, err := os.Create(outputPath)
if err != nil {
errLog.Print("error creating file: ", err)
return false
}
// TODO: This should really be added by the `goalias` package itself. Doing it here is a workaround
fmt.Fprintln(outputFile, "// +build go1.9")
fmt.Fprintln(outputFile)
generatorStampBuilder := new(bytes.Buffer)
fmt.Fprintf(generatorStampBuilder, "// Copyright %4d Microsoft Corporation\n", time.Now().Year())
fmt.Fprintln(generatorStampBuilder, `//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.`)
fmt.Fprintln(outputFile, generatorStampBuilder.String())
generatorStampBuilder.Reset()
fmt.Fprintln(generatorStampBuilder, "// This code was auto-generated by:")
fmt.Fprintln(generatorStampBuilder, "// github.com/Azure/azure-sdk-for-go/tools/profileBuilder")
if version != "" {
fmt.Fprintln(generatorStampBuilder, "// commit ID:", version)
}
fmt.Fprintln(generatorStampBuilder)
fmt.Fprint(outputFile, generatorStampBuilder.String())
outputLog.Printf("Writing File: %s", outputPath)
printer.Fprint(outputFile, files, cast.ModelFile())
return true
})
generated := 0
// Write each aliased package that was found
for entry := range products {
if entry.(bool) {
generated++
}
}
outputLog.Print(generated, " packages generated.")
}
func init() {
const defaultName = "{randomly generated}"
var selectedStrategy string
var inputListLocation string
var useVerbose bool
flag.StringVar(&profileName, "name", defaultName, "The name that should be given to the generated profile.")
flag.StringVar(&outputLocation, "o", defaultOutputLocation(), "The output location for the package generated as a profile.")
flag.StringVar(&inputRoot, "root", defaultInputRoot(), "The location of the Azure SDK for Go's service packages.")
flag.StringVar(&inputListLocation, "l", "", "If the `list` strategy is chosen, -l is the location of the file to read for said list. If not present, stdin is used.")
flag.StringVar(&selectedStrategy, "s", string(WellKnownStrategyLatest), "The strategy to employ for finding packages to put in a profile.")
flag.BoolVar(&useVerbose, "v", false, "Write status to stderr as the program progresses")
flag.Parse()
// Setup Verbose Status Log and Error Log
var logWriter io.Writer
if useVerbose {
logWriter = os.Stderr
} else {
logWriter = ioutil.Discard
}
outputLog = log.New(logWriter, "[STATUS] ", 0)
outputLog.Print("Status Logging Enabled")
errLog = log.New(logWriter, "[ERROR] ", 0)
if version != "" {
outputLog.Print("profileBuilder Version: ", version)
}
// Sort out the Profile Name to be used.
if profileName == defaultName {
profileName = randname.AdjNoun{}.Generate()
outputLog.Print("Profile Name Set to: ", profileName)
}
inputList = os.Stdin
if inputListLocation == "" {
outputLog.Print("Reading input from standard input")
} else {
var err error
outputLog.Print("Reading input from: ", inputListLocation)
inputList, err = os.Open(inputListLocation)
if err != nil {
errLog.Print(err)
os.Exit(1)
}
}
wellKnownStrategies := map[WellKnownStrategy]collection.Enumerable{
WellKnownStrategyList: ListStrategy{Reader: inputList},
WellKnownStrategyLatest: LatestStrategy{Root: inputRoot, Predicate: IgnorePreview, VerboseOutput: outputLog},
WellKnownStrategyPreview: LatestStrategy{Root: inputRoot, Predicate: AcceptAll},
}
if s, ok := wellKnownStrategies[WellKnownStrategy(selectedStrategy)]; ok {
packageStrategy = s
outputLog.Printf("Using Well Known Strategy: %s", selectedStrategy)
} else {
errLog.Printf("Unknown strategy for identifying packages: %s\n", selectedStrategy)
os.Exit(1)
}
}
// AzureSDKforGoLocation returns the default location for the Azure-SDK-for-Go to reside.
func AzureSDKforGoLocation() string {
return path.Join(
os.Getenv("GOPATH"),
"src",
"github.com",
"Azure",
"azure-sdk-for-go",
)
}
func defaultOutputLocation() string {
return path.Join(AzureSDKforGoLocation(), "profiles")
}
func defaultInputRoot() string {
return path.Join(AzureSDKforGoLocation(), "services")
}
// getAliasPath takes an existing API Version path and a package name, and converts the path
// to a path which uses the new profile layout.
func getAliasPath(subject, profile string) (transformed string, err error) {
subject = strings.TrimSuffix(subject, "/")
subject = trimGoPath(subject)
matches := packageName.FindAllStringSubmatch(subject, -1)
if matches == nil {
err = errors.New("path does not resemble a known package path")
return
}
output := []string{
profile,
matches[0][1],
}
if matches[0][2] == armPathModifier {
output = append(output, armPathModifier)
}
output = append(output, matches[0][4])
transformed = strings.Join(output, "/")
return
}
// trimGoPath removes the prefix defined in the environment variable GOPATH if it is present in the string provided.
var trimGoPath = func() func(string) string {
splitGo := strings.Split(os.Getenv("GOPATH"), string(os.PathSeparator))
splitGo = append(splitGo, "src")
return func(subject string) string {
splitPath := strings.Split(subject, string(os.PathSeparator))
for i, dir := range splitGo {
if splitPath[i] != dir {
return subject
}
}
packageIdentifier := splitPath[len(splitGo):]
return path.Join(packageIdentifier...)
}
}()
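// Illustrative example (the concrete paths and the profile name are
// assumptions, not output from a real run): for a package under
// $GOPATH/src/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-03-30/compute,
// trimGoPath strips the $GOPATH/src prefix, and getAliasPath(trimmed, "2017-09")
// would then produce a profile-relative path along the lines of
// "2017-09/compute/mgmt/compute", keeping the armPathModifier segment when present.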
|
[
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
vendor/github.com/gopherjs/gopherjs/tool.go
|
package main
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/scanner"
"go/token"
"go/types"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"text/template"
"time"
gbuild "github.com/gopherjs/gopherjs/build"
"github.com/gopherjs/gopherjs/compiler"
"github.com/neelance/sourcemap"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/crypto/ssh/terminal"
)
var currentDirectory string
func init() {
var err error
currentDirectory, err = os.Getwd()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
currentDirectory, err = filepath.EvalSymlinks(currentDirectory)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
gopaths := filepath.SplitList(build.Default.GOPATH)
if len(gopaths) == 0 {
fmt.Fprintf(os.Stderr, "$GOPATH not set. For more details see: go help gopath\n")
os.Exit(1)
}
}
func main() {
options := &gbuild.Options{CreateMapFile: true}
var pkgObj string
pflag.BoolVarP(&options.Verbose, "verbose", "v", false, "print the names of packages as they are compiled")
flagVerbose := pflag.Lookup("verbose")
pflag.BoolVarP(&options.Quiet, "quiet", "q", false, "suppress non-fatal warnings")
flagQuiet := pflag.Lookup("quiet")
pflag.BoolVarP(&options.Watch, "watch", "w", false, "watch for changes to the source files")
flagWatch := pflag.Lookup("watch")
pflag.BoolVarP(&options.Minify, "minify", "m", false, "minify generated code")
flagMinify := pflag.Lookup("minify")
pflag.BoolVar(&options.Color, "color", terminal.IsTerminal(int(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb", "colored output")
flagColor := pflag.Lookup("color")
tags := pflag.String("tags", "", "a list of build tags to consider satisfied during the build")
flagTags := pflag.Lookup("tags")
cmdBuild := &cobra.Command{
Use: "build [packages]",
Short: "compile packages and dependencies",
}
cmdBuild.Flags().StringVarP(&pkgObj, "output", "o", "", "output file")
cmdBuild.Flags().AddFlag(flagVerbose)
cmdBuild.Flags().AddFlag(flagQuiet)
cmdBuild.Flags().AddFlag(flagWatch)
cmdBuild.Flags().AddFlag(flagMinify)
cmdBuild.Flags().AddFlag(flagColor)
cmdBuild.Flags().AddFlag(flagTags)
cmdBuild.Run = func(cmd *cobra.Command, args []string) {
options.BuildTags = strings.Fields(*tags)
for {
s := gbuild.NewSession(options)
exitCode := handleError(func() error {
if len(args) == 0 {
return s.BuildDir(currentDirectory, currentDirectory, pkgObj)
}
if strings.HasSuffix(args[0], ".go") || strings.HasSuffix(args[0], ".inc.js") {
for _, arg := range args {
if !strings.HasSuffix(arg, ".go") && !strings.HasSuffix(arg, ".inc.js") {
return fmt.Errorf("named files must be .go or .inc.js files")
}
}
if pkgObj == "" {
basename := filepath.Base(args[0])
pkgObj = basename[:len(basename)-3] + ".js"
}
names := make([]string, len(args))
for i, name := range args {
name = filepath.ToSlash(name)
names[i] = name
if s.Watcher != nil {
s.Watcher.Add(name)
}
}
if err := s.BuildFiles(args, pkgObj, currentDirectory); err != nil {
return err
}
return nil
}
for _, pkgPath := range args {
pkgPath = filepath.ToSlash(pkgPath)
if s.Watcher != nil {
s.Watcher.Add(pkgPath)
}
pkg, err := gbuild.Import(pkgPath, 0, s.InstallSuffix(), options.BuildTags)
if err != nil {
return err
}
if err := s.BuildPackage(pkg); err != nil {
return err
}
if pkgObj == "" {
pkgObj = filepath.Base(args[0]) + ".js"
}
if err := s.WriteCommandPackage(pkg, pkgObj); err != nil {
return err
}
}
return nil
}, options, nil)
if s.Watcher == nil {
os.Exit(exitCode)
}
s.WaitForChange()
}
}
cmdInstall := &cobra.Command{
Use: "install [packages]",
Short: "compile and install packages and dependencies",
}
cmdInstall.Flags().AddFlag(flagVerbose)
cmdInstall.Flags().AddFlag(flagQuiet)
cmdInstall.Flags().AddFlag(flagWatch)
cmdInstall.Flags().AddFlag(flagMinify)
cmdInstall.Flags().AddFlag(flagColor)
cmdInstall.Flags().AddFlag(flagTags)
cmdInstall.Run = func(cmd *cobra.Command, args []string) {
options.BuildTags = strings.Fields(*tags)
for {
s := gbuild.NewSession(options)
exitCode := handleError(func() error {
pkgs := args
if len(pkgs) == 0 {
firstGopathWorkspace := filepath.SplitList(build.Default.GOPATH)[0] // TODO: The GOPATH workspace that contains the package source should be chosen.
srcDir, err := filepath.EvalSymlinks(filepath.Join(firstGopathWorkspace, "src"))
if err != nil {
return err
}
if !strings.HasPrefix(currentDirectory, srcDir) {
return fmt.Errorf("gopherjs install: no install location for directory %s outside GOPATH", currentDirectory)
}
pkgPath, err := filepath.Rel(srcDir, currentDirectory)
if err != nil {
return err
}
pkgs = []string{pkgPath}
}
if cmd.Name() == "get" {
goGet := exec.Command("go", append([]string{"get", "-d", "-tags=js"}, pkgs...)...)
goGet.Stdout = os.Stdout
goGet.Stderr = os.Stderr
if err := goGet.Run(); err != nil {
return err
}
}
for _, pkgPath := range pkgs {
pkgPath = filepath.ToSlash(pkgPath)
if _, err := s.BuildImportPath(pkgPath); err != nil {
return err
}
pkg := s.Packages[pkgPath]
if err := s.WriteCommandPackage(pkg, pkg.PkgObj); err != nil {
return err
}
}
return nil
}, options, nil)
if s.Watcher == nil {
os.Exit(exitCode)
}
s.WaitForChange()
}
}
cmdGet := &cobra.Command{
Use: "get [packages]",
Short: "download and install packages and dependencies",
}
cmdGet.Flags().AddFlag(flagVerbose)
cmdGet.Flags().AddFlag(flagQuiet)
cmdGet.Flags().AddFlag(flagWatch)
cmdGet.Flags().AddFlag(flagMinify)
cmdGet.Flags().AddFlag(flagColor)
cmdGet.Flags().AddFlag(flagTags)
cmdGet.Run = cmdInstall.Run
cmdRun := &cobra.Command{
Use: "run [gofiles...] [arguments...]",
Short: "compile and run Go program",
}
cmdRun.Run = func(cmd *cobra.Command, args []string) {
os.Exit(handleError(func() error {
lastSourceArg := 0
for {
if lastSourceArg == len(args) || !(strings.HasSuffix(args[lastSourceArg], ".go") || strings.HasSuffix(args[lastSourceArg], ".inc.js")) {
break
}
lastSourceArg++
}
if lastSourceArg == 0 {
return fmt.Errorf("gopherjs run: no go files listed")
}
tempfile, err := ioutil.TempFile(currentDirectory, filepath.Base(args[0])+".")
if err != nil && strings.HasPrefix(currentDirectory, runtime.GOROOT()) {
tempfile, err = ioutil.TempFile("", filepath.Base(args[0])+".")
}
if err != nil {
return err
}
defer func() {
tempfile.Close()
os.Remove(tempfile.Name())
os.Remove(tempfile.Name() + ".map")
}()
s := gbuild.NewSession(options)
if err := s.BuildFiles(args[:lastSourceArg], tempfile.Name(), currentDirectory); err != nil {
return err
}
if err := runNode(tempfile.Name(), args[lastSourceArg:], "", options.Quiet); err != nil {
return err
}
return nil
}, options, nil))
}
cmdTest := &cobra.Command{
Use: "test [packages]",
Short: "test packages",
}
bench := cmdTest.Flags().String("bench", "", "Run benchmarks matching the regular expression. By default, no benchmarks run. To run all benchmarks, use '--bench=.'.")
run := cmdTest.Flags().String("run", "", "Run only those tests and examples matching the regular expression.")
short := cmdTest.Flags().Bool("short", false, "Tell long-running tests to shorten their run time.")
verbose := cmdTest.Flags().BoolP("verbose", "v", false, "Log all tests as they are run. Also print all text from Log and Logf calls even if the test succeeds.")
compileOnly := cmdTest.Flags().BoolP("compileonly", "c", false, "Compile the test binary to pkg.test.js but do not run it (where pkg is the last element of the package's import path). The file name can be changed with the -o flag.")
outputFilename := cmdTest.Flags().StringP("output", "o", "", "Compile the test binary to the named file. The test still runs (unless -c is specified).")
cmdTest.Flags().AddFlag(flagMinify)
cmdTest.Flags().AddFlag(flagColor)
cmdTest.Run = func(cmd *cobra.Command, args []string) {
os.Exit(handleError(func() error {
pkgs := make([]*gbuild.PackageData, len(args))
for i, pkgPath := range args {
pkgPath = filepath.ToSlash(pkgPath)
var err error
pkgs[i], err = gbuild.Import(pkgPath, 0, "", nil)
if err != nil {
return err
}
}
if len(pkgs) == 0 {
firstGopathWorkspace := filepath.SplitList(build.Default.GOPATH)[0]
srcDir, err := filepath.EvalSymlinks(filepath.Join(firstGopathWorkspace, "src"))
if err != nil {
return err
}
var pkg *gbuild.PackageData
if strings.HasPrefix(currentDirectory, srcDir) {
pkgPath, err := filepath.Rel(srcDir, currentDirectory)
if err != nil {
return err
}
if pkg, err = gbuild.Import(pkgPath, 0, "", nil); err != nil {
return err
}
}
if pkg == nil {
if pkg, err = gbuild.ImportDir(currentDirectory, 0); err != nil {
return err
}
pkg.ImportPath = "_" + currentDirectory
}
pkgs = []*gbuild.PackageData{pkg}
}
var exitErr error
for _, pkg := range pkgs {
if len(pkg.TestGoFiles) == 0 && len(pkg.XTestGoFiles) == 0 {
fmt.Printf("? \t%s\t[no test files]\n", pkg.ImportPath)
continue
}
s := gbuild.NewSession(options)
tests := &testFuncs{Package: pkg.Package}
collectTests := func(testPkg *gbuild.PackageData, testPkgName string, needVar *bool) error {
if err := s.BuildPackage(testPkg); err != nil {
return err
}
for _, decl := range testPkg.Archive.Declarations {
if strings.HasPrefix(decl.FullName, testPkg.ImportPath+".Test") {
tests.Tests = append(tests.Tests, testFunc{Package: testPkgName, Name: decl.FullName[len(testPkg.ImportPath)+1:]})
*needVar = true
}
if strings.HasPrefix(decl.FullName, testPkg.ImportPath+".Benchmark") {
tests.Benchmarks = append(tests.Benchmarks, testFunc{Package: testPkgName, Name: decl.FullName[len(testPkg.ImportPath)+1:]})
*needVar = true
}
}
return nil
}
if err := collectTests(&gbuild.PackageData{
Package: &build.Package{
ImportPath: pkg.ImportPath,
Dir: pkg.Dir,
GoFiles: append(pkg.GoFiles, pkg.TestGoFiles...),
Imports: append(pkg.Imports, pkg.TestImports...),
},
IsTest: true,
JSFiles: pkg.JSFiles,
}, "_test", &tests.NeedTest); err != nil {
return err
}
if err := collectTests(&gbuild.PackageData{
Package: &build.Package{
ImportPath: pkg.ImportPath + "_test",
Dir: pkg.Dir,
GoFiles: pkg.XTestGoFiles,
Imports: pkg.XTestImports,
},
IsTest: true,
}, "_xtest", &tests.NeedXtest); err != nil {
return err
}
buf := bytes.NewBuffer(nil)
if err := testmainTmpl.Execute(buf, tests); err != nil {
return err
}
fset := token.NewFileSet()
mainFile, err := parser.ParseFile(fset, "_testmain.go", buf, 0)
if err != nil {
return err
}
mainPkg := &gbuild.PackageData{
Package: &build.Package{
Name: "main",
ImportPath: "main",
},
}
mainPkg.Archive, err = compiler.Compile("main", []*ast.File{mainFile}, fset, s.ImportContext, options.Minify)
if err != nil {
return err
}
if *compileOnly && *outputFilename == "" {
*outputFilename = pkg.Package.Name + "_test.js"
}
var outfile *os.File
if *outputFilename != "" {
outfile, err = os.Create(*outputFilename)
if err != nil {
return err
}
} else {
outfile, err = ioutil.TempFile(currentDirectory, "test.")
if err != nil {
return err
}
}
defer func() {
outfile.Close()
if *outputFilename == "" {
os.Remove(outfile.Name())
os.Remove(outfile.Name() + ".map")
}
}()
if err := s.WriteCommandPackage(mainPkg, outfile.Name()); err != nil {
return err
}
if *compileOnly {
continue
}
var args []string
if *bench != "" {
args = append(args, "-test.bench", *bench)
}
if *run != "" {
args = append(args, "-test.run", *run)
}
if *short {
args = append(args, "-test.short")
}
if *verbose {
args = append(args, "-test.v")
}
status := "ok "
start := time.Now()
if err := runNode(outfile.Name(), args, pkg.Dir, options.Quiet); err != nil {
if _, ok := err.(*exec.ExitError); !ok {
return err
}
exitErr = err
status = "FAIL"
}
fmt.Printf("%s\t%s\t%.3fs\n", status, pkg.ImportPath, time.Now().Sub(start).Seconds())
}
return exitErr
}, options, nil))
}
cmdTool := &cobra.Command{
Use: "tool [command] [args...]",
Short: "run specified go tool",
}
cmdTool.Flags().BoolP("e", "e", false, "")
cmdTool.Flags().BoolP("l", "l", false, "")
cmdTool.Flags().BoolP("m", "m", false, "")
cmdTool.Flags().StringP("o", "o", "", "")
cmdTool.Flags().StringP("D", "D", "", "")
cmdTool.Flags().StringP("I", "I", "", "")
cmdTool.Run = func(cmd *cobra.Command, args []string) {
os.Exit(handleError(func() error {
if len(args) == 2 {
switch args[0][1] {
case 'g':
basename := filepath.Base(args[1])
s := gbuild.NewSession(options)
if err := s.BuildFiles([]string{args[1]}, basename[:len(basename)-3]+".js", currentDirectory); err != nil {
return err
}
return nil
}
}
cmdTool.Help()
return nil
}, options, nil))
}
cmdServe := &cobra.Command{
Use: "serve",
Short: "compile on-the-fly and serve",
}
cmdServe.Flags().AddFlag(flagVerbose)
cmdServe.Flags().AddFlag(flagQuiet)
cmdServe.Flags().AddFlag(flagMinify)
cmdServe.Flags().AddFlag(flagColor)
cmdServe.Flags().AddFlag(flagTags)
var addr string
cmdServe.Flags().StringVarP(&addr, "http", "", ":8080", "HTTP bind address to serve")
cmdServe.Run = func(cmd *cobra.Command, args []string) {
options.BuildTags = strings.Fields(*tags)
dirs := append(filepath.SplitList(build.Default.GOPATH), build.Default.GOROOT)
sourceFiles := http.FileServer(serveCommandFileSystem{options: options, dirs: dirs, sourceMaps: make(map[string][]byte)})
ln, err := net.Listen("tcp", addr)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if tcpAddr := ln.Addr().(*net.TCPAddr); tcpAddr.IP.Equal(net.IPv4zero) || tcpAddr.IP.Equal(net.IPv6zero) { // Any available addresses.
fmt.Printf("serving at http://localhost:%d and on port %d of any available addresses\n", tcpAddr.Port, tcpAddr.Port)
} else { // Specific address.
fmt.Printf("serving at http://%s\n", tcpAddr)
}
fmt.Fprintln(os.Stderr, http.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}, sourceFiles))
}
rootCmd := &cobra.Command{
Use: "gopherjs",
Long: "GopherJS is a tool for compiling Go source code to JavaScript.",
}
rootCmd.AddCommand(cmdBuild, cmdGet, cmdInstall, cmdRun, cmdTest, cmdTool, cmdServe)
rootCmd.Execute()
}
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by ListenAndServe and ListenAndServeTLS so
// dead TCP connections (e.g. closing laptop mid-download) eventually
// go away.
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
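// serveCommandFileSystem implements http.FileSystem. It compiles Go main packages on the fly when their
// .js, .js.map, or index.html paths are requested and otherwise falls back to serving files from the
// src directories of the configured GOPATH workspaces and GOROOT.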
type serveCommandFileSystem struct {
options *gbuild.Options
dirs []string
sourceMaps map[string][]byte
}
func (fs serveCommandFileSystem) Open(name string) (http.File, error) {
dir, file := path.Split(name)
base := path.Base(dir) // base is parent folder name, which becomes the output file name.
isPkg := file == base+".js"
isMap := file == base+".js.map"
isIndex := file == "index.html"
if isPkg || isMap || isIndex {
// If we're going to be serving our special files, make sure there's a Go command in this folder.
s := gbuild.NewSession(fs.options)
pkg, err := gbuild.Import(path.Dir(name[1:]), 0, s.InstallSuffix(), fs.options.BuildTags)
if err != nil || pkg.Name != "main" {
isPkg = false
isMap = false
isIndex = false
}
switch {
case isPkg:
buf := bytes.NewBuffer(nil)
browserErrors := bytes.NewBuffer(nil)
exitCode := handleError(func() error {
if err := s.BuildPackage(pkg); err != nil {
return err
}
sourceMapFilter := &compiler.SourceMapFilter{Writer: buf}
m := &sourcemap.Map{File: base + ".js"}
sourceMapFilter.MappingCallback = gbuild.NewMappingCallback(m, fs.options.GOROOT, fs.options.GOPATH)
deps, err := compiler.ImportDependencies(pkg.Archive, s.ImportContext.Import)
if err != nil {
return err
}
if err := compiler.WriteProgramCode(deps, sourceMapFilter); err != nil {
return err
}
mapBuf := bytes.NewBuffer(nil)
m.WriteTo(mapBuf)
buf.WriteString("//# sourceMappingURL=" + base + ".js.map\n")
fs.sourceMaps[name+".map"] = mapBuf.Bytes()
return nil
}, fs.options, browserErrors)
if exitCode != 0 {
buf = browserErrors
}
return newFakeFile(base+".js", buf.Bytes()), nil
case isMap:
if content, ok := fs.sourceMaps[name]; ok {
return newFakeFile(base+".js.map", content), nil
}
}
}
for _, d := range fs.dirs {
f, err := http.Dir(filepath.Join(d, "src")).Open(name)
if err == nil {
return f, nil
}
}
if isIndex {
// If there was no index.html file in any dirs, supply our own.
return newFakeFile("index.html", []byte(`<html><head><meta charset="utf-8"><script src="`+base+`.js"></script></head></html>`)), nil
}
return nil, os.ErrNotExist
}
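// fakeFile is an in-memory http.File backed by a byte slice; it also implements os.FileInfo for itself.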
type fakeFile struct {
name string
size int
io.ReadSeeker
}
func newFakeFile(name string, content []byte) *fakeFile {
return &fakeFile{name: name, size: len(content), ReadSeeker: bytes.NewReader(content)}
}
func (f *fakeFile) Close() error {
return nil
}
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
return nil, os.ErrInvalid
}
func (f *fakeFile) Stat() (os.FileInfo, error) {
return f, nil
}
func (f *fakeFile) Name() string {
return f.name
}
func (f *fakeFile) Size() int64 {
return int64(f.size)
}
func (f *fakeFile) Mode() os.FileMode {
return 0
}
func (f *fakeFile) ModTime() time.Time {
return time.Time{}
}
func (f *fakeFile) IsDir() bool {
return false
}
func (f *fakeFile) Sys() interface{} {
return nil
}
// If browserErrors is non-nil, errors are written for presentation in browser.
func handleError(f func() error, options *gbuild.Options, browserErrors *bytes.Buffer) int {
switch err := f().(type) {
case nil:
return 0
case compiler.ErrorList:
for _, entry := range err {
printError(entry, options, browserErrors)
}
return 1
case *exec.ExitError:
return err.Sys().(syscall.WaitStatus).ExitStatus()
default:
printError(err, options, browserErrors)
return 1
}
}
// sprintError returns an annotated error string without trailing newline.
func sprintError(err error) string {
makeRel := func(name string) string {
if relname, err := filepath.Rel(currentDirectory, name); err == nil {
return relname
}
return name
}
switch e := err.(type) {
case *scanner.Error:
return fmt.Sprintf("%s:%d:%d: %s", makeRel(e.Pos.Filename), e.Pos.Line, e.Pos.Column, e.Msg)
case types.Error:
pos := e.Fset.Position(e.Pos)
return fmt.Sprintf("%s:%d:%d: %s", makeRel(pos.Filename), pos.Line, pos.Column, e.Msg)
default:
return fmt.Sprintf("%s", e)
}
}
// printError prints err to Stderr with options. If browserErrors is non-nil, errors are also written for presentation in browser.
func printError(err error, options *gbuild.Options, browserErrors *bytes.Buffer) {
e := sprintError(err)
options.PrintError("%s\n", e)
if browserErrors != nil {
fmt.Fprintln(browserErrors, `console.error("`+template.JSEscapeString(e)+`");`)
}
}
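// runNode runs the given script with Node.js, passing args through, enabling the source-map-support
// module when available and raising the stack size limit on non-Windows platforms.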
func runNode(script string, args []string, dir string, quiet bool) error {
var allArgs []string
if b, _ := strconv.ParseBool(os.Getenv("SOURCE_MAP_SUPPORT")); os.Getenv("SOURCE_MAP_SUPPORT") == "" || b {
allArgs = []string{"--require", "source-map-support/register"}
if err := exec.Command("node", "--require", "source-map-support/register", "--eval", "").Run(); err != nil {
if !quiet {
fmt.Fprintln(os.Stderr, "gopherjs: Source maps disabled. Use Node.js 4.x with source-map-support module for nice stack traces.")
}
allArgs = []string{}
}
}
if runtime.GOOS != "windows" {
allArgs = append(allArgs, "--stack_size=10000", script)
}
allArgs = append(allArgs, args...)
node := exec.Command("node", allArgs...)
node.Dir = dir
node.Stdin = os.Stdin
node.Stdout = os.Stdout
node.Stderr = os.Stderr
err := node.Run()
if _, ok := err.(*exec.ExitError); err != nil && !ok {
err = fmt.Errorf("could not run Node.js: %s", err.Error())
}
return err
}
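// testFuncs holds the test, benchmark, and example functions that testmainTmpl renders into the
// generated _testmain.go package.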
type testFuncs struct {
Tests []testFunc
Benchmarks []testFunc
Examples []testFunc
Package *build.Package
NeedTest bool
NeedXtest bool
}
type testFunc struct {
Package string // imported package name (_test or _xtest)
Name string // function name
Output string // output, for examples
}
var testmainTmpl = template.Must(template.New("main").Parse(`
package main
import (
"regexp"
"testing"
{{if .NeedTest}}
_test {{.Package.ImportPath | printf "%q"}}
{{end}}
{{if .NeedXtest}}
_xtest {{.Package.ImportPath | printf "%s_test" | printf "%q"}}
{{end}}
)
var tests = []testing.InternalTest{
{{range .Tests}}
{"{{.Name}}", {{.Package}}.{{.Name}}},
{{end}}
}
var benchmarks = []testing.InternalBenchmark{
{{range .Benchmarks}}
{"{{.Name}}", {{.Package}}.{{.Name}}},
{{end}}
}
var examples = []testing.InternalExample{
{{range .Examples}}
{"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}},
{{end}}
}
var matchPat string
var matchRe *regexp.Regexp
func matchString(pat, str string) (result bool, err error) {
if matchRe == nil || matchPat != pat {
matchPat = pat
matchRe, err = regexp.Compile(matchPat)
if err != nil {
return
}
}
return matchRe.MatchString(str), nil
}
func main() {
testing.Main(matchString, tests, benchmarks, examples)
}
`))
|
[
"\"TERM\"",
"\"SOURCE_MAP_SUPPORT\"",
"\"SOURCE_MAP_SUPPORT\""
] |
[] |
[
"SOURCE_MAP_SUPPORT",
"TERM"
] |
[]
|
["SOURCE_MAP_SUPPORT", "TERM"]
|
go
| 2 | 0 | |
sodelicious/asgi.py
|
"""
ASGI config for sodelicious project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sodelicious.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/e2e/util.go
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"bytes"
"fmt"
"io"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/cloudprovider"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
sshutil "k8s.io/kubernetes/pkg/ssh"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/watch"
"github.com/blang/semver"
"github.com/davecgh/go-spew/spew"
"golang.org/x/crypto/ssh"
"golang.org/x/net/websocket"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
)
const (
// How long to wait for the pod to be listable
podListTimeout = time.Minute
// Initial pod start can be delayed O(minutes) by slow docker pulls
// TODO: Make this 30 seconds once #4566 is resolved.
podStartTimeout = 5 * time.Minute
// How long to wait for the pod to no longer be running
podNoLongerRunningTimeout = 30 * time.Second
// If there are any orphaned namespaces to clean up, this test is running
// on a long-lived cluster. A long wait here is preferable to spurious test
// failures caused by leaked resources from a previous test run.
namespaceCleanupTimeout = 15 * time.Minute
// Some pods can take much longer to get ready due to volume attach/detach latency.
slowPodStartTimeout = 15 * time.Minute
// How long to wait for a service endpoint to be resolvable.
serviceStartTimeout = 1 * time.Minute
// String used to mark pod deletion
nonExist = "NonExist"
// How often to poll pods and nodes.
poll = 2 * time.Second
// service accounts are provisioned after namespace creation
// a service account is required to support pod creation in a namespace as part of admission control
serviceAccountProvisionTimeout = 2 * time.Minute
// How long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
singleCallTimeout = 30 * time.Second
// How long nodes have to be "ready" when a test begins. They should already
// be "ready" before the test starts, so this is small.
nodeReadyInitialTimeout = 20 * time.Second
// How long pods have to be "ready" when a test begins.
podReadyBeforeTimeout = 2 * time.Minute
// How long pods have to become scheduled onto nodes
podScheduledBeforeTimeout = podListTimeout + (20 * time.Second)
podRespondingTimeout = 2 * time.Minute
serviceRespondingTimeout = 2 * time.Minute
endpointRegisterTimeout = time.Minute
)
// SubResource proxy should have been functional in v1.0.0, but SubResource
// proxy via tunneling is known to be broken in v1.0. See
// https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463
//
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively
// in v1.3).
var subResourcePodProxyVersion = version.MustParse("v1.1.0")
var subResourceServiceAndNodeProxyVersion = version.MustParse("v1.2.0")
func getServicesProxyRequest(c *client.Client, request *restclient.Request) (*restclient.Request, error) {
subResourceProxyAvailable, err := serverVersionGTE(subResourceServiceAndNodeProxyVersion, c)
if err != nil {
return nil, err
}
if subResourceProxyAvailable {
return request.Resource("services").SubResource("proxy"), nil
}
return request.Prefix("proxy").Resource("services"), nil
}
func GetServicesProxyRequest(c *client.Client, request *restclient.Request) (*restclient.Request, error) {
return getServicesProxyRequest(c, request)
}
type CloudConfig struct {
ProjectID string
Zone string
Cluster string
MasterName string
NodeInstanceGroup string
NumNodes int
ClusterTag string
ServiceAccount string
Provider cloudprovider.Interface
}
// unique identifier of the e2e run
var runId = util.NewUUID()
type CreateTestingNSFn func(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error)
type TestContextType struct {
KubeConfig string
KubeContext string
KubeVolumeDir string
CertDir string
Host string
RepoRoot string
Provider string
CloudConfig CloudConfig
KubectlPath string
OutputDir string
ReportDir string
prefix string
MinStartupPods int
UpgradeTarget string
PrometheusPushGateway string
VerifyServiceAccount bool
DeleteNamespace bool
CleanStart bool
// If set to true framework will start a goroutine monitoring resource usage of system add-ons.
// It will read the data every 30 seconds from all Nodes and print summary during afterEach.
GatherKubeSystemResourceUsageData bool
GatherLogsSizes bool
GatherMetricsAfterTest bool
// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
OutputPrintType string
// CreateTestingNS is responsible for creating namespace used for executing e2e tests.
// It accepts namespace base name, which will be prepended with e2e prefix, kube client
// and labels to be applied to a namespace.
CreateTestingNS CreateTestingNSFn
}
var testContext TestContextType
func SetTestContext(t TestContextType) {
testContext = t
}
type ContainerFailures struct {
status *api.ContainerStateTerminated
restarts int
}
// Convenient wrapper around cache.Store that returns list of api.Pod instead of interface{}.
type podStore struct {
cache.Store
stopCh chan struct{}
}
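// newPodStore starts a reflector that watches pods in the given namespace matching the label and
// field selectors and caches them in the returned podStore until Stop is called.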
func newPodStore(c *client.Client, namespace string, label labels.Selector, field fields.Selector) *podStore {
lw := &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = label
options.FieldSelector = field
return c.Pods(namespace).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = label
options.FieldSelector = field
return c.Pods(namespace).Watch(options)
},
}
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
stopCh := make(chan struct{})
cache.NewReflector(lw, &api.Pod{}, store, 0).RunUntil(stopCh)
return &podStore{store, stopCh}
}
func (s *podStore) List() []*api.Pod {
objects := s.Store.List()
pods := make([]*api.Pod, 0)
for _, o := range objects {
pods = append(pods, o.(*api.Pod))
}
return pods
}
func (s *podStore) Stop() {
close(s.stopCh)
}
type RCConfig struct {
Client *client.Client
Image string
Command []string
Name string
Namespace string
PollInterval time.Duration
Timeout time.Duration
PodStatusFile *os.File
Replicas int
CpuRequest int64 // millicores
CpuLimit int64 // millicores
MemRequest int64 // bytes
MemLimit int64 // bytes
ReadinessProbe *api.Probe
// Env vars, set the same for every pod.
Env map[string]string
// Extra labels added to every pod.
Labels map[string]string
// Ports to declare in the container (map of name to containerPort).
Ports map[string]int
// Pointer to a list of pods; if non-nil, will be set to a list of pods
// created by this RC by RunRC.
CreatedPods *[]*api.Pod
// Maximum allowable container failures. If exceeded, RunRC returns an error.
// Defaults to replicas*0.1 if unspecified.
MaxContainerFailures *int
}
type DeploymentConfig struct {
RCConfig
}
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
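// logf writes a timestamped, level-prefixed message to the Ginkgo writer. Logf logs at INFO; Failf and
// Skipf additionally fail or skip the current test.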
func logf(level string, format string, args ...interface{}) {
fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
func Logf(format string, args ...interface{}) {
logf("INFO", format, args...)
}
func Failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
logf("FAIL", msg)
Fail(nowStamp()+": "+msg, 1)
}
func Skipf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
logf("SKIP", msg)
Skip(nowStamp() + ": " + msg)
}
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
if testContext.CloudConfig.NumNodes < minNodeCount {
Skipf("Requires at least %d nodes (not %d)", minNodeCount, testContext.CloudConfig.NumNodes)
}
}
func SkipUnlessAtLeast(value int, minValue int, message string) {
if value < minValue {
Skipf(message)
}
}
func SkipIfProviderIs(unsupportedProviders ...string) {
if providerIs(unsupportedProviders...) {
Skipf("Not supported for providers %v (found %s)", unsupportedProviders, testContext.Provider)
}
}
func SkipUnlessProviderIs(supportedProviders ...string) {
if !providerIs(supportedProviders...) {
Skipf("Only supported for providers %v (not %s)", supportedProviders, testContext.Provider)
}
}
func providerIs(providers ...string) bool {
for _, provider := range providers {
if strings.ToLower(provider) == strings.ToLower(testContext.Provider) {
return true
}
}
return false
}
func SkipUnlessServerVersionGTE(v semver.Version, c client.ServerVersionInterface) {
gte, err := serverVersionGTE(v, c)
if err != nil {
Failf("Failed to get server version: %v", err)
}
if !gte {
Skipf("Not supported for server versions before %q", v)
}
}
// providersWithSSH are those providers where each node is accessible with SSH
var providersWithSSH = []string{"gce", "gke", "aws"}
// providersWithMasterSSH are those providers where master node is accessible with SSH
var providersWithMasterSSH = []string{"gce", "gke", "kubemark", "aws"}
type podCondition func(pod *api.Pod) (bool, error)
// podReady returns whether pod has a condition of Ready with a status of true.
func podReady(pod *api.Pod) bool {
for _, cond := range pod.Status.Conditions {
if cond.Type == api.PodReady && cond.Status == api.ConditionTrue {
return true
}
}
return false
}
// logPodStates logs basic info of provided pods for debugging.
func logPodStates(pods []api.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
for i := range pods {
pod := &pods[i]
if len(pod.ObjectMeta.Name) > maxPodW {
maxPodW = len(pod.ObjectMeta.Name)
}
if len(pod.Spec.NodeName) > maxNodeW {
maxNodeW = len(pod.Spec.NodeName)
}
if len(pod.Status.Phase) > maxPhaseW {
maxPhaseW = len(pod.Status.Phase)
}
}
// Increase widths by one to separate by a single space.
maxPodW++
maxNodeW++
maxPhaseW++
maxGraceW++
// Log pod info. * does space padding, - makes them left-aligned.
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
for _, pod := range pods {
grace := ""
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
Logf("") // Final empty line helps for readability.
}
// podRunningReady checks whether pod p's phase is running and it has a ready
// condition of status true.
func podRunningReady(p *api.Pod) (bool, error) {
// Check the phase is running.
if p.Status.Phase != api.PodRunning {
return false, fmt.Errorf("want pod '%s' on '%s' to be '%v' but was '%v'",
p.ObjectMeta.Name, p.Spec.NodeName, api.PodRunning, p.Status.Phase)
}
// Check the ready condition is true.
if !podReady(p) {
return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
p.ObjectMeta.Name, p.Spec.NodeName, api.PodReady, api.ConditionTrue, p.Status.Conditions)
}
return true, nil
}
// podNotReady checks whether pod p has a ready condition of status false.
func podNotReady(p *api.Pod) (bool, error) {
// Check the ready condition is false.
if podReady(p) {
return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
p.ObjectMeta.Name, p.Spec.NodeName, api.PodReady, api.ConditionFalse, p.Status.Conditions)
}
return true, nil
}
// check if a Pod is controlled by a Replication Controller in the List
func hasReplicationControllersForPod(rcs *api.ReplicationControllerList, pod api.Pod) bool {
for _, rc := range rcs.Items {
selector := labels.SelectorFromSet(rc.Spec.Selector)
if selector.Matches(labels.Set(pod.ObjectMeta.Labels)) {
return true
}
}
return false
}
// waitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// replication controller. Also, it ensures that at least minPods are running
// and ready. It has separate behavior from other 'wait for' pods functions in
// that it requires the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting.
func waitForPodsRunningReady(ns string, minPods int, timeout time.Duration) error {
c, err := loadClient()
if err != nil {
return err
}
start := time.Now()
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
if wait.PollImmediate(poll, timeout, func() (bool, error) {
// We get the new list of pods and replication controllers in every
// iteration because more pods come online during startup and we want to
// ensure they are also checked.
rcList, err := c.ReplicationControllers(ns).List(api.ListOptions{})
if err != nil {
Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
return false, nil
}
replicas := 0
for _, rc := range rcList.Items {
replicas += rc.Spec.Replicas
}
podList, err := c.Pods(ns).List(api.ListOptions{})
if err != nil {
Logf("Error getting pods in namespace '%s': %v", ns, err)
return false, nil
}
nOk, replicaOk, badPods := 0, 0, []api.Pod{}
for _, pod := range podList.Items {
if res, err := podRunningReady(&pod); res && err == nil {
nOk++
if hasReplicationControllersForPod(rcList, pod) {
replicaOk++
}
} else {
if pod.Status.Phase != api.PodFailed {
Logf("The status of Pod %s is %s, waiting for it to be either Running or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
badPods = append(badPods, pod)
} else if !hasReplicationControllersForPod(rcList, pod) {
Logf("Pod %s is Failed, but it's not controlled by a ReplicationController", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
//ignore failed pods that are controlled by a replication controller
}
}
Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil
}
logPodStates(badPods)
return false, nil
}) != nil {
return fmt.Errorf("Not all pods in namespace '%s' running and ready within %v", ns, timeout)
}
return nil
}
func logFailedContainers(ns string) {
c, err := loadClient()
if err != nil {
Logf("Failed to load client")
return
}
podList, err := c.Pods(ns).List(api.ListOptions{})
if err != nil {
Logf("Error getting pods in namespace '%s': %v", ns, err)
return
}
for _, pod := range podList.Items {
if res, err := podRunningReady(&pod); res && err == nil {
Logf("Ignoring Ready pod %v/%v", pod.Namespace, pod.Name)
} else {
for _, container := range pod.Spec.Containers {
logs, err := getPreviousPodLogs(c, ns, pod.Name, container.Name)
if err != nil {
Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
}
By(fmt.Sprintf("Previous logs of %v/%v:%v on node %v", ns, pod.Name, container.Name, pod.Spec.NodeName))
Logf(logs)
}
}
}
}
// deleteNamespaces deletes all namespaces that match the given delete and skip filters.
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
func deleteNamespaces(c *client.Client, deleteFilter, skipFilter []string) ([]string, error) {
By("Deleting namespaces")
nsList, err := c.Namespaces().List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred())
var deleted []string
var wg sync.WaitGroup
OUTER:
for _, item := range nsList.Items {
if skipFilter != nil {
for _, pattern := range skipFilter {
if strings.Contains(item.Name, pattern) {
continue OUTER
}
}
}
if deleteFilter != nil {
var shouldDelete bool
for _, pattern := range deleteFilter {
if strings.Contains(item.Name, pattern) {
shouldDelete = true
break
}
}
if !shouldDelete {
continue OUTER
}
}
wg.Add(1)
deleted = append(deleted, item.Name)
go func(nsName string) {
defer wg.Done()
defer GinkgoRecover()
Expect(c.Namespaces().Delete(nsName)).To(Succeed())
Logf("namespace : %v api call to delete is complete ", nsName)
}(item.Name)
}
wg.Wait()
return deleted, nil
}
func waitForNamespacesDeleted(c *client.Client, namespaces []string, timeout time.Duration) error {
By("Waiting for namespaces to vanish")
nsMap := map[string]bool{}
for _, ns := range namespaces {
nsMap[ns] = true
}
//Now POLL until all namespaces have been eradicated.
return wait.Poll(2*time.Second, timeout,
func() (bool, error) {
nsList, err := c.Namespaces().List(api.ListOptions{})
if err != nil {
return false, err
}
for _, item := range nsList.Items {
if _, ok := nsMap[item.Name]; ok {
return false, nil
}
}
return true, nil
})
}
func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName string, timeout time.Duration) error {
Logf("Waiting up to %v for service account %s to be provisioned in ns %s", timeout, serviceAccountName, ns)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
sa, err := c.ServiceAccounts(ns).Get(serviceAccountName)
if apierrs.IsNotFound(err) {
Logf("Get service account %s in ns %s failed, ignoring for %v: %v", serviceAccountName, ns, poll, err)
continue
}
if err != nil {
Logf("Get service account %s in ns %s failed: %v", serviceAccountName, ns, err)
return err
}
if len(sa.Secrets) == 0 {
Logf("Service account %s in ns %s had 0 secrets, ignoring for %v: %v", serviceAccountName, ns, poll, err)
continue
}
Logf("Service account %s in ns %s with secrets found. (%v)", serviceAccountName, ns, time.Since(start))
return nil
}
return fmt.Errorf("Service account %s in namespace %s not ready within %v", serviceAccountName, ns, timeout)
}
func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pod, err := c.Pods(ns).Get(podName)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Pod %q in namespace %q disappeared. Error: %v", podName, ns, err)
return err
}
// Aligning this text makes it much more readable
Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v",
podName, ns, poll, err)
continue
}
done, err := condition(pod)
if done {
return err
}
Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
"(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
}
return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
}
// waitForMatchPodsCondition finds match pods based on the input ListOptions.
// waits and checks if all match pods are in the given podCondition
func waitForMatchPodsCondition(c *client.Client, opts api.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pods, err := c.Pods(api.NamespaceAll).List(opts)
if err != nil {
return err
}
conditionNotMatch := []string{}
for _, pod := range pods.Items {
done, err := condition(&pod)
if done && err != nil {
return fmt.Errorf("Unexpected error: %v", err)
}
if !done {
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
}
}
if len(conditionNotMatch) <= 0 {
return err
}
Logf("%d pods are not %s", len(conditionNotMatch), desc)
}
return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}
// waitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned.
// The default service account is what is associated with pods when they do not specify a service account;
// as a result, pods cannot be provisioned in a namespace until the service account is provisioned.
func waitForDefaultServiceAccountInNamespace(c *client.Client, namespace string) error {
return waitForServiceAccountInNamespace(c, namespace, "default", serviceAccountProvisionTimeout)
}
// waitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func waitForPersistentVolumePhase(phase api.PersistentVolumePhase, c *client.Client, pvName string, poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pv, err := c.PersistentVolumes().Get(pvName)
if err != nil {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, poll, err)
continue
} else {
if pv.Status.Phase == phase {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
func CreateTestingNS(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error) {
if labels == nil {
labels = map[string]string{}
}
labels["e2e-run"] = string(runId)
namespaceObj := &api.Namespace{
ObjectMeta: api.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
Namespace: "",
Labels: labels,
},
Status: api.NamespaceStatus{},
}
// Be robust about making the namespace creation call.
var got *api.Namespace
if err := wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
var err error
got, err = c.Namespaces().Create(namespaceObj)
if err != nil {
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
if testContext.VerifyServiceAccount {
if err := waitForDefaultServiceAccountInNamespace(c, got.Name); err != nil {
return nil, err
}
}
return got, nil
}
// checkTestingNSDeletedExcept checks whether all existing e2e namespaces (other than the one named skip)
// are in the Terminating state, and waits until they are finally deleted.
func checkTestingNSDeletedExcept(c *client.Client, skip string) error {
// TODO: Since we don't have support for bulk resource deletion in the API,
// while deleting a namespace we are deleting all objects from that namespace
// one by one (one deletion == one API call). This basically exposes us to
// throttling - currently controller-manager has a limit of max 20 QPS.
// Once #10217 is implemented and used in namespace-controller, deleting all
// object from a given namespace should be much faster and we will be able
// to lower this timeout.
// However, now Density test is producing ~26000 events and Load capacity test
// is producing ~35000 events, thus assuming there are no other requests it will
// take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60
// minutes to avoid any timeouts here.
timeout := 60 * time.Minute
Logf("Waiting for terminating namespaces to be deleted...")
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
namespaces, err := c.Namespaces().List(api.ListOptions{})
if err != nil {
Logf("Listing namespaces failed: %v", err)
continue
}
terminating := 0
for _, ns := range namespaces.Items {
if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip {
if ns.Status.Phase == api.NamespaceActive {
return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name)
}
terminating++
}
}
if terminating == 0 {
return nil
}
}
return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}
// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks
// whether there are any pods remaining in a non-terminating state.
func deleteNS(c *client.Client, namespace string, timeout time.Duration) error {
if err := c.Namespaces().Delete(namespace); err != nil {
return err
}
err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
if _, err := c.Namespaces().Get(namespace); err != nil {
if apierrs.IsNotFound(err) {
return true, nil
}
Logf("Error while waiting for namespace to be terminated: %v", err)
return false, nil
}
return false, nil
})
// check for pods that were not deleted
remaining := []string{}
missingTimestamp := false
if pods, perr := c.Pods(namespace).List(api.ListOptions{}); perr == nil {
for _, pod := range pods.Items {
Logf("Pod %s %s on node %s remains, has deletion timestamp %s", namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
remaining = append(remaining, pod.Name)
if pod.DeletionTimestamp == nil {
missingTimestamp = true
}
}
}
// a timeout occurred
if err != nil {
if missingTimestamp {
return fmt.Errorf("namespace %s was not deleted within limit: %v, some pods were not marked with a deletion timestamp, pods remaining: %v", namespace, err, remaining)
}
return fmt.Errorf("namespace %s was not deleted within limit: %v, pods remaining: %v", namespace, err, remaining)
}
// pods were not deleted but the namespace was deleted
if len(remaining) > 0 {
return fmt.Errorf("pods remained within namespace %s after deletion: %v", namespace, remaining)
}
return nil
}
// Waits default amount of time (podStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
func waitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error {
return waitTimeoutForPodRunningInNamespace(c, podName, namespace, podStartTimeout)
}
// Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
func waitForPodRunningInNamespaceSlow(c *client.Client, podName string, namespace string) error {
return waitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout)
}
func waitTimeoutForPodRunningInNamespace(c *client.Client, podName string, namespace string, timeout time.Duration) error {
return waitForPodCondition(c, namespace, podName, "running", timeout, func(pod *api.Pod) (bool, error) {
if pod.Status.Phase == api.PodRunning {
Logf("Found pod '%s' on node '%s'", podName, pod.Spec.NodeName)
return true, nil
}
if pod.Status.Phase == api.PodFailed {
return true, fmt.Errorf("Giving up; pod went into failed status: \n%s", spew.Sprintf("%#v", pod))
}
return false, nil
})
}
// Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running.
// Returns an error if timeout occurs first.
func waitForPodNoLongerRunningInNamespace(c *client.Client, podName string, namespace string) error {
return waitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, podNoLongerRunningTimeout)
}
func waitTimeoutForPodNoLongerRunningInNamespace(c *client.Client, podName string, namespace string, timeout time.Duration) error {
return waitForPodCondition(c, namespace, podName, "no longer running", timeout, func(pod *api.Pod) (bool, error) {
if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
Logf("Found pod '%s' with status '%s' on node '%s'", podName, pod.Status.Phase, pod.Spec.NodeName)
return true, nil
}
return false, nil
})
}
// waitForPodNotPending returns an error if it took too long for the pod to go out of pending state.
func waitForPodNotPending(c *client.Client, ns, podName string) error {
return waitForPodCondition(c, ns, podName, "!pending", podStartTimeout, func(pod *api.Pod) (bool, error) {
if pod.Status.Phase != api.PodPending {
Logf("Saw pod '%s' in namespace '%s' out of pending state (found '%q')", podName, ns, pod.Status.Phase)
return true, nil
}
return false, nil
})
}
// waitForPodTerminatedInNamespace returns an error if it took too long for the pod
// to terminate or if the pod terminated with an unexpected reason.
func waitForPodTerminatedInNamespace(c *client.Client, podName, reason, namespace string) error {
return waitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", podStartTimeout, func(pod *api.Pod) (bool, error) {
if pod.Status.Phase == api.PodFailed {
if pod.Status.Reason == reason {
return true, nil
} else {
return true, fmt.Errorf("Expected pod %n/%n to be terminated with reason %v, got reason: ", namespace, podName, reason, pod.Status.Reason)
}
}
return false, nil
})
}
// waitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespace(c *client.Client, podName string, contName string, namespace string) error {
return waitForPodCondition(c, namespace, podName, "success or failure", podStartTimeout, func(pod *api.Pod) (bool, error) {
// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632
ci, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, contName)
if !ok {
Logf("No Status.Info for container '%s' in pod '%s' yet", contName, podName)
} else {
if ci.State.Terminated != nil {
if ci.State.Terminated.ExitCode == 0 {
By("Saw pod success")
return true, nil
}
return true, fmt.Errorf("pod '%s' terminated with failure: %+v", podName, ci.State.Terminated)
}
Logf("Nil State.Terminated for container '%s' in pod '%s' in namespace '%s' so far", contName, podName, namespace)
}
return false, nil
})
}
// waitForRCPodOnNode returns the pod from the given replication controller (described by rcName) which is scheduled on the given node.
// In case of failure or too long waiting time, an error is returned.
func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
var p *api.Pod = nil
err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
Logf("Waiting for pod %s to appear on node %s", rcName, node)
options := api.ListOptions{LabelSelector: label}
pods, err := c.Pods(ns).List(options)
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Spec.NodeName == node {
Logf("Pod %s found on node %s", pod.Name, node)
p = &pod
return true, nil
}
}
return false, nil
})
return p, err
}
func waitForPodToDisappear(c *client.Client, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
Logf("Waiting for pod %s to disappear", podName)
options := api.ListOptions{LabelSelector: label}
pods, err := c.Pods(ns).List(options)
if err != nil {
return false, err
}
found := false
for _, pod := range pods.Items {
if pod.Name == podName {
Logf("Pod %s still exists", podName)
found = true
}
}
if !found {
Logf("Pod %s no longer exists", podName)
return true, nil
}
return false, nil
})
}
// waitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists.
// In case of failure or too long waiting time, an error is returned.
func waitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
// NodeController evicts pod after 5 minutes, so we need timeout greater than that.
// Additionally, there can be non-zero grace period, so we are setting 10 minutes
// to be on the safe side.
return waitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
}
// waitForService waits until the service appears (exist == true), or disappears (exist == false)
func waitForService(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.Services(namespace).Get(name)
switch {
case err == nil:
if !exist {
return false, nil
}
Logf("Service %s in namespace %s found.", name, namespace)
return true, nil
case apierrs.IsNotFound(err):
if exist {
return false, nil
}
Logf("Service %s in namespace %s disappeared.", name, namespace)
return true, nil
default:
Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
return false, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}
// waitForServiceEndpointsNum waits until the number of endpoints that implement the service reaches expectNum.
func waitForServiceEndpointsNum(c *client.Client, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
Logf("Waiting for amount of service:%s endpoints to %d", serviceName, expectNum)
list, err := c.Endpoints(namespace).List(api.ListOptions{})
if err != nil {
return false, err
}
for _, e := range list.Items {
if e.Name == serviceName && countEndpointsNum(&e) == expectNum {
return true, nil
}
}
return false, nil
})
}
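// countEndpointsNum returns the total number of addresses across all subsets of the Endpoints object.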
func countEndpointsNum(e *api.Endpoints) int {
num := 0
for _, sub := range e.Subsets {
num += len(sub.Addresses)
}
return num
}
// waitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
func waitForReplicationController(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.ReplicationControllers(namespace).Get(name)
if err != nil {
Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
return !exist, nil
} else {
Logf("ReplicationController %s in namespace %s found.", name, namespace)
return exist, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}
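// waitForEndpoint waits until the named endpoints object has a subset with at least one address,
// or endpointRegisterTimeout elapses.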
func waitForEndpoint(c *client.Client, ns, name string) error {
for t := time.Now(); time.Since(t) < endpointRegisterTimeout; time.Sleep(poll) {
endpoint, err := c.Endpoints(ns).Get(name)
Expect(err).NotTo(HaveOccurred())
if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
} else {
return nil
}
}
return fmt.Errorf("Failed to get entpoints for %s/%s", ns, name)
}
// Context for checking pod responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type podProxyResponseChecker struct {
c *client.Client
ns string
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
pods *api.PodList
}
// checkAllResponses issues GETs to all pods in the context and verifies they
// reply with their own pod name.
func (r podProxyResponseChecker) checkAllResponses() (done bool, err error) {
successes := 0
options := api.ListOptions{LabelSelector: r.label}
currentPods, err := r.c.Pods(r.ns).List(options)
Expect(err).NotTo(HaveOccurred())
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}
subResourceProxyAvailable, err := serverVersionGTE(subResourcePodProxyVersion, r.c)
if err != nil {
return false, err
}
var body []byte
if subResourceProxyAvailable {
body, err = r.c.Get().
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do().
Raw()
} else {
body, err = r.c.Get().
Prefix("proxy").
Namespace(r.ns).
Resource("pods").
Name(string(pod.Name)).
Do().
Raw()
}
if err != nil {
Logf("Controller %s: Failed to GET from replica %d [%s]: %v:", r.controllerName, i+1, pod.Name, err)
continue
}
// The response checker expects the pod's name unless !respondName, in
// which case it just checks for a non-empty response.
got := string(body)
what := ""
if r.respondName {
what = "expected"
want := pod.Name
if got != want {
Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
return false, nil
}
return true, nil
}
// serverVersionGTE returns true if v is greater than or equal to the server
// version.
//
// TODO(18726): This should be incorporated into client.VersionInterface.
func serverVersionGTE(v semver.Version, c client.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion()
if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err)
}
sv, err := version.Parse(serverVersion.GitVersion)
if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
}
return sv.GTE(v), nil
}
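// podsResponding polls until every pod in the list answers a GET issued through the API proxy.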
func podsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error {
By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return wait.PollImmediate(poll, podRespondingTimeout, podProxyResponseChecker{c, ns, label, name, wantName, pods}.checkAllResponses)
}
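// podsCreated lists pods labeled name=<name> until the expected number of non-terminating replicas
// is observed, or a two-minute timeout expires.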
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
timeout := 2 * time.Minute
// List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
options := api.ListOptions{LabelSelector: label}
pods, err := c.Pods(ns).List(options)
if err != nil {
return nil, err
}
created := []api.Pod{}
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
created = append(created, pod)
}
Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
if len(created) == replicas {
pods.Items = created
return pods, nil
}
}
return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
func podsRunning(c *client.Client, pods *api.PodList) []error {
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
By("ensuring each pod is running")
e := []error{}
for _, pod := range pods.Items {
// TODO: make waiting parallel.
err := waitForPodRunningInNamespace(c, pod.Name, pod.Namespace)
if err != nil {
e = append(e, err)
}
}
return e
}
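// verifyPods waits for the expected number of pods to be created, running, and
// responding to proxied requests.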
func verifyPods(c *client.Client, ns, name string, wantName bool, replicas int) error {
pods, err := podsCreated(c, ns, name, replicas)
if err != nil {
return err
}
e := podsRunning(c, pods)
if len(e) > 0 {
return fmt.Errorf("failed to wait for pods running: %v", e)
}
err = podsResponding(c, ns, name, wantName, pods)
if err != nil {
return fmt.Errorf("failed to wait for pods responding: %v", err)
}
return nil
}
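// serviceResponding polls until the named service returns a non-empty response
// via the API server proxy.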
func serviceResponding(c *client.Client, ns, name string) error {
By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
return wait.PollImmediate(poll, serviceRespondingTimeout, func() (done bool, err error) {
proxyRequest, errProxy := getServicesProxyRequest(c, c.Get())
if errProxy != nil {
Logf("Failed to get services proxy request: %v:", errProxy)
return false, nil
}
body, err := proxyRequest.Namespace(ns).
Name(name).
Do().
Raw()
if err != nil {
Logf("Failed to GET from service %s: %v:", name, err)
return false, nil
}
got := string(body)
if len(got) == 0 {
Logf("Service %s: expected non-empty response", name)
			return false, nil // err is nil here; keep polling until the service responds
}
Logf("Service %s: found nonempty answer: %s", name, got)
return true, nil
})
}
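// loadConfig builds a client config from the kubeconfig file (and optional context)
// specified in testContext; it errors if no kubeconfig is given.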
func loadConfig() (*restclient.Config, error) {
switch {
case testContext.KubeConfig != "":
Logf(">>> testContext.KubeConfig: %s\n", testContext.KubeConfig)
c, err := clientcmd.LoadFromFile(testContext.KubeConfig)
if err != nil {
return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error())
}
if testContext.KubeContext != "" {
Logf(">>> testContext.KubeContext: %s\n", testContext.KubeContext)
c.CurrentContext = testContext.KubeContext
}
return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: testContext.Host}}).ClientConfig()
default:
return nil, fmt.Errorf("KubeConfig must be specified to load client config")
}
}
func loadClientFromConfig(config *restclient.Config) (*client.Client, error) {
c, err := client.New(config)
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
if c.Client.Timeout == 0 {
c.Client.Timeout = singleCallTimeout
}
return c, nil
}
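// loadClient creates a client from the test kubeconfig.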
func loadClient() (*client.Client, error) {
config, err := loadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return loadClientFromConfig(config)
}
// randomSuffix provides a random string to append to pods, services, and rcs.
// TODO: Allow service names to have the same form as names
// for pods and replication controllers so we don't
// need to use such a function and can instead
// use the UUID utility function.
func randomSuffix() string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return strconv.Itoa(r.Int() % 10000)
}
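// expectNoError logs the error, if any, and fails the test when err is non-nil.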
func expectNoError(err error, explain ...interface{}) {
if err != nil {
Logf("Unexpected error occurred: %v", err)
}
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
func cleanup(filePath string, ns string, selectors ...string) {
By("using delete to clean up resources")
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
runKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
for _, selector := range selectors {
resources := runKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
if resources != "" {
Failf("Resources left running after stop:\n%s", resources)
}
pods := runKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
if pods != "" {
Failf("Pods left unterminated after stop:\n%s", pods)
}
}
}
// validatorFn is the function which individual tests will implement.
// We may want it to return more than just an error, at some point.
type validatorFn func(c *client.Client, podID string) error
// validateController is a generic mechanism for testing RC's that are running.
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
// "containername": this is grepped for.
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
func validateController(c *client.Client, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
// NB: kubectl adds the "exists" function to the standard template functions.
// This lets us check to see if the "running" entry exists for each of the containers
// we care about. Exists will never return an error and it's safe to check a chain of
// things, any one of which may not exist. In the below template, all of info,
// containername, and running might be nil, so the normal index function isn't very
// helpful.
// This template is unit-tested in kubectl, so if you change it, update the unit test.
// You can read about the syntax here: http://golang.org/pkg/text/template/.
getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
getImageTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
getPodsOutput := runKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "--api-version=v1", "-l", testname, fmt.Sprintf("--namespace=%v", ns))
pods := strings.Fields(getPodsOutput)
if numPods := len(pods); numPods != replicas {
By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
continue
}
var runningPods []string
for _, podID := range pods {
running := runKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, "--api-version=v1", fmt.Sprintf("--namespace=%v", ns))
if running != "true" {
Logf("%s is created but not running", podID)
continue waitLoop
}
currentImage := runKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, "--api-version=v1", fmt.Sprintf("--namespace=%v", ns))
if currentImage != containerImage {
Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
continue waitLoop
}
// Call the generic validator function here.
// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
if err := validator(c, podID); err != nil {
Logf("%s is running right image but validator function failed: %v", podID, err)
continue waitLoop
}
Logf("%s is verified up and running", podID)
runningPods = append(runningPods, podID)
}
// If we reach here, then all our checks passed.
if len(runningPods) == replicas {
return
}
}
	// Reaching here means that one or more checks failed multiple times. Assuming it's not a race condition, something is broken.
Failf("Timed out after %v seconds waiting for %s pods to reach valid state", podStartTimeout.Seconds(), testname)
}
// kubectlCmd runs the kubectl executable through the wrapper script.
func kubectlCmd(args ...string) *exec.Cmd {
defaultArgs := []string{}
// Reference a --server option so tests can run anywhere.
if testContext.Host != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+testContext.Host)
}
if testContext.KubeConfig != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+testContext.KubeConfig)
// Reference the KubeContext
if testContext.KubeContext != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+testContext.KubeContext)
}
} else {
if testContext.CertDir != "" {
defaultArgs = append(defaultArgs,
fmt.Sprintf("--certificate-authority=%s", filepath.Join(testContext.CertDir, "ca.crt")),
fmt.Sprintf("--client-certificate=%s", filepath.Join(testContext.CertDir, "kubecfg.crt")),
fmt.Sprintf("--client-key=%s", filepath.Join(testContext.CertDir, "kubecfg.key")))
}
}
kubectlArgs := append(defaultArgs, args...)
	// We allow users to specify the path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
	// and so on.
cmd := exec.Command(testContext.KubectlPath, kubectlArgs...)
	// The caller will invoke this and wait on it.
return cmd
}
// kubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
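// Typical usage (illustrative):
//   out := newKubectlCommand("get", "pods", "--no-headers").execOrDie()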
type kubectlBuilder struct {
cmd *exec.Cmd
timeout <-chan time.Time
}
func newKubectlCommand(args ...string) *kubectlBuilder {
b := new(kubectlBuilder)
b.cmd = kubectlCmd(args...)
return b
}
func (b *kubectlBuilder) withTimeout(t <-chan time.Time) *kubectlBuilder {
b.timeout = t
return b
}
func (b kubectlBuilder) withStdinData(data string) *kubectlBuilder {
b.cmd.Stdin = strings.NewReader(data)
return &b
}
func (b kubectlBuilder) withStdinReader(reader io.Reader) *kubectlBuilder {
b.cmd.Stdin = reader
return &b
}
func (b kubectlBuilder) execOrDie() string {
str, err := b.exec()
Logf("stdout: %q", str)
Expect(err).NotTo(HaveOccurred())
return str
}
func (b kubectlBuilder) exec() (string, error) {
var stdout, stderr bytes.Buffer
cmd := b.cmd
cmd.Stdout, cmd.Stderr = &stdout, &stderr
Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
if err := cmd.Start(); err != nil {
return "", fmt.Errorf("Error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
}
errCh := make(chan error, 1)
go func() {
errCh <- cmd.Wait()
}()
select {
case err := <-errCh:
if err != nil {
return "", fmt.Errorf("Error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
}
case <-b.timeout:
b.cmd.Process.Kill()
return "", fmt.Errorf("Timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
}
Logf("stderr: %q", stderr.String())
// TODO: trimspace should be unnecessary after switching to use kubectl binary directly
return strings.TrimSpace(stdout.String()), nil
}
// runKubectlOrDie is a convenience wrapper over kubectlBuilder
func runKubectlOrDie(args ...string) string {
return newKubectlCommand(args...).execOrDie()
}
// runKubectl is a convenience wrapper over kubectlBuilder
func runKubectl(args ...string) (string, error) {
return newKubectlCommand(args...).exec()
}
// runKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func runKubectlOrDieInput(data string, args ...string) string {
return newKubectlCommand(args...).withStdinData(data).execOrDie()
}
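// startCmdAndStreamOutput starts cmd asynchronously and returns pipes for reading
// its stdout and stderr.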
func startCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
stdout, err = cmd.StdoutPipe()
if err != nil {
return
}
stderr, err = cmd.StderrPipe()
if err != nil {
return
}
Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
err = cmd.Start()
return
}
// Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer.
func tryKill(cmd *exec.Cmd) {
if err := cmd.Process.Kill(); err != nil {
Logf("ERROR failed to kill command %v! The process may leak", cmd)
}
}
// testContainerOutput runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher.
func testContainerOutput(scenarioName string, c *client.Client, pod *api.Pod, containerIndex int, expectedOutput []string, ns string) {
testContainerOutputMatcher(scenarioName, c, pod, containerIndex, expectedOutput, ns, ContainSubstring)
}
// testContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
func testContainerOutputRegexp(scenarioName string, c *client.Client, pod *api.Pod, containerIndex int, expectedOutput []string, ns string) {
testContainerOutputMatcher(scenarioName, c, pod, containerIndex, expectedOutput, ns, MatchRegexp)
}
// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func testContainerOutputMatcher(scenarioName string,
c *client.Client,
pod *api.Pod,
containerIndex int,
expectedOutput []string, ns string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
defer c.Pods(ns).Delete(pod.Name, api.NewDeleteOptions(0))
if _, err := c.Pods(ns).Create(pod); err != nil {
Failf("Failed to create pod: %v", err)
}
// Wait for client pod to complete.
var containerName string
for id, container := range pod.Spec.Containers {
expectNoError(waitForPodSuccessInNamespace(c, pod.Name, container.Name, ns))
if id == containerIndex {
containerName = container.Name
}
}
if containerName == "" {
Failf("Invalid container index: %d", containerIndex)
}
// Grab its logs. Get host first.
podStatus, err := c.Pods(ns).Get(pod.Name)
if err != nil {
Failf("Failed to get pod status: %v", err)
}
By(fmt.Sprintf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err))
var logs string
start := time.Now()
// Sometimes the actual containers take a second to get started, try to get logs for 60s
for time.Now().Sub(start) < (60 * time.Second) {
err = nil
logs, err = getPodLogs(c, ns, pod.Name, containerName)
if err != nil {
By(fmt.Sprintf("Warning: Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err))
time.Sleep(5 * time.Second)
continue
}
By(fmt.Sprintf("Successfully fetched pod logs:%v\n", logs))
break
}
for _, m := range expectedOutput {
Expect(logs).To(matcher(m), "%q in container output", m)
}
}
// podInfo contains pod information useful for debugging e2e tests.
type podInfo struct {
oldHostname string
oldPhase string
hostname string
phase string
}
// PodDiff is a map of pod name to podInfos
type PodDiff map[string]*podInfo
// Print formats and prints the given PodDiff.
func (p PodDiff) Print(ignorePhases sets.String) {
for name, info := range p {
if ignorePhases.Has(info.phase) {
continue
}
if info.phase == nonExist {
Logf("Pod %v was deleted, had phase %v and host %v", name, info.oldPhase, info.oldHostname)
continue
}
phaseChange, hostChange := false, false
msg := fmt.Sprintf("Pod %v ", name)
if info.oldPhase != info.phase {
phaseChange = true
if info.oldPhase == nonExist {
msg += fmt.Sprintf("in phase %v ", info.phase)
} else {
msg += fmt.Sprintf("went from phase: %v -> %v ", info.oldPhase, info.phase)
}
}
if info.oldHostname != info.hostname {
hostChange = true
if info.oldHostname == nonExist || info.oldHostname == "" {
msg += fmt.Sprintf("assigned host %v ", info.hostname)
} else {
msg += fmt.Sprintf("went from host: %v -> %v ", info.oldHostname, info.hostname)
}
}
if phaseChange || hostChange {
Logf(msg)
}
}
}
// Diff computes a PodDiff given 2 lists of pods.
func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
podInfoMap := PodDiff{}
// New pods will show up in the curPods list but not in oldPods. They have oldhostname/phase == nonexist.
for _, pod := range curPods {
podInfoMap[pod.Name] = &podInfo{hostname: pod.Spec.NodeName, phase: string(pod.Status.Phase), oldHostname: nonExist, oldPhase: nonExist}
}
// Deleted pods will show up in the oldPods list but not in curPods. They have a hostname/phase == nonexist.
for _, pod := range oldPods {
if info, ok := podInfoMap[pod.Name]; ok {
info.oldHostname, info.oldPhase = pod.Spec.NodeName, string(pod.Status.Phase)
} else {
podInfoMap[pod.Name] = &podInfo{hostname: nonExist, phase: nonExist, oldHostname: pod.Spec.NodeName, oldPhase: string(pod.Status.Phase)}
}
}
return podInfoMap
}
// RunDeployment Launches (and verifies correctness) of a Deployment
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling cleanup).
func RunDeployment(config DeploymentConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
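// create creates the Deployment described by the config.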
func (config *DeploymentConfig) create() error {
By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
deployment := &extensions.Deployment{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: extensions.DeploymentSpec{
Replicas: config.Replicas,
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
},
},
},
},
},
}
config.applyTo(&deployment.Spec.Template)
_, err := config.Client.Deployments(config.Namespace).Create(deployment)
if err != nil {
return fmt.Errorf("Error creating deployment: %v", err)
}
Logf("Created deployment with name: %v, namespace: %v, replica count: %v", deployment.Name, config.Namespace, deployment.Spec.Replicas)
return nil
}
// RunRC Launches (and verifies correctness) of a Replication Controller
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling cleanup).
func RunRC(config RCConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
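// create creates the replication controller described by the config.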
func (config *RCConfig) create() error {
By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: api.ReplicationControllerSpec{
Replicas: config.Replicas,
Selector: map[string]string{
"name": config.Name,
},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
ReadinessProbe: config.ReadinessProbe,
},
},
DNSPolicy: api.DNSDefault,
},
},
},
}
config.applyTo(rc.Spec.Template)
_, err := config.Client.ReplicationControllers(config.Namespace).Create(rc)
if err != nil {
return fmt.Errorf("Error creating replication controller: %v", err)
}
Logf("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
return nil
}
func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
if config.Env != nil {
for k, v := range config.Env {
c := &template.Spec.Containers[0]
c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
}
}
if config.Labels != nil {
for k, v := range config.Labels {
template.ObjectMeta.Labels[k] = v
}
}
if config.Ports != nil {
for k, v := range config.Ports {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: v})
}
}
if config.CpuLimit > 0 || config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
}
if config.CpuLimit > 0 {
template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
}
if config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
}
if config.CpuRequest > 0 || config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
}
if config.CpuRequest > 0 {
template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
}
if config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
}
}
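// start waits for the configured number of pods to become running and ready,
// logging progress and diagnosing failures along the way.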
func (config *RCConfig) start() error {
// Don't force tests to fail if they don't care about containers restarting.
var maxContainerFailures int
if config.MaxContainerFailures == nil {
maxContainerFailures = int(math.Max(1.0, float64(config.Replicas)*.01))
} else {
maxContainerFailures = *config.MaxContainerFailures
}
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
podStore := newPodStore(config.Client, config.Namespace, label, fields.Everything())
defer podStore.Stop()
interval := config.PollInterval
if interval <= 0 {
interval = 10 * time.Second
}
timeout := config.Timeout
if timeout <= 0 {
timeout = 5 * time.Minute
}
oldPods := make([]*api.Pod, 0)
oldRunning := 0
lastChange := time.Now()
for oldRunning != config.Replicas {
time.Sleep(interval)
terminating := 0
running := 0
runningButNotReady := 0
waiting := 0
pending := 0
unknown := 0
inactive := 0
failedContainers := 0
containerRestartNodes := sets.NewString()
pods := podStore.List()
created := []*api.Pod{}
for _, p := range pods {
if p.DeletionTimestamp != nil {
terminating++
continue
}
created = append(created, p)
if p.Status.Phase == api.PodRunning {
ready := false
for _, c := range p.Status.Conditions {
if c.Type == api.PodReady && c.Status == api.ConditionTrue {
ready = true
break
}
}
if ready {
					// Only count a pod as running when it is also ready.
running++
} else {
runningButNotReady++
}
for _, v := range FailedContainers(p) {
failedContainers = failedContainers + v.restarts
containerRestartNodes.Insert(p.Spec.NodeName)
}
} else if p.Status.Phase == api.PodPending {
if p.Spec.NodeName == "" {
waiting++
} else {
pending++
}
} else if p.Status.Phase == api.PodSucceeded || p.Status.Phase == api.PodFailed {
inactive++
} else if p.Status.Phase == api.PodUnknown {
unknown++
}
}
pods = created
if config.CreatedPods != nil {
*config.CreatedPods = pods
}
Logf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
config.Name, len(pods), config.Replicas, running, pending, waiting, inactive, terminating, unknown, runningButNotReady)
promPushRunningPending(running, pending)
if config.PodStatusFile != nil {
fmt.Fprintf(config.PodStatusFile, "%d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", running, pending, waiting, inactive, unknown, runningButNotReady)
}
if failedContainers > maxContainerFailures {
dumpNodeDebugInfo(config.Client, containerRestartNodes.List())
// Get the logs from the failed containers to help diagnose what caused them to fail
logFailedContainers(config.Namespace)
return fmt.Errorf("%d containers failed which is more than allowed %d", failedContainers, maxContainerFailures)
}
if len(pods) < len(oldPods) || len(pods) > config.Replicas {
// This failure mode includes:
// kubelet is dead, so node controller deleted pods and rc creates more
// - diagnose by noting the pod diff below.
// pod is unhealthy, so replication controller creates another to take its place
// - diagnose by comparing the previous "2 Pod states" lines for inactive pods
errorStr := fmt.Sprintf("Number of reported pods for %s changed: %d vs %d", config.Name, len(pods), len(oldPods))
Logf("%v, pods that changed since the last iteration:", errorStr)
Diff(oldPods, pods).Print(sets.NewString())
return fmt.Errorf(errorStr)
}
if len(pods) > len(oldPods) || running > oldRunning {
lastChange = time.Now()
}
oldPods = pods
oldRunning = running
if time.Since(lastChange) > timeout {
dumpPodDebugInfo(config.Client, pods)
break
}
}
if oldRunning != config.Replicas {
// List only pods from a given replication controller.
options := api.ListOptions{LabelSelector: label}
if pods, err := config.Client.Pods(api.NamespaceAll).List(options); err == nil {
for _, pod := range pods.Items {
Logf("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
}
} else {
Logf("Can't list pod debug info: %v", err)
}
return fmt.Errorf("Only %d pods started out of %d", oldRunning, config.Replicas)
}
return nil
}
// startPods is a simplified version of RunRC that creates plain Pods instead of a
// replication controller. It optionally waits for the pods to start running (if waitForRunning == true).
func startPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) {
startPodsID := string(util.NewUUID()) // So that we can label and find them
for i := 0; i < replicas; i++ {
podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
pod.ObjectMeta.Name = podName
pod.ObjectMeta.Labels["name"] = podName
pod.ObjectMeta.Labels["startPodsID"] = startPodsID
pod.Spec.Containers[0].Name = podName
_, err := c.Pods(namespace).Create(&pod)
expectNoError(err)
}
if waitForRunning {
label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
err := waitForPodsWithLabelRunning(c, namespace, label)
expectNoError(err, "Error waiting for %d pods to be running - probably a timeout", replicas)
}
}
func dumpPodDebugInfo(c *client.Client, pods []*api.Pod) {
badNodes := sets.NewString()
for _, p := range pods {
if p.Status.Phase != api.PodRunning {
if p.Spec.NodeName != "" {
Logf("Pod %v assigned to host %v (IP: %v) in %v", p.Name, p.Spec.NodeName, p.Status.HostIP, p.Status.Phase)
badNodes.Insert(p.Spec.NodeName)
} else {
Logf("Pod %v still unassigned", p.Name)
}
}
}
dumpNodeDebugInfo(c, badNodes.List())
}
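// dumpAllNamespaceInfo logs the events from the given namespace plus pod and node
// debug info for the whole cluster.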
func dumpAllNamespaceInfo(c *client.Client, namespace string) {
By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
events, err := c.Events(namespace).List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred())
// Sort events by their first timestamp
sortedEvents := events.Items
if len(sortedEvents) > 1 {
sort.Sort(byFirstTimestamp(sortedEvents))
}
for _, e := range sortedEvents {
Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
// Note that we don't wait for any cleanup to propagate, which means
// that if you delete a bunch of pods right before ending your test,
// you may or may not see the killing/deletion/cleanup events.
dumpAllPodInfo(c)
dumpAllNodeInfo(c)
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []api.Event
func (o byFirstTimestamp) Len() int { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byFirstTimestamp) Less(i, j int) bool {
if o[i].FirstTimestamp.Equal(o[j].FirstTimestamp) {
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
}
return o[i].FirstTimestamp.Before(o[j].FirstTimestamp)
}
func dumpAllPodInfo(c *client.Client) {
pods, err := c.Pods("").List(api.ListOptions{})
if err != nil {
Logf("unable to fetch pod debug info: %v", err)
}
logPodStates(pods.Items)
}
func dumpAllNodeInfo(c *client.Client) {
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Nodes().List(api.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
return
}
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
dumpNodeDebugInfo(c, names)
}
func dumpNodeDebugInfo(c *client.Client, nodeNames []string) {
for _, n := range nodeNames {
Logf("\nLogging node info for node %v", n)
node, err := c.Nodes().Get(n)
if err != nil {
Logf("Error getting node info %v", err)
}
Logf("Node Info: %v", node)
Logf("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
Logf("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
Logf("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := GetKubeletPods(c, n)
if err != nil {
Logf("Unable to retrieve kubelet pods for node %v", n)
continue
}
for _, p := range podList.Items {
Logf("%v started at %v (%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.ContainerStatuses))
for _, c := range p.Status.ContainerStatuses {
Logf("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
HighLatencyKubeletOperations(c, 10*time.Second, n)
// TODO: Log node resource info
}
}
// getNodeEvents returns kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, beware of calling it during a test.
func getNodeEvents(c *client.Client, nodeName string) []api.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
"involvedObject.namespace": api.NamespaceAll,
"source": "kubelet",
}.AsSelector()
options := api.ListOptions{FieldSelector: selector}
events, err := c.Events(api.NamespaceSystem).List(options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
return []api.Event{}
}
return events.Items
}
// Convenient wrapper around listing nodes supporting retries.
func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
var nodes *api.NodeList
var err error
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector()})
return err == nil, nil
}) != nil {
expectNoError(err, "Timed out while listing nodes for e2e cluster.")
}
return nodes
}
func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
if err != nil {
return err
}
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
}
if !wait {
return nil
}
return waitForRCPodsRunning(c, ns, name)
}
// Wait up to 10 minutes for pods to become Running. Assume that the pods of the
// rc are labels with {"name":rcName}.
func waitForRCPodsRunning(c *client.Client, ns, rcName string) error {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
err := waitForPodsWithLabelRunning(c, ns, selector)
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", rcName, err)
}
return nil
}
// Wait up to 10 minutes for all matching pods to become Running and at least one
// matching pod exists.
func waitForPodsWithLabelRunning(c *client.Client, ns string, label labels.Selector) error {
running := false
podStore := newPodStore(c, ns, label, fields.Everything())
defer podStore.Stop()
waitLoop:
for start := time.Now(); time.Since(start) < 10*time.Minute; time.Sleep(5 * time.Second) {
pods := podStore.List()
if len(pods) == 0 {
continue waitLoop
}
for _, p := range pods {
if p.Status.Phase != api.PodRunning {
continue waitLoop
}
}
running = true
break
}
if !running {
return fmt.Errorf("Timeout while waiting for pods with labels %q to be running", label.String())
}
return nil
}
// Returns true if all the specified pods are scheduled, else returns false.
func podsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (bool, error) {
podStore := newPodStore(c, ns, label, fields.Everything())
defer podStore.Stop()
pods := podStore.List()
if len(pods) == 0 {
return false, nil
}
for _, pod := range pods {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
}
// Wait for all matching pods to become scheduled and at least one
// matching pod exists. Return the list of matching pods.
func waitForPodsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {
err = wait.PollImmediate(poll, podScheduledBeforeTimeout,
func() (bool, error) {
pods, err = waitForPodsWithLabel(c, ns, label)
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
})
return pods, err
}
// Wait up to podListTimeout for getting pods with certain label
func waitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {
for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
options := api.ListOptions{LabelSelector: label}
pods, err = c.Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(pods.Items) > 0 {
break
}
}
if pods == nil || len(pods.Items) == 0 {
err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
}
return
}
// Delete a Replication Controller and all pods it spawned
func DeleteRC(c *client.Client, ns, name string) error {
By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
rc, err := c.ReplicationControllers(ns).Get(name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("RC %s was already deleted: %v", name, err)
return nil
}
return err
}
reaper, err := kubectl.ReaperForReplicationController(c, 10*time.Minute)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("RC %s was already deleted: %v", name, err)
return nil
}
return err
}
startTime := time.Now()
err = reaper.Stop(ns, name, 0, api.NewDeleteOptions(0))
if apierrs.IsNotFound(err) {
Logf("RC %s was already deleted: %v", name, err)
return nil
}
deleteRCTime := time.Now().Sub(startTime)
Logf("Deleting RC %s took: %v", name, deleteRCTime)
if err != nil {
return fmt.Errorf("error while stopping RC: %s: %v", name, err)
}
err = waitForRCPodsGone(c, rc)
if err != nil {
return fmt.Errorf("error while deleting RC %s: %v", name, err)
}
terminatePodTime := time.Now().Sub(startTime) - deleteRCTime
Logf("Terminating RC %s pods took: %v", name, terminatePodTime)
return nil
}
// waitForRCPodsGone waits until there are no pods reported under an RC's selector (because the pods
// have completed termination).
func waitForRCPodsGone(c *client.Client, rc *api.ReplicationController) error {
return wait.PollImmediate(poll, 2*time.Minute, func() (bool, error) {
selector := labels.SelectorFromSet(rc.Spec.Selector)
options := api.ListOptions{LabelSelector: selector}
if pods, err := c.Pods(rc.Namespace).List(options); err == nil && len(pods.Items) == 0 {
return true, nil
}
return false, nil
})
}
// Delete a ReplicaSet and all pods it spawned
func DeleteReplicaSet(c *client.Client, ns, name string) error {
By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
rc, err := c.Extensions().ReplicaSets(ns).Get(name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), c)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
startTime := time.Now()
err = reaper.Stop(ns, name, 0, api.NewDeleteOptions(0))
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
deleteRSTime := time.Now().Sub(startTime)
Logf("Deleting RS %s took: %v", name, deleteRSTime)
if err == nil {
err = waitForReplicaSetPodsGone(c, rc)
}
terminatePodTime := time.Now().Sub(startTime) - deleteRSTime
Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
return err
}
// waitForReplicaSetPodsGone waits until there are no pods reported under a
// ReplicaSet selector (because the pods have completed termination).
func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) error {
return wait.PollImmediate(poll, 2*time.Minute, func() (bool, error) {
selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
expectNoError(err)
options := api.ListOptions{LabelSelector: selector}
if pods, err := c.Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
return true, nil
}
return false, nil
})
}
// Waits for the deployment to reach desired state.
// Returns an error if minAvailable or maxCreated is broken at any times.
func waitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, desiredUpdatedReplicas, minAvailable, maxCreated, minReadySeconds int) error {
var oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
var newRS *extensions.ReplicaSet
var deployment *extensions.Deployment
err := wait.Poll(poll, 5*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
oldRSs, allOldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c)
if err != nil {
return false, err
}
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c)
if err != nil {
return false, err
}
if newRS == nil {
		// New RS hasn't been created yet.
return false, nil
}
allRSs = append(oldRSs, newRS)
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
totalAvailable, err := deploymentutil.GetAvailablePodsForReplicaSets(c, allRSs, minReadySeconds)
if err != nil {
return false, err
}
if totalCreated > maxCreated {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfReplicaSets(c, allRSs, minReadySeconds)
return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
}
if totalAvailable < minAvailable {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfReplicaSets(c, allRSs, minReadySeconds)
return false, fmt.Errorf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
}
// When the deployment status and its underlying resources reach the desired state, we're done
if deployment.Status.Replicas == desiredUpdatedReplicas &&
deployment.Status.UpdatedReplicas == desiredUpdatedReplicas &&
deploymentutil.GetReplicaCountForReplicaSets(oldRSs) == 0 &&
deploymentutil.GetReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}) == desiredUpdatedReplicas {
return true, nil
}
return false, nil
})
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfReplicaSets(c, allRSs, minReadySeconds)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %s status to match expectation: %v", deploymentName, err)
}
return nil
}
// waitForDeploymentRollbackCleared waits until the given deployment's rollbackTo field is cleared (rollback has either been kicked off or was never requested).
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func waitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
err := wait.Poll(poll, 1*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
// Rollback not set or is kicked off
if deployment.Spec.RollbackTo == nil {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
}
return nil
}
// waitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
func waitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
var deployment *extensions.Deployment
var newRS *extensions.ReplicaSet
err := wait.Poll(poll, 1*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c)
if err != nil {
return false, err
}
// Check revision of this deployment, and of the new replica set of this deployment
if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision ||
newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision ||
deployment.Spec.Template.Spec.Containers[0].Image != image || newRS.Spec.Template.Spec.Containers[0].Image != image {
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, nil, newRS)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %s revision and image to match expectation: %v", deploymentName, err)
}
return nil
}
func waitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := api.ListOptions{LabelSelector: label}
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
pods, err := c.Pods(ns).List(options)
if err != nil {
return false, nil
}
for _, pod := range pods.Items {
if !deploymentutil.IsPodAvailable(&pod, minReadySeconds) {
return false, nil
}
}
return true, nil
})
}
// Waits for the deployment to clean up old replica sets.
func waitForDeploymentOldRSsNum(c *clientset.Clientset, ns, deploymentName string, desiredRSNum int) error {
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
_, oldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c)
if err != nil {
return false, err
}
return len(oldRSs) == desiredRSNum, nil
})
}
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
Logf("Deployment: %+v. Selector = %+v", deployment, deployment.Spec.Selector)
for i := range allOldRSs {
Logf("All old ReplicaSets (%d/%d) of deployment %s: %+v. Selector = %+v", i+1, len(allOldRSs), deployment.Name, allOldRSs[i], allOldRSs[i].Spec.Selector)
}
Logf("New ReplicaSet of deployment %s: %+v. Selector = %+v", deployment.Name, newRS, newRS.Spec.Selector)
}
func waitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, poll, 1*time.Minute)
}
func logPodsOfReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int) {
allPods, err := deploymentutil.GetPodsForReplicaSets(c, rss)
if err == nil {
for _, pod := range allPods {
availability := "not available"
if deploymentutil.IsPodAvailable(&pod, minReadySeconds) {
availability = "available"
}
Logf("Pod %s is %s: %+v", pod.Name, availability, pod)
}
}
}
// Waits for the number of events on the given object to reach a desired count.
func waitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
events, err := c.Events(ns).Search(objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount == desiredEventsCount {
return true, nil
}
if eventsCount < desiredEventsCount {
return false, nil
}
// Number of events has exceeded the desired count.
return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", eventsCount, desiredEventsCount)
})
}
// Waits for the number of events on the given object to be at least a desired count.
func waitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object, atLeastEventsCount int) error {
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
events, err := c.Events(ns).Search(objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount >= atLeastEventsCount {
return true, nil
}
return false, nil
})
}
type updateDeploymentFunc func(d *extensions.Deployment)
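// updateDeploymentWithRetries fetches the named deployment, applies applyUpdate to
// it and pushes the result, retrying on update errors for up to a minute.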
func updateDeploymentWithRetries(c *clientset.Clientset, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
deployments := c.Extensions().Deployments(namespace)
err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if deployment, err = deployments.Get(name); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(deployment)
if deployment, err = deployments.Update(deployment); err == nil {
Logf("updating deployment %s", name)
return true, nil
}
return false, nil
})
return deployment, err
}
// FailedContainers inspects all containers in a pod and returns failure
// information for containers that have failed or been restarted.
// A map is returned where the key is the containerID and the value is a
// struct containing the restart and failure information
func FailedContainers(pod *api.Pod) map[string]ContainerFailures {
var state ContainerFailures
states := make(map[string]ContainerFailures)
statuses := pod.Status.ContainerStatuses
if len(statuses) == 0 {
return nil
} else {
for _, status := range statuses {
if status.State.Terminated != nil {
states[status.ContainerID] = ContainerFailures{status: status.State.Terminated}
} else if status.LastTerminationState.Terminated != nil {
states[status.ContainerID] = ContainerFailures{status: status.LastTerminationState.Terminated}
}
if status.RestartCount > 0 {
var ok bool
if state, ok = states[status.ContainerID]; !ok {
state = ContainerFailures{}
}
state.restarts = status.RestartCount
states[status.ContainerID] = state
}
}
}
return states
}
// Prints the histogram of the events and returns the number of bad events.
func BadEvents(events []*api.Event) int {
type histogramKey struct {
reason string
source string
}
histogram := make(map[histogramKey]int)
for _, e := range events {
histogram[histogramKey{reason: e.Reason, source: e.Source.Component}]++
}
for key, number := range histogram {
Logf("- reason: %s, source: %s -> %d", key.reason, key.source, number)
}
badPatterns := []string{"kill", "fail"}
badEvents := 0
for key, number := range histogram {
for _, s := range badPatterns {
if strings.Contains(key.reason, s) {
Logf("WARNING %d events from %s with reason: %s", number, key.source, key.reason)
badEvents += number
break
}
}
}
return badEvents
}
// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []string {
hosts := []string{}
for _, n := range nodelist.Items {
for _, addr := range n.Status.Addresses {
			// Use the first address of the given type we find on the node, and
// use at most one per node.
// TODO(roberthbailey): Use the "preferred" address for the node, once
// such a thing is defined (#2462).
if addr.Type == addrType {
hosts = append(hosts, addr.Address)
break
}
}
}
return hosts
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes the master node.
// It returns an error if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c *client.Client) ([]string, error) {
nodelist := ListSchedulableNodesOrDie(c)
// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
hosts := NodeAddresses(nodelist, api.NodeExternalIP)
// Error if any node didn't have an external IP.
if len(hosts) != len(nodelist.Items) {
return hosts, fmt.Errorf(
"only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
len(hosts), len(nodelist.Items), nodelist)
}
sshHosts := make([]string, 0, len(hosts))
for _, h := range hosts {
sshHosts = append(sshHosts, net.JoinHostPort(h, "22"))
}
return sshHosts, nil
}
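// SSHResult holds the outcome of running a command over SSH on a node.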
type SSHResult struct {
User string
Host string
Cmd string
Stdout string
Stderr string
Code int
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
func SSH(cmd, host, provider string) (SSHResult, error) {
result := SSHResult{Host: host, Cmd: cmd}
// Get a signer for the provider.
signer, err := getSigner(provider)
if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
}
// RunSSHCommand will default to Getenv("USER") if user == "", but we're
// defaulting here as well for logging clarity.
result.User = os.Getenv("KUBE_SSH_USER")
if result.User == "" {
result.User = os.Getenv("USER")
}
stdout, stderr, code, err := sshutil.RunSSHCommand(cmd, result.User, host, signer)
result.Stdout = stdout
result.Stderr = stderr
result.Code = code
return result, err
}
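// LogSSHResult logs the command, stdout, stderr and exit code of an SSHResult.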
func LogSSHResult(result SSHResult) {
remote := fmt.Sprintf("%s@%s", result.User, result.Host)
Logf("ssh %s: command: %s", remote, result.Cmd)
Logf("ssh %s: stdout: %q", remote, result.Stdout)
Logf("ssh %s: stderr: %q", remote, result.Stderr)
Logf("ssh %s: exit code: %d", remote, result.Code)
}
func issueSSHCommand(cmd, provider string, node *api.Node) error {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == api.NodeExternalIP {
host = a.Address + ":22"
break
}
}
if host == "" {
return fmt.Errorf("couldn't find external IP address for node %s", node.Name)
}
Logf("Calling %s on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
LogSSHResult(result)
if result.Code != 0 || err != nil {
return fmt.Errorf("failed running %q: %v (exit code %d)", cmd, err, result.Code)
}
return nil
}
// NewHostExecPodSpec returns the pod spec of hostexec pod
func NewHostExecPodSpec(ns, name string) *api.Pod {
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
},
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "hostexec",
Image: "gcr.io/google_containers/hostexec:1.2",
ImagePullPolicy: api.PullIfNotPresent,
},
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
},
}
return pod
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return runKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
expectNoError(err)
return stdout
}
// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
func LaunchHostExecPod(client *client.Client, ns, name string) *api.Pod {
hostExecPod := NewHostExecPodSpec(ns, name)
pod, err := client.Pods(ns).Create(hostExecPod)
expectNoError(err)
err = waitForPodRunningInNamespace(client, pod.Name, pod.Namespace)
expectNoError(err)
return pod
}
// getSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
// used to SSH to their nodes.
func getSigner(provider string) (ssh.Signer, error) {
// Get the directory in which SSH keys are located.
keydir := filepath.Join(os.Getenv("HOME"), ".ssh")
// Select the key itself to use. When implementing more providers here,
// please also add them to any SSH tests that are disabled because of signer
// support.
keyfile := ""
switch provider {
case "gce", "gke", "kubemark":
keyfile = "google_compute_engine"
case "aws":
// If there is an env. variable override, use that.
aws_keyfile := os.Getenv("AWS_SSH_KEY")
if len(aws_keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(aws_keyfile)
}
// Otherwise revert to home dir
keyfile = "kube_aws_rsa"
default:
return nil, fmt.Errorf("getSigner(...) not implemented for %s", provider)
}
key := filepath.Join(keydir, keyfile)
return sshutil.MakePrivateKeySignerFromFile(key)
}
// checkPodsRunningReady returns whether all pods whose names are listed in podNames
// in namespace ns are running and ready, using c and waiting at most timeout.
func checkPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
np, desc := len(podNames), "running and ready"
Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
result := make(chan bool, len(podNames))
for ix := range podNames {
// Launch off pod readiness checkers.
go func(name string) {
err := waitForPodCondition(c, ns, name, desc, timeout, podRunningReady)
result <- err == nil
}(podNames[ix])
}
// Wait for them all to finish.
success := true
// TODO(a-robinson): Change to `for range` syntax and remove logging once we
// support only Go >= 1.4.
for _, podName := range podNames {
if !<-result {
Logf("Pod %[1]s failed to be %[2]s.", podName, desc)
success = false
}
}
Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
return success
}
// waitForNodeToBeReady returns whether node name is ready within timeout.
func waitForNodeToBeReady(c *client.Client, name string, timeout time.Duration) bool {
return waitForNodeToBe(c, name, api.NodeReady, true, timeout)
}
// waitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g. false or unknown) within
// timeout.
func waitForNodeToBeNotReady(c *client.Client, name string, timeout time.Duration) bool {
return waitForNodeToBe(c, name, api.NodeReady, false, timeout)
}
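// isNodeConditionSetAsExpected returns whether the given node condition has the
// wanted status, logging any mismatch.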
func isNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool {
// Check the node readiness condition (logging all).
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
if (cond.Status == api.ConditionTrue) == wantTrue {
return true
} else {
Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == api.ConditionTrue, wantTrue, cond.Reason, cond.Message)
return false
}
}
}
Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
return false
}
// waitForNodeToBe returns whether node "name's" condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func waitForNodeToBe(c *client.Client, name string, conditionType api.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.Nodes().Get(name)
if err != nil {
Logf("Couldn't get node %s", name)
continue
}
if isNodeConditionSetAsExpected(node, conditionType, wantTrue) {
return true
}
}
Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}
// allNodesReady checks whether all registered nodes are ready.
func allNodesReady(c *client.Client, timeout time.Duration) error {
Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []api.Node
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Nodes().List(api.ListOptions{})
if err != nil {
return false, err
}
for _, node := range nodes.Items {
if !isNodeConditionSetAsExpected(&node, api.NodeReady, true) {
notReady = append(notReady, node)
}
}
return len(notReady) == 0, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > 0 {
return fmt.Errorf("Not ready nodes: %v", notReady)
}
return nil
}
// Filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
func filterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) {
var l []api.Node
for _, node := range nodeList.Items {
if fn(node) {
l = append(l, node)
}
}
nodeList.Items = l
}
// parseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
func parseKVLines(output, key string) string {
delim := ":"
key = key + delim
for _, line := range strings.Split(output, "\n") {
pieces := strings.SplitAfterN(line, delim, 2)
if len(pieces) != 2 {
continue
}
k, v := pieces[0], pieces[1]
if k == key {
return strings.TrimSpace(v)
}
}
return ""
}
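// exampleParseKVLines is an illustrative sketch (not called by any test) of the
// behaviour described above: the value is returned only for an exact key match and
// the empty string otherwise. The sample output below is made up for the example.
func exampleParseKVLines() {
	out := "Client Version: v1.2.0\nServer Version: v1.2.4\n"
	Logf("server version: %q", parseKVLines(out, "Server Version")) // "v1.2.4"
	Logf("missing key: %q", parseKVLines(out, "GitCommit"))         // ""
}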
func restartKubeProxy(host string) error {
// TODO: Make it work for all providers.
if !providerIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", testContext.Provider)
}
// kubelet will restart the kube-proxy since it's running in a static pod
result, err := SSH("sudo pkill kube-proxy", host, testContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart kube-proxy: %v", err)
}
// wait for kube-proxy to come back up
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
result, err := SSH("sudo /bin/sh -c 'pgrep kube-proxy | wc -l'", host, testContext.Provider)
if err != nil {
return false, err
}
if result.Code != 0 {
LogSSHResult(result)
return false, fmt.Errorf("failed to run command, exited %d", result.Code)
}
if result.Stdout == "0\n" {
return false, nil
}
Logf("kube-proxy is back up.")
return true, nil
})
if err != nil {
return fmt.Errorf("kube-proxy didn't recover: %v", err)
}
return nil
}
func restartApiserver() error {
// TODO: Make it work for all providers.
if !providerIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", testContext.Provider)
}
var command string
if providerIs("gce", "gke") {
command = "sudo docker ps | grep /kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
} else {
command = "sudo /etc/init.d/kube-apiserver restart"
}
result, err := SSH(command, getMasterHost()+":22", testContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart apiserver: %v", err)
}
return nil
}
func waitForApiserverUp(c *client.Client) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
body, err := c.Get().AbsPath("/healthz").Do().Raw()
if err == nil && string(body) == "ok" {
return nil
}
}
return fmt.Errorf("waiting for apiserver timed out")
}
// waitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it.
// By cluster size we mean number of Nodes excluding Master Node.
func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector()})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
filterNodes(nodes, func(node api.Node) bool {
return isNodeConditionSetAsExpected(&node, api.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired size %d", size)
return nil
}
Logf("Waiting for cluster size %d, current size %d, not ready nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
}
// getHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func getHostExternalAddress(client *client.Client, p *api.Pod) (externalAddress string, err error) {
node, err := client.Nodes().Get(p.Spec.NodeName)
if err != nil {
return "", err
}
for _, address := range node.Status.Addresses {
if address.Type == api.NodeExternalIP {
if address.Address != "" {
externalAddress = address.Address
break
}
}
}
if externalAddress == "" {
err = fmt.Errorf("No external address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
return
}
type extractRT struct {
http.Header
}
func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
rt.Header = req.Header
return nil, nil
}
// headersForConfig extracts any http client logic necessary for the provided
// config.
func headersForConfig(c *restclient.Config) (http.Header, error) {
extract := &extractRT{}
rt, err := restclient.HTTPWrappersForConfig(c, extract)
if err != nil {
return nil, err
}
if _, err := rt.RoundTrip(&http.Request{}); err != nil {
return nil, err
}
return extract.Header, nil
}
// OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client
// config, with the specified protocols.
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil {
return nil, fmt.Errorf("failed to create tls config: %v", err)
}
if tlsConfig != nil {
url.Scheme = "wss"
if !strings.Contains(url.Host, ":") {
url.Host += ":443"
}
} else {
url.Scheme = "ws"
if !strings.Contains(url.Host, ":") {
url.Host += ":80"
}
}
headers, err := headersForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to load http headers: %v", err)
}
cfg, err := websocket.NewConfig(url.String(), "http://localhost")
if err != nil {
return nil, fmt.Errorf("failed to create websocket config: %v", err)
}
cfg.Header = headers
cfg.TlsConfig = tlsConfig
cfg.Protocol = protocols
return websocket.DialConfig(cfg)
}
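// exampleOpenWebSocket is an illustrative sketch (not used by the suite) of calling
// OpenWebSocketForURL. The subprotocol name is an assumption for the example; callers
// would normally derive the URL and config from the test framework's client setup.
func exampleOpenWebSocket(config *restclient.Config, u *url.URL) error {
	ws, err := OpenWebSocketForURL(u, config, []string{"base64.channel.k8s.io"})
	if err != nil {
		return err
	}
	defer ws.Close()
	buf := make([]byte, 1024)
	_, err = ws.Read(buf) // read a single frame, if any
	return err
}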
// getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client *client.Client, ns, name string) ([]string, error) {
ing, err := client.Extensions().Ingress(ns).Get(name)
if err != nil {
return nil, err
}
addresses := []string{}
for _, a := range ing.Status.LoadBalancer.Ingress {
if a.IP != "" {
addresses = append(addresses, a.IP)
}
if a.Hostname != "" {
addresses = append(addresses, a.Hostname)
}
}
return addresses, nil
}
// waitForIngressAddress waits for the Ingress to acquire an address.
func waitForIngressAddress(c *client.Client, ns, ingName string, timeout time.Duration) (string, error) {
var address string
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
ipOrNameList, err := getIngressAddress(c, ns, ingName)
if err != nil || len(ipOrNameList) == 0 {
Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
return false, nil
}
address = ipOrNameList[0]
return true, nil
})
return address, err
}
// Looks for the given string in the log of a specific pod container
func lookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
return runKubectlOrDie("log", podName, container, fmt.Sprintf("--namespace=%v", ns))
})
}
// Looks for the given string in a file in a specific pod container
func lookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
return runKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file)
})
}
// Looks for the given string in the output of a command executed in a specific pod container
func lookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
// use the first container
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
args = append(args, command...)
return runKubectlOrDie(args...)
})
}
// Looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns the last output, plus an
// error if the string was not found.
func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) {
result = fn()
if strings.Contains(result, expectedString) {
return
}
}
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
return
}
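// exampleLookForString is an illustrative sketch of the polling helper above: the
// closure is called repeatedly until its output contains the expected substring or
// the timeout elapses. The closure here returns a canned string; real callers poll
// something like pod logs or the output of an exec'd command.
func exampleLookForString() {
	result, err := lookForString("Server is ready", 30*time.Second, func() string {
		return "starting up...\nServer is ready\n"
	})
	if err != nil {
		Logf("substring never appeared, last output was %q", result)
	}
}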
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client *client.Client, ns, name string, svcPort int) (int, error) {
svc, err := client.Services(ns).Get(name)
if err != nil {
return 0, err
}
for _, p := range svc.Spec.Ports {
if p.Port == svcPort {
if p.NodePort != 0 {
return p.NodePort, nil
}
}
}
return 0, fmt.Errorf(
"No node port found for service %v, port %v", name, svcPort)
}
// getNodePortURL returns the url to a nodeport Service.
func getNodePortURL(client *client.Client, ns, name string, svcPort int) (string, error) {
nodePort, err := getSvcNodePort(client, ns, name, svcPort)
if err != nil {
return "", err
}
// It should be OK to list unschedulable Node here.
nodes, err := client.Nodes().List(api.ListOptions{})
if err != nil {
return "", err
}
if len(nodes.Items) == 0 {
return "", fmt.Errorf("Unable to list nodes in cluster.")
}
for _, node := range nodes.Items {
for _, address := range node.Status.Addresses {
if address.Type == api.NodeExternalIP {
if address.Address != "" {
return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
}
}
}
}
return "", fmt.Errorf("Failed to find external address for service %v", name)
}
// scaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
// none are running, otherwise it does what a synchronous scale operation would do.
func scaleRCByLabels(client *client.Client, ns string, l map[string]string, replicas uint) error {
listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))}
rcs, err := client.ReplicationControllers(ns).List(listOpts)
if err != nil {
return err
}
if len(rcs.Items) == 0 {
return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
}
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
for _, labelRC := range rcs.Items {
name := labelRC.Name
if err := ScaleRC(client, ns, name, replicas, false); err != nil {
return err
}
rc, err := client.ReplicationControllers(ns).Get(name)
if err != nil {
return err
}
if replicas == 0 {
if err := waitForRCPodsGone(client, rc); err != nil {
return err
}
} else {
if err := waitForPodsWithLabelRunning(
client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
return err
}
}
}
return nil
}
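// exampleScaleDownByLabels is an illustrative sketch (the label and namespace are
// placeholders): scale every RC labelled app=guestbook down to zero and wait for the
// pods to disappear, as described above.
func exampleScaleDownByLabels(c *client.Client, ns string) error {
	return scaleRCByLabels(c, ns, map[string]string{"app": "guestbook"}, 0)
}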
func getPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false)
}
func getPreviousPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true)
}
// utility function for gomega Eventually
func getPodLogsInternal(c *client.Client, namespace, podName, containerName string, previous bool) (string, error) {
logs, err := c.Get().
Resource("pods").
Namespace(namespace).
Name(podName).SubResource("log").
Param("container", containerName).
Param("previous", strconv.FormatBool(previous)).
Do().
Raw()
if err != nil {
return "", err
}
if err == nil && strings.Contains(string(logs), "Internal Error") {
return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
}
return string(logs), err
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
if testContext.Provider == "gce" || testContext.Provider == "gke" {
return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
}
return nil
}
func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
gceCloud, ok := testContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", testContext.CloudConfig.Provider)
}
project := testContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(testContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", testContext.CloudConfig.Zone, err)
}
return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
service := gceCloud.GetComputeService()
list, err := service.ForwardingRules.List(project, region).Do()
if err != nil {
return false, err
}
for ix := range list.Items {
item := list.Items[ix]
if item.PortRange == portRange && item.IPAddress == ip {
Logf("found a load balancer: %v", item)
return false, nil
}
}
return true, nil
})
}
// The following helper functions can block/unblock network from source
// host to destination host by manipulating iptable rules.
// This function assumes it can ssh to the source host.
//
// Caution:
// Prefer passing IP addresses rather than hostnames: with hostnames, iptables
// performs a DNS lookup to resolve the name to an IP address, which slows down the
// test and makes it fail outright if DNS is absent or broken.
//
// Suggested usage pattern:
// func foo() {
// ...
// defer unblockNetwork(from, to)
// blockNetwork(from, to)
// ...
// }
//
func blockNetwork(from string, to string) {
Logf("block network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
if result, err := SSH(dropCmd, from, testContext.Provider); result.Code != 0 || err != nil {
LogSSHResult(result)
Failf("Unexpected error: %v", err)
}
}
func unblockNetwork(from string, to string) {
Logf("Unblock network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule)
// The undrop command may fail if the rule has never been created.
// In that case we just lose 30 seconds, but the cluster is healthy.
// But if the rule had been created and removing it failed, the node is broken and
// not coming back. Subsequent tests will run on fewer nodes (and some of them
// may fail). Manual intervention is required in that case (recreating the
// cluster also solves the problem).
err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
result, err := SSH(undropCmd, from, testContext.Provider)
if result.Code == 0 && err == nil {
return true, nil
}
LogSSHResult(result)
if err != nil {
Logf("Unexpected error: %v", err)
}
return false, nil
})
if err != nil {
Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
"required on host %s: remove rule %s, if exists", from, iptablesRule)
}
}
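// exampleNetworkPartition is an illustrative sketch following the suggested usage
// pattern above: the unblock is deferred first so the REJECT rule is always removed,
// even if the test body fails. The addresses are placeholders and should be IPs.
func exampleNetworkPartition(nodeIP, masterIP string) {
	defer unblockNetwork(nodeIP, masterIP)
	blockNetwork(nodeIP, masterIP)
	// ... run assertions while traffic from nodeIP to masterIP is rejected ...
}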
func isElementOf(podUID types.UID, pods *api.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
}
|
[
"\"KUBE_SSH_USER\"",
"\"USER\"",
"\"HOME\"",
"\"AWS_SSH_KEY\""
] |
[] |
[
"USER",
"HOME",
"AWS_SSH_KEY",
"KUBE_SSH_USER"
] |
[]
|
["USER", "HOME", "AWS_SSH_KEY", "KUBE_SSH_USER"]
|
go
| 4 | 0 | |
pkg/cmd/opts/jenkins.go
|
package opts
import (
"fmt"
"net/url"
"os"
"sort"
"strings"
"time"
gojenkins "github.com/jenkins-x/golang-jenkins"
"github.com/jenkins-x/jx/v2/pkg/builds"
jxjenkins "github.com/jenkins-x/jx/v2/pkg/jenkins"
"github.com/jenkins-x/jx/v2/pkg/kube"
"github.com/jenkins-x/jx/v2/pkg/kube/services"
"github.com/jenkins-x/jx/v2/pkg/log"
"github.com/jenkins-x/jx/v2/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// JenkinsSelectorOptions represents the options used to refer to a Jenkins server.
// If nothing is specified it is assumed the current team uses a static Jenkins server as its execution engine;
// otherwise additional Jenkins Apps can be referred to in order to use custom Jenkins servers.
type JenkinsSelectorOptions struct {
UseCustomJenkins bool
CustomJenkinsName string
// cached client
cachedCustomJenkinsClient gojenkins.JenkinsClient
}
// AddFlags add the command flags for picking a custom Jenkins App to work with
func (o *JenkinsSelectorOptions) AddFlags(cmd *cobra.Command) {
cmd.Flags().BoolVarP(&o.UseCustomJenkins, "custom", "m", false, "Use a custom Jenkins App instead of the default execution engine in Jenkins X")
cmd.Flags().StringVarP(&o.CustomJenkinsName, "jenkins-name", "j", "", "The name of the custom Jenkins App if you don't wish to use the default execution engine in Jenkins X")
}
// IsCustom returns true if a custom Jenkins App is specified
func (o *JenkinsSelectorOptions) IsCustom() bool {
return o.UseCustomJenkins || o.CustomJenkinsName != ""
}
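// exampleWireJenkinsSelector is an illustrative sketch (not part of the package API):
// it registers the selector flags on a cobra command so users can pass --custom or
// --jenkins-name; after flag parsing, IsCustom reports whether a custom Jenkins App
// was requested.
func exampleWireJenkinsSelector(cmd *cobra.Command) *JenkinsSelectorOptions {
	selector := &JenkinsSelectorOptions{}
	selector.AddFlags(cmd)
	return selector
}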
// GetAllPipelineJobNames returns all the pipeline job names
func (o *CommonOptions) GetAllPipelineJobNames(jenkinsClient gojenkins.JenkinsClient, jobNames *[]string, jobName string) error {
job, err := jenkinsClient.GetJob(jobName)
if err != nil {
return err
}
if len(job.Jobs) == 0 {
*jobNames = append(*jobNames, job.FullName)
}
for _, j := range job.Jobs {
err = o.GetAllPipelineJobNames(jenkinsClient, jobNames, job.FullName+"/"+j.Name)
if err != nil {
return err
}
}
return nil
}
// SetJenkinsClient sets the JenkinsClient - usually used in testing
func (o *CommonOptions) SetJenkinsClient(jenkinsClient gojenkins.JenkinsClient) {
o.jenkinsClient = jenkinsClient
}
// JenkinsClient returns the Jenkins client
func (o *CommonOptions) JenkinsClient() (gojenkins.JenkinsClient, error) {
if o.jenkinsClient == nil {
kubeClient, ns, err := o.KubeClientAndDevNamespace()
if err != nil {
return nil, err
}
o.factory.SetBatch(o.BatchMode)
jenkins, err := o.factory.CreateJenkinsClient(kubeClient, ns, o.GetIOFileHandles())
if err != nil {
return nil, err
}
o.jenkinsClient = jenkins
}
return o.jenkinsClient, nil
}
// CustomJenkinsClient returns the Jenkins client for the custom jenkins app
func (o *CommonOptions) CustomJenkinsClient(jenkinsServiceName string) (gojenkins.JenkinsClient, error) {
kubeClient, ns, err := o.KubeClientAndDevNamespace()
if err != nil {
return nil, err
}
o.factory.SetBatch(o.BatchMode)
return o.factory.CreateCustomJenkinsClient(kubeClient, ns, jenkinsServiceName, o.GetIOFileHandles())
}
// CustomJenkinsURL returns the default or the custom Jenkins URL
func (o *CommonOptions) CustomJenkinsURL(jenkinsSelector *JenkinsSelectorOptions, kubeClient kubernetes.Interface, ns string) (string, error) {
if !jenkinsSelector.UseCustomJenkins {
return o.factory.GetJenkinsURL(kubeClient, ns)
}
customJenkinsName, err := o.PickCustomJenkinsName(jenkinsSelector, kubeClient, ns)
if err != nil {
return "", err
}
return o.factory.GetCustomJenkinsURL(kubeClient, ns, customJenkinsName)
}
// PickCustomJenkinsName picks the name of a custom jenkins server App if available
func (o *CommonOptions) PickCustomJenkinsName(jenkinsSelector *JenkinsSelectorOptions, kubeClient kubernetes.Interface, ns string) (string, error) {
if !jenkinsSelector.UseCustomJenkins {
return "", nil
}
customJenkinsName := jenkinsSelector.CustomJenkinsName
if customJenkinsName == "" {
serviceInterface := kubeClient.CoreV1().Services(ns)
selector := kube.LabelKind + "=" + kube.ValueKindJenkins
serviceList, err := serviceInterface.List(metav1.ListOptions{
LabelSelector: selector,
})
if err != nil {
return "", errors.Wrapf(err, "failed to list Jenkins services in namespace %s with selector %s", ns, selector)
}
switch len(serviceList.Items) {
case 0:
return "", fmt.Errorf("No Jenkins App services found in namespace %s with selector %s\nAre you sure you installed a Jenkins App in this namespace?\nTry jx add app jx-app-jenkins", ns, selector)
case 1:
customJenkinsName = serviceList.Items[0].Name
default:
names := []string{}
for _, svc := range serviceList.Items {
names = append(names, svc.Name)
}
sort.Strings(names)
if o.BatchMode {
return "", util.MissingOptionWithOptions("jenkins-name", names)
}
customJenkinsName, err = util.PickName(names, "Pick which custom Jenkins App you wish to use: ", "Jenkins Apps are a way to add custom Jenkins servers into Jenkins X", o.GetIOFileHandles())
if err != nil {
return "", err
}
}
}
jenkinsSelector.CustomJenkinsName = customJenkinsName
if customJenkinsName == "" {
return "", fmt.Errorf("failed to find a csutom Jenkins App name in namespace %s", ns)
}
return customJenkinsName, nil
}
// CreateCustomJenkinsClient creates either a regular Jenkins client or if useCustom is true creates a JenkinsClient
// for a custom jenkins App. If no customJenkinsName is specified and there is only one available it is used. Otherwise
// the user is prompted to pick the Jenkins App to use if not in batch mode.
func (o *CommonOptions) CreateCustomJenkinsClient(jenkinsSelector *JenkinsSelectorOptions) (gojenkins.JenkinsClient, error) {
isProw, err := o.IsProw()
if err != nil {
return nil, err
}
// Guard against a nil selector before it is dereferenced below.
if jenkinsSelector == nil {
	return o.JenkinsClient()
}
if isProw {
	jenkinsSelector.UseCustomJenkins = true
}
if !jenkinsSelector.UseCustomJenkins {
	return o.JenkinsClient()
}
if jenkinsSelector.cachedCustomJenkinsClient != nil {
return jenkinsSelector.cachedCustomJenkinsClient, nil
}
kubeClient, ns, err := o.KubeClientAndDevNamespace()
if err != nil {
return nil, err
}
customJenkinsName, err := o.PickCustomJenkinsName(jenkinsSelector, kubeClient, ns)
if err != nil {
return nil, err
}
jenkinsClient, err := o.CustomJenkinsClient(customJenkinsName)
if err == nil {
jenkinsSelector.cachedCustomJenkinsClient = jenkinsClient
}
return jenkinsClient, err
}
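// exampleListCustomJenkinsJobs is an illustrative sketch: resolve a client for a
// custom Jenkins App by name (the App name here is an assumption) and list its
// top-level jobs.
func exampleListCustomJenkinsJobs(o *CommonOptions) error {
	selector := &JenkinsSelectorOptions{UseCustomJenkins: true, CustomJenkinsName: "jx-app-jenkins"}
	jenkins, err := o.CreateCustomJenkinsClient(selector)
	if err != nil {
		return err
	}
	jobs, err := jenkins.GetJobs()
	if err != nil {
		return err
	}
	log.Logger().Infof("found %d top-level jobs", len(jobs))
	return nil
}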
// GetJenkinsJobs returns the existing Jenkins jobs
func (o *CommonOptions) GetJenkinsJobs(jenkinsSelector *JenkinsSelectorOptions, filter string) (map[string]gojenkins.Job, error) {
jobMap := map[string]gojenkins.Job{}
jenkins, err := o.CreateCustomJenkinsClient(jenkinsSelector)
if err != nil {
return jobMap, err
}
jobs, err := jenkins.GetJobs()
if err != nil {
return jobMap, err
}
o.AddJenkinsJobs(jenkins, &jobMap, filter, "", jobs)
return jobMap, nil
}
// AddJenkinsJobs add the given jobs to Jenkins
func (o *CommonOptions) AddJenkinsJobs(jenkins gojenkins.JenkinsClient, jobMap *map[string]gojenkins.Job, filter string, prefix string, jobs []gojenkins.Job) {
for _, j := range jobs {
name := jxjenkins.JobName(prefix, &j)
if jxjenkins.IsPipeline(&j) {
if filter == "" || strings.Contains(name, filter) {
(*jobMap)[name] = j
continue
}
}
if j.Jobs != nil {
o.AddJenkinsJobs(jenkins, jobMap, filter, name, j.Jobs)
} else {
job, err := jenkins.GetJob(name)
if err == nil && job.Jobs != nil {
o.AddJenkinsJobs(jenkins, jobMap, filter, name, job.Jobs)
}
}
}
}
// TailJenkinsBuildLog tail the build log of the given Jenkins jobs name
func (o *CommonOptions) TailJenkinsBuildLog(jenkinsSelector *JenkinsSelectorOptions, jobName string, build *gojenkins.Build) error {
jenkins, err := o.CreateCustomJenkinsClient(jenkinsSelector)
if err != nil {
return err
}
u, err := url.Parse(build.Url)
if err != nil {
return err
}
buildPath := u.Path
log.Logger().Infof("%s %s", "tailing the log of", fmt.Sprintf("%s #%d", jobName, build.Number))
// TODO Logger
return jenkins.TailLog(buildPath, o.Out, time.Second, time.Hour*100)
}
// GetJenkinsJobName returns the Jenkins job name
func (o *CommonOptions) GetJenkinsJobName() string {
owner := os.Getenv("REPO_OWNER")
repo := os.Getenv("REPO_NAME")
branch := o.GetBranchName("")
if owner != "" && repo != "" && branch != "" {
return fmt.Sprintf("%s/%s/%s", owner, repo, branch)
}
job := os.Getenv("JOB_NAME")
if job != "" {
return job
}
return ""
}
func (o *CommonOptions) GetBranchName(dir string) string {
branch := builds.GetBranchName()
if branch == "" {
if dir == "" {
dir = "."
}
var err error
branch, err = o.Git().Branch(dir)
if err != nil {
log.Logger().Warnf("failed to get the git branch name in dir %s", dir)
}
}
return branch
}
// UpdateJenkinsURL updates the Jenkins URL
func (o *CommonOptions) UpdateJenkinsURL(namespaces []string) error {
client, err := o.KubeClient()
if err != nil {
return err
}
// loop over each namespace and update the Jenkins URL if a Jenkins service is found
for _, n := range namespaces {
externalURL, err := services.GetServiceURLFromName(client, "jenkins", n)
if err != nil {
// skip namespace if no Jenkins service found
continue
}
log.Logger().Infof("Updating Jenkins with new external URL details %s", externalURL)
jenkins, err := o.factory.CreateJenkinsClient(client, n, o.GetIOFileHandles())
if err != nil {
return err
}
data := url.Values{}
data.Add("script", fmt.Sprintf(groovy, externalURL))
if err := jenkins.Post("/scriptText", data, nil); err != nil {
	log.Logger().Warnf("failed to update the Jenkins URL in namespace %s: %v", n, err)
}
}
return nil
}
|
[
"\"REPO_OWNER\"",
"\"REPO_NAME\"",
"\"JOB_NAME\""
] |
[] |
[
"JOB_NAME",
"REPO_NAME",
"REPO_OWNER"
] |
[]
|
["JOB_NAME", "REPO_NAME", "REPO_OWNER"]
|
go
| 3 | 0 | |
youtube_dl/YoutubeDL.py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import tokenize
import traceback
import random
from .compat import (
compat_basestring,
compat_cookiejar,
compat_expanduser,
compat_get_terminal_size,
compat_http_client,
compat_kwargs,
compat_numeric_types,
compat_os_name,
compat_str,
compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
compat_urllib_request_DataHandler,
)
from .utils import (
age_restricted,
args_to_str,
ContentTooShortError,
date_from_str,
DateRange,
DEFAULT_OUTTMPL,
determine_ext,
determine_protocol,
DownloadError,
encode_compat_str,
encodeFilename,
error_to_compat_str,
ExtractorError,
format_bytes,
formatSeconds,
GeoRestrictedError,
ISO3166Utils,
locked_file,
make_HTTPS_handler,
MaxDownloadsReached,
PagedList,
parse_filesize,
PerRequestProxyHandler,
platform_name,
PostProcessingError,
preferredencoding,
prepend_extension,
register_socks_protocols,
render_table,
replace_extension,
SameFileError,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
std_headers,
subtitles_filename,
UnavailableVideoError,
url_basename,
version_tuple,
write_json_file,
write_string,
YoutubeDLCookieProcessor,
YoutubeDLHandler,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
FFmpegFixupM3u8PP,
FFmpegFixupM4aPP,
FFmpegFixupStretchedPP,
FFmpegMergerPP,
FFmpegPostProcessor,
get_postprocessor,
)
from .version import __version__
if compat_os_name == 'nt':
import ctypes
class YoutubeDL(object):
"""YoutubeDL class.
YoutubeDL objects are responsible for downloading the
actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per
program. Given a video URL, the downloader does not know how to
extract all the needed information (that is the task of the
InfoExtractors), so it has to pass the URL to one of them.
For this, YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
a URL, the YoutubeDL object hands it to the first InfoExtractor it
finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and
YoutubeDL processes the extracted information, possibly using a File
Downloader to download the video.
YoutubeDL objects accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead. These options are available through the params
attribute for the InfoExtractors to use. The YoutubeDL also
registers itself as the downloader in charge of the InfoExtractors
that are added to it, so this is a "mutual registration".
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
videopassword: Password for accessing a video.
ap_mso: Adobe Pass multiple-system operator identifier.
ap_username: Multiple-system operator account username.
ap_password: Multiple-system operator account password.
usenetrc: Use netrc for authentication instead.
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
no_warnings: Do not print out anything for warnings.
forceurl: Force printing final URL.
forcetitle: Force printing title.
forceid: Force printing ID.
forcethumbnail: Force printing thumbnail URL.
forcedescription: Force printing description.
forcefilename: Force printing final filename.
forceduration: Force printing duration.
forcejson: Force printing info_dict as JSON.
dump_single_json: Force printing the info_dict of the whole playlist
(or video) as a single JSON line.
simulate: Do not download the video files.
format: Video format code. See options.py for more information.
outtmpl: Template for output names.
restrictfilenames: Do not allow "&" and spaces in file names
ignoreerrors: Do not stop on download errors.
force_generic_extractor: Force downloader to use the generic extractor
nooverwrites: Prevent overwriting files.
playliststart: Playlist item to start at.
playlistend: Playlist item to end at.
playlist_items: Specific indices of playlist to download.
playlistreverse: Download playlist items in reverse order.
playlistrandom: Download playlist items in random order.
matchtitle: Download only matching titles.
rejecttitle: Reject downloads for matching titles.
logger: Log messages to a logging.Logger instance.
logtostderr: Log messages to stderr instead of stdout.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
writeannotations: Write the video annotations to a .annotations.xml file
writethumbnail: Write the thumbnail image to a file
write_all_thumbnails: Write all thumbnail formats to files
writesubtitles: Write the video subtitles to a file
writeautomaticsub: Write the automatically generated subtitles to a file
allsubtitles: Downloads all the subtitles of the video
(requires writesubtitles or writeautomaticsub)
listsubtitles: Lists all available subtitles for the video
subtitlesformat: The format code for subtitles
subtitleslangs: List of languages of the subtitles to download
keepvideo: Keep the video file after post-processing
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
False to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
min_views: An integer representing the minimum view count the video
must have in order to not be skipped.
Videos without view count information are always
downloaded. None for no limit.
max_views: An integer representing the maximum view count.
Videos that are more popular than that are not
downloaded.
Videos without view count information are always
downloaded. None for no limit.
download_archive: File name of a file where all downloads are recorded.
Videos already present in the file are not downloaded
again.
cookiefile: File name where cookies should be read from and dumped to.
nocheckcertificate:Do not verify SSL certificates
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
At the moment, this is only supported by YouTube.
proxy: URL of the proxy server to use
geo_verification_proxy: URL of the proxy to use for IP address verification
on geo-restricted sites. (Experimental)
socket_timeout: Time to wait for unresponsive hosts, in seconds
bidi_workaround: Work around buggy terminals without bidirectional text
support, using fribidi
debug_printtraffic:Print out sent and received HTTP traffic
include_ads: Download ads as well
default_search: Prepend this string if an input url is not valid.
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
extract_flat: Do not resolve URLs, return the immediate result.
Pass in 'in_playlist' to only show this behavior for
playlist items.
postprocessors: A list of dictionaries, each with an entry
* key: The name of the postprocessor. See
youtube_dl/postprocessor/__init__.py for a list.
as well as any further keyword arguments for the
postprocessor.
progress_hooks: A list of functions that get called on download
progress, with a dictionary with the entries
* status: One of "downloading", "error", or "finished".
Check this first and ignore unknown values.
If status is one of "downloading", or "finished", the
following properties may also be present:
* filename: The final filename (always present)
* tmpfilename: The filename we're currently writing to
* downloaded_bytes: Bytes on disk
* total_bytes: Size of the whole file, None if unknown
* total_bytes_estimate: Guess of the eventual file size,
None if unavailable.
* elapsed: The number of seconds since download started.
* eta: The estimated time in seconds, None if unknown
* speed: The download speed in bytes/second, None if
unknown
* fragment_index: The counter of the currently
downloaded video fragment.
* fragment_count: The number of fragments (= individual
files that will be merged)
Progress hooks are guaranteed to be called at least once
(with status "finished") if the download is successful.
merge_output_format: Extension to use when merging formats.
fixup: Automatically correct known faults of the file.
One of:
- "never": do nothing
- "warn": only emit a warning
- "detect_or_warn": check whether we can do anything
about it, warn otherwise (default)
source_address: (Experimental) Client-side IP address to bind to.
call_home: Boolean, true iff we are allowed to contact the
youtube-dl servers for debugging.
sleep_interval: Number of seconds to sleep before each download when
used alone or a lower bound of a range for randomized
sleep before each download (minimum possible number
of seconds to sleep) when used along with
max_sleep_interval.
max_sleep_interval:Upper bound of a range for randomized sleep before each
download (maximum possible number of seconds to sleep).
Must only be used along with sleep_interval.
Actual sleep time will be a random float from range
[sleep_interval; max_sleep_interval].
listformats: Print an overview of available video formats and exit.
list_thumbnails: Print a table of all thumbnails and exit.
match_filter: A function that gets called with the info_dict of
every video.
If it returns a message, the video is ignored.
If it returns None, the video is downloaded.
match_filter_func in utils.py is one example for this.
no_color: Do not emit color codes in output.
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
HTTP header (experimental)
geo_bypass_country:
Two-letter ISO 3166-2 country code that will be used for
explicit geographic restriction bypassing via faking
X-Forwarded-For HTTP header (experimental)
The following options determine which downloader is picked:
external_downloader: Executable of the external downloader to call.
None or unset for standard (built-in) downloader.
hls_prefer_native: Use the native HLS downloader if True, use ffmpeg/avconv
if False, or fall back to the downloader suggested by the
extractor if None.
The following parameters are not used by YoutubeDL itself, they are used by
the downloader (see youtube_dl/downloader/common.py):
nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
noresizebuffer, retries, continuedl, noprogress, consoletitle,
xattr_set_filesize, external_downloader_args, hls_use_mpegts.
The following options are used by the post processors:
prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available,
otherwise prefer avconv.
postprocessor_args: A list of additional command-line arguments for the
postprocessor.
"""
params = None
_ies = []
_pps = []
_download_retcode = None
_num_downloads = None
_screen_file = None
def __init__(self, params=None, auto_init=True):
"""Create a FileDownloader object with the given options."""
if params is None:
params = {}
self._ies = []
self._ies_instances = {}
self._pps = []
self._progress_hooks = []
self._download_retcode = 0
self._num_downloads = 0
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr
self.params = {
# Default parameters
'nocheckcertificate': False,
}
self.params.update(params)
self.cache = Cache(self)
def check_deprecated(param, option, suggestion):
if self.params.get(param) is not None:
self.report_warning(
'%s is deprecated. Use %s instead.' % (option, suggestion))
return True
return False
if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
if self.params.get('geo_verification_proxy') is None:
self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits')
check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
if params.get('bidi_workaround', False):
try:
import pty
master, slave = pty.openpty()
width = compat_get_terminal_size().columns
if width is None:
width_args = []
else:
width_args = ['-w', str(width)]
sp_kwargs = dict(
stdin=subprocess.PIPE,
stdout=slave,
stderr=self._err_file)
try:
self._output_process = subprocess.Popen(
['bidiv'] + width_args, **sp_kwargs
)
except OSError:
self._output_process = subprocess.Popen(
['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
self._output_channel = os.fdopen(master, 'rb')
except OSError as ose:
if ose.errno == errno.ENOENT:
self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
else:
raise
if (sys.version_info >= (3,) and sys.platform != 'win32' and
sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
not params.get('restrictfilenames', False)):
# On Python 3, the Unicode filesystem API will throw errors (#1474)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True
if isinstance(params.get('outtmpl'), bytes):
self.report_warning(
'Parameter outtmpl is bytes, but should be a unicode string. '
'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
self._setup_opener()
if auto_init:
self.print_debug_header()
self.add_default_info_extractors()
for pp_def_raw in self.params.get('postprocessors', []):
pp_class = get_postprocessor(pp_def_raw['key'])
pp_def = dict(pp_def_raw)
del pp_def['key']
pp = pp_class(self, **compat_kwargs(pp_def))
self.add_post_processor(pp)
for ph in self.params.get('progress_hooks', []):
self.add_progress_hook(ph)
register_socks_protocols()
def warn_if_short_id(self, argv):
# short YouTube ID starting with dash?
idxs = [
i for i, a in enumerate(argv)
if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
if idxs:
correct_argv = (
['youtube-dl'] +
[a for i, a in enumerate(argv) if i not in idxs] +
['--'] + [argv[i] for i in idxs]
)
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s\n' %
args_to_str(correct_argv))
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
if not isinstance(ie, type):
self._ies_instances[ie.ie_key()] = ie
ie.set_downloader(self)
def get_info_extractor(self, ie_key):
"""
Get an instance of an IE with name ie_key, it will try to get one from
the _ies list, if there's no instance it will create a new one and add
it to the extractor list.
"""
ie = self._ies_instances.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)()
self.add_info_extractor(ie)
return ie
def add_default_info_extractors(self):
"""
Add the InfoExtractors returned by gen_extractors to the end of the list
"""
for ie in gen_extractor_classes():
self.add_info_extractor(ie)
def add_post_processor(self, pp):
"""Add a PostProcessor object to the end of the chain."""
self._pps.append(pp)
pp.set_downloader(self)
def add_progress_hook(self, ph):
"""Add the progress hook (currently only for the file downloader)"""
self._progress_hooks.append(ph)
def _bidi_workaround(self, message):
if not hasattr(self, '_output_channel'):
return message
assert hasattr(self, '_output_process')
assert isinstance(message, compat_str)
line_count = message.count('\n') + 1
self._output_process.stdin.write((message + '\n').encode('utf-8'))
self._output_process.stdin.flush()
res = ''.join(self._output_channel.readline().decode('utf-8')
for _ in range(line_count))
return res[:-len('\n')]
def to_screen(self, message, skip_eol=False):
"""Print message to stdout if not in quiet mode."""
return self.to_stdout(message, skip_eol, check_quiet=True)
def _write_string(self, s, out=None):
write_string(s, out=out, encoding=self.params.get('encoding'))
def to_stdout(self, message, skip_eol=False, check_quiet=False):
"""Print message to stdout if not in quiet mode."""
if self.params.get('logger'):
self.params['logger'].debug(message)
elif not check_quiet or not self.params.get('quiet', False):
message = self._bidi_workaround(message)
terminator = ['\n', ''][skip_eol]
output = message + terminator
self._write_string(output, self._screen_file)
def to_stderr(self, message):
"""Print message to stderr."""
assert isinstance(message, compat_str)
if self.params.get('logger'):
self.params['logger'].error(message)
else:
message = self._bidi_workaround(message)
output = message + '\n'
self._write_string(output, self._err_file)
def to_console_title(self, message):
if not self.params.get('consoletitle', False):
return
if compat_os_name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
# c_wchar_p() might not be necessary if `message` is
# already of type unicode()
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
elif 'TERM' in os.environ:
self._write_string('\033]0;%s\007' % message, self._screen_file)
def save_console_title(self):
if not self.params.get('consoletitle', False):
return
if 'TERM' in os.environ:
# Save the title on stack
self._write_string('\033[22;0t', self._screen_file)
def restore_console_title(self):
if not self.params.get('consoletitle', False):
return
if 'TERM' in os.environ:
# Restore the title from stack
self._write_string('\033[23;0t', self._screen_file)
def __enter__(self):
self.save_console_title()
return self
def __exit__(self, *args):
self.restore_console_title()
if self.params.get('cookiefile') is not None:
self.cookiejar.save()
def trouble(self, message=None, tb=None):
"""Determine action to take when a download problem appears.
Depending on if the downloader has been configured to ignore
download errors or not, this method may throw an exception or
not when errors are found, after printing the message.
tb, if given, is additional traceback information.
"""
if message is not None:
self.to_stderr(message)
if self.params.get('verbose'):
if tb is None:
if sys.exc_info()[0]: # if .trouble has been called from an except block
tb = ''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
tb += encode_compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = ''.join(tb_data)
self.to_stderr(tb)
if not self.params.get('ignoreerrors', False):
if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
exc_info = sys.exc_info()[1].exc_info
else:
exc_info = sys.exc_info()
raise DownloadError(message, exc_info)
self._download_retcode = 1
def report_warning(self, message):
'''
Print the message to stderr, it will be prefixed with 'WARNING:'
If stderr is a tty file the 'WARNING:' will be colored
'''
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
if self.params.get('no_warnings'):
return
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
warning_message = '%s %s' % (_msg_header, message)
self.to_stderr(warning_message)
def report_error(self, message, tb=None):
'''
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;31mERROR:\033[0m'
else:
_msg_header = 'ERROR:'
error_message = '%s %s' % (_msg_header, message)
self.trouble(error_message, tb)
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def prepare_filename(self, info_dict):
"""Generate the output filename."""
try:
template_dict = dict(info_dict)
template_dict['epoch'] = int(time.time())
autonumber_size = self.params.get('autonumber_size')
if autonumber_size is None:
autonumber_size = 5
template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
if template_dict.get('resolution') is None:
if template_dict.get('width') and template_dict.get('height'):
template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
elif template_dict.get('height'):
template_dict['resolution'] = '%sp' % template_dict['height']
elif template_dict.get('width'):
template_dict['resolution'] = '%dx?' % template_dict['width']
sanitize = lambda k, v: sanitize_filename(
compat_str(v),
restricted=self.params.get('restrictfilenames'),
is_id=(k == 'id'))
template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
for k, v in template_dict.items()
if v is not None and not isinstance(v, (list, tuple, dict)))
template_dict = collections.defaultdict(lambda: 'NA', template_dict)
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
# For fields playlist_index and autonumber convert all occurrences
# of %(field)s to %(field)0Nd for backward compatibility
field_size_compat_map = {
'playlist_index': len(str(template_dict['n_entries'])),
'autonumber': autonumber_size,
}
FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
if mobj:
outtmpl = re.sub(
FIELD_SIZE_COMPAT_RE,
r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
outtmpl)
NUMERIC_FIELDS = set((
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
'upload_year', 'upload_month', 'upload_day',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
'average_rating', 'comment_count', 'age_limit',
'start_time', 'end_time',
'chapter_number', 'season_number', 'episode_number',
'track_number', 'disc_number', 'release_year',
'playlist_index',
))
# Missing numeric fields used together with integer presentation types
# in format specification will break the argument substitution since
# string 'NA' is returned for missing fields. We will patch output
# template for missing fields to meet string presentation type.
for numeric_field in NUMERIC_FIELDS:
if numeric_field not in template_dict:
# As of [1] format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
FORMAT_RE = r'''(?x)
(?<!%)
%
\({0}\) # mapping key
(?:[#0\-+ ]+)? # conversion flags (optional)
(?:\d+)? # minimum field width (optional)
(?:\.\d+)? # precision (optional)
[hlL]? # length modifier (optional)
[diouxXeEfFgGcrs%] # conversion type
'''
outtmpl = re.sub(
FORMAT_RE.format(numeric_field),
r'%({0})s'.format(numeric_field), outtmpl)
tmpl = compat_expanduser(outtmpl)
filename = tmpl % template_dict
# Temporary fix for #4787
# 'Treat' all problem characters by passing filename through preferredencoding
# to workaround encoding issues with subprocess on python2 @ Windows
if sys.version_info < (3, 0) and sys.platform == 'win32':
filename = encodeFilename(filename, True).decode(preferredencoding())
return sanitize_path(filename)
except ValueError as err:
self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
return None
def _match_entry(self, info_dict, incomplete):
""" Returns None iff the file should be downloaded """
video_title = info_dict.get('title', info_dict.get('id', 'video'))
if 'title' in info_dict:
# This can happen when we're just evaluating the playlist
title = info_dict['title']
matchtitle = self.params.get('matchtitle', False)
if matchtitle:
if not re.search(matchtitle, title, re.IGNORECASE):
return '"' + title + '" title did not match pattern "' + matchtitle + '"'
rejecttitle = self.params.get('rejecttitle', False)
if rejecttitle:
if re.search(rejecttitle, title, re.IGNORECASE):
return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
date = info_dict.get('upload_date')
if date is not None:
dateRange = self.params.get('daterange', DateRange())
if date not in dateRange:
return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
view_count = info_dict.get('view_count')
if view_count is not None:
min_views = self.params.get('min_views')
if min_views is not None and view_count < min_views:
return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
max_views = self.params.get('max_views')
if max_views is not None and view_count > max_views:
return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
return 'Skipping "%s" because it is age restricted' % video_title
if self.in_download_archive(info_dict):
return '%s has already been recorded in archive' % video_title
if not incomplete:
match_filter = self.params.get('match_filter')
if match_filter is not None:
ret = match_filter(info_dict)
if ret is not None:
return ret
return None
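# Illustrative sketch of a match_filter callable (see the "match_filter" option in the
# class docstring): returning a message skips the video, returning None downloads it.
#
#   def skip_long_videos(info_dict):
#       duration = info_dict.get('duration')
#       if duration and duration > 600:
#           return 'Skipping %s: longer than 10 minutes' % info_dict.get('id')
#       return None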
@staticmethod
def add_extra_info(info_dict, extra_info):
'''Set the keys from extra_info in info dict if they are missing'''
for key, value in extra_info.items():
info_dict.setdefault(key, value)
def extract_info(self, url, download=True, ie_key=None, extra_info={},
process=True, force_generic_extractor=False):
'''
Returns a list with a dictionary for each video we find.
If 'download', also downloads the videos.
extra_info is a dict containing the extra values to add to each result
'''
if not ie_key and force_generic_extractor:
ie_key = 'Generic'
if ie_key:
ies = [self.get_info_extractor(ie_key)]
else:
ies = self._ies
for ie in ies:
if not ie.suitable(url):
continue
ie = self.get_info_extractor(ie.ie_key())
if not ie.working():
self.report_warning('The program functionality for this site has been marked as broken, '
'and will probably not work.')
try:
ie_result = ie.extract(url)
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
break
if isinstance(ie_result, list):
# Backwards compatibility: old IE result format
ie_result = {
'_type': 'compat_list',
'entries': ie_result,
}
self.add_default_extra_info(ie_result, ie, url)
if process:
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
except GeoRestrictedError as e:
msg = e.msg
if e.countries:
msg += '\nThis video is available in %s.' % ', '.join(
map(ISO3166Utils.short2full, e.countries))
msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
self.report_error(msg)
break
except ExtractorError as e: # An error we somewhat expected
self.report_error(compat_str(e), e.format_traceback())
break
except MaxDownloadsReached:
raise
except Exception as e:
if self.params.get('ignoreerrors', False):
self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
break
else:
raise
else:
self.report_error('no suitable InfoExtractor for URL %s' % url)
def add_default_extra_info(self, ie_result, ie, url):
self.add_extra_info(ie_result, {
'extractor': ie.IE_NAME,
'webpage_url': url,
'webpage_url_basename': url_basename(url),
'extractor_key': ie.ie_key(),
})
def process_ie_result(self, ie_result, download=True, extra_info={}):
"""
Take the result of the ie (may be modified) and resolve all unresolved
references (URLs, playlist items).
It will also download the videos if 'download'.
Returns the resolved ie_result.
"""
result_type = ie_result.get('_type', 'video')
if result_type in ('url', 'url_transparent'):
ie_result['url'] = sanitize_url(ie_result['url'])
extract_flat = self.params.get('extract_flat', False)
if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
extract_flat is True):
if self.params.get('forcejson', False):
self.to_stdout(json.dumps(ie_result))
return ie_result
if result_type == 'video':
self.add_extra_info(ie_result, extra_info)
return self.process_video_result(ie_result, download=download)
elif result_type == 'url':
# We have to add extra_info to the results because it may be
# contained in a playlist
return self.extract_info(ie_result['url'],
download,
ie_key=ie_result.get('ie_key'),
extra_info=extra_info)
elif result_type == 'url_transparent':
# Use the information from the embedding page
info = self.extract_info(
ie_result['url'], ie_key=ie_result.get('ie_key'),
extra_info=extra_info, download=False, process=False)
force_properties = dict(
(k, v) for k, v in ie_result.items() if v is not None)
for f in ('_type', 'url', 'ie_key'):
if f in force_properties:
del force_properties[f]
new_result = info.copy()
new_result.update(force_properties)
assert new_result.get('_type') != 'url_transparent'
return self.process_ie_result(
new_result, download=download, extra_info=extra_info)
elif result_type == 'playlist' or result_type == 'multi_video':
# We process each entry in the playlist
playlist = ie_result.get('title') or ie_result.get('id')
self.to_screen('[download] Downloading playlist: %s' % playlist)
playlist_results = []
playliststart = self.params.get('playliststart', 1) - 1
playlistend = self.params.get('playlistend')
# For backwards compatibility, interpret -1 as whole list
if playlistend == -1:
playlistend = None
playlistitems_str = self.params.get('playlist_items')
playlistitems = None
if playlistitems_str is not None:
def iter_playlistitems(format):
for string_segment in format.split(','):
if '-' in string_segment:
start, end = string_segment.split('-')
for item in range(int(start), int(end) + 1):
yield int(item)
else:
yield int(string_segment)
playlistitems = iter_playlistitems(playlistitems_str)
ie_entries = ie_result['entries']
if isinstance(ie_entries, list):
n_all_entries = len(ie_entries)
if playlistitems:
entries = [
ie_entries[i - 1] for i in playlistitems
if -n_all_entries <= i - 1 < n_all_entries]
else:
entries = ie_entries[playliststart:playlistend]
n_entries = len(entries)
self.to_screen(
'[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
(ie_result['extractor'], playlist, n_all_entries, n_entries))
elif isinstance(ie_entries, PagedList):
if playlistitems:
entries = []
for item in playlistitems:
entries.extend(ie_entries.getslice(
item - 1, item
))
else:
entries = ie_entries.getslice(
playliststart, playlistend)
n_entries = len(entries)
self.to_screen(
'[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, n_entries))
else: # iterable
if playlistitems:
entry_list = list(ie_entries)
entries = [entry_list[i - 1] for i in playlistitems]
else:
entries = list(itertools.islice(
ie_entries, playliststart, playlistend))
n_entries = len(entries)
self.to_screen(
'[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, n_entries))
if self.params.get('playlistreverse', False):
entries = entries[::-1]
if self.params.get('playlistrandom', False):
random.shuffle(entries)
x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
for i, entry in enumerate(entries, 1):
self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
# This __x_forwarded_for_ip thing is a bit ugly but requires
# minimal changes
if x_forwarded_for:
entry['__x_forwarded_for_ip'] = x_forwarded_for
extra = {
'n_entries': n_entries,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_index': i + playliststart,
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
reason = self._match_entry(entry, incomplete=True)
if reason is not None:
self.to_screen('[download] ' + reason)
continue
entry_result = self.process_ie_result(entry,
download=download,
extra_info=extra)
playlist_results.append(entry_result)
ie_result['entries'] = playlist_results
self.to_screen('[download] Finished downloading playlist: %s' % playlist)
return ie_result
elif result_type == 'compat_list':
self.report_warning(
'Extractor %s returned a compat_list result. '
'It needs to be updated.' % ie_result.get('extractor'))
def _fixup(r):
self.add_extra_info(
r,
{
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
)
return r
ie_result['entries'] = [
self.process_ie_result(_fixup(r), download, extra_info)
for r in ie_result['entries']
]
return ie_result
else:
raise Exception('Invalid result type: %s' % result_type)
def _build_format_filter(self, filter_spec):
" Returns a function to filter the formats according to the filter_spec "
OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>width|height|tbr|abr|vbr|asr|filesize|fps)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
$
''' % '|'.join(map(re.escape, OPERATORS.keys())))
m = operator_rex.search(filter_spec)
if m:
try:
comparison_value = int(m.group('value'))
except ValueError:
comparison_value = parse_filesize(m.group('value'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('value') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid value %r in format specification %r' % (
m.group('value'), filter_spec))
op = OPERATORS[m.group('op')]
if not m:
STR_OPERATORS = {
'=': operator.eq,
'!=': operator.ne,
'^=': lambda attr, value: attr.startswith(value),
'$=': lambda attr, value: attr.endswith(value),
'*=': lambda attr, value: value in attr,
}
str_operator_rex = re.compile(r'''(?x)
\s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
\s*(?P<value>[a-zA-Z0-9._-]+)
\s*$
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
m = str_operator_rex.search(filter_spec)
if m:
comparison_value = m.group('value')
op = STR_OPERATORS[m.group('op')]
if not m:
raise ValueError('Invalid filter specification %r' % filter_spec)
def _filter(f):
actual_value = f.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
return _filter
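# Added note (illustrative, not in the original source): given the regexes above,
# filter specs can be numeric comparisons such as 'height<=720' or 'filesize>100M?'
# (a trailing '?' also keeps formats that do not report the field), or string
# comparisons such as 'ext=mp4' or 'vcodec^=avc1'.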
def build_format_selector(self, format_spec):
def syntax_error(note, start):
message = (
'Invalid format specification: '
'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
return SyntaxError(message)
PICKFIRST = 'PICKFIRST'
MERGE = 'MERGE'
SINGLE = 'SINGLE'
GROUP = 'GROUP'
FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
def _parse_filter(tokens):
filter_parts = []
for type, string, start, _, _ in tokens:
if type == tokenize.OP and string == ']':
return ''.join(filter_parts)
else:
filter_parts.append(string)
def _remove_unused_ops(tokens):
# Remove operators that we don't use and join them with the surrounding strings
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ('/', '+', ',', '(', ')')
last_string, last_start, last_end, last_line = None, None, None, None
for type, string, start, end, line in tokens:
if type == tokenize.OP and string == '[':
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
# everything inside brackets will be handled by _parse_filter
for type, string, start, end, line in tokens:
yield type, string, start, end, line
if type == tokenize.OP and string == ']':
break
elif type == tokenize.OP and string in ALLOWED_OPS:
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
if not last_string:
last_string = string
last_start = start
last_end = end
else:
last_string += string
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
selectors = []
current_selector = None
for type, string, start, _, _ in tokens:
# ENCODING is only defined in python 3.x
if type == getattr(tokenize, 'ENCODING', None):
continue
elif type in [tokenize.NAME, tokenize.NUMBER]:
current_selector = FormatSelector(SINGLE, string, [])
elif type == tokenize.OP:
if string == ')':
if not inside_group:
# ')' will be handled by the parentheses group
tokens.restore_last_token()
break
elif inside_merge and string in ['/', ',']:
tokens.restore_last_token()
break
elif inside_choice and string == ',':
tokens.restore_last_token()
break
elif string == ',':
if not current_selector:
raise syntax_error('"," must follow a format selector', start)
selectors.append(current_selector)
current_selector = None
elif string == '/':
if not current_selector:
raise syntax_error('"/" must follow a format selector', start)
first_choice = current_selector
second_choice = _parse_format_selection(tokens, inside_choice=True)
current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
elif string == '[':
if not current_selector:
current_selector = FormatSelector(SINGLE, 'best', [])
format_filter = _parse_filter(tokens)
current_selector.filters.append(format_filter)
elif string == '(':
if current_selector:
raise syntax_error('Unexpected "("', start)
group = _parse_format_selection(tokens, inside_group=True)
current_selector = FormatSelector(GROUP, group, [])
elif string == '+':
video_selector = current_selector
audio_selector = _parse_format_selection(tokens, inside_merge=True)
if not video_selector or not audio_selector:
raise syntax_error('"+" must be between two format selectors', start)
current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
else:
raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
elif type == tokenize.ENDMARKER:
break
if current_selector:
selectors.append(current_selector)
return selectors
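# Added note (illustrative): the grammar parsed above covers selector strings such
# as 'best', 'bestvideo+bestaudio/best' or '(mp4,webm)[height<480]' -- ',' collects
# several selectors, '/' picks the first alternative that yields formats, '+' merges
# a video selector with an audio selector, and '[...]' attaches a format filter.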
def _build_selector_function(selector):
if isinstance(selector, list):
fs = [_build_selector_function(s) for s in selector]
def selector_function(ctx):
for f in fs:
for format in f(ctx):
yield format
return selector_function
elif selector.type == GROUP:
selector_function = _build_selector_function(selector.selector)
elif selector.type == PICKFIRST:
fs = [_build_selector_function(s) for s in selector.selector]
def selector_function(ctx):
for f in fs:
picked_formats = list(f(ctx))
if picked_formats:
return picked_formats
return []
elif selector.type == SINGLE:
format_spec = selector.selector
def selector_function(ctx):
formats = list(ctx['formats'])
if not formats:
return
if format_spec == 'all':
for f in formats:
yield f
elif format_spec in ['best', 'worst', None]:
format_idx = 0 if format_spec == 'worst' else -1
audiovideo_formats = [
f for f in formats
if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
if audiovideo_formats:
yield audiovideo_formats[format_idx]
# for extractors with incomplete formats (audio only (soundcloud)
# or video only (imgur)) we will fall back to best/worst
# {video,audio}-only format
elif ctx['incomplete_formats']:
yield formats[format_idx]
elif format_spec == 'bestaudio':
audio_formats = [
f for f in formats
if f.get('vcodec') == 'none']
if audio_formats:
yield audio_formats[-1]
elif format_spec == 'worstaudio':
audio_formats = [
f for f in formats
if f.get('vcodec') == 'none']
if audio_formats:
yield audio_formats[0]
elif format_spec == 'bestvideo':
video_formats = [
f for f in formats
if f.get('acodec') == 'none']
if video_formats:
yield video_formats[-1]
elif format_spec == 'worstvideo':
video_formats = [
f for f in formats
if f.get('acodec') == 'none']
if video_formats:
yield video_formats[0]
else:
extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
if format_spec in extensions:
filter_f = lambda f: f['ext'] == format_spec
else:
filter_f = lambda f: f['format_id'] == format_spec
matches = list(filter(filter_f, formats))
if matches:
yield matches[-1]
elif selector.type == MERGE:
def _merge(formats_info):
format_1, format_2 = [f['format_id'] for f in formats_info]
# The first format must contain the video and the
# second the audio
if formats_info[0].get('vcodec') == 'none':
self.report_error('The first format must '
'contain the video, try using '
'"-f %s+%s"' % (format_2, format_1))
return
# Formats must be opposite (video+audio)
if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
self.report_error(
'Both formats %s and %s are video-only, you must specify "-f video+audio"'
% (format_1, format_2))
return
output_ext = (
formats_info[0]['ext']
if self.params.get('merge_output_format') is None
else self.params['merge_output_format'])
return {
'requested_formats': formats_info,
'format': '%s+%s' % (formats_info[0].get('format'),
formats_info[1].get('format')),
'format_id': '%s+%s' % (formats_info[0].get('format_id'),
formats_info[1].get('format_id')),
'width': formats_info[0].get('width'),
'height': formats_info[0].get('height'),
'resolution': formats_info[0].get('resolution'),
'fps': formats_info[0].get('fps'),
'vcodec': formats_info[0].get('vcodec'),
'vbr': formats_info[0].get('vbr'),
'stretched_ratio': formats_info[0].get('stretched_ratio'),
'acodec': formats_info[1].get('acodec'),
'abr': formats_info[1].get('abr'),
'ext': output_ext,
}
video_selector, audio_selector = map(_build_selector_function, selector.selector)
def selector_function(ctx):
for pair in itertools.product(
video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
yield _merge(pair)
filters = [self._build_format_filter(f) for f in selector.filters]
def final_selector(ctx):
ctx_copy = copy.deepcopy(ctx)
for _filter in filters:
ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
return selector_function(ctx_copy)
return final_selector
stream = io.BytesIO(format_spec.encode('utf-8'))
try:
tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
except tokenize.TokenError:
raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
class TokenIterator(object):
def __init__(self, tokens):
self.tokens = tokens
self.counter = 0
def __iter__(self):
return self
def __next__(self):
if self.counter >= len(self.tokens):
raise StopIteration()
value = self.tokens[self.counter]
self.counter += 1
return value
next = __next__
def restore_last_token(self):
self.counter -= 1
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
return _build_selector_function(parsed_selector)
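# Usage sketch (illustrative): build_format_selector('bestvideo[height<=1080]+bestaudio/best')
# returns a function that, given a ctx dict with 'formats' and 'incomplete_formats'
# keys, yields the format dicts to download (see process_video_result below).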
def _calc_headers(self, info_dict):
res = std_headers.copy()
add_headers = info_dict.get('http_headers')
if add_headers:
res.update(add_headers)
cookies = self._calc_cookies(info_dict)
if cookies:
res['Cookie'] = cookies
if 'X-Forwarded-For' not in res:
x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
if x_forwarded_for_ip:
res['X-Forwarded-For'] = x_forwarded_for_ip
return res
def _calc_cookies(self, info_dict):
pr = sanitized_Request(info_dict['url'])
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
def process_video_result(self, info_dict, download=True):
assert info_dict.get('_type', 'video') == 'video'
if 'id' not in info_dict:
raise ExtractorError('Missing "id" field in extractor result')
if 'title' not in info_dict:
raise ExtractorError('Missing "title" field in extractor result')
if not isinstance(info_dict['id'], compat_str):
self.report_warning('"id" field is not a string - forcing string conversion')
info_dict['id'] = compat_str(info_dict['id'])
if 'playlist' not in info_dict:
# It isn't part of a playlist
info_dict['playlist'] = None
info_dict['playlist_index'] = None
thumbnails = info_dict.get('thumbnails')
if thumbnails is None:
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
if thumbnails:
thumbnails.sort(key=lambda t: (
t.get('preference') if t.get('preference') is not None else -1,
t.get('width') if t.get('width') is not None else -1,
t.get('height') if t.get('height') is not None else -1,
t.get('id') if t.get('id') is not None else '', t.get('url')))
for i, t in enumerate(thumbnails):
t['url'] = sanitize_url(t['url'])
if t.get('width') and t.get('height'):
t['resolution'] = '%dx%d' % (t['width'], t['height'])
if t.get('id') is None:
t['id'] = '%d' % i
if self.params.get('list_thumbnails'):
self.list_thumbnails(info_dict)
return
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnail'] = sanitize_url(thumbnail)
elif thumbnails:
info_dict['thumbnail'] = thumbnails[-1]['url']
if 'display_id' not in info_dict and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
try:
upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
except (ValueError, OverflowError, OSError):
pass
# Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
subtitles = info_dict.get('subtitles')
if subtitles:
for _, subtitle in subtitles.items():
for subtitle_format in subtitle:
if subtitle_format.get('url'):
subtitle_format['url'] = sanitize_url(subtitle_format['url'])
if subtitle_format.get('ext') is None:
subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
if self.params.get('listsubtitles', False):
if 'automatic_captions' in info_dict:
self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions')
self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
return
info_dict['requested_subtitles'] = self.process_subtitles(
info_dict['id'], subtitles,
info_dict.get('automatic_captions'))
# We now pick which formats have to be downloaded
if info_dict.get('formats') is None:
# There's only one format available
formats = [info_dict]
else:
formats = info_dict['formats']
if not formats:
raise ExtractorError('No video formats found!')
formats_dict = {}
# We check that all the formats have the format and format_id fields
for i, format in enumerate(formats):
if 'url' not in format:
raise ExtractorError('Missing "url" key in result (index %d)' % i)
format['url'] = sanitize_url(format['url'])
if format.get('format_id') is None:
format['format_id'] = compat_str(i)
else:
# Sanitize format_id from characters used in format selector expression
format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
format_id = format['format_id']
if format_id not in formats_dict:
formats_dict[format_id] = []
formats_dict[format_id].append(format)
# Make sure all formats have unique format_id
for format_id, ambiguous_formats in formats_dict.items():
if len(ambiguous_formats) > 1:
for i, format in enumerate(ambiguous_formats):
format['format_id'] = '%s-%d' % (format_id, i)
for i, format in enumerate(formats):
if format.get('format') is None:
format['format'] = '{id} - {res}{note}'.format(
id=format['format_id'],
res=self.format_resolution(format),
note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
)
# Automatically determine file extension if missing
if format.get('ext') is None:
format['ext'] = determine_ext(format['url']).lower()
# Automatically determine protocol if missing (useful for format
# selection purposes)
if format.get('protocol') is None:
format['protocol'] = determine_protocol(format)
# Add HTTP headers, so that external programs can use them from the
# json output
full_format_info = info_dict.copy()
full_format_info.update(format)
format['http_headers'] = self._calc_headers(full_format_info)
# Remove private housekeeping stuff
if '__x_forwarded_for_ip' in info_dict:
del info_dict['__x_forwarded_for_ip']
# TODO Central sorting goes here
if formats[0] is not info_dict:
# only set the 'formats' field if the original info_dict lists them
# otherwise we end up with a circular reference, the first (and unique)
# element in the 'formats' field in info_dict is info_dict itself,
# which can't be exported to json
info_dict['formats'] = formats
if self.params.get('listformats'):
self.list_formats(info_dict)
return
req_format = self.params.get('format')
if req_format is None:
req_format_list = []
if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and
not info_dict.get('is_live')):
merger = FFmpegMergerPP(self)
if merger.available and merger.can_merge():
req_format_list.append('bestvideo+bestaudio')
req_format_list.append('best')
req_format = '/'.join(req_format_list)
format_selector = self.build_format_selector(req_format)
# While in format selection we may need to have an access to the original
# format set in order to calculate some metrics or do some processing.
# For now we need to be able to guess whether original formats provided
# by extractor are incomplete or not (i.e. whether extractor provides only
# video-only or audio-only formats) for proper formats selection for
# extractors with such incomplete formats (see
# https://github.com/rg3/youtube-dl/pull/5556).
# Since formats may be filtered during format selection and may not match
# the original formats the results may be incorrect. Thus original formats
# or pre-calculated metrics should be passed to format selection routines
# as well.
# We will pass a context object containing all necessary additional data
# instead of just formats.
# This fixes incorrect format selection issue (see
# https://github.com/rg3/youtube-dl/issues/10083).
incomplete_formats = (
# All formats are video-only or
all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or
# all formats are audio-only
all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
ctx = {
'formats': formats,
'incomplete_formats': incomplete_formats,
}
formats_to_download = list(format_selector(ctx))
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
if download:
if len(formats_to_download) > 1:
self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
for format in formats_to_download:
new_info = dict(info_dict)
new_info.update(format)
self.process_info(new_info)
# We update the info dict with the best quality format (backwards compatibility)
info_dict.update(formats_to_download[-1])
return info_dict
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
"""Select the requested subtitles and their format"""
available_subs = {}
if normal_subtitles and self.params.get('writesubtitles'):
available_subs.update(normal_subtitles)
if automatic_captions and self.params.get('writeautomaticsub'):
for lang, cap_info in automatic_captions.items():
if lang not in available_subs:
available_subs[lang] = cap_info
if (not self.params.get('writesubtitles') and not
self.params.get('writeautomaticsub') or not
available_subs):
return None
if self.params.get('allsubtitles', False):
requested_langs = available_subs.keys()
else:
if self.params.get('subtitleslangs', False):
requested_langs = self.params.get('subtitleslangs')
elif 'en' in available_subs:
requested_langs = ['en']
else:
requested_langs = [list(available_subs.keys())[0]]
formats_query = self.params.get('subtitlesformat', 'best')
formats_preference = formats_query.split('/') if formats_query else []
subs = {}
for lang in requested_langs:
formats = available_subs.get(lang)
if formats is None:
self.report_warning('%s subtitles not available for %s' % (lang, video_id))
continue
for ext in formats_preference:
if ext == 'best':
f = formats[-1]
break
matches = list(filter(lambda f: f['ext'] == ext, formats))
if matches:
f = matches[-1]
break
else:
f = formats[-1]
self.report_warning(
'No subtitle format found matching "%s" for language %s, '
'using %s' % (formats_query, lang, f['ext']))
subs[lang] = f
return subs
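# Example (illustrative): with params {'writesubtitles': True, 'subtitleslangs': ['en'],
# 'subtitlesformat': 'srt/best'} this returns something like
# {'en': {'ext': 'srt', 'url': '...'}} chosen from the extractor-provided subtitles.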
def process_info(self, info_dict):
"""Process a single resolved IE result."""
assert info_dict.get('_type', 'video') == 'video'
max_downloads = self.params.get('max_downloads')
if max_downloads is not None:
if self._num_downloads >= int(max_downloads):
raise MaxDownloadsReached()
info_dict['fulltitle'] = info_dict['title']
if len(info_dict['title']) > 200:
info_dict['title'] = info_dict['title'][:197] + '...'
if 'format' not in info_dict:
info_dict['format'] = info_dict['ext']
reason = self._match_entry(info_dict, incomplete=False)
if reason is not None:
self.to_screen('[download] ' + reason)
return
self._num_downloads += 1
info_dict['_filename'] = filename = self.prepare_filename(info_dict)
# Forced printings
if self.params.get('forcetitle', False):
self.to_stdout(info_dict['fulltitle'])
if self.params.get('forceid', False):
self.to_stdout(info_dict['id'])
if self.params.get('forceurl', False):
if info_dict.get('requested_formats') is not None:
for f in info_dict['requested_formats']:
self.to_stdout(f['url'] + f.get('play_path', ''))
else:
# For RTMP URLs, also include the playpath
self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
self.to_stdout(info_dict['thumbnail'])
if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
self.to_stdout(info_dict['description'])
if self.params.get('forcefilename', False) and filename is not None:
self.to_stdout(filename)
if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
self.to_stdout(formatSeconds(info_dict['duration']))
if self.params.get('forceformat', False):
self.to_stdout(info_dict['format'])
if self.params.get('forcejson', False):
self.to_stdout(json.dumps(info_dict))
# Do nothing else if in simulate mode
if self.params.get('simulate', False):
return
if filename is None:
return
try:
dn = os.path.dirname(sanitize_path(encodeFilename(filename)))
if dn and not os.path.exists(dn):
os.makedirs(dn)
except (OSError, IOError) as err:
self.report_error('unable to create directory ' + error_to_compat_str(err))
return
if self.params.get('writedescription', False):
descfn = replace_extension(filename, 'description', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
self.to_screen('[info] Video description is already present')
elif info_dict.get('description') is None:
self.report_warning('There\'s no description to write.')
else:
try:
self.to_screen('[info] Writing video description to: ' + descfn)
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(info_dict['description'])
except (OSError, IOError):
self.report_error('Cannot write description file ' + descfn)
return
if self.params.get('writeannotations', False):
annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
self.to_screen('[info] Video annotations are already present')
else:
try:
self.to_screen('[info] Writing video annotations to: ' + annofn)
with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
annofile.write(info_dict['annotations'])
except (KeyError, TypeError):
self.report_warning('There are no annotations to write.')
except (OSError, IOError):
self.report_error('Cannot write annotations file: ' + annofn)
return
subtitles_are_requested = any([self.params.get('writesubtitles', False),
self.params.get('writeautomaticsub')])
if subtitles_are_requested and info_dict.get('requested_subtitles'):
# subtitle download errors are already handled in the relevant IE,
# so this will silently carry on when used with an IE that does not support them
subtitles = info_dict['requested_subtitles']
ie = self.get_info_extractor(info_dict['extractor_key'])
for sub_lang, sub_info in subtitles.items():
sub_format = sub_info['ext']
if sub_info.get('data') is not None:
sub_data = sub_info['data']
else:
try:
sub_data = ie._download_webpage(
sub_info['url'], info_dict['id'], note=False)
except ExtractorError as err:
self.report_warning('Unable to download subtitle for "%s": %s' %
(sub_lang, error_to_compat_str(err.cause)))
continue
try:
sub_filename = subtitles_filename(filename, sub_lang, sub_format)
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
else:
self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
# Use newline='' to prevent conversion of newline characters
# See https://github.com/rg3/youtube-dl/issues/10268
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_data)
except (OSError, IOError):
self.report_error('Cannot write subtitles file ' + sub_filename)
return
if self.params.get('writeinfojson', False):
infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
self.to_screen('[info] Video description metadata is already present')
else:
self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
try:
write_json_file(self.filter_requested_info(info_dict), infofn)
except (OSError, IOError):
self.report_error('Cannot write metadata to JSON file ' + infofn)
return
self._write_thumbnails(info_dict, filename)
if not self.params.get('skip_download', False):
try:
def dl(name, info):
fd = get_suitable_downloader(info, self.params)(self, self.params)
for ph in self._progress_hooks:
fd.add_progress_hook(ph)
if self.params.get('verbose'):
self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
return fd.download(name, info)
if info_dict.get('requested_formats') is not None:
downloaded = []
success = True
merger = FFmpegMergerPP(self)
if not merger.available:
postprocessors = []
self.report_warning('You have requested multiple '
'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged.')
else:
postprocessors = [merger]
def compatible_formats(formats):
video, audio = formats
# Check extension
video_ext, audio_ext = video.get('ext'), audio.get('ext')
if video_ext and audio_ext:
COMPATIBLE_EXTS = (
('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
('webm',),  # trailing comma: ('webm') would be a plain string, not a 1-tuple
)
for exts in COMPATIBLE_EXTS:
if video_ext in exts and audio_ext in exts:
return True
# TODO: Check acodec/vcodec
return False
filename_real_ext = os.path.splitext(filename)[1][1:]
filename_wo_ext = (
os.path.splitext(filename)[0]
if filename_real_ext == info_dict['ext']
else filename)
requested_formats = info_dict['requested_formats']
if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
info_dict['ext'] = 'mkv'
self.report_warning(
'Requested formats are incompatible for merge and will be merged into mkv.')
# Ensure filename always has a correct extension for successful merge
filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
if os.path.exists(encodeFilename(filename)):
self.to_screen(
'[download] %s has already been downloaded and '
'merged' % filename)
else:
for f in requested_formats:
new_info = dict(info_dict)
new_info.update(f)
fname = self.prepare_filename(new_info)
fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext'])
downloaded.append(fname)
partial_success = dl(fname, new_info)
success = success and partial_success
info_dict['__postprocessors'] = postprocessors
info_dict['__files_to_merge'] = downloaded
else:
# Just a single file
success = dl(filename, info_dict)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
return
except (OSError, IOError) as err:
raise UnavailableVideoError(err)
except (ContentTooShortError, ) as err:
self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success and filename != '-':
# Fixup content
fixup_policy = self.params.get('fixup')
if fixup_policy is None:
fixup_policy = 'detect_or_warn'
INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'
stretched_ratio = info_dict.get('stretched_ratio')
if stretched_ratio is not None and stretched_ratio != 1:
if fixup_policy == 'warn':
self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
info_dict['id'], stretched_ratio))
elif fixup_policy == 'detect_or_warn':
stretched_pp = FFmpegFixupStretchedPP(self)
if stretched_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(stretched_pp)
else:
self.report_warning(
'%s: Non-uniform pixel ratio (%s). %s'
% (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
if (info_dict.get('requested_formats') is None and
info_dict.get('container') == 'm4a_dash'):
if fixup_policy == 'warn':
self.report_warning(
'%s: writing DASH m4a. '
'Only some players support this container.'
% info_dict['id'])
elif fixup_policy == 'detect_or_warn':
fixup_pp = FFmpegFixupM4aPP(self)
if fixup_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(fixup_pp)
else:
self.report_warning(
'%s: writing DASH m4a. '
'Only some players support this container. %s'
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
if (info_dict.get('protocol') == 'm3u8_native' or
info_dict.get('protocol') == 'm3u8' and
self.params.get('hls_prefer_native')):
if fixup_policy == 'warn':
self.report_warning('%s: malformed AAC bitstream.' % (
info_dict['id']))
elif fixup_policy == 'detect_or_warn':
fixup_pp = FFmpegFixupM3u8PP(self)
if fixup_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(fixup_pp)
else:
self.report_warning(
'%s: malformed AAC bitstream. %s'
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
try:
self.post_process(filename, info_dict)
except (PostProcessingError) as err:
self.report_error('postprocessing: %s' % str(err))
return
self.record_download_archive(info_dict)
def download(self, url_list):
"""Download a given list of URLs."""
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
if (len(url_list) > 1 and
'%' not in outtmpl and
self.params.get('max_downloads') != 1):
raise SameFileError(outtmpl)
for url in url_list:
try:
# It also downloads the videos
res = self.extract_info(
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
except UnavailableVideoError:
self.report_error('unable to download video')
except MaxDownloadsReached:
self.to_screen('[info] Maximum number of downloaded files reached.')
raise
else:
if self.params.get('dump_single_json', False):
self.to_stdout(json.dumps(res))
return self._download_retcode
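# Typical call (illustrative): retcode = ydl.download(['https://www.youtube.com/watch?v=...'])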
def download_with_info_file(self, info_filename):
with contextlib.closing(fileinput.FileInput(
[info_filename], mode='r',
openhook=fileinput.hook_encoded('utf-8'))) as f:
# FileInput doesn't have a read method, we can't call json.load
info = self.filter_requested_info(json.loads('\n'.join(f)))
try:
self.process_ie_result(info, download=True)
except DownloadError:
webpage_url = info.get('webpage_url')
if webpage_url is not None:
self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
return self.download([webpage_url])
else:
raise
return self._download_retcode
@staticmethod
def filter_requested_info(info_dict):
return dict(
(k, v) for k, v in info_dict.items()
if k not in ['requested_formats', 'requested_subtitles'])
def post_process(self, filename, ie_info):
"""Run all the postprocessors on the given file."""
info = dict(ie_info)
info['filepath'] = filename
pps_chain = []
if ie_info.get('__postprocessors') is not None:
pps_chain.extend(ie_info['__postprocessors'])
pps_chain.extend(self._pps)
for pp in pps_chain:
files_to_delete = []
try:
files_to_delete, info = pp.run(info)
except PostProcessingError as e:
self.report_error(e.msg)
if files_to_delete and not self.params.get('keepvideo', False):
for old_filename in files_to_delete:
self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
try:
os.remove(encodeFilename(old_filename))
except (IOError, OSError):
self.report_warning('Unable to remove downloaded original file')
def _make_archive_id(self, info_dict):
# Future-proof against any change in case
# and backwards compatibility with prior versions
extractor = info_dict.get('extractor_key')
if extractor is None:
if 'id' in info_dict:
extractor = info_dict.get('ie_key') # key in a playlist
if extractor is None:
return None # Incomplete video information
return extractor.lower() + ' ' + info_dict['id']
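# e.g. (illustrative): {'extractor_key': 'Youtube', 'id': 'abc123'} -> 'youtube abc123'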
def in_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return False
vid_id = self._make_archive_id(info_dict)
if vid_id is None:
return False # Incomplete video information
try:
with locked_file(fn, 'r', encoding='utf-8') as archive_file:
for line in archive_file:
if line.strip() == vid_id:
return True
except IOError as ioe:
if ioe.errno != errno.ENOENT:
raise
return False
def record_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return
vid_id = self._make_archive_id(info_dict)
assert vid_id
with locked_file(fn, 'a', encoding='utf-8') as archive_file:
archive_file.write(vid_id + '\n')
@staticmethod
def format_resolution(format, default='unknown'):
if format.get('vcodec') == 'none':
return 'audio only'
if format.get('resolution') is not None:
return format['resolution']
if format.get('height') is not None:
if format.get('width') is not None:
res = '%sx%s' % (format['width'], format['height'])
else:
res = '%sp' % format['height']
elif format.get('width') is not None:
res = '%dx?' % format['width']
else:
res = default
return res
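# Examples (illustrative): {'width': 1280, 'height': 720} -> '1280x720',
# {'height': 720} -> '720p', {'vcodec': 'none'} -> 'audio only'.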
def _format_note(self, fdict):
res = ''
if fdict.get('ext') in ['f4f', 'f4m']:
res += '(unsupported) '
if fdict.get('language'):
if res:
res += ' '
res += '[%s] ' % fdict['language']
if fdict.get('format_note') is not None:
res += fdict['format_note'] + ' '
if fdict.get('tbr') is not None:
res += '%4dk ' % fdict['tbr']
if fdict.get('container') is not None:
if res:
res += ', '
res += '%s container' % fdict['container']
if (fdict.get('vcodec') is not None and
fdict.get('vcodec') != 'none'):
if res:
res += ', '
res += fdict['vcodec']
if fdict.get('vbr') is not None:
res += '@'
elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
if fdict.get('fps') is not None:
if res:
res += ', '
res += '%sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
if fdict['acodec'] == 'none':
res += 'video only'
else:
res += '%-5s' % fdict['acodec']
elif fdict.get('abr') is not None:
if res:
res += ', '
res += 'audio'
if fdict.get('abr') is not None:
res += '@%3dk' % fdict['abr']
if fdict.get('asr') is not None:
res += ' (%5dHz)' % fdict['asr']
if fdict.get('filesize') is not None:
if res:
res += ', '
res += format_bytes(fdict['filesize'])
elif fdict.get('filesize_approx') is not None:
if res:
res += ', '
res += '~' + format_bytes(fdict['filesize_approx'])
return res
def list_formats(self, info_dict):
formats = info_dict.get('formats', [info_dict])
table = [
[f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)]
for f in formats
if f.get('preference') is None or f['preference'] >= -1000]
if len(formats) > 1:
table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'
header_line = ['format code', 'extension', 'resolution', 'note']
self.to_screen(
'[info] Available formats for %s:\n%s' %
(info_dict['id'], render_table(header_line, table)))
def list_thumbnails(self, info_dict):
thumbnails = info_dict.get('thumbnails')
if not thumbnails:
self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
return
self.to_screen(
'[info] Thumbnails for %s:' % info_dict['id'])
self.to_screen(render_table(
['ID', 'width', 'height', 'URL'],
[[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
def list_subtitles(self, video_id, subtitles, name='subtitles'):
if not subtitles:
self.to_screen('%s has no %s' % (video_id, name))
return
self.to_screen(
'Available %s for %s:' % (name, video_id))
self.to_screen(render_table(
['Language', 'formats'],
[[lang, ', '.join(f['ext'] for f in reversed(formats))]
for lang, formats in subtitles.items()]))
def urlopen(self, req):
""" Start an HTTP download """
if isinstance(req, compat_basestring):
req = sanitized_Request(req)
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
if not self.params.get('verbose'):
return
if type('') is not compat_str:
# Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
self.report_warning(
'Your Python is broken! Update to a newer and supported version')
stdout_encoding = getattr(
sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
encoding_str = (
'[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
locale.getpreferredencoding(),
sys.getfilesystemencoding(),
stdout_encoding,
self.get_encoding()))
write_string(encoding_str, encoding=None)
self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
if _LAZY_LOADER:
self._write_string('[debug] Lazy loading extractors enabled' + '\n')
try:
sp = subprocess.Popen(
['git', 'rev-parse', '--short', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.path.dirname(os.path.abspath(__file__)))
out, err = sp.communicate()
out = out.decode().strip()
if re.match('[0-9a-f]+', out):
self._write_string('[debug] Git HEAD: ' + out + '\n')
except Exception:
try:
sys.exc_clear()
except Exception:
pass
self._write_string('[debug] Python version %s - %s\n' % (
platform.python_version(), platform_name()))
exe_versions = FFmpegPostProcessor.get_versions(self)
exe_versions['rtmpdump'] = rtmpdump_version()
exe_str = ', '.join(
'%s %s' % (exe, v)
for exe, v in sorted(exe_versions.items())
if v
)
if not exe_str:
exe_str = 'none'
self._write_string('[debug] exe versions: %s\n' % exe_str)
proxy_map = {}
for handler in self._opener.handlers:
if hasattr(handler, 'proxies'):
proxy_map.update(handler.proxies)
self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
if self.params.get('call_home', False):
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
self._write_string('[debug] Public IP address: %s\n' % ipaddr)
latest_version = self.urlopen(
'https://yt-dl.org/latest/version').read().decode('utf-8')
if version_tuple(latest_version) > version_tuple(__version__):
self.report_warning(
'You are using an outdated version (newest version: %s)! '
'See https://yt-dl.org/update if you need help updating.' %
latest_version)
def _setup_opener(self):
timeout_val = self.params.get('socket_timeout')
self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
opts_cookiefile = self.params.get('cookiefile')
opts_proxy = self.params.get('proxy')
if opts_cookiefile is None:
self.cookiejar = compat_cookiejar.CookieJar()
else:
opts_cookiefile = compat_expanduser(opts_cookiefile)
self.cookiejar = compat_cookiejar.MozillaCookieJar(
opts_cookiefile)
if os.access(opts_cookiefile, os.R_OK):
self.cookiejar.load()
cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
if opts_proxy is not None:
if opts_proxy == '':
proxies = {}
else:
proxies = {'http': opts_proxy, 'https': opts_proxy}
else:
proxies = compat_urllib_request.getproxies()
# Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
if 'http' in proxies and 'https' not in proxies:
proxies['https'] = proxies['http']
proxy_handler = PerRequestProxyHandler(proxies)
debuglevel = 1 if self.params.get('debug_printtraffic') else 0
https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
data_handler = compat_urllib_request_DataHandler()
# When passing our own FileHandler instance, build_opener won't add the
# default FileHandler and allows us to disable the file protocol, which
# can be used for malicious purposes (see
# https://github.com/rg3/youtube-dl/issues/8227)
file_handler = compat_urllib_request.FileHandler()
def file_open(*args, **kwargs):
raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
file_handler.file_open = file_open
opener = compat_urllib_request.build_opener(
proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler)
# Delete the default user-agent header, which would otherwise apply in
# cases where our custom HTTP handler doesn't come into play
# (See https://github.com/rg3/youtube-dl/issues/1309 for details)
opener.addheaders = []
self._opener = opener
def encode(self, s):
if isinstance(s, bytes):
return s # Already encoded
try:
return s.encode(self.get_encoding())
except UnicodeEncodeError as err:
err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
raise
def get_encoding(self):
encoding = self.params.get('encoding')
if encoding is None:
encoding = preferredencoding()
return encoding
def _write_thumbnails(self, info_dict, filename):
if self.params.get('writethumbnail', False):
thumbnails = info_dict.get('thumbnails')
if thumbnails:
thumbnails = [thumbnails[-1]]
elif self.params.get('write_all_thumbnails', False):
thumbnails = info_dict.get('thumbnails')
else:
return
if not thumbnails:
# No thumbnails present, so return immediately
return
for t in thumbnails:
thumb_ext = determine_ext(t['url'], 'jpg')
suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
self.to_screen('[%s] %s: Thumbnail %sis already present' %
(info_dict['extractor'], info_dict['id'], thumb_display_id))
else:
self.to_screen('[%s] %s: Downloading thumbnail %s...' %
(info_dict['extractor'], info_dict['id'], thumb_display_id))
try:
uf = self.urlopen(t['url'])
with open(encodeFilename(thumb_filename), 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
(info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_warning('Unable to download thumbnail "%s": %s' %
(t['url'], error_to_compat_str(err)))
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# Sphinx configuration
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import time
import aiida_nwchem
from aiida.manage.configuration import load_documentation_profile
# -- AiiDA-related setup --------------------------------------------------
# Load the dummy profile even if we are running locally; this way the documentation build will succeed even if the current
# default profile of the AiiDA installation does not use a Django backend.
load_documentation_profile()
# If we are not on READTHEDOCS load the Sphinx theme manually
if not os.environ.get('READTHEDOCS', None):
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- General configuration ------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.5'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinxcontrib.contentui',
'aiida.sphinxext',
'sphinxcontrib.napoleon',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'aiida': ('https://aiida-core.readthedocs.io/en/latest', None),
}
nitpick_ignore = [('py:obj', 'module')]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#~ master_doc = 'index'
master_doc = 'index'
# General information about the project.
project = u'aiida-nwchem'
copyright_first_year = "2021"
copyright_owners = "The AiiDA Team"
current_year = str(time.localtime().tm_year)
copyright_year_string = current_year if current_year == copyright_first_year else "{}-{}".format(
copyright_first_year, current_year)
# pylint: disable=redefined-builtin
copyright = u'{}, {}. All rights reserved'.format(copyright_year_string,
copyright_owners)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = aiida_nwchem.__version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
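# e.g. (illustrative) a release of '2.1.0' gives a short version of '2.1'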
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# exclude_patterns = ['doc.rst']
#~ exclude_patterns = ['index.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#~ html_theme = 'basicstrap'
## SET BELOW
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'display_version': True,
}
# Add any paths that contain custom themes here, relative to this directory.
#~ html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = "images/.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = "images/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#~ html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'http://aiida-nwchem.readthedocs.io'
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'aiida-nwchem-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# latex_documents = [
# ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# NOTE: Disabling API docs
# def run_apidoc(_):
# """Runs sphinx-apidoc when building the documentation.
# Needs to be done in conf.py in order to include the APIdoc in the
# build on readthedocs.
# See also https://github.com/rtfd/readthedocs.org/issues/1139
# """
# source_dir = os.path.abspath(os.path.dirname(__file__))
# apidoc_dir = os.path.join(source_dir, 'apidoc')
# package_dir = os.path.join(source_dir, os.pardir, os.pardir, 'aiida_nwchem')
# # In #1139, they suggest the route below, but this ended up
# # calling sphinx-build, not sphinx-apidoc
# #from sphinx.apidoc import main
# #main([None, '-e', '-o', apidoc_dir, package_dir, '--force'])
# import subprocess
# cmd_path = 'sphinx-apidoc'
# if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv
# # If we are, assemble the path manually
# cmd_path = os.path.abspath(
# os.path.join(sys.prefix, 'bin', 'sphinx-apidoc'))
# options = [
# '-o',
# apidoc_dir,
# package_dir,
# '--private',
# '--force',
# '--no-toc',
# ]
# # See https://stackoverflow.com/a/30144019
# env = os.environ.copy()
# env["SPHINX_APIDOC_OPTIONS"] = 'members,special-members,private-members,undoc-members,show-inheritance'
# subprocess.check_call([cmd_path] + options, env=env)
# def setup(app):
# app.connect('builder-inited', run_apidoc)
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = [
# ]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# texinfo_documents = [
# ]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Warnings to ignore when using the -n (nitpicky) option
# We should ignore any python built-in exception, for instance
nitpick_ignore = []
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
service/user_openid_service.go
|
package service
import (
"Miniprogram-server-Golang/model"
"Miniprogram-server-Golang/serializer"
"os"
"github.com/gin-gonic/gin"
"github.com/medivhzhan/weapp/v2"
)
// Service for obtaining the user's token
type UserOpenIDService struct {
Code string `form:"code" json:"code"`
}
// User login function: obtains the openid and session key, which are used to validate later operations
func (service *UserOpenIDService) GetCode(c *gin.Context) serializer.Response {
res, err := weapp.Login(os.Getenv("APP_ID"), os.Getenv("APP_SECRET"), service.Code)
if err != nil {
// Handle the error
return serializer.ParamErr("获取openid失败", err)
}
if err := res.GetResponseError(); err != nil {
// Handle the error information returned by the mini program
return serializer.ParamErr("小程序报错", err)
}
// Check whether token information already exists in the database
var wid int64
var token string
//err = model.DB2.QueryRow("select wid from wx_mp_user where wid = ?", UID).Scan(&wid)
err = model.DB2.QueryRow("select wid, token from wx_mp_user where openid = ?", res.OpenID).
Scan(&wid, &token)
if err != nil {
// If not, insert a new record and return it
result, err2 := model.DB2.Exec("insert into wx_mp_user(openid, token) values(?,?)", res.OpenID, res.SessionKey)
if err2 != nil {
return serializer.Err(1008, "获取请求失败,请退出重试", nil)
}
var err3 error
wid, err3 = result.LastInsertId()
if err3 != nil {
return serializer.Err(1008, "获取请求失败,请退出重试", nil)
}
}
return serializer.BuildStatusResponse(token, wid, 1, 0)
}
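// Added note (illustrative): GetCode reads APP_ID and APP_SECRET via os.Getenv,
// so both must be exported before the service starts, e.g.
//   export APP_ID=<mini-program appid>
//   export APP_SECRET=<mini-program secret>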
|
[
"\"APP_ID\"",
"\"APP_SECRET\""
] |
[] |
[
"APP_ID",
"APP_SECRET"
] |
[]
|
["APP_ID", "APP_SECRET"]
|
go
| 2 | 0 | |
chain/store/store.go
|
package store
import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"io"
"os"
"strconv"
"sync"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/minio/blake2b-simd"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/journal"
bstore "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/metrics"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"go.uber.org/multierr"
"github.com/filecoin-project/lotus/chain/types"
lru "github.com/hashicorp/golang-lru"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
car "github.com/ipld/go-car"
carutil "github.com/ipld/go-car/util"
cbg "github.com/whyrusleeping/cbor-gen"
pubsub "github.com/whyrusleeping/pubsub"
"golang.org/x/xerrors"
)
var log = logging.Logger("chainstore")
var chainHeadKey = dstore.NewKey("head")
var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
var DefaultTipSetCacheSize = 8192
var DefaultMsgMetaCacheSize = 2048
func init() {
if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" {
tscs, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse 'LOTUS_CHAIN_TIPSET_CACHE' env var: %s", err)
}
DefaultTipSetCacheSize = tscs
}
if s := os.Getenv("LOTUS_CHAIN_MSGMETA_CACHE"); s != "" {
mmcs, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse 'LOTUS_CHAIN_MSGMETA_CACHE' env var: %s", err)
}
DefaultMsgMetaCacheSize = mmcs
}
}
// ReorgNotifee represents a callback that gets called upon reorgs.
type ReorgNotifee func(rev, app []*types.TipSet) error
// Journal event types.
const (
evtTypeHeadChange = iota
)
type HeadChangeEvt struct {
From types.TipSetKey
FromHeight abi.ChainEpoch
To types.TipSetKey
ToHeight abi.ChainEpoch
RevertCount int
ApplyCount int
}
// ChainStore is the main point of access to chain data.
//
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
// latest head tipset references) being tracked in the Datastore (key-value
// store).
//
// To alleviate disk access, the ChainStore has two ARC caches:
// 1. a tipset cache
// 2. a block => messages references cache.
type ChainStore struct {
bs bstore.Blockstore
ds dstore.Datastore
heaviestLk sync.Mutex
heaviest *types.TipSet
bestTips *pubsub.PubSub
pubLk sync.Mutex
tstLk sync.Mutex
tipsets map[abi.ChainEpoch][]cid.Cid
cindex *ChainIndex
reorgCh chan<- reorg
reorgNotifeeCh chan ReorgNotifee
mmCache *lru.ARCCache
tsCache *lru.ARCCache
vmcalls vm.SyscallBuilder
evtTypes [1]journal.EventType
}
func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore {
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
cs := &ChainStore{
bs: bs,
ds: ds,
bestTips: pubsub.New(64),
tipsets: make(map[abi.ChainEpoch][]cid.Cid),
mmCache: c,
tsCache: tsc,
vmcalls: vmcalls,
}
cs.evtTypes = [1]journal.EventType{
evtTypeHeadChange: journal.J.RegisterEventType("sync", "head_change"),
}
ci := NewChainIndex(cs.LoadTipSet)
cs.cindex = ci
hcnf := func(rev, app []*types.TipSet) error {
cs.pubLk.Lock()
defer cs.pubLk.Unlock()
notif := make([]*api.HeadChange, len(rev)+len(app))
for i, r := range rev {
notif[i] = &api.HeadChange{
Type: HCRevert,
Val: r,
}
}
for i, r := range app {
notif[i+len(rev)] = &api.HeadChange{
Type: HCApply,
Val: r,
}
}
cs.bestTips.Pub(notif, "headchange")
return nil
}
hcmetric := func(rev, app []*types.TipSet) error {
ctx := context.Background()
for _, r := range app {
stats.Record(ctx, metrics.ChainNodeHeight.M(int64(r.Height())))
}
return nil
}
cs.reorgNotifeeCh = make(chan ReorgNotifee)
cs.reorgCh = cs.reorgWorker(context.TODO(), []ReorgNotifee{hcnf, hcmetric})
return cs
}
func (cs *ChainStore) Load() error {
head, err := cs.ds.Get(chainHeadKey)
if err == dstore.ErrNotFound {
log.Warn("no previous chain state found")
return nil
}
if err != nil {
return xerrors.Errorf("failed to load chain state from datastore: %w", err)
}
var tscids []cid.Cid
if err := json.Unmarshal(head, &tscids); err != nil {
return xerrors.Errorf("failed to unmarshal stored chain head: %w", err)
}
ts, err := cs.LoadTipSet(types.NewTipSetKey(tscids...))
if err != nil {
return xerrors.Errorf("loading tipset: %w", err)
}
cs.heaviest = ts
return nil
}
func (cs *ChainStore) writeHead(ts *types.TipSet) error {
data, err := json.Marshal(ts.Cids())
if err != nil {
return xerrors.Errorf("failed to marshal tipset: %w", err)
}
if err := cs.ds.Put(chainHeadKey, data); err != nil {
return xerrors.Errorf("failed to write chain head to datastore: %w", err)
}
return nil
}
const (
HCRevert = "revert"
HCApply = "apply"
HCCurrent = "current"
)
func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange {
cs.pubLk.Lock()
subch := cs.bestTips.Sub("headchange")
head := cs.GetHeaviestTipSet()
cs.pubLk.Unlock()
out := make(chan []*api.HeadChange, 16)
out <- []*api.HeadChange{{
Type: HCCurrent,
Val: head,
}}
go func() {
defer close(out)
var unsubOnce sync.Once
for {
select {
case val, ok := <-subch:
if !ok {
log.Warn("chain head sub exit loop")
return
}
if len(out) > 0 {
log.Warnf("head change sub is slow, has %d buffered entries", len(out))
}
select {
case out <- val.([]*api.HeadChange):
case <-ctx.Done():
}
case <-ctx.Done():
unsubOnce.Do(func() {
go cs.bestTips.Unsub(subch)
})
}
}
}()
return out
}
func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) {
cs.reorgNotifeeCh <- f
}
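// Illustrative sketch, not part of the original file: a minimal ReorgNotifee that just
// logs how many tipsets were reverted and applied on each head change.
func exampleSubscribeHeadChanges(cs *ChainStore) {
	cs.SubscribeHeadChanges(func(rev, app []*types.TipSet) error {
		log.Infof("head change: reverted %d tipsets, applied %d", len(rev), len(app))
		return nil
	})
}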
func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
return cs.ds.Has(key)
}
func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
if err := cs.ds.Put(key, []byte{0}); err != nil {
return xerrors.Errorf("cache block validation: %w", err)
}
return nil
}
func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error {
ts, err := types.NewTipSet([]*types.BlockHeader{b})
if err != nil {
return err
}
if err := cs.PutTipSet(context.TODO(), ts); err != nil {
return err
}
return cs.ds.Put(dstore.NewKey("0"), b.Cid().Bytes())
}
func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
for _, b := range ts.Blocks() {
if err := cs.PersistBlockHeaders(b); err != nil {
return err
}
}
expanded, err := cs.expandTipset(ts.Blocks()[0])
if err != nil {
return xerrors.Errorf("errored while expanding tipset: %w", err)
}
log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids())
if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil {
return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err)
}
return nil
}
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
// internal state as our new head, if and only if it is heavier than the current
// head.
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
w, err := cs.Weight(ctx, ts)
if err != nil {
return err
}
heaviestW, err := cs.Weight(ctx, cs.heaviest)
if err != nil {
return err
}
if w.GreaterThan(heaviestW) {
// TODO: don't do this for initial sync. Now that we don't have a
// difference between 'bootstrap sync' and 'caught up' sync, we need
// some other heuristic.
return cs.takeHeaviestTipSet(ctx, ts)
}
return nil
}
type reorg struct {
old *types.TipSet
new *types.TipSet
}
func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg {
out := make(chan reorg, 32)
notifees := make([]ReorgNotifee, len(initialNotifees))
copy(notifees, initialNotifees)
go func() {
defer log.Warn("reorgWorker quit")
for {
select {
case n := <-cs.reorgNotifeeCh:
notifees = append(notifees, n)
case r := <-out:
revert, apply, err := cs.ReorgOps(r.old, r.new)
if err != nil {
log.Error("computing reorg ops failed: ", err)
continue
}
journal.J.RecordEvent(cs.evtTypes[evtTypeHeadChange], func() interface{} {
return HeadChangeEvt{
From: r.old.Key(),
FromHeight: r.old.Height(),
To: r.new.Key(),
ToHeight: r.new.Height(),
RevertCount: len(revert),
ApplyCount: len(apply),
}
})
// reverse the apply array
for i := len(apply)/2 - 1; i >= 0; i-- {
opp := len(apply) - 1 - i
apply[i], apply[opp] = apply[opp], apply[i]
}
for _, hcf := range notifees {
if err := hcf(revert, apply); err != nil {
log.Error("head change func errored (BAD): ", err)
}
}
case <-ctx.Done():
return
}
}
}()
return out
}
// takeHeaviestTipSet actually sets the incoming tipset as our head both in
// memory and in the ChainStore. It also sends a notification to deliver to
// ReorgNotifees.
func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error {
_, span := trace.StartSpan(ctx, "takeHeaviestTipSet")
defer span.End()
if cs.heaviest != nil { // buf
if len(cs.reorgCh) > 0 {
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
}
cs.reorgCh <- reorg{
old: cs.heaviest,
new: ts,
}
} else {
log.Warnf("no heaviest tipset found, using %s", ts.Cids())
}
span.AddAttributes(trace.BoolAttribute("newHead", true))
log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height())
cs.heaviest = ts
if err := cs.writeHead(ts); err != nil {
log.Errorf("failed to write chain head: %s", err)
return nil
}
return nil
}
// SetHead sets the chainstore's current 'best' head node.
// This should only be called if something is broken and needs fixing
func (cs *ChainStore) SetHead(ts *types.TipSet) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
return cs.takeHeaviestTipSet(context.TODO(), ts)
}
// Contains returns whether our BlockStore has all blocks in the supplied TipSet.
func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
for _, c := range ts.Cids() {
has, err := cs.bs.Has(c)
if err != nil {
return false, err
}
if !has {
return false, nil
}
}
return true, nil
}
// GetBlock fetches a BlockHeader with the supplied CID. It returns
// blockstore.ErrNotFound if the block was not found in the BlockStore.
func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) {
sb, err := cs.bs.Get(c)
if err != nil {
return nil, err
}
return types.DecodeBlock(sb.RawData())
}
func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) {
v, ok := cs.tsCache.Get(tsk)
if ok {
return v.(*types.TipSet), nil
}
var blks []*types.BlockHeader
for _, c := range tsk.Cids() {
b, err := cs.GetBlock(c)
if err != nil {
return nil, xerrors.Errorf("get block %s: %w", c, err)
}
blks = append(blks, b)
}
ts, err := types.NewTipSet(blks)
if err != nil {
return nil, err
}
cs.tsCache.Add(tsk, ts)
return ts, nil
}
// IsAncestorOf returns true if 'a' is an ancestor of 'b'
func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) {
if b.Height() <= a.Height() {
return false, nil
}
cur := b
for !a.Equals(cur) && cur.Height() > a.Height() {
next, err := cs.LoadTipSet(cur.Parents())
if err != nil {
return false, err
}
cur = next
}
return cur.Equals(a), nil
}
func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, error) {
l, _, err := cs.ReorgOps(a, b)
if err != nil {
return nil, err
}
return cs.LoadTipSet(l[len(l)-1].Parents())
}
func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
return ReorgOps(cs.LoadTipSet, a, b)
}
func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
left := a
right := b
var leftChain, rightChain []*types.TipSet
for !left.Equals(right) {
if left.Height() > right.Height() {
leftChain = append(leftChain, left)
par, err := lts(left.Parents())
if err != nil {
return nil, nil, err
}
left = par
} else {
rightChain = append(rightChain, right)
par, err := lts(right.Parents())
if err != nil {
log.Infof("failed to fetch right.Parents: %s", err)
return nil, nil, err
}
right = par
}
}
return leftChain, rightChain, nil
}
// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
return cs.heaviest
}
func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error {
cs.tstLk.Lock()
defer cs.tstLk.Unlock()
tss := cs.tipsets[b.Height]
for _, oc := range tss {
if oc == b.Cid() {
log.Debug("tried to add block to tipset tracker that was already there")
return nil
}
}
cs.tipsets[b.Height] = append(tss, b.Cid())
// TODO: do we want to look for slashable submissions here? might as well...
return nil
}
func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error {
sbs := make([]block.Block, len(b))
for i, header := range b {
var err error
sbs[i], err = header.ToStorageBlock()
if err != nil {
return err
}
}
batchSize := 256
calls := len(b) / batchSize
var err error
for i := 0; i <= calls; i++ {
start := batchSize * i
end := start + batchSize
if end > len(b) {
end = len(b)
}
err = multierr.Append(err, cs.bs.PutMany(sbs[start:end]))
}
return err
}
type storable interface {
ToStorageBlock() (block.Block, error)
}
func PutMessage(bs bstore.Blockstore, m storable) (cid.Cid, error) {
b, err := m.ToStorageBlock()
if err != nil {
return cid.Undef, err
}
if err := bs.Put(b); err != nil {
return cid.Undef, err
}
return b.Cid(), nil
}
func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) {
return PutMessage(cs.bs, m)
}
func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) {
// Hold lock for the whole function for now, if it becomes a problem we can
// fix pretty easily
cs.tstLk.Lock()
defer cs.tstLk.Unlock()
all := []*types.BlockHeader{b}
tsets, ok := cs.tipsets[b.Height]
if !ok {
return types.NewTipSet(all)
}
inclMiners := map[address.Address]bool{b.Miner: true}
for _, bhc := range tsets {
if bhc == b.Cid() {
continue
}
h, err := cs.GetBlock(bhc)
if err != nil {
return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
}
if inclMiners[h.Miner] {
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache", h.Miner, h.Height)
continue
}
if types.CidArrsEqual(h.Parents, b.Parents) {
all = append(all, h)
inclMiners[h.Miner] = true
}
}
// TODO: other validation...?
return types.NewTipSet(all)
}
func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error {
if err := cs.PersistBlockHeaders(b); err != nil {
return err
}
ts, err := cs.expandTipset(b)
if err != nil {
return err
}
if err := cs.MaybeTakeHeavierTipSet(ctx, ts); err != nil {
return xerrors.Errorf("MaybeTakeHeavierTipSet failed: %w", err)
}
return nil
}
func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) {
data, err := cs.ds.Get(dstore.NewKey("0"))
if err != nil {
return nil, err
}
c, err := cid.Cast(data)
if err != nil {
return nil, err
}
genb, err := cs.bs.Get(c)
if err != nil {
return nil, err
}
return types.DecodeBlock(genb.RawData())
}
func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) {
m, err := cs.GetMessage(c)
if err == nil {
return m, nil
}
if err != bstore.ErrNotFound {
log.Warnf("GetCMessage: unexpected error getting unsigned message: %s", err)
}
return cs.GetSignedMessage(c)
}
func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) {
sb, err := cs.bs.Get(c)
if err != nil {
log.Errorf("get message get failed: %s: %s", c, err)
return nil, err
}
return types.DecodeMessage(sb.RawData())
}
func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) {
sb, err := cs.bs.Get(c)
if err != nil {
log.Errorf("get message get failed: %s: %s", c, err)
return nil, err
}
return types.DecodeSignedMessage(sb.RawData())
}
func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) {
ctx := context.TODO()
a, err := adt.AsArray(cs.Store(ctx), root)
if err != nil {
return nil, xerrors.Errorf("amt load: %w", err)
}
var (
cids []cid.Cid
cborCid cbg.CborCid
)
if err := a.ForEach(&cborCid, func(i int64) error {
c := cid.Cid(cborCid)
cids = append(cids, c)
return nil
}); err != nil {
return nil, xerrors.Errorf("failed to traverse amt: %w", err)
}
if uint64(len(cids)) != a.Length() {
return nil, xerrors.Errorf("found %d cids, expected %d", len(cids), a.Length())
}
return cids, nil
}
type BlockMessages struct {
Miner address.Address
BlsMessages []types.ChainMsg
SecpkMessages []types.ChainMsg
WinCount int64
}
func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) {
applied := make(map[address.Address]uint64)
selectMsg := func(m *types.Message) (bool, error) {
// The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise
if _, ok := applied[m.From]; !ok {
applied[m.From] = m.Nonce
}
if applied[m.From] != m.Nonce {
return false, nil
}
applied[m.From]++
return true, nil
}
var out []BlockMessages
for _, b := range ts.Blocks() {
bms, sms, err := cs.MessagesForBlock(b)
if err != nil {
return nil, xerrors.Errorf("failed to get messages for block: %w", err)
}
bm := BlockMessages{
Miner: b.Miner,
BlsMessages: make([]types.ChainMsg, 0, len(bms)),
SecpkMessages: make([]types.ChainMsg, 0, len(sms)),
WinCount: b.ElectionProof.WinCount,
}
for _, bmsg := range bms {
b, err := selectMsg(bmsg.VMMessage())
if err != nil {
return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err)
}
if b {
bm.BlsMessages = append(bm.BlsMessages, bmsg)
}
}
for _, smsg := range sms {
b, err := selectMsg(smsg.VMMessage())
if err != nil {
return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err)
}
if b {
bm.SecpkMessages = append(bm.SecpkMessages, smsg)
}
}
out = append(out, bm)
}
return out, nil
}
func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) {
bmsgs, err := cs.BlockMsgsForTipset(ts)
if err != nil {
return nil, err
}
var out []types.ChainMsg
for _, bm := range bmsgs {
for _, blsm := range bm.BlsMessages {
out = append(out, blsm)
}
for _, secm := range bm.SecpkMessages {
out = append(out, secm)
}
}
return out, nil
}
type mmCids struct {
bls []cid.Cid
secpk []cid.Cid
}
func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) {
o, ok := cs.mmCache.Get(mmc)
if ok {
mmcids := o.(*mmCids)
return mmcids.bls, mmcids.secpk, nil
}
cst := cbor.NewCborStore(cs.bs)
var msgmeta types.MsgMeta
if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil {
return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err)
}
blscids, err := cs.readAMTCids(msgmeta.BlsMessages)
if err != nil {
return nil, nil, xerrors.Errorf("loading bls message cids for block: %w", err)
}
secpkcids, err := cs.readAMTCids(msgmeta.SecpkMessages)
if err != nil {
return nil, nil, xerrors.Errorf("loading secpk message cids for block: %w", err)
}
cs.mmCache.Add(mmc, &mmCids{
bls: blscids,
secpk: secpkcids,
})
return blscids, secpkcids, nil
}
func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) {
fts, err := cs.LoadTipSet(from)
if err != nil {
return nil, xerrors.Errorf("loading from tipset %s: %w", from, err)
}
tts, err := cs.LoadTipSet(to)
if err != nil {
return nil, xerrors.Errorf("loading to tipset %s: %w", to, err)
}
revert, apply, err := cs.ReorgOps(fts, tts)
if err != nil {
return nil, xerrors.Errorf("error getting tipset branches: %w", err)
}
path := make([]*api.HeadChange, len(revert)+len(apply))
for i, r := range revert {
path[i] = &api.HeadChange{Type: HCRevert, Val: r}
}
for j, i := 0, len(apply)-1; i >= 0; j, i = j+1, i-1 {
path[j+len(revert)] = &api.HeadChange{Type: HCApply, Val: apply[i]}
}
return path, nil
}
func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {
blscids, secpkcids, err := cs.ReadMsgMetaCids(b.Messages)
if err != nil {
return nil, nil, err
}
blsmsgs, err := cs.LoadMessagesFromCids(blscids)
if err != nil {
return nil, nil, xerrors.Errorf("loading bls messages for block: %w", err)
}
secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids)
if err != nil {
return nil, nil, xerrors.Errorf("loading secpk messages for block: %w", err)
}
return blsmsgs, secpkmsgs, nil
}
func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) {
ctx := context.TODO()
a, err := adt.AsArray(cs.Store(ctx), b.ParentMessageReceipts)
if err != nil {
return nil, xerrors.Errorf("amt load: %w", err)
}
var r types.MessageReceipt
if found, err := a.Get(uint64(i), &r); err != nil {
return nil, err
} else if !found {
return nil, xerrors.Errorf("failed to find receipt %d", i)
}
return &r, nil
}
func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, error) {
msgs := make([]*types.Message, 0, len(cids))
for i, c := range cids {
m, err := cs.GetMessage(c)
if err != nil {
return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err)
}
msgs = append(msgs, m)
}
return msgs, nil
}
func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.SignedMessage, error) {
msgs := make([]*types.SignedMessage, 0, len(cids))
for i, c := range cids {
m, err := cs.GetSignedMessage(c)
if err != nil {
return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err)
}
msgs = append(msgs, m)
}
return msgs, nil
}
func (cs *ChainStore) Blockstore() bstore.Blockstore {
return cs.bs
}
func ActorStore(ctx context.Context, bs bstore.Blockstore) adt.Store {
return adt.WrapStore(ctx, cbor.NewCborStore(bs))
}
func (cs *ChainStore) Store(ctx context.Context) adt.Store {
return ActorStore(ctx, cs.bs)
}
func (cs *ChainStore) VMSys() vm.SyscallBuilder {
return cs.vmcalls
}
func (cs *ChainStore) TryFillTipSet(ts *types.TipSet) (*FullTipSet, error) {
var out []*types.FullBlock
for _, b := range ts.Blocks() {
bmsgs, smsgs, err := cs.MessagesForBlock(b)
if err != nil {
// TODO: check for 'not found' errors, and only return nil if this
// is actually a 'not found' error
return nil, nil
}
fb := &types.FullBlock{
Header: b,
BlsMessages: bmsgs,
SecpkMessages: smsgs,
}
out = append(out, fb)
}
return NewFullTipSet(out), nil
}
func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
h := blake2b.New256()
if err := binary.Write(h, binary.BigEndian, int64(pers)); err != nil {
return nil, xerrors.Errorf("deriving randomness: %w", err)
}
VRFDigest := blake2b.Sum256(rbase)
_, err := h.Write(VRFDigest[:])
if err != nil {
return nil, xerrors.Errorf("hashing VRFDigest: %w", err)
}
if err := binary.Write(h, binary.BigEndian, round); err != nil {
return nil, xerrors.Errorf("deriving randomness: %w", err)
}
_, err = h.Write(entropy)
if err != nil {
return nil, xerrors.Errorf("hashing entropy: %w", err)
}
return h.Sum(nil), nil
}
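// Illustrative usage sketch, not part of the original file. The tag, epoch and byte
// slices below are arbitrary examples; real callers pass a DomainSeparationTag
// constant from the go-state-types crypto package.
func exampleDrawRandomness() ([]byte, error) {
	rbase := []byte("beacon-entry-or-ticket-bytes") // e.g. the latest beacon entry data
	entropy := []byte("caller-specific-entropy")    // e.g. a serialized miner address
	return DrawRandomness(rbase, crypto.DomainSeparationTag(1), abi.ChainEpoch(10), entropy)
}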
func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
_, span := trace.StartSpan(ctx, "store.GetBeaconRandomness")
defer span.End()
span.AddAttributes(trace.Int64Attribute("round", int64(round)))
ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...))
if err != nil {
return nil, err
}
if round > ts.Height() {
return nil, xerrors.Errorf("cannot draw randomness from the future")
}
searchHeight := round
if searchHeight < 0 {
searchHeight = 0
}
randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true)
if err != nil {
return nil, err
}
be, err := cs.GetLatestBeaconEntry(randTs)
if err != nil {
return nil, err
}
// if at (or just past -- for null epochs) appropriate epoch
// or at genesis (works for negative epochs)
return DrawRandomness(be.Data, pers, round, entropy)
}
func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
_, span := trace.StartSpan(ctx, "store.GetChainRandomness")
defer span.End()
span.AddAttributes(trace.Int64Attribute("round", int64(round)))
ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...))
if err != nil {
return nil, err
}
if round > ts.Height() {
return nil, xerrors.Errorf("cannot draw randomness from the future")
}
searchHeight := round
if searchHeight < 0 {
searchHeight = 0
}
randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true)
if err != nil {
return nil, err
}
mtb := randTs.MinTicketBlock()
// if at (or just past -- for null epochs) appropriate epoch
// or at genesis (works for negative epochs)
return DrawRandomness(mtb.Ticket.VRFProof, pers, round, entropy)
}
// GetTipsetByHeight returns the tipset on the chain behind 'ts' at the given
// height. In the case that the given height is a null round, the 'prev' flag
// selects the tipset before the null round if true, and the tipset following
// the null round if false.
func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) {
if ts == nil {
ts = cs.GetHeaviestTipSet()
}
if h > ts.Height() {
return nil, xerrors.Errorf("looking for tipset with height greater than start point")
}
if h == ts.Height() {
return ts, nil
}
lbts, err := cs.cindex.GetTipsetByHeight(ctx, ts, h)
if err != nil {
return nil, err
}
if lbts.Height() < h {
log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h)
lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ts, h)
if err != nil {
return nil, err
}
}
if lbts.Height() == h || !prev {
return lbts, nil
}
return cs.LoadTipSet(lbts.Parents())
}
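// Illustrative sketch, not part of the original file: load the tipset ten epochs behind
// the current head, preferring the tipset just before a null round.
func exampleGetTipsetByHeight(ctx context.Context, cs *ChainStore) (*types.TipSet, error) {
	head := cs.GetHeaviestTipSet()
	return cs.GetTipsetByHeight(ctx, head.Height()-10, head, true)
}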
func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
if root.Prefix().Codec != cid.DagCBOR {
return in, nil
}
data, err := bs.Get(root)
if err != nil {
return nil, xerrors.Errorf("recurse links get (%s) failed: %w", root, err)
}
var rerr error
err = cbg.ScanForLinks(bytes.NewReader(data.RawData()), func(c cid.Cid) {
if rerr != nil {
// No error return on ScanForLinks :(
return
}
// traversed this already...
if !walked.Visit(c) {
return
}
in = append(in, c)
var err error
in, err = recurseLinks(bs, walked, c, in)
if err != nil {
rerr = err
}
})
if err != nil {
return nil, xerrors.Errorf("scanning for links failed: %w", err)
}
return in, rerr
}
func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error {
if ts == nil {
ts = cs.GetHeaviestTipSet()
}
seen := cid.NewSet()
walked := cid.NewSet()
h := &car.CarHeader{
Roots: ts.Cids(),
Version: 1,
}
if err := car.WriteHeader(h, w); err != nil {
return xerrors.Errorf("failed to write car header: %s", err)
}
blocksToWalk := ts.Cids()
currentMinHeight := ts.Height()
walkChain := func(blk cid.Cid) error {
if !seen.Visit(blk) {
return nil
}
data, err := cs.bs.Get(blk)
if err != nil {
return xerrors.Errorf("getting block: %w", err)
}
if err := carutil.LdWrite(w, blk.Bytes(), data.RawData()); err != nil {
return xerrors.Errorf("failed to write block to car output: %w", err)
}
var b types.BlockHeader
if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil {
return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err)
}
if currentMinHeight > b.Height {
currentMinHeight = b.Height
if currentMinHeight%builtin.EpochsInDay == 0 {
log.Infow("export", "height", currentMinHeight)
}
}
var cids []cid.Cid
if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots {
mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages})
if err != nil {
return xerrors.Errorf("recursing messages failed: %w", err)
}
cids = mcids
}
if b.Height > 0 {
for _, p := range b.Parents {
blocksToWalk = append(blocksToWalk, p)
}
} else {
// include the genesis block
cids = append(cids, b.Parents...)
}
out := cids
if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots {
cids, err := recurseLinks(cs.bs, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot})
if err != nil {
return xerrors.Errorf("recursing genesis state failed: %w", err)
}
out = append(out, cids...)
}
for _, c := range out {
if seen.Visit(c) {
if c.Prefix().Codec != cid.DagCBOR {
continue
}
data, err := cs.bs.Get(c)
if err != nil {
return xerrors.Errorf("writing object to car (get %s): %w", c, err)
}
if err := carutil.LdWrite(w, c.Bytes(), data.RawData()); err != nil {
return xerrors.Errorf("failed to write out car object: %w", err)
}
}
}
return nil
}
log.Infow("export started")
exportStart := build.Clock.Now()
for len(blocksToWalk) > 0 {
next := blocksToWalk[0]
blocksToWalk = blocksToWalk[1:]
if err := walkChain(next); err != nil {
return xerrors.Errorf("walk chain failed: %w", err)
}
}
log.Infow("export finished", "duration", build.Clock.Now().Sub(exportStart).Seconds())
return nil
}
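// Illustrative sketch, not part of the original file: export the current head plus
// roughly the last day of state roots and messages into a CAR file. The output path
// is an arbitrary example.
func exampleExportSnapshot(ctx context.Context, cs *ChainStore) error {
	f, err := os.Create("chain-snapshot.car")
	if err != nil {
		return err
	}
	defer f.Close() //nolint:errcheck
	return cs.Export(ctx, nil, abi.ChainEpoch(builtin.EpochsInDay), true, f)
}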
func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) {
header, err := car.LoadCar(cs.Blockstore(), r)
if err != nil {
return nil, xerrors.Errorf("loadcar failed: %w", err)
}
root, err := cs.LoadTipSet(types.NewTipSetKey(header.Roots...))
if err != nil {
return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err)
}
return root, nil
}
func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts
for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries
if len(cbe) > 0 {
return &cbe[len(cbe)-1], nil
}
if cur.Height() == 0 {
return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
}
next, err := cs.LoadTipSet(cur.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
}
cur = next
}
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
return &types.BeaconEntry{
Data: []byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9},
}, nil
}
return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}
type chainRand struct {
cs *ChainStore
blks []cid.Cid
}
func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand {
return &chainRand{
cs: cs,
blks: blks,
}
}
func (cr *chainRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cr.cs.GetChainRandomness(ctx, cr.blks, pers, round, entropy)
}
func (cr *chainRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cr.cs.GetBeaconRandomness(ctx, cr.blks, pers, round, entropy)
}
func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) {
if tsk.IsEmpty() {
return cs.GetHeaviestTipSet(), nil
}
return cs.LoadTipSet(tsk)
}
|
[
"\"LOTUS_CHAIN_TIPSET_CACHE\"",
"\"LOTUS_CHAIN_MSGMETA_CACHE\"",
"\"LOTUS_IGNORE_DRAND\""
] |
[] |
[
"LOTUS_CHAIN_TIPSET_CACHE",
"LOTUS_IGNORE_DRAND",
"LOTUS_CHAIN_MSGMETA_CACHE"
] |
[]
|
["LOTUS_CHAIN_TIPSET_CACHE", "LOTUS_IGNORE_DRAND", "LOTUS_CHAIN_MSGMETA_CACHE"]
|
go
| 3 | 0 | |
tests/integration/ceph_base_deploy_test.go
|
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"fmt"
"os"
"strings"
"time"
"testing"
"github.com/coreos/pkg/capnslog"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/tests/framework/clients"
"github.com/rook/rook/tests/framework/installer"
"github.com/rook/rook/tests/framework/utils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
const (
defaultNamespace = "default"
// UPDATE these versions when the integration test matrix changes
// These versions are for running a minimal test suite for more efficient tests across different versions of K8s
// instead of running all suites on all versions
	// To run on multiple versions, add a comma-separated list such as 1.16.0,1.17.0
flexDriverMinimalTestVersion = "1.14.0"
cephMasterSuiteMinimalTestVersion = "1.15.0"
multiClusterMinimalTestVersion = "1.15.0"
helmMinimalTestVersion = "1.16.0"
upgradeMinimalTestVersion = "1.17.0"
smokeSuiteMinimalTestVersion = "1.18.0"
)
var (
logger = capnslog.NewPackageLogger("github.com/rook/rook", "integrationTest")
)
// Test to make sure all rook components are installed and Running
func checkIfRookClusterIsInstalled(s suite.Suite, k8sh *utils.K8sHelper, opNamespace, clusterNamespace string, mons int) {
logger.Infof("Make sure all Pods in Rook Cluster %s are running", clusterNamespace)
assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-operator", opNamespace, 1, "Running"),
"Make sure there is 1 rook-operator present in Running state")
assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-mgr", clusterNamespace, 1, "Running"),
"Make sure there is 1 rook-ceph-mgr present in Running state")
assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-osd", clusterNamespace, 1, "Running"),
"Make sure there is at lest 1 rook-ceph-osd present in Running state")
assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-mon", clusterNamespace, mons, "Running"),
fmt.Sprintf("Make sure there are %d rook-ceph-mon present in Running state", mons))
assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-crashcollector", clusterNamespace, 1, "Running"),
"Make sure there is at lest 1 rook-ceph-crash present in Running state")
}
func checkIfRookClusterIsHealthy(s suite.Suite, testClient *clients.TestClient, clusterNamespace string) {
logger.Infof("Testing cluster %s health", clusterNamespace)
	var err error
	retryCount := 0
	for retryCount < utils.RetryLoop {
		var healthy bool
		healthy, err = clients.IsClusterHealthy(testClient, clusterNamespace)
if healthy {
logger.Infof("cluster %s is healthy", clusterNamespace)
return
}
retryCount++
logger.Infof("waiting for cluster %s to become healthy. err: %+v", clusterNamespace, err)
<-time.After(time.Duration(utils.RetryInterval) * time.Second)
}
require.Nil(s.T(), err)
}
func HandlePanics(r interface{}, op installer.TestSuite, t func() *testing.T) {
if r != nil {
logger.Infof("unexpected panic occurred during test %s, --> %v", t().Name(), r)
t().Fail()
op.Teardown()
t().FailNow()
}
}
// TestCluster struct for handling panic and test suite tear down
type TestCluster struct {
installer *installer.CephInstaller
kh *utils.K8sHelper
helper *clients.TestClient
T func() *testing.T
clusterName string
namespace string
storeType string
storageClassName string
useHelm bool
usePVC bool
mons int
rbdMirrorWorkers int
rookCephCleanup bool
skipOSDCreation bool
minimalMatrixK8sVersion string
rookVersion string
cephVersion cephv1.CephVersionSpec
}
func checkIfShouldRunForMinimalTestMatrix(t func() *testing.T, k8sh *utils.K8sHelper, version string) {
testArgs := os.Getenv("TEST_ARGUMENTS")
if !strings.Contains(testArgs, "min-test-matrix") {
logger.Infof("running all tests")
return
}
versions := strings.Split(version, ",")
logger.Infof("checking if tests are running on k8s %q", version)
matchedVersion := false
kubeVersion := ""
for _, v := range versions {
kubeVersion, matchedVersion = k8sh.VersionMinorMatches(v)
if matchedVersion {
break
}
}
if !matchedVersion {
logger.Infof("Skipping test suite since kube version %q does not match", kubeVersion)
t().Skip()
}
logger.Infof("Running test suite since kube version is %q", kubeVersion)
}
// StartTestCluster creates new instance of TestCluster struct
func StartTestCluster(t func() *testing.T, cluster *TestCluster) (*TestCluster, *utils.K8sHelper) {
kh, err := utils.CreateK8sHelper(t)
require.NoError(t(), err)
checkIfShouldRunForMinimalTestMatrix(t, kh, cluster.minimalMatrixK8sVersion)
cluster.installer = installer.NewCephInstaller(t, kh.Clientset, cluster.useHelm, cluster.clusterName, cluster.rookVersion, cluster.cephVersion, cluster.rookCephCleanup)
cluster.kh = kh
cluster.helper = nil
cluster.T = t
if cluster.rookVersion != installer.VersionMaster {
// make sure we have the images from a previous release locally so the test doesn't hit a timeout
assert.NoError(t(), kh.GetDockerImage("rook/ceph:"+cluster.rookVersion))
}
assert.NoError(t(), kh.GetDockerImage(cluster.cephVersion.Image))
cluster.Setup()
return cluster, kh
}
// Setup is a wrapper for setting up rook
func (op *TestCluster) Setup() {
// Turn on DEBUG logging
capnslog.SetGlobalLogLevel(capnslog.DEBUG)
isRookInstalled, err := op.installer.InstallRook(op.namespace, op.storeType, op.usePVC, op.storageClassName,
cephv1.MonSpec{Count: op.mons, AllowMultiplePerNode: true}, false /* startWithAllNodes */, op.rbdMirrorWorkers, op.skipOSDCreation, op.rookVersion)
if !isRookInstalled || err != nil {
logger.Errorf("Rook was not installed successfully: %v", err)
if !op.installer.T().Failed() {
op.installer.GatherAllRookLogs(op.installer.T().Name(), op.namespace, installer.SystemNamespace(op.namespace))
}
op.T().Fail()
op.Teardown()
op.T().FailNow()
}
}
// SetInstallData updates the installer helper based on the version of Rook desired
func (op *TestCluster) SetInstallData(version string) {}
// Teardown is a wrapper for tearDown after Suite
func (op *TestCluster) Teardown() {
op.installer.UninstallRook(op.namespace)
}
|
[
"\"TEST_ARGUMENTS\""
] |
[] |
[
"TEST_ARGUMENTS"
] |
[]
|
["TEST_ARGUMENTS"]
|
go
| 1 | 0 | |
mesonbuild/mesonlib.py
|
# Copyright 2012-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of random helper functionality."""
from pathlib import Path
import sys
import stat
import time
import platform, subprocess, operator, os, shlex, shutil, re
import collections
from enum import Enum
from functools import lru_cache, update_wrapper
from itertools import tee, filterfalse
import typing as T
import uuid
from mesonbuild import mlog
_T = T.TypeVar('_T')
_U = T.TypeVar('_U')
have_fcntl = False
have_msvcrt = False
# {subproject: project_meson_version}
project_meson_versions = {}
try:
import fcntl
have_fcntl = True
except Exception:
pass
try:
import msvcrt
have_msvcrt = True
except Exception:
pass
from glob import glob
if os.path.basename(sys.executable) == 'meson.exe':
# In Windows and using the MSI installed executable.
python_command = [sys.executable, 'runpython']
else:
python_command = [sys.executable]
meson_command = None
GIT = shutil.which('git')
def git(cmd: T.List[str], workingdir: str, **kwargs) -> subprocess.CompletedProcess:
pc = subprocess.run([GIT, '-C', workingdir] + cmd,
# Redirect stdin to DEVNULL otherwise git messes up the
# console and ANSI colors stop working on Windows.
stdin=subprocess.DEVNULL, **kwargs)
# Sometimes git calls git recursively, such as `git submodule update
# --recursive` which will be without the above workaround, so set the
# console mode again just in case.
mlog.setup_console()
return pc
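# Illustrative sketch (not part of the original module): reading the current commit
# hash of a checkout through the git() wrapper above; the directory is an example.
def _git_head_example(workingdir: str) -> str:
    pc = git(['rev-parse', 'HEAD'], workingdir, stdout=subprocess.PIPE)
    return pc.stdout.decode().strip()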
def set_meson_command(mainfile):
global python_command
global meson_command
# On UNIX-like systems `meson` is a Python script
# On Windows `meson` and `meson.exe` are wrapper exes
if not mainfile.endswith('.py'):
meson_command = [mainfile]
elif os.path.isabs(mainfile) and mainfile.endswith('mesonmain.py'):
# Can't actually run meson with an absolute path to mesonmain.py, it must be run as -m mesonbuild.mesonmain
meson_command = python_command + ['-m', 'mesonbuild.mesonmain']
else:
# Either run uninstalled, or full path to meson-script.py
meson_command = python_command + [mainfile]
# We print this value for unit tests.
if 'MESON_COMMAND_TESTS' in os.environ:
mlog.log('meson_command is {!r}'.format(meson_command))
def is_ascii_string(astring) -> bool:
try:
if isinstance(astring, str):
astring.encode('ascii')
elif isinstance(astring, bytes):
astring.decode('ascii')
except UnicodeDecodeError:
return False
return True
def check_direntry_issues(direntry_array):
import locale
# Warn if the locale is not UTF-8. This can cause various unfixable issues
# such as os.stat not being able to decode filenames with unicode in them.
# There is no way to reset both the preferred encoding and the filesystem
# encoding, so we can just warn about it.
e = locale.getpreferredencoding()
if e.upper() != 'UTF-8' and not is_windows():
if not isinstance(direntry_array, list):
direntry_array = [direntry_array]
for de in direntry_array:
if is_ascii_string(de):
continue
            mlog.warning('''You are using {!r} which is not a Unicode-compatible
locale but you are trying to access a file system entry called {!r} which is
not pure ASCII. This may cause problems.
'''.format(e, de), file=sys.stderr)
# Put this in objects that should not get dumped to pickle files
# by accident.
import threading
an_unpicklable_object = threading.Lock()
class MesonException(Exception):
'''Exceptions thrown by Meson'''
def get_msg_with_context(self):
s = ''
if hasattr(self, 'lineno') and hasattr(self, 'file'):
s = get_error_location_string(self.file, self.lineno) + ' '
s += str(self)
return s
class EnvironmentException(MesonException):
'''Exceptions thrown while processing and creating the build environment'''
class FileMode:
# The first triad is for owner permissions, the second for group permissions,
# and the third for others (everyone else).
# For the 1st character:
# 'r' means can read
# '-' means not allowed
# For the 2nd character:
# 'w' means can write
# '-' means not allowed
# For the 3rd character:
# 'x' means can execute
# 's' means can execute and setuid/setgid is set (owner/group triads only)
# 'S' means cannot execute and setuid/setgid is set (owner/group triads only)
# 't' means can execute and sticky bit is set ("others" triads only)
# 'T' means cannot execute and sticky bit is set ("others" triads only)
# '-' means none of these are allowed
#
# The meanings of 'rwx' perms is not obvious for directories; see:
# https://www.hackinglinuxexposed.com/articles/20030424.html
#
# For information on this notation such as setuid/setgid/sticky bits, see:
# https://en.wikipedia.org/wiki/File_system_permissions#Symbolic_notation
symbolic_perms_regex = re.compile('[r-][w-][xsS-]' # Owner perms
'[r-][w-][xsS-]' # Group perms
'[r-][w-][xtT-]') # Others perms
def __init__(self, perms=None, owner=None, group=None):
self.perms_s = perms
self.perms = self.perms_s_to_bits(perms)
self.owner = owner
self.group = group
def __repr__(self):
ret = '<FileMode: {!r} owner={} group={}'
return ret.format(self.perms_s, self.owner, self.group)
@classmethod
def perms_s_to_bits(cls, perms_s):
'''
Does the opposite of stat.filemode(), converts strings of the form
'rwxr-xr-x' to st_mode enums which can be passed to os.chmod()
'''
if perms_s is None:
# No perms specified, we will not touch the permissions
return -1
eg = 'rwxr-xr-x'
if not isinstance(perms_s, str):
msg = 'Install perms must be a string. For example, {!r}'
raise MesonException(msg.format(eg))
if len(perms_s) != 9 or not cls.symbolic_perms_regex.match(perms_s):
msg = 'File perms {!r} must be exactly 9 chars. For example, {!r}'
raise MesonException(msg.format(perms_s, eg))
perms = 0
# Owner perms
if perms_s[0] == 'r':
perms |= stat.S_IRUSR
if perms_s[1] == 'w':
perms |= stat.S_IWUSR
if perms_s[2] == 'x':
perms |= stat.S_IXUSR
elif perms_s[2] == 'S':
perms |= stat.S_ISUID
elif perms_s[2] == 's':
perms |= stat.S_IXUSR
perms |= stat.S_ISUID
# Group perms
if perms_s[3] == 'r':
perms |= stat.S_IRGRP
if perms_s[4] == 'w':
perms |= stat.S_IWGRP
if perms_s[5] == 'x':
perms |= stat.S_IXGRP
elif perms_s[5] == 'S':
perms |= stat.S_ISGID
elif perms_s[5] == 's':
perms |= stat.S_IXGRP
perms |= stat.S_ISGID
# Others perms
if perms_s[6] == 'r':
perms |= stat.S_IROTH
if perms_s[7] == 'w':
perms |= stat.S_IWOTH
if perms_s[8] == 'x':
perms |= stat.S_IXOTH
elif perms_s[8] == 'T':
perms |= stat.S_ISVTX
elif perms_s[8] == 't':
perms |= stat.S_IXOTH
perms |= stat.S_ISVTX
return perms
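# Illustrative sketch (not part of the original module): the symbolic permission
# string 'rwxr-xr-x' corresponds to the familiar octal mode 0o755.
def _filemode_example() -> int:
    return FileMode.perms_s_to_bits('rwxr-xr-x')  # == 0o755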
class File:
def __init__(self, is_built: bool, subdir: str, fname: str):
self.is_built = is_built
self.subdir = subdir
self.fname = fname
assert(isinstance(self.subdir, str))
assert(isinstance(self.fname, str))
def __str__(self) -> str:
return self.relative_name()
def __repr__(self) -> str:
ret = '<File: {0}'
if not self.is_built:
ret += ' (not built)'
ret += '>'
return ret.format(self.relative_name())
@staticmethod
@lru_cache(maxsize=None)
def from_source_file(source_root: str, subdir: str, fname: str):
if not os.path.isfile(os.path.join(source_root, subdir, fname)):
raise MesonException('File %s does not exist.' % fname)
return File(False, subdir, fname)
@staticmethod
def from_built_file(subdir: str, fname: str):
return File(True, subdir, fname)
@staticmethod
def from_absolute_file(fname: str):
return File(False, '', fname)
@lru_cache(maxsize=None)
def rel_to_builddir(self, build_to_src: str) -> str:
if self.is_built:
return self.relative_name()
else:
return os.path.join(build_to_src, self.subdir, self.fname)
@lru_cache(maxsize=None)
def absolute_path(self, srcdir: str, builddir: str) -> str:
absdir = srcdir
if self.is_built:
absdir = builddir
return os.path.join(absdir, self.relative_name())
def endswith(self, ending: str) -> bool:
return self.fname.endswith(ending)
def split(self, s: str) -> T.List[str]:
return self.fname.split(s)
def __eq__(self, other) -> bool:
return (self.fname, self.subdir, self.is_built) == (other.fname, other.subdir, other.is_built)
def __hash__(self) -> int:
return hash((self.fname, self.subdir, self.is_built))
@lru_cache(maxsize=None)
def relative_name(self) -> str:
return os.path.join(self.subdir, self.fname)
def get_compiler_for_source(compilers, src):
for comp in compilers:
if comp.can_compile(src):
return comp
raise MesonException('No specified compiler can handle file {!s}'.format(src))
def classify_unity_sources(compilers, sources):
compsrclist = {}
for src in sources:
comp = get_compiler_for_source(compilers, src)
if comp not in compsrclist:
compsrclist[comp] = [src]
else:
compsrclist[comp].append(src)
return compsrclist
class OrderedEnum(Enum):
"""
An Enum which additionally offers homogeneous ordered comparison.
"""
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
class MachineChoice(OrderedEnum):
"""Enum class representing one of the two abstract machine names used in
most places: the build, and host, machines.
"""
BUILD = 0
HOST = 1
def get_lower_case_name(self):
return PerMachine('build', 'host')[self]
def get_prefix(self):
return PerMachine('build.', '')[self]
class PerMachine(T.Generic[_T]):
def __init__(self, build: _T, host: _T):
self.build = build
self.host = host
def __getitem__(self, machine: MachineChoice) -> _T:
return {
MachineChoice.BUILD: self.build,
MachineChoice.HOST: self.host,
}[machine]
def __setitem__(self, machine: MachineChoice, val: _T) -> None:
setattr(self, machine.get_lower_case_name(), val)
def miss_defaulting(self) -> "PerMachineDefaultable[T.Optional[_T]]":
"""Unset definition duplicated from their previous to None
This is the inverse of ''default_missing''. By removing defaulted
machines, we can elaborate the original and then redefault them and thus
avoid repeating the elaboration explicitly.
"""
unfreeze = PerMachineDefaultable() # type: PerMachineDefaultable[T.Optional[_T]]
unfreeze.build = self.build
unfreeze.host = self.host
if unfreeze.host == unfreeze.build:
unfreeze.host = None
return unfreeze
class PerThreeMachine(PerMachine[_T]):
"""Like `PerMachine` but includes `target` too.
    It turns out the target machine is the only extra thing we need to track.
    There's no need to compute the `target` field, so we don't bother overriding
    the `__getitem__`/`__setitem__` methods.
"""
def __init__(self, build: _T, host: _T, target: _T):
super().__init__(build, host)
self.target = target
def miss_defaulting(self) -> "PerThreeMachineDefaultable[T.Optional[_T]]":
"""Unset definition duplicated from their previous to None
This is the inverse of ''default_missing''. By removing defaulted
machines, we can elaborate the original and then redefault them and thus
avoid repeating the elaboration explicitly.
"""
unfreeze = PerThreeMachineDefaultable() # type: PerThreeMachineDefaultable[T.Optional[_T]]
unfreeze.build = self.build
unfreeze.host = self.host
unfreeze.target = self.target
if unfreeze.target == unfreeze.host:
unfreeze.target = None
if unfreeze.host == unfreeze.build:
unfreeze.host = None
return unfreeze
def matches_build_machine(self, machine: MachineChoice) -> bool:
return self.build == self[machine]
class PerMachineDefaultable(PerMachine[T.Optional[_T]]):
"""Extends `PerMachine` with the ability to default from `None`s.
"""
def __init__(self) -> None:
super().__init__(None, None)
def default_missing(self) -> "PerMachine[T.Optional[_T]]":
"""Default host to build
This allows just specifying nothing in the native case, and just host in the
cross non-compiler case.
"""
freeze = PerMachine(self.build, self.host)
if freeze.host is None:
freeze.host = freeze.build
return freeze
class PerThreeMachineDefaultable(PerMachineDefaultable, PerThreeMachine[T.Optional[_T]]):
"""Extends `PerThreeMachine` with the ability to default from `None`s.
"""
def __init__(self) -> None:
PerThreeMachine.__init__(self, None, None, None)
def default_missing(self) -> "PerThreeMachine[T.Optional[_T]]":
"""Default host to build and target to host.
This allows just specifying nothing in the native case, just host in the
cross non-compiler case, and just target in the native-built
cross-compiler case.
"""
freeze = PerThreeMachine(self.build, self.host, self.target)
if freeze.host is None:
freeze.host = freeze.build
if freeze.target is None:
freeze.target = freeze.host
return freeze
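# Illustrative sketch (not part of the original module): with only the build value
# set, default_missing() copies it into host (and into target for the three-machine
# variant), so downstream code never has to handle None.
def _per_machine_default_example() -> PerMachine:
    d = PerMachineDefaultable()   # build=None, host=None
    d.build = 'native-config'     # arbitrary example value
    return d.default_missing()    # both .build and .host are now 'native-config'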
def is_sunos() -> bool:
return platform.system().lower() == 'sunos'
def is_osx() -> bool:
return platform.system().lower() == 'darwin'
def is_linux() -> bool:
return platform.system().lower() == 'linux'
def is_android() -> bool:
return platform.system().lower() == 'android'
def is_haiku() -> bool:
return platform.system().lower() == 'haiku'
def is_openbsd() -> bool:
return platform.system().lower() == 'openbsd'
def is_windows() -> bool:
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
def is_cygwin() -> bool:
return platform.system().lower().startswith('cygwin')
def is_debianlike() -> bool:
return os.path.isfile('/etc/debian_version')
def is_dragonflybsd() -> bool:
return platform.system().lower() == 'dragonfly'
def is_netbsd() -> bool:
return platform.system().lower() == 'netbsd'
def is_freebsd() -> bool:
return platform.system().lower() == 'freebsd'
def exe_exists(arglist: T.List[str]) -> bool:
try:
if subprocess.run(arglist, timeout=10).returncode == 0:
return True
except (FileNotFoundError, subprocess.TimeoutExpired):
pass
return False
@lru_cache(maxsize=None)
def darwin_get_object_archs(objpath):
'''
For a specific object (executable, static library, dylib, etc), run `lipo`
to fetch the list of archs supported by it. Supports both thin objects and
'fat' objects.
'''
_, stdo, stderr = Popen_safe(['lipo', '-info', objpath])
if not stdo:
mlog.debug('lipo {}: {}'.format(objpath, stderr))
return None
stdo = stdo.rsplit(': ', 1)[1]
# Convert from lipo-style archs to meson-style CPUs
stdo = stdo.replace('i386', 'x86')
stdo = stdo.replace('arm64', 'aarch64')
# Add generic name for armv7 and armv7s
if 'armv7' in stdo:
stdo += ' arm'
return stdo.split()
def detect_vcs(source_dir):
vcs_systems = [
dict(name = 'git', cmd = 'git', repo_dir = '.git', get_rev = 'git describe --dirty=+', rev_regex = '(.*)', dep = '.git/logs/HEAD'),
dict(name = 'mercurial', cmd = 'hg', repo_dir = '.hg', get_rev = 'hg id -i', rev_regex = '(.*)', dep = '.hg/dirstate'),
dict(name = 'subversion', cmd = 'svn', repo_dir = '.svn', get_rev = 'svn info', rev_regex = 'Revision: (.*)', dep = '.svn/wc.db'),
dict(name = 'bazaar', cmd = 'bzr', repo_dir = '.bzr', get_rev = 'bzr revno', rev_regex = '(.*)', dep = '.bzr'),
]
# FIXME: this is much cleaner with pathlib.Path
segs = source_dir.replace('\\', '/').split('/')
for i in range(len(segs), -1, -1):
curdir = '/'.join(segs[:i])
for vcs in vcs_systems:
if os.path.isdir(os.path.join(curdir, vcs['repo_dir'])) and shutil.which(vcs['cmd']):
vcs['wc_dir'] = curdir
return vcs
return None
# a helper class which implements the same version ordering as RPM
class Version:
def __init__(self, s):
self._s = s
# split into numeric, alphabetic and non-alphanumeric sequences
sequences = re.finditer(r'(\d+|[a-zA-Z]+|[^a-zA-Z\d]+)', s)
# non-alphanumeric separators are discarded
sequences = [m for m in sequences if not re.match(r'[^a-zA-Z\d]+', m.group(1))]
# numeric sequences are converted from strings to ints
sequences = [int(m.group(1)) if m.group(1).isdigit() else m.group(1) for m in sequences]
self._v = sequences
def __str__(self):
return '%s (V=%s)' % (self._s, str(self._v))
def __repr__(self):
return '<Version: {}>'.format(self._s)
def __lt__(self, other):
if isinstance(other, Version):
return self.__cmp(other, operator.lt)
return NotImplemented
def __gt__(self, other):
if isinstance(other, Version):
return self.__cmp(other, operator.gt)
return NotImplemented
def __le__(self, other):
if isinstance(other, Version):
return self.__cmp(other, operator.le)
return NotImplemented
def __ge__(self, other):
if isinstance(other, Version):
return self.__cmp(other, operator.ge)
return NotImplemented
def __eq__(self, other):
if isinstance(other, Version):
return self._v == other._v
return NotImplemented
def __ne__(self, other):
if isinstance(other, Version):
return self._v != other._v
return NotImplemented
def __cmp(self, other, comparator):
# compare each sequence in order
for ours, theirs in zip(self._v, other._v):
# sort a non-digit sequence before a digit sequence
ours_is_int = isinstance(ours, int)
theirs_is_int = isinstance(theirs, int)
if ours_is_int != theirs_is_int:
return comparator(ours_is_int, theirs_is_int)
if ours != theirs:
return comparator(ours, theirs)
# if equal length, all components have matched, so equal
# otherwise, the version with a suffix remaining is greater
return comparator(len(self._v), len(other._v))
def _version_extract_cmpop(vstr2: str) -> T.Tuple[T.Callable[[T.Any, T.Any], bool], str]:
if vstr2.startswith('>='):
cmpop = operator.ge
vstr2 = vstr2[2:]
elif vstr2.startswith('<='):
cmpop = operator.le
vstr2 = vstr2[2:]
elif vstr2.startswith('!='):
cmpop = operator.ne
vstr2 = vstr2[2:]
elif vstr2.startswith('=='):
cmpop = operator.eq
vstr2 = vstr2[2:]
elif vstr2.startswith('='):
cmpop = operator.eq
vstr2 = vstr2[1:]
elif vstr2.startswith('>'):
cmpop = operator.gt
vstr2 = vstr2[1:]
elif vstr2.startswith('<'):
cmpop = operator.lt
vstr2 = vstr2[1:]
else:
cmpop = operator.eq
return (cmpop, vstr2)
def version_compare(vstr1: str, vstr2: str) -> bool:
(cmpop, vstr2) = _version_extract_cmpop(vstr2)
return cmpop(Version(vstr1), Version(vstr2))
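# Illustrative sketch (not part of the original module): the comparison operator is
# parsed off the front of the second argument, so all of these hold.
def _version_compare_examples() -> None:
    assert version_compare('1.2.3', '>=1.2')
    assert not version_compare('1.2.3', '<1.2')
    assert version_compare('1.2.3', '=1.2.3')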
def version_compare_many(vstr1, conditions):
if not isinstance(conditions, (list, tuple, frozenset)):
conditions = [conditions]
found = []
not_found = []
for req in conditions:
if not version_compare(vstr1, req):
not_found.append(req)
else:
found.append(req)
return not_found == [], not_found, found
# determine if the minimum version satisfying the condition |condition| exceeds
# the minimum version for a feature |minimum|
def version_compare_condition_with_min(condition: str, minimum: str) -> bool:
if condition.startswith('>='):
cmpop = operator.le
condition = condition[2:]
elif condition.startswith('<='):
return False
elif condition.startswith('!='):
return False
elif condition.startswith('=='):
cmpop = operator.le
condition = condition[2:]
elif condition.startswith('='):
cmpop = operator.le
condition = condition[1:]
elif condition.startswith('>'):
cmpop = operator.lt
condition = condition[1:]
elif condition.startswith('<'):
return False
else:
cmpop = operator.le
# Declaring a project(meson_version: '>=0.46') and then using features in
# 0.46.0 is valid, because (knowing the meson versioning scheme) '0.46.0' is
# the lowest version which satisfies the constraint '>=0.46'.
#
# But this will fail here, because the minimum version required by the
# version constraint ('0.46') is strictly less (in our version comparison)
# than the minimum version needed for the feature ('0.46.0').
#
# Map versions in the constraint of the form '0.46' to '0.46.0', to embed
# this knowledge of the meson versioning scheme.
condition = condition.strip()
if re.match(r'^\d+.\d+$', condition):
condition += '.0'
return cmpop(Version(minimum), Version(condition))
def default_libdir():
if is_debianlike():
try:
pc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
(stdo, _) = pc.communicate()
if pc.returncode == 0:
archpath = stdo.decode().strip()
return 'lib/' + archpath
except Exception:
pass
if is_freebsd():
return 'lib'
if os.path.isdir('/usr/lib64') and not os.path.islink('/usr/lib64'):
return 'lib64'
return 'lib'
def default_libexecdir():
# There is no way to auto-detect this, so it must be set at build time
return 'libexec'
def default_prefix():
return 'c:/' if is_windows() else '/usr/local'
def get_library_dirs() -> T.List[str]:
if is_windows():
return ['C:/mingw/lib'] # TODO: get programmatically
if is_osx():
return ['/usr/lib'] # TODO: get programmatically
# The following is probably Debian/Ubuntu specific.
# /usr/local/lib is first because it contains stuff
# installed by the sysadmin and is probably more up-to-date
# than /usr/lib. If you feel that this search order is
# problematic, please raise the issue on the mailing list.
unixdirs = ['/usr/local/lib', '/usr/lib', '/lib']
if is_freebsd():
return unixdirs
# FIXME: this needs to be further genericized for aarch64 etc.
machine = platform.machine()
if machine in ('i386', 'i486', 'i586', 'i686'):
plat = 'i386'
elif machine.startswith('arm'):
plat = 'arm'
else:
plat = ''
# Solaris puts 32-bit libraries in the main /lib & /usr/lib directories
# and 64-bit libraries in platform specific subdirectories.
if is_sunos():
if machine == 'i86pc':
plat = 'amd64'
elif machine.startswith('sun4'):
plat = 'sparcv9'
usr_platdir = Path('/usr/lib/') / plat
if usr_platdir.is_dir():
unixdirs += [str(x) for x in (usr_platdir).iterdir() if x.is_dir()]
if os.path.exists('/usr/lib64'):
unixdirs.append('/usr/lib64')
lib_platdir = Path('/lib/') / plat
if lib_platdir.is_dir():
unixdirs += [str(x) for x in (lib_platdir).iterdir() if x.is_dir()]
if os.path.exists('/lib64'):
unixdirs.append('/lib64')
return unixdirs
def has_path_sep(name, sep='/\\'):
'Checks if any of the specified @sep path separators are in @name'
for each in sep:
if each in name:
return True
return False
if is_windows():
# shlex.split is not suitable for splitting a command line on Windows (https://bugs.python.org/issue1724822);
# shlex.quote is similarly problematic. Below are "proper" implementations of these functions according to
# https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments and
# https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
_whitespace = ' \t\n\r'
_find_unsafe_char = re.compile(r'[{}"]'.format(_whitespace)).search
def quote_arg(arg):
if arg and not _find_unsafe_char(arg):
return arg
result = '"'
num_backslashes = 0
for c in arg:
if c == '\\':
num_backslashes += 1
else:
if c == '"':
# Escape all backslashes and the following double quotation mark
num_backslashes = num_backslashes * 2 + 1
result += num_backslashes * '\\' + c
num_backslashes = 0
# Escape all backslashes, but let the terminating double quotation
# mark we add below be interpreted as a metacharacter
result += (num_backslashes * 2) * '\\' + '"'
return result
def split_args(cmd: T.Sequence[str]) -> T.List[str]:
result = []
arg = ''
num_backslashes = 0
num_quotes = 0
in_quotes = False
for c in cmd:
if c == '\\':
num_backslashes += 1
else:
if c == '"' and not (num_backslashes % 2):
# unescaped quote, eat it
arg += (num_backslashes // 2) * '\\'
num_quotes += 1
in_quotes = not in_quotes
elif c in _whitespace and not in_quotes:
if arg or num_quotes:
# reached the end of the argument
result.append(arg)
arg = ''
num_quotes = 0
else:
if c == '"':
# escaped quote
num_backslashes = (num_backslashes - 1) // 2
arg += num_backslashes * '\\' + c
num_backslashes = 0
if arg or num_quotes:
result.append(arg)
return result
else:
def quote_arg(arg):
return shlex.quote(arg)
def split_args(cmd):
return shlex.split(cmd)
def join_args(args):
return ' '.join([quote_arg(x) for x in args])
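# Rough round-trip sketch for the helpers above (illustrative argument only):
# on Windows, join_args(['gcc', '-DMSG="hello world"']) quotes the second
# argument as "-DMSG=\"hello world\"", while elsewhere shlex.quote wraps it in
# single quotes; split_args() applied to either result recovers the list.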
def do_replacement(regex, line, variable_format, confdata):
missing_variables = set()
start_tag = '@'
backslash_tag = '\\@'
if variable_format == 'cmake':
start_tag = '${'
backslash_tag = '\\${'
def variable_replace(match):
# Pairs of escape characters before '@' or '\@'
if match.group(0).endswith('\\'):
num_escapes = match.end(0) - match.start(0)
return '\\' * (num_escapes // 2)
# Single escape character and '@'
elif match.group(0) == backslash_tag:
return start_tag
# Template variable to be replaced
else:
varname = match.group(1)
if varname in confdata:
(var, desc) = confdata.get(varname)
if isinstance(var, str):
pass
elif isinstance(var, int):
var = str(var)
else:
msg = 'Tried to replace variable {!r} value with ' \
'something other than a string or int: {!r}'
raise MesonException(msg.format(varname, var))
else:
missing_variables.add(varname)
var = ''
return var
return re.sub(regex, variable_replace, line), missing_variables
def do_mesondefine(line, confdata):
arr = line.split()
if len(arr) != 2:
raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip())
varname = arr[1]
try:
(v, desc) = confdata.get(varname)
except KeyError:
return '/* #undef %s */\n' % varname
if isinstance(v, bool):
if v:
return '#define %s\n' % varname
else:
return '#undef %s\n' % varname
elif isinstance(v, int):
return '#define %s %d\n' % (varname, v)
elif isinstance(v, str):
return '#define %s %s\n' % (varname, v)
else:
raise MesonException('#mesondefine argument "%s" is of unknown type.' % varname)
def do_conf_file(src, dst, confdata, variable_format, encoding='utf-8'):
try:
with open(src, encoding=encoding, newline='') as f:
data = f.readlines()
except Exception as e:
raise MesonException('Could not read input file %s: %s' % (src, str(e)))
# Only allow (a-z, A-Z, 0-9, _, -) as valid characters for a define
# Also allow escaping '@' with '\@'
if variable_format in ['meson', 'cmake@']:
regex = re.compile(r'(?:\\\\)+(?=\\?@)|\\@|@([-a-zA-Z0-9_]+)@')
elif variable_format == 'cmake':
regex = re.compile(r'(?:\\\\)+(?=\\?\$)|\\\${|\${([-a-zA-Z0-9_]+)}')
else:
raise MesonException('Format "{}" not handled'.format(variable_format))
search_token = '#mesondefine'
if variable_format != 'meson':
search_token = '#cmakedefine'
result = []
missing_variables = set()
# Detect when the configuration data is empty and no tokens were found
# during substitution so we can warn the user to use the `copy:` kwarg.
confdata_useless = not confdata.keys()
for line in data:
if line.startswith(search_token):
confdata_useless = False
line = do_mesondefine(line, confdata)
else:
line, missing = do_replacement(regex, line, variable_format, confdata)
missing_variables.update(missing)
if missing:
confdata_useless = False
result.append(line)
dst_tmp = dst + '~'
try:
with open(dst_tmp, 'w', encoding=encoding, newline='') as f:
f.writelines(result)
except Exception as e:
raise MesonException('Could not write output file %s: %s' % (dst, str(e)))
shutil.copymode(src, dst_tmp)
replace_if_different(dst, dst_tmp)
return missing_variables, confdata_useless
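# Substitution sketch for the meson format (hypothetical confdata entry
# 'version' -> ('1.2.3', 'desc')):
#
#   '#define VERSION "@version@"'  becomes  '#define VERSION "1.2.3"'
#   '#mesondefine HAVE_FOO'        becomes  '#define HAVE_FOO', '#undef HAVE_FOO',
#                                           or '/* #undef HAVE_FOO */' if unset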
CONF_C_PRELUDE = '''/*
* Autogenerated by the Meson build system.
* Do not edit, your changes will be lost.
*/
#pragma once
'''
CONF_NASM_PRELUDE = '''; Autogenerated by the Meson build system.
; Do not edit, your changes will be lost.
'''
def dump_conf_header(ofilename, cdata, output_format):
if output_format == 'c':
prelude = CONF_C_PRELUDE
prefix = '#'
elif output_format == 'nasm':
prelude = CONF_NASM_PRELUDE
prefix = '%'
ofilename_tmp = ofilename + '~'
with open(ofilename_tmp, 'w', encoding='utf-8') as ofile:
ofile.write(prelude)
for k in sorted(cdata.keys()):
(v, desc) = cdata.get(k)
if desc:
if output_format == 'c':
ofile.write('/* %s */\n' % desc)
elif output_format == 'nasm':
for line in desc.split('\n'):
ofile.write('; %s\n' % line)
if isinstance(v, bool):
if v:
ofile.write('%sdefine %s\n\n' % (prefix, k))
else:
ofile.write('%sundef %s\n\n' % (prefix, k))
elif isinstance(v, (int, str)):
ofile.write('%sdefine %s %s\n\n' % (prefix, k, v))
else:
raise MesonException('Unknown data type in configuration file entry: ' + k)
replace_if_different(ofilename, ofilename_tmp)
def replace_if_different(dst, dst_tmp):
# If contents are identical, don't touch the file to prevent
# unnecessary rebuilds.
different = True
try:
with open(dst, 'rb') as f1, open(dst_tmp, 'rb') as f2:
if f1.read() == f2.read():
different = False
except FileNotFoundError:
pass
if different:
os.replace(dst_tmp, dst)
else:
os.unlink(dst_tmp)
def listify(item: T.Any,
flatten: bool = True,
unholder: bool = False) -> T.List[T.Any]:
'''
Returns a list with all args embedded in a list if they are not a list.
This function preserves order.
@flatten: Convert lists of lists to a flat list
@unholder: Replace each item with the object it holds, if required
Note: unholding only works recursively when flattening
'''
if not isinstance(item, list):
if unholder and hasattr(item, 'held_object'):
item = item.held_object
return [item]
result = []
for i in item:
if unholder and hasattr(i, 'held_object'):
i = i.held_object
if flatten and isinstance(i, list):
result += listify(i, flatten=True, unholder=unholder)
else:
result.append(i)
return result
def extract_as_list(dict_object, *keys, pop=False, **kwargs):
'''
Extracts all values from given dict_object and listifies them.
'''
result = []
fetch = dict_object.get
if pop:
fetch = dict_object.pop
# If there's only one key, we don't return a list with one element
if len(keys) == 1:
return listify(fetch(keys[0], []), **kwargs)
# Return a list of values corresponding to *keys
for key in keys:
result.append(listify(fetch(key, []), **kwargs))
return result
def typeslistify(item: 'T.Union[_T, T.List[_T]]',
types: 'T.Union[T.Type[_T], T.Tuple[T.Type[_T]]]') -> T.List[_T]:
'''
Ensure that type(@item) is one of @types or a
list of items all of which are of type @types
'''
if isinstance(item, types):
item = T.cast(T.List[_T], [item])
if not isinstance(item, list):
raise MesonException('Item must be a list or one of {!r}'.format(types))
for i in item:
if i is not None and not isinstance(i, types):
raise MesonException('List item must be one of {!r}'.format(types))
return item
def stringlistify(item: T.Union[str, T.List[str]]) -> T.List[str]:
return typeslistify(item, str)
def expand_arguments(args):
expended_args = []
for arg in args:
if not arg.startswith('@'):
expended_args.append(arg)
continue
args_file = arg[1:]
try:
with open(args_file) as f:
extended_args = f.read().split()
expended_args += extended_args
except Exception as e:
print('Error expanding command line arguments, %s not found' % args_file)
print(e)
return None
return expended_args
def partition(pred, iterable):
'Use a predicate to partition entries into false entries and true entries'
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = tee(iterable)
return filterfalse(pred, t1), filter(pred, t2)
def Popen_safe(args: T.List[str], write: T.Optional[str] = None,
stdout: T.Union[T.BinaryIO, int] = subprocess.PIPE,
stderr: T.Union[T.BinaryIO, int] = subprocess.PIPE,
**kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
import locale
encoding = locale.getpreferredencoding()
# Redirect stdin to DEVNULL otherwise the command run by us here might mess
# up the console and ANSI colors will stop working on Windows.
if 'stdin' not in kwargs:
kwargs['stdin'] = subprocess.DEVNULL
if sys.version_info < (3, 6) or not sys.stdout.encoding or encoding.upper() != 'UTF-8':
p, o, e = Popen_safe_legacy(args, write=write, stdout=stdout, stderr=stderr, **kwargs)
else:
p = subprocess.Popen(args, universal_newlines=True, close_fds=False,
stdout=stdout, stderr=stderr, **kwargs)
o, e = p.communicate(write)
# Sometimes the command that we run will call another command which will be
# run without the above stdin workaround, so set the console mode again just in
# case.
mlog.log("****** process args : ", args)
mlog.setup_console()
return p, o, e
def Popen_safe_new(args: T.List[str], write: T.Optional[str] = None,
stdout: T.Union[T.BinaryIO, int] = subprocess.PIPE,
stderr: T.Union[T.BinaryIO, int] = subprocess.PIPE,
**kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
import locale
encoding = locale.getpreferredencoding()
# Redirect stdin to DEVNULL otherwise the command run by us here might mess
# up the console and ANSI colors will stop working on Windows.
if 'stdin' not in kwargs:
kwargs['stdin'] = subprocess.DEVNULL
if sys.version_info < (3, 6) or not sys.stdout.encoding or encoding.upper() != 'UTF-8':
mlog.log(" use popen_safe_legacy")
p, o, e = Popen_safe_legacy(args, write=write, stdout=stdout, stderr=stderr, **kwargs)
else:
mlog.log(" use subprocess popen")
mlog.log("write : ", write)
mlog.log("args", args)
p = subprocess.Popen(args, universal_newlines=True, close_fds=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, **kwargs)
mlog.log("output",p.stdout.readline())
mlog.log("error",p.stderr.readline())
o, e = p.communicate(write)
# Sometimes the command that we run will call another command which will be
# run without the above stdin workaround, so set the console mode again just in
# case.
mlog.log("****** process args : ", args)
mlog.setup_console()
return p, o, e
def Popen_safe_legacy(args: T.List[str], write: T.Optional[str] = None,
stdout: T.Union[T.BinaryIO, int] = subprocess.PIPE,
stderr: T.Union[T.BinaryIO, int] = subprocess.PIPE,
**kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
p = subprocess.Popen(args, universal_newlines=False, close_fds=False,
stdout=stdout, stderr=stderr, **kwargs)
input_ = None # type: T.Optional[bytes]
if write is not None:
input_ = write.encode('utf-8')
o, e = p.communicate(input_)
if o is not None:
if sys.stdout.encoding:
o = o.decode(encoding=sys.stdout.encoding, errors='replace').replace('\r\n', '\n')
else:
o = o.decode(errors='replace').replace('\r\n', '\n')
if e is not None:
if sys.stderr.encoding:
e = e.decode(encoding=sys.stderr.encoding, errors='replace').replace('\r\n', '\n')
else:
e = e.decode(errors='replace').replace('\r\n', '\n')
return p, o, e
def iter_regexin_iter(regexiter, initer):
'''
Takes each regular expression in @regexiter and tries to search for it in
every item in @initer. If there is a match, returns that match.
Else returns False.
'''
for regex in regexiter:
for ii in initer:
if not isinstance(ii, str):
continue
match = re.search(regex, ii)
if match:
return match.group()
return False
def _substitute_values_check_errors(command, values):
# Error checking
inregex = ('@INPUT([0-9]+)?@', '@PLAINNAME@', '@BASENAME@')
outregex = ('@OUTPUT([0-9]+)?@', '@OUTDIR@')
if '@INPUT@' not in values:
# Error out if any input-derived templates are present in the command
match = iter_regexin_iter(inregex, command)
if match:
m = 'Command cannot have {!r}, since no input files were specified'
raise MesonException(m.format(match))
else:
if len(values['@INPUT@']) > 1:
# Error out if @PLAINNAME@ or @BASENAME@ is present in the command
match = iter_regexin_iter(inregex[1:], command)
if match:
raise MesonException('Command cannot have {!r} when there is '
'more than one input file'.format(match))
# Error out if an invalid @INPUTnn@ template was specified
for each in command:
if not isinstance(each, str):
continue
match = re.search(inregex[0], each)
if match and match.group() not in values:
m = 'Command cannot have {!r} since there are only {!r} inputs'
raise MesonException(m.format(match.group(), len(values['@INPUT@'])))
if '@OUTPUT@' not in values:
# Error out if any output-derived templates are present in the command
match = iter_regexin_iter(outregex, command)
if match:
m = 'Command cannot have {!r} since there are no outputs'
raise MesonException(m.format(match))
else:
# Error out if an invalid @OUTPUTnn@ template was specified
for each in command:
if not isinstance(each, str):
continue
match = re.search(outregex[0], each)
if match and match.group() not in values:
m = 'Command cannot have {!r} since there are only {!r} outputs'
raise MesonException(m.format(match.group(), len(values['@OUTPUT@'])))
def substitute_values(command, values):
'''
Substitute the template strings in the @values dict into the list of
strings @command and return a new list. For a full list of the templates,
see get_filenames_templates_dict()
If multiple inputs/outputs are given in the @values dictionary, we
substitute @INPUT@ and @OUTPUT@ only if they are the entire string, not
just a part of it, and in that case we substitute *all* of them.
'''
# Error checking
_substitute_values_check_errors(command, values)
# Substitution
outcmd = []
rx_keys = [re.escape(key) for key in values if key not in ('@INPUT@', '@OUTPUT@')]
value_rx = re.compile('|'.join(rx_keys)) if rx_keys else None
for vv in command:
if not isinstance(vv, str):
outcmd.append(vv)
elif '@INPUT@' in vv:
inputs = values['@INPUT@']
if vv == '@INPUT@':
outcmd += inputs
elif len(inputs) == 1:
outcmd.append(vv.replace('@INPUT@', inputs[0]))
else:
raise MesonException("Command has '@INPUT@' as part of a "
"string and more than one input file")
elif '@OUTPUT@' in vv:
outputs = values['@OUTPUT@']
if vv == '@OUTPUT@':
outcmd += outputs
elif len(outputs) == 1:
outcmd.append(vv.replace('@OUTPUT@', outputs[0]))
else:
raise MesonException("Command has '@OUTPUT@' as part of a "
"string and more than one output file")
# Append values that are exactly a template string.
# This is faster than a string replace.
elif vv in values:
outcmd.append(values[vv])
# Substitute everything else with replacement
elif value_rx:
outcmd.append(value_rx.sub(lambda m: values[m.group(0)], vv))
else:
outcmd.append(vv)
return outcmd
def get_filenames_templates_dict(inputs, outputs):
'''
Create a dictionary with template strings as keys and values as values for
the following templates:
@INPUT@ - the full path to one or more input files, from @inputs
@OUTPUT@ - the full path to one or more output files, from @outputs
@OUTDIR@ - the full path to the directory containing the output files
If there is only one input file, the following keys are also created:
@PLAINNAME@ - the filename of the input file
@BASENAME@ - the filename of the input file with the extension removed
If there is more than one input file, the following keys are also created:
@INPUT0@, @INPUT1@, ... one for each input file
If there is more than one output file, the following keys are also created:
@OUTPUT0@, @OUTPUT1@, ... one for each output file
'''
values = {}
# Gather values derived from the input
if inputs:
# We want to substitute all the inputs.
values['@INPUT@'] = inputs
for (ii, vv) in enumerate(inputs):
# Write out @INPUT0@, @INPUT1@, ...
values['@INPUT{}@'.format(ii)] = vv
if len(inputs) == 1:
# Just one value, substitute @PLAINNAME@ and @BASENAME@
values['@PLAINNAME@'] = plain = os.path.basename(inputs[0])
values['@BASENAME@'] = os.path.splitext(plain)[0]
if outputs:
# Gather values derived from the outputs, similar to above.
values['@OUTPUT@'] = outputs
for (ii, vv) in enumerate(outputs):
values['@OUTPUT{}@'.format(ii)] = vv
# Outdir should be the same for all outputs
values['@OUTDIR@'] = os.path.dirname(outputs[0])
# Many external programs fail on empty arguments.
if values['@OUTDIR@'] == '':
values['@OUTDIR@'] = '.'
return values
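# Sketch of the resulting dict for inputs=['src/a.c'], outputs=['out/a.o']
# (hypothetical paths):
#
#   {'@INPUT@': ['src/a.c'], '@INPUT0@': 'src/a.c',
#    '@PLAINNAME@': 'a.c', '@BASENAME@': 'a',
#    '@OUTPUT@': ['out/a.o'], '@OUTPUT0@': 'out/a.o', '@OUTDIR@': 'out'}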
def _make_tree_writable(topdir):
# Ensure all files and directories under topdir are writable
# (and readable) by owner.
for d, _, files in os.walk(topdir):
os.chmod(d, os.stat(d).st_mode | stat.S_IWRITE | stat.S_IREAD)
for fname in files:
fpath = os.path.join(d, fname)
if os.path.isfile(fpath):
os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
def windows_proof_rmtree(f):
# On Windows if anyone is holding a file open you can't
# delete it. As an example an anti virus scanner might
# be scanning files you are trying to delete. The only
# way to fix this is to try again and again.
delays = [0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2]
# Start by making the tree writable.
_make_tree_writable(f)
for d in delays:
try:
shutil.rmtree(f)
return
except FileNotFoundError:
return
except OSError:
time.sleep(d)
# Try one last time and throw if it fails.
shutil.rmtree(f)
def windows_proof_rm(fpath):
"""Like windows_proof_rmtree, but for a single file."""
if os.path.isfile(fpath):
os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
delays = [0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2]
for d in delays:
try:
os.unlink(fpath)
return
except FileNotFoundError:
return
except OSError:
time.sleep(d)
os.unlink(fpath)
def detect_subprojects(spdir_name, current_dir='', result=None):
if result is None:
result = {}
spdir = os.path.join(current_dir, spdir_name)
if not os.path.exists(spdir):
return result
for trial in glob(os.path.join(spdir, '*')):
basename = os.path.basename(trial)
if trial == 'packagecache':
continue
append_this = True
if os.path.isdir(trial):
detect_subprojects(spdir_name, trial, result)
elif trial.endswith('.wrap') and os.path.isfile(trial):
basename = os.path.splitext(basename)[0]
else:
append_this = False
if append_this:
if basename in result:
result[basename].append(trial)
else:
result[basename] = [trial]
return result
# This isn't strictly correct. What we really want here is something like:
# class StringProtocol(typing_extensions.Protocol):
#
# def __str__(self) -> str: ...
#
# This would more accurately embody what this function can handle, but we
# don't have that yet, so instead we'll do some casting to work around it
def get_error_location_string(fname: str, lineno: str) -> str:
return '{}:{}:'.format(fname, lineno)
def substring_is_in_list(substr: str, strlist: T.List[str]) -> bool:
for s in strlist:
if substr in s:
return True
return False
class OrderedSet(collections.abc.MutableSet):
"""A set that preserves the order in which items are added, by first
insertion.
"""
def __init__(self, iterable=None):
self.__container = collections.OrderedDict()
if iterable:
self.update(iterable)
def __contains__(self, value):
return value in self.__container
def __iter__(self):
return iter(self.__container.keys())
def __len__(self):
return len(self.__container)
def __repr__(self):
# Don't print 'OrderedSet("")' for an empty set.
if self.__container:
return 'OrderedSet("{}")'.format(
'", "'.join(repr(e) for e in self.__container.keys()))
return 'OrderedSet()'
def __reversed__(self):
return reversed(self.__container)
def add(self, value):
self.__container[value] = None
def discard(self, value):
if value in self.__container:
del self.__container[value]
def update(self, iterable):
for item in iterable:
self.__container[item] = None
def difference(self, set_):
return type(self)(e for e in self if e not in set_)
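# Ordering sketch (illustrative): OrderedSet(['b', 'a', 'b', 'c']) iterates as
# 'b', 'a', 'c' -- duplicates are dropped and first-insertion order is kept.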
class BuildDirLock:
def __init__(self, builddir):
self.lockfilename = os.path.join(builddir, 'meson-private/meson.lock')
def __enter__(self):
self.lockfile = open(self.lockfilename, 'w')
try:
if have_fcntl:
fcntl.flock(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
elif have_msvcrt:
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
except (BlockingIOError, PermissionError):
self.lockfile.close()
raise MesonException('Some other Meson process is already using this build directory. Exiting.')
def __exit__(self, *args):
if have_fcntl:
fcntl.flock(self.lockfile, fcntl.LOCK_UN)
elif have_msvcrt:
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
self.lockfile.close()
def relpath(path: str, start: str) -> str:
# On Windows a relative path can't be evaluated for paths on two different
# drives (i.e. c:\foo and f:\bar). The only thing left to do is to use the
# original absolute path.
try:
return os.path.relpath(path, start)
except (TypeError, ValueError):
return path
class LibType(Enum):
"""Enumeration for library types."""
SHARED = 0
STATIC = 1
PREFER_SHARED = 2
PREFER_STATIC = 3
class ProgressBarFallback: # lgtm [py/iter-returns-non-self]
'''
Fallback progress bar implementation when tqdm is not found
Since this class is not an actual iterator, but only provides a minimal
fallback, it is safe to ignore the 'Iterator does not return self from
__iter__ method' warning.
'''
def __init__(self, iterable=None, total=None, bar_type=None, desc=None):
if iterable is not None:
self.iterable = iter(iterable)
return
self.total = total
self.done = 0
self.printed_dots = 0
if self.total and bar_type == 'download':
print('Download size:', self.total)
if desc:
print('{}: '.format(desc), end='')
# Pretend to be an iterator when called as one and don't print any
# progress
def __iter__(self):
return self.iterable
def __next__(self):
return next(self.iterable)
def print_dot(self):
print('.', end='')
sys.stdout.flush()
self.printed_dots += 1
def update(self, progress):
self.done += progress
if not self.total:
# Just print one dot per call if we don't have a total length
self.print_dot()
return
ratio = int(self.done / self.total * 10)
while self.printed_dots < ratio:
self.print_dot()
def close(self):
print('')
try:
from tqdm import tqdm
class ProgressBar(tqdm):
def __init__(self, *args, bar_type=None, **kwargs):
if bar_type == 'download':
kwargs.update({'unit': 'bytes', 'leave': True})
else:
kwargs.update({'leave': False})
kwargs['ncols'] = 100
super().__init__(*args, **kwargs)
except ImportError:
ProgressBar = ProgressBarFallback
def get_wine_shortpath(winecmd, wine_paths):
""" Get A short version of @wine_paths to avoid
reaching WINEPATH number of char limit.
"""
seen = set()
wine_paths = [p for p in wine_paths if not (p in seen or seen.add(p))]
getShortPathScript = '%s.bat' % str(uuid.uuid4()).lower()[:5]
with open(getShortPathScript, mode='w') as f:
f.write("@ECHO OFF\nfor %%x in (%*) do (\n echo|set /p=;%~sx\n)\n")
f.flush()
try:
with open(os.devnull, 'w') as stderr:
wine_path = subprocess.check_output(
winecmd +
['cmd', '/C', getShortPathScript] + wine_paths,
stderr=stderr).decode('utf-8')
except subprocess.CalledProcessError as e:
print("Could not get short paths: %s" % e)
wine_path = ';'.join(wine_paths)
finally:
os.remove(getShortPathScript)
if len(wine_path) > 2048:
raise MesonException(
'WINEPATH size {} > 2048'
' this will cause random failure.'.format(
len(wine_path)))
return wine_path.strip(';')
def run_once(func):
ret = []
def wrapper(*args, **kwargs):
if ret:
return ret[0]
val = func(*args, **kwargs)
ret.append(val)
return val
return update_wrapper(wrapper, func)
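# Usage sketch: decorating an expensive probe caches its first return value,
# and later calls return that cached value regardless of arguments.
#
#   @run_once
#   def detect_toolchain():   # hypothetical function name
#       ...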
class OptionProxy:
def __init__(self, value):
self.value = value
class OptionOverrideProxy:
'''Mimic an option list but transparently override
selected option values.'''
def __init__(self, overrides, *options):
self.overrides = overrides
self.options = options
def __getitem__(self, option_name):
for opts in self.options:
if option_name in opts:
return self._get_override(option_name, opts[option_name])
raise KeyError('Option not found', option_name)
def _get_override(self, option_name, base_opt):
if option_name in self.overrides:
return OptionProxy(base_opt.validate_value(self.overrides[option_name]))
return base_opt
def copy(self):
result = {}
for opts in self.options:
for option_name in opts:
result[option_name] = self._get_override(option_name, opts[option_name])
return result
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
app/interface/main/app-interface/dao/favorite/dao_test.go
|
package favorite
import (
"context"
"flag"
"os"
"testing"
"go-common/app/interface/main/app-interface/conf"
. "github.com/smartystreets/goconvey/convey"
)
var (
dao *Dao
)
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.app-svr.app-interface")
flag.Set("conf_token", "1mWvdEwZHmCYGoXJCVIdszBOPVdtpXb3")
flag.Set("tree_id", "2688")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../../cmd/app-interface-test.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
dao = New(conf.Conf)
os.Exit(m.Run())
// time.Sleep(time.Second)
}
func TestDao_Folders(t *testing.T) {
Convey("folder", t, func() {
gotFs, err := dao.Folders(context.Background(), 1, 1, "android", 0, true)
So(gotFs, ShouldNotBeEmpty)
So(err, ShouldBeNil)
})
}
func TestDao_FolderVideo(t *testing.T) {
Convey("folder video", t, func() {
gotFav, err := dao.FolderVideo(context.Background(), "", "", "", "", "", "", "", 0, 0, 1, 20, 1, 0, 1)
So(gotFav, ShouldNotBeEmpty)
So(err, ShouldBeNil)
})
}
|
[
"\"DEPLOY_ENV\""
] |
[] |
[
"DEPLOY_ENV"
] |
[]
|
["DEPLOY_ENV"]
|
go
| 1 | 0 | |
clean/config.py
|
"""Config file manager."""
import json
import os
from pathlib import Path
import click
from .updates.updator import need_update
from .updates.updator import update_config
class NoConfigFileException(Exception):
"""this exception throws If the config file found."""
pass
def is_valid_glob_path(glob_and_path):
"""Check config file is valid format."""
if 'path' not in glob_and_path:
return False
if 'glob' not in glob_and_path:
return False
return True
def get_config_path() -> Path:
"""Get config file path by environment variable or default path."""
config_file_name = '.cleanrc'
env_config_raw_path = os.getenv('CLEANRC_PATH')
if env_config_raw_path is None:
default_config_path = Path.home() / config_file_name
else:
default_config_path = Path(env_config_raw_path)
if default_config_path.is_dir():
default_config_path /= config_file_name
if not default_config_path.is_file():
raise NoConfigFileException('{}'.format(str(default_config_path)))
return default_config_path
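# Resolution sketch (hypothetical paths): with CLEANRC_PATH unset the config is
# ~/.cleanrc; with CLEANRC_PATH pointing at a directory such as /etc/clean it
# becomes /etc/clean/.cleanrc; if the file does not exist,
# NoConfigFileException is raised.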
class Config(object):
"""Config file manager class.
Returns:
Config -- config file instance
"""
def __init__(self, config_path: Path = None):
"""Initialize config class.
Keyword Arguments:
config_path {Path} -- set config file path
(default: {default_config_path})
"""
if config_path is None:
config_path = get_config_path()
self.config_path = config_path
if not self.config_path.is_file():
if self.config_path.exists():
click.echo(
'Can\'t create the config file; something with the same name already exists. ' +
'Please check your home\'s {}.'.format(str(config_path)))
exit(1)
self._create_new_config_file()
self._load_file()
def add_glob_path(self, glob: str, path: str,
enable_meta_tag: bool = True) -> bool:
"""Add new glob path to config file."""
if self._is_contain_same_config(glob, path):
return False
self.config['path'].append({
'glob': glob,
'path': path,
'use_meta_tag': enable_meta_tag
})
self._save_file()
return True
def _is_contain_same_config(self, glob: str, path: str) -> bool:
return any(x['path'] == path and x['glob'] == glob
for x in self.config['path'])
def delete_glob_path(self, id: int) -> dict:
"""Delete registered glob and path by id.
Arguments:
id {int} -- the glob and path's id which you want to delete.
Returns:
{{'glob': string, 'path': string}|None} -- the setting that was deleted.
"""
# Check that the list of paths is not empty
if not self.config['path']:
click.echo('There are no path settings. ' +
'Please add a path setting by "add" command.')
return None
# Check that the index exists in the list
if 0 > id:
click.echo(
'Please input 0 or positive id. The max id is {}.'.format(
len(self.config['path'])))
if len(self.config['path']) <= id:
click.echo('The id is too big. Please input 0 <= id < {}.'.format(
len(self.config['path'])))
return None
deleted_path = self.config['path'].pop(id)
self._save_file()
return deleted_path
def list_glob_path(self) -> list:
"""Return a list of path configs."""
return [i for i in self.config['path'] if is_valid_glob_path(i)]
def _save_file(self):
with self.config_path.open(mode='w', encoding='utf_8') as f:
f.write(json.dumps(self.config))
def _create_new_config_file(self):
with self.config_path.open(mode='w', encoding='utf_8') as f:
self.config = {'path': []}
f.write(json.dumps(self.config))
def get_config(self) -> dict:
"""Get config dictionary."""
return self.config
def _back_up_file(self):
with self.config_path.open(encoding='utf_8') as f:
with (self.config_path.parent /
(self.config_path.name + '.bk')).open(
mode='w', encoding='utf_8') as g:
g.write(f.read())
def _load_file(self):
with self.config_path.open(encoding='utf_8') as f:
config_text = f.read()
self.config = json.loads(config_text)
if need_update(self.config):
self._back_up_file()
update_config(self.config)
self._save_file()
|
[] |
[] |
[
"CLEANRC_PATH"
] |
[]
|
["CLEANRC_PATH"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"github.com/grupokindynos/obol/services/exchanges/birake"
"github.com/grupokindynos/obol/services/exchanges/bithumb"
"github.com/grupokindynos/obol/services/exchanges/pancake"
"net/http"
"os"
"time"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/grupokindynos/obol/controllers"
"github.com/grupokindynos/obol/models"
"github.com/grupokindynos/obol/services"
"github.com/grupokindynos/obol/services/exchanges/binance"
"github.com/grupokindynos/obol/services/exchanges/bitrue"
"github.com/grupokindynos/obol/services/exchanges/bittrex"
"github.com/grupokindynos/obol/services/exchanges/crex24"
"github.com/grupokindynos/obol/services/exchanges/graviex"
"github.com/grupokindynos/obol/services/exchanges/hitbtc"
"github.com/grupokindynos/obol/services/exchanges/kucoin"
"github.com/grupokindynos/obol/services/exchanges/lukki"
"github.com/grupokindynos/obol/services/exchanges/southxhcange"
"github.com/grupokindynos/obol/services/exchanges/stex"
_ "github.com/heroku/x/hmetrics/onload"
"github.com/joho/godotenv"
"github.com/ulule/limiter/v3"
mgin "github.com/ulule/limiter/v3/drivers/middleware/gin"
"github.com/ulule/limiter/v3/drivers/store/memory"
)
func init() {
_ = godotenv.Load()
}
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
App := GetApp()
err := App.Run(":" + port)
if err != nil {
panic(err)
}
}
// GetApp is used to wrap all the additions to the GIN API.
func GetApp() *gin.Engine {
App := gin.Default()
App.Use(cors.Default())
ApplyRoutes(App)
return App
}
// ApplyRoutes is used to attach all the routes to the API service.
func ApplyRoutes(r *gin.Engine) {
rateService := &services.RateSevice{
FiatRates: &models.FiatRates{
Rates: nil,
LastUpdated: time.Time{},
},
BittrexService: bittrex.InitService(),
BinanceService: binance.InitService(),
Crex24Service: crex24.InitService(),
StexService: stex.InitService(),
SouthXChangeService: southxhcange.InitService(),
KuCoinService: kucoin.InitService(),
GraviexService: graviex.InitService(),
BitrueService: bitrue.InitService(),
HitBTCService: hitbtc.InitService(),
LukkiService: lukki.InitService(),
BithumbService: bithumb.InitService(),
BirakeService: birake.InitService(),
PancakeService: pancake.InitService(),
}
err := rateService.LoadFiatRates()
if err != nil {
panic(err)
}
api := r.Group("/")
{
rate := limiter.Rate{
Period: 1 * time.Hour,
Limit: 1000,
}
store := memory.NewStore()
limiterMiddleware := mgin.NewMiddleware(limiter.New(store, rate))
api.Use(limiterMiddleware)
rateCtrl := controllers.RateController{RateService: rateService, RatesCache: make(map[string]controllers.CoinRate)}
api.GET("simple/:coin", rateCtrl.GetCoinRates)
api.GET("complex/:fromcoin/:tocoin", rateCtrl.GetCoinRateFromCoinToCoin)
api.GET("liquidity/:coin", rateCtrl.GetCoinLiquidity)
api.GET("rate/margin/:fromCoin/:toCoin", rateCtrl.GetCoinToCoinRateWithExchangeMargin)
}
r.NoRoute(func(c *gin.Context) {
c.String(http.StatusNotFound, "Not Found")
})
apiv2 := r.Group("/v2/")
{
rate := limiter.Rate{
Period: 1 * time.Hour,
Limit: 1000,
}
store := memory.NewStore()
limiterMiddleware := mgin.NewMiddleware(limiter.New(store, rate))
apiv2.Use(limiterMiddleware)
rateCtrl := controllers.RateController{RateService: rateService, RatesCache: make(map[string]controllers.CoinRate)}
apiv2.GET("simple/:coin", rateCtrl.GetCoinRatesV2)
apiv2.GET("complexfiat/:fromcoin/:tocoin", rateCtrl.GetCoinToFIATRate)
apiv2.GET("node/:coin", rateCtrl.GetNodeProvider)
}
r.NoRoute(func(c *gin.Context) {
c.String(http.StatusNotFound, "Not Found")
})
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
test/form3-account-api_test.go
|
package interview_accountapi
import (
"encoding/json"
"fmt"
uuid "github.com/satori/go.uuid"
"github.com/stretchr/testify/require"
tiny "github.com/yusufunlu/tinyclient"
"io/ioutil"
"os"
"testing"
)
type Account struct {
Data struct {
Type string `json:"type,omitempty"`
ID string `json:"id,omitempty"`
OrganisationID string `json:"organisation_id,omitempty"`
Version int `json:"version,omitempty"`
Attributes struct {
Country string `json:"country,omitempty"`
BaseCurrency string `json:"base_currency,omitempty"`
AccountNumber string `json:"account_number,omitempty"`
BankID string `json:"bank_id,omitempty"`
BankIDCode string `json:"bank_id_code,omitempty"`
Bic string `json:"bic,omitempty"`
Iban string `json:"iban,omitempty"`
Name []string `json:"name,omitempty"`
AlternativeNames []string `json:"alternative_names,omitempty"`
AccountClassification string `json:"account_classification,omitempty"`
JointAccount bool `json:"joint_account,omitempty"`
AccountMatchingOptOut bool `json:"account_matching_opt_out,omitempty"`
SecondaryIdentification string `json:"secondary_identification,omitempty"`
Switched bool `json:"switched,omitempty"`
PrivateIdentification *struct {
BirthDate string `json:"birth_date,omitempty"`
BirthCountry string `json:"birth_country,omitempty"`
Identification string `json:"identification,omitempty"`
Address []string `json:"address,omitempty,omitempty"`
City string `json:"city,omitempty"`
Country string `json:"country,omitempty"`
} `json:"private_identification,omitempty"`
OrganisationIdentification *struct {
Identification string `json:"identification,omitempty"`
Actors []struct {
Name []string `json:"name,omitempty"`
BirthDate string `json:"birth_date,omitempty"`
Residency string `json:"residency,omitempty"`
} `json:"actors,omitempty,omitempty"`
Address []string `json:"addres,omitempty"`
City string `json:"city,omitempty"`
Country string `json:"country,omitempty"`
} `json:"organisation_identification,omitempty"`
Status string `json:"status,omitempty"`
} `json:"attributes,omitempty"`
Relationships *struct {
MasterAccount *struct {
Data []struct {
Type string `json:"type,omitempty"`
ID string `json:"id,omitempty"`
} `json:"data,omitempty"`
} `json:"master_account,omitempty"`
AccountEvents *struct {
Data []struct {
Type string `json:"type,omitempty"`
ID string `json:"id,omitempty"`
} `json:"data,omitempty"`
} `json:"account_events,omitempty"`
} `json:"relationships,omitempty"`
} `json:"data"`
}
type ErrorResponse struct {
ErrorMessage string `json:"error_message"`
}
func getApiServiceName() string {
url := fmt.Sprintf("%s%s", os.Getenv("servicename"), ":8080/v1/organisation/accounts")
return url
}
func readTestData(v interface{}) {
jsonFile, err := os.Open("./testdata/account-post-data.json")
if err != nil {
fmt.Println(err)
}
defer jsonFile.Close()
accountBytes, _ := ioutil.ReadAll(jsonFile)
json.Unmarshal(accountBytes, v)
}
func TestPostSuccess(t *testing.T) {
account := Account{}
readTestData(&account)
url := getApiServiceName()
client := tiny.NewClient()
request := client.NewRequest().SetURL(url).SetBody(account).SetMethod(tiny.Post).
SetContentType("application/json; charset=utf-8")
response, err := client.Send(request)
require.NoError(t, err)
require.Equal(t, 201, response.Response.StatusCode)
respAccount := Account{}
err = response.BodyUnmarshall(&respAccount)
require.NoError(t, err)
}
func TestFetchSuccess(t *testing.T) {
account := Account{}
readTestData(&account)
id := account.Data.ID
url := fmt.Sprintf("%s/%s", getApiServiceName(), id)
client := tiny.NewClient()
request := client.NewRequest().SetURL(url).SetMethod(tiny.Get).
SetContentType("application/json; charset=utf-8")
response, err := client.Send(request)
require.NoError(t, err)
require.Equal(t, 200, response.Response.StatusCode)
respAccount := Account{}
err = response.BodyUnmarshall(&respAccount)
require.NoError(t, err)
require.Equal(t, respAccount.Data.ID, id)
}
func TestFetchFail(t *testing.T) {
id := uuid.NewV4().String()
url := fmt.Sprintf("%s/%s", getApiServiceName(), id)
client := tiny.NewClient().SetDebugMode(true)
request := client.NewRequest().SetURL(url).SetMethod(tiny.Get).
SetContentType("application/json; charset=utf-8")
response, err := client.Send(request)
require.NoError(t, err)
require.Equal(t, 404, response.Response.StatusCode)
errRes := ErrorResponse{}
err = response.BodyUnmarshall(&errRes)
require.NoError(t, err)
require.True(t, len(errRes.ErrorMessage) > 0)
}
func TestDeleteSuccess(t *testing.T) {
account := Account{}
readTestData(&account)
id := account.Data.ID
version := account.Data.Version
url := fmt.Sprintf("%s/%s", getApiServiceName(), id)
client := tiny.NewClient()
request := client.NewRequest().SetURL(url).SetMethod(tiny.Delete).
SetContentType("application/json; charset=utf-8").
AddQueryParam("version", fmt.Sprintf("%d", version))
response, err := client.Send(request)
require.NoError(t, err)
require.Equal(t, 204, response.Response.StatusCode)
}
func TestDeleteNotExistFail(t *testing.T) {
version := 0
id := uuid.NewV4().String()
url := fmt.Sprintf("%s/%s", getApiServiceName(), id)
client := tiny.NewClient()
request := client.NewRequest().SetURL(url).SetMethod(tiny.Delete).
SetContentType("application/json; charset=utf-8").
AddQueryParam("version", fmt.Sprintf("%d", version))
response, err := client.Send(request)
require.NoError(t, err)
require.Equal(t, 404, response.Response.StatusCode)
}
func TestDeleteVersionIncorrectFail(t *testing.T) {
account := Account{}
readTestData(&account)
id := account.Data.ID
version := account.Data.Version + 1
url := fmt.Sprintf("%s/%s", getApiServiceName(), id)
client := tiny.NewClient()
request := client.NewRequest().SetURL(url).SetMethod(tiny.Delete).
SetContentType("application/json; charset=utf-8").
AddQueryParam("version", fmt.Sprintf("%d", version))
response, err := client.Send(request)
require.NoError(t, err)
require.Equal(t, 404, response.Response.StatusCode)
}
func TestPostFail(t *testing.T) {
account := Account{}
readTestData(&account)
url := getApiServiceName()
client := tiny.NewClient()
request := client.NewRequest().SetURL(url).SetBody(account).SetMethod(tiny.Post).
SetContentType("application/json; charset=utf-8")
response, err := client.Send(request)
require.NoError(t, err)
require.Equal(t, 201, response.Response.StatusCode)
response, err = client.Send(request)
require.Equal(t, 409, response.Response.StatusCode)
errRes := ErrorResponse{}
err = response.BodyUnmarshall(&errRes)
require.NoError(t, err)
require.True(t, len(errRes.ErrorMessage) > 0)
// clean up the data created by this test
TestDeleteSuccess(t)
}
|
[
"\"servicename\""
] |
[] |
[
"servicename"
] |
[]
|
["servicename"]
|
go
| 1 | 0 | |
djotes/wsgi.py
|
"""
WSGI config for djotes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djotes.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/gdb/mmap.py
|
# -*- python -*-
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import gdb_test
class MmapTest(gdb_test.GdbTest):
def setUp(self):
os.environ['NACL_FAULT_INJECTION'] = (
'MMAP_BYPASS_DESCRIPTOR_SAFETY_CHECK=GF/@')
super(MmapTest, self).setUp()
def test_mmap(self):
self.gdb.Command('break mmap_breakpoint')
self.gdb.ResumeAndExpectStop('continue', 'breakpoint-hit')
self.gdb.ResumeAndExpectStop('finish', 'function-finished')
# Check that we can read from memory mapped files.
self.assertEquals(gdb_test.ParseNumber(self.gdb.Eval('*file_mapping')), 123)
self.gdb.ResumeAndExpectStop('continue', 'breakpoint-hit')
self.gdb.ResumeAndExpectStop('finish', 'function-finished')
file_mapping_str = self.gdb.Eval('file_mapping')
file_mapping = gdb_test.ParseNumber(file_mapping_str)
self.gdb.Command('break *' + file_mapping_str)
self.gdb.ResumeAndExpectStop('continue', 'breakpoint-hit')
# Check that breakpoint in memory mapped code is working.
self.assertEquals(self.gdb.GetPC(), file_mapping)
self.gdb.ResumeAndExpectStop('continue', 'breakpoint-hit')
self.gdb.ResumeAndExpectStop('finish', 'function-finished')
file_mapping_str = self.gdb.Eval('file_mapping')
file_mapping = gdb_test.ParseNumber(file_mapping_str)
self.gdb.Command('break *' + file_mapping_str)
self.gdb.ResumeAndExpectStop('continue', 'breakpoint-hit')
# Check that breakpoint in memory mapped code is working.
self.assertEquals(self.gdb.GetPC(), file_mapping)
if __name__ == '__main__':
gdb_test.Main()
|
[] |
[] |
[
"NACL_FAULT_INJECTION"
] |
[]
|
["NACL_FAULT_INJECTION"]
|
python
| 1 | 0 | |
tidb-server/main.go
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"os"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
parsertypes "github.com/pingcap/parser/types"
pumpcli "github.com/pingcap/tidb-tools/tidb-binlog/pump_client"
"github.com/pingcap/tidb/bindinfo"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/plugin"
"github.com/pingcap/tidb/privilege/privileges"
"github.com/pingcap/tidb/server"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx/binloginfo"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
kvstore "github.com/pingcap/tidb/store"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/gcworker"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/domainutil"
"github.com/pingcap/tidb/util/kvcache"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/printer"
"github.com/pingcap/tidb/util/profile"
"github.com/pingcap/tidb/util/signal"
"github.com/pingcap/tidb/util/storeutil"
"github.com/pingcap/tidb/util/sys/linux"
storageSys "github.com/pingcap/tidb/util/sys/storage"
"github.com/pingcap/tidb/util/systimemon"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
pd "github.com/tikv/pd/client"
"go.uber.org/automaxprocs/maxprocs"
"go.uber.org/zap"
"google.golang.org/grpc/grpclog"
)
// Flag Names
const (
nmVersion = "V"
nmConfig = "config"
nmConfigCheck = "config-check"
nmConfigStrict = "config-strict"
nmStore = "store"
nmStorePath = "path"
nmHost = "host"
nmAdvertiseAddress = "advertise-address"
nmPort = "P"
nmCors = "cors"
nmSocket = "socket"
nmEnableBinlog = "enable-binlog"
nmRunDDL = "run-ddl"
nmLogLevel = "L"
nmLogFile = "log-file"
nmLogSlowQuery = "log-slow-query"
nmReportStatus = "report-status"
nmStatusHost = "status-host"
nmStatusPort = "status"
nmMetricsAddr = "metrics-addr"
nmMetricsInterval = "metrics-interval"
nmDdlLease = "lease"
nmTokenLimit = "token-limit"
nmPluginDir = "plugin-dir"
nmPluginLoad = "plugin-load"
nmRepairMode = "repair-mode"
nmRepairList = "repair-list"
nmRequireSecureTransport = "require-secure-transport"
nmProxyProtocolNetworks = "proxy-protocol-networks"
nmProxyProtocolHeaderTimeout = "proxy-protocol-header-timeout"
nmAffinityCPU = "affinity-cpus"
)
var (
version = flagBoolean(nmVersion, false, "print version information and exit")
configPath = flag.String(nmConfig, "", "config file path")
configCheck = flagBoolean(nmConfigCheck, false, "check config file validity and exit")
configStrict = flagBoolean(nmConfigStrict, false, "enforce config file validity")
// Base
store = flag.String(nmStore, "unistore", "registered store name, [tikv, mocktikv, unistore]")
storePath = flag.String(nmStorePath, "/tmp/tidb", "tidb storage path")
host = flag.String(nmHost, "0.0.0.0", "tidb server host")
advertiseAddress = flag.String(nmAdvertiseAddress, "", "tidb server advertise IP")
port = flag.String(nmPort, "4000", "tidb server port")
cors = flag.String(nmCors, "", "tidb server allow cors origin")
socket = flag.String(nmSocket, "", "The socket file to use for connection.")
enableBinlog = flagBoolean(nmEnableBinlog, false, "enable generate binlog")
runDDL = flagBoolean(nmRunDDL, true, "run ddl worker on this tidb-server")
ddlLease = flag.String(nmDdlLease, "45s", "schema lease duration, very dangerous to change; only do so if you know what you are doing")
tokenLimit = flag.Int(nmTokenLimit, 1000, "the limit of concurrent executed sessions")
pluginDir = flag.String(nmPluginDir, "/data/deploy/plugin", "the folder that hold plugin")
pluginLoad = flag.String(nmPluginLoad, "", "wait load plugin name(separated by comma)")
affinityCPU = flag.String(nmAffinityCPU, "", "affinity cpu (cpu-no. separated by comma, e.g. 1,2,3)")
repairMode = flagBoolean(nmRepairMode, false, "enable admin repair mode")
repairList = flag.String(nmRepairList, "", "admin repair table list")
requireTLS = flag.Bool(nmRequireSecureTransport, false, "require client use secure transport")
// Log
logLevel = flag.String(nmLogLevel, "info", "log level: info, debug, warn, error, fatal")
logFile = flag.String(nmLogFile, "", "log file path")
logSlowQuery = flag.String(nmLogSlowQuery, "", "slow query file path")
// Status
reportStatus = flagBoolean(nmReportStatus, true, "If enable status report HTTP service.")
statusHost = flag.String(nmStatusHost, "0.0.0.0", "tidb server status host")
statusPort = flag.String(nmStatusPort, "10080", "tidb server status port")
metricsAddr = flag.String(nmMetricsAddr, "", "prometheus pushgateway address, leaves it empty will disable prometheus push.")
metricsInterval = flag.Uint(nmMetricsInterval, 15, "prometheus client push interval in second, set \"0\" to disable prometheus push.")
// PROXY Protocol
proxyProtocolNetworks = flag.String(nmProxyProtocolNetworks, "", "proxy protocol networks allowed IP or *, empty mean disable proxy protocol support")
proxyProtocolHeaderTimeout = flag.Uint(nmProxyProtocolHeaderTimeout, 5, "proxy protocol header read timeout, unit is second.")
)
var (
storage kv.Storage
dom *domain.Domain
svr *server.Server
graceful bool
)
func main() {
flag.Parse()
if *version {
fmt.Println(printer.GetTiDBInfo())
os.Exit(0)
}
registerStores()
registerMetrics()
config.InitializeConfig(*configPath, *configCheck, *configStrict, reloadConfig, overrideConfig)
if config.GetGlobalConfig().OOMUseTmpStorage {
config.GetGlobalConfig().UpdateTempStoragePath()
err := disk.InitializeTempDir()
terror.MustNil(err)
checkTempStorageQuota()
}
setGlobalVars()
setCPUAffinity()
setupLog()
setHeapProfileTracker()
setupTracing() // Should before createServer and after setup config.
printInfo()
setupBinlogClient()
setupMetrics()
createStoreAndDomain()
createServer()
signal.SetupSignalHandler(serverShutdown)
runServer()
cleanup()
syncLog()
}
func exit() {
syncLog()
os.Exit(0)
}
func syncLog() {
if err := log.Sync(); err != nil {
fmt.Fprintln(os.Stderr, "sync log err:", err)
os.Exit(1)
}
}
func checkTempStorageQuota() {
// check capacity and the quota when OOMUseTmpStorage is enabled
c := config.GetGlobalConfig()
if c.TempStorageQuota < 0 {
// means unlimited, do nothing
} else {
capacityByte, err := storageSys.GetTargetDirectoryCapacity(c.TempStoragePath)
if err != nil {
log.Fatal(err.Error())
} else if capacityByte < uint64(c.TempStorageQuota) {
log.Fatal(fmt.Sprintf("value of [tmp-storage-quota](%d byte) exceeds the capacity(%d byte) of the [%s] directory", c.TempStorageQuota, capacityByte, c.TempStoragePath))
}
}
}
func setCPUAffinity() {
if affinityCPU == nil || len(*affinityCPU) == 0 {
return
}
var cpu []int
for _, af := range strings.Split(*affinityCPU, ",") {
af = strings.TrimSpace(af)
if len(af) > 0 {
c, err := strconv.Atoi(af)
if err != nil {
fmt.Fprintf(os.Stderr, "wrong affinity cpu config: %s", *affinityCPU)
exit()
}
cpu = append(cpu, c)
}
}
err := linux.SetAffinity(cpu)
if err != nil {
fmt.Fprintf(os.Stderr, "set cpu affinity failure: %v", err)
exit()
}
runtime.GOMAXPROCS(len(cpu))
metrics.MaxProcs.Set(float64(runtime.GOMAXPROCS(0)))
}
func setHeapProfileTracker() {
c := config.GetGlobalConfig()
d := parseDuration(c.Performance.MemProfileInterval)
go profile.HeapProfileForGlobalMemTracker(d)
}
func registerStores() {
err := kvstore.Register("tikv", tikv.Driver{})
terror.MustNil(err)
tikv.NewGCHandlerFunc = gcworker.NewGCWorker
err = kvstore.Register("mocktikv", mockstore.MockTiKVDriver{})
terror.MustNil(err)
err = kvstore.Register("unistore", mockstore.EmbedUnistoreDriver{})
terror.MustNil(err)
}
func registerMetrics() {
metrics.RegisterMetrics()
}
func createStoreAndDomain() {
cfg := config.GetGlobalConfig()
fullPath := fmt.Sprintf("%s://%s", cfg.Store, cfg.Path)
var err error
storage, err = kvstore.New(fullPath)
terror.MustNil(err)
// Bootstrap a session to load information schema.
dom, err = session.BootstrapSession(storage)
terror.MustNil(err)
}
func setupBinlogClient() {
cfg := config.GetGlobalConfig()
if !cfg.Binlog.Enable {
return
}
if cfg.Binlog.IgnoreError {
binloginfo.SetIgnoreError(true)
}
var (
client *pumpcli.PumpsClient
err error
)
securityOption := pd.SecurityOption{
CAPath: cfg.Security.ClusterSSLCA,
CertPath: cfg.Security.ClusterSSLCert,
KeyPath: cfg.Security.ClusterSSLKey,
}
if len(cfg.Binlog.BinlogSocket) == 0 {
client, err = pumpcli.NewPumpsClient(cfg.Path, cfg.Binlog.Strategy, parseDuration(cfg.Binlog.WriteTimeout), securityOption)
} else {
client, err = pumpcli.NewLocalPumpsClient(cfg.Path, cfg.Binlog.BinlogSocket, parseDuration(cfg.Binlog.WriteTimeout), securityOption)
}
terror.MustNil(err)
err = pumpcli.InitLogger(cfg.Log.ToLogConfig())
terror.MustNil(err)
binloginfo.SetPumpsClient(client)
log.Info("tidb-server", zap.Bool("create pumps client success, ignore binlog error", cfg.Binlog.IgnoreError))
}
// Prometheus push.
const zeroDuration = time.Duration(0)
// pushMetric pushes metrics in background.
func pushMetric(addr string, interval time.Duration) {
if interval == zeroDuration || len(addr) == 0 {
log.Info("disable Prometheus push client")
return
}
log.Info("start prometheus push client", zap.String("server addr", addr), zap.String("interval", interval.String()))
go prometheusPushClient(addr, interval)
}
// prometheusPushClient pushes metrics to Prometheus Pushgateway.
func prometheusPushClient(addr string, interval time.Duration) {
// TODO: TiDB does not have a unique name, so we use host+port to compose one.
job := "tidb"
pusher := push.New(addr, job)
pusher = pusher.Gatherer(prometheus.DefaultGatherer)
pusher = pusher.Grouping("instance", instanceName())
for {
err := pusher.Push()
if err != nil {
log.Error("could not push metrics to prometheus pushgateway", zap.String("err", err.Error()))
}
time.Sleep(interval)
}
}
func instanceName() string {
cfg := config.GetGlobalConfig()
hostname, err := os.Hostname()
if err != nil {
return "unknown"
}
return fmt.Sprintf("%s_%d", hostname, cfg.Port)
}
// parseDuration parses lease argument string.
func parseDuration(lease string) time.Duration {
dur, err := time.ParseDuration(lease)
if err != nil {
dur, err = time.ParseDuration(lease + "s")
}
if err != nil || dur < 0 {
log.Fatal("invalid lease duration", zap.String("lease", lease))
}
return dur
}
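// Behaviour sketch (illustrative): parseDuration("45s") and parseDuration("45")
// both yield 45 seconds, since a bare number gets "s" appended on the retry;
// an unparsable or negative lease terminates the process via log.Fatal.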
func flagBoolean(name string, defaultVal bool, usage string) *bool {
if !defaultVal {
// Fix #4125: golang does not print the default false value in usage, so we append it.
usage = fmt.Sprintf("%s (default false)", usage)
return flag.Bool(name, defaultVal, usage)
}
return flag.Bool(name, defaultVal, usage)
}
func reloadConfig(nc, c *config.Config) {
// Only some config items need to be reloaded explicitly.
// Others, such as OOMAction, are always read directly from the global config,
// e.g. config.GetGlobalConfig().OOMAction.
// These config items will become available naturally after the global config pointer
// is updated in function ReloadGlobalConfig.
if nc.Performance.ServerMemoryQuota != c.Performance.ServerMemoryQuota {
plannercore.PreparedPlanCacheMaxMemory.Store(nc.Performance.ServerMemoryQuota)
}
if nc.Performance.CrossJoin != c.Performance.CrossJoin {
plannercore.AllowCartesianProduct.Store(nc.Performance.CrossJoin)
}
if nc.Performance.FeedbackProbability != c.Performance.FeedbackProbability {
statistics.FeedbackProbability.Store(nc.Performance.FeedbackProbability)
}
if nc.Performance.QueryFeedbackLimit != c.Performance.QueryFeedbackLimit {
statistics.MaxQueryFeedbackCount.Store(int64(nc.Performance.QueryFeedbackLimit))
}
if nc.Performance.PseudoEstimateRatio != c.Performance.PseudoEstimateRatio {
statistics.RatioOfPseudoEstimate.Store(nc.Performance.PseudoEstimateRatio)
}
if nc.Performance.MaxProcs != c.Performance.MaxProcs {
runtime.GOMAXPROCS(int(nc.Performance.MaxProcs))
metrics.MaxProcs.Set(float64(runtime.GOMAXPROCS(0)))
}
if nc.TiKVClient.StoreLimit != c.TiKVClient.StoreLimit {
storeutil.StoreLimit.Store(nc.TiKVClient.StoreLimit)
}
if nc.PreparedPlanCache.Enabled != c.PreparedPlanCache.Enabled {
plannercore.SetPreparedPlanCache(nc.PreparedPlanCache.Enabled)
}
if nc.Log.Level != c.Log.Level {
if err := logutil.SetLevel(nc.Log.Level); err != nil {
logutil.BgLogger().Error("update log level error", zap.Error(err))
}
}
}
// overrideConfig considers command arguments and overrides some config items in the Config.
func overrideConfig(cfg *config.Config) {
actualFlags := make(map[string]bool)
flag.Visit(func(f *flag.Flag) {
actualFlags[f.Name] = true
})
// Base
if actualFlags[nmHost] {
cfg.Host = *host
}
if actualFlags[nmAdvertiseAddress] {
cfg.AdvertiseAddress = *advertiseAddress
}
if len(cfg.AdvertiseAddress) == 0 && cfg.Host == "0.0.0.0" {
cfg.AdvertiseAddress = util.GetLocalIP()
}
if len(cfg.AdvertiseAddress) == 0 {
cfg.AdvertiseAddress = cfg.Host
}
var err error
if actualFlags[nmPort] {
var p int
p, err = strconv.Atoi(*port)
terror.MustNil(err)
cfg.Port = uint(p)
}
if actualFlags[nmCors] {
fmt.Println(cors)
cfg.Cors = *cors
}
if actualFlags[nmStore] {
cfg.Store = *store
}
if actualFlags[nmStorePath] {
cfg.Path = *storePath
}
if actualFlags[nmSocket] {
cfg.Socket = *socket
}
if actualFlags[nmEnableBinlog] {
cfg.Binlog.Enable = *enableBinlog
}
if actualFlags[nmRunDDL] {
cfg.RunDDL = *runDDL
}
if actualFlags[nmDdlLease] {
cfg.Lease = *ddlLease
}
if actualFlags[nmTokenLimit] {
cfg.TokenLimit = uint(*tokenLimit)
}
if actualFlags[nmPluginLoad] {
cfg.Plugin.Load = *pluginLoad
}
if actualFlags[nmPluginDir] {
cfg.Plugin.Dir = *pluginDir
}
if actualFlags[nmRequireSecureTransport] {
cfg.Security.RequireSecureTransport = *requireTLS
}
if actualFlags[nmRepairMode] {
cfg.RepairMode = *repairMode
}
if actualFlags[nmRepairList] {
if cfg.RepairMode {
cfg.RepairTableList = stringToList(*repairList)
}
}
// Log
if actualFlags[nmLogLevel] {
cfg.Log.Level = *logLevel
}
if actualFlags[nmLogFile] {
cfg.Log.File.Filename = *logFile
}
if actualFlags[nmLogSlowQuery] {
cfg.Log.SlowQueryFile = *logSlowQuery
}
// Status
if actualFlags[nmReportStatus] {
cfg.Status.ReportStatus = *reportStatus
}
if actualFlags[nmStatusHost] {
cfg.Status.StatusHost = *statusHost
}
if actualFlags[nmStatusPort] {
var p int
p, err = strconv.Atoi(*statusPort)
terror.MustNil(err)
cfg.Status.StatusPort = uint(p)
}
if actualFlags[nmMetricsAddr] {
cfg.Status.MetricsAddr = *metricsAddr
}
if actualFlags[nmMetricsInterval] {
cfg.Status.MetricsInterval = *metricsInterval
}
// PROXY Protocol
if actualFlags[nmProxyProtocolNetworks] {
cfg.ProxyProtocol.Networks = *proxyProtocolNetworks
}
if actualFlags[nmProxyProtocolHeaderTimeout] {
cfg.ProxyProtocol.HeaderTimeout = *proxyProtocolHeaderTimeout
}
}
func setGlobalVars() {
cfg := config.GetGlobalConfig()
// Disable automaxprocs log
nopLog := func(string, ...interface{}) {}
_, err := maxprocs.Set(maxprocs.Logger(nopLog))
terror.MustNil(err)
// We should respect the user's settings in the config file.
// The default value of MaxProcs is 0, runtime.GOMAXPROCS(0) is no-op.
runtime.GOMAXPROCS(int(cfg.Performance.MaxProcs))
metrics.MaxProcs.Set(float64(runtime.GOMAXPROCS(0)))
ddlLeaseDuration := parseDuration(cfg.Lease)
session.SetSchemaLease(ddlLeaseDuration)
statsLeaseDuration := parseDuration(cfg.Performance.StatsLease)
session.SetStatsLease(statsLeaseDuration)
indexUsageSyncLeaseDuration := parseDuration(cfg.Performance.IndexUsageSyncLease)
session.SetIndexUsageSyncLease(indexUsageSyncLeaseDuration)
bindinfo.Lease = parseDuration(cfg.Performance.BindInfoLease)
domain.RunAutoAnalyze = cfg.Performance.RunAutoAnalyze
statistics.FeedbackProbability.Store(cfg.Performance.FeedbackProbability)
statistics.MaxQueryFeedbackCount.Store(int64(cfg.Performance.QueryFeedbackLimit))
statistics.RatioOfPseudoEstimate.Store(cfg.Performance.PseudoEstimateRatio)
ddl.RunWorker = cfg.RunDDL
if cfg.SplitTable {
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
}
plannercore.AllowCartesianProduct.Store(cfg.Performance.CrossJoin)
privileges.SkipWithGrant = cfg.Security.SkipGrantTable
kv.TxnTotalSizeLimit = cfg.Performance.TxnTotalSizeLimit
if cfg.Performance.TxnEntrySizeLimit > 120*1024*1024 {
log.Fatal("cannot set txn entry size limit larger than 120M")
}
kv.TxnEntrySizeLimit = cfg.Performance.TxnEntrySizeLimit
priority := mysql.Str2Priority(cfg.Performance.ForcePriority)
variable.ForcePriority = int32(priority)
variable.SetSysVar(variable.TiDBForcePriority, mysql.Priority2Str[priority])
variable.SetSysVar(variable.TiDBOptDistinctAggPushDown, variable.BoolToIntStr(cfg.Performance.DistinctAggPushDown))
variable.SetSysVar(variable.TIDBMemQuotaQuery, strconv.FormatInt(cfg.MemQuotaQuery, 10))
variable.SetSysVar(variable.TIDBMemQuotaStatistics, strconv.FormatInt(cfg.MemQuotaStatistics, 10))
variable.SetSysVar("lower_case_table_names", strconv.Itoa(cfg.LowerCaseTableNames))
variable.SetSysVar(variable.LogBin, variable.BoolToIntStr(config.GetGlobalConfig().Binlog.Enable))
variable.SetSysVar(variable.Port, fmt.Sprintf("%d", cfg.Port))
variable.SetSysVar(variable.Socket, cfg.Socket)
variable.SetSysVar(variable.DataDir, cfg.Path)
variable.SetSysVar(variable.TiDBSlowQueryFile, cfg.Log.SlowQueryFile)
variable.SetSysVar(variable.TiDBIsolationReadEngines, strings.Join(cfg.IsolationRead.Engines, ", "))
// For the CI environment we enable prepare-plan-cache by default.
plannercore.SetPreparedPlanCache(config.CheckTableBeforeDrop || cfg.PreparedPlanCache.Enabled)
if plannercore.PreparedPlanCacheEnabled() {
plannercore.PreparedPlanCacheCapacity = cfg.PreparedPlanCache.Capacity
plannercore.PreparedPlanCacheMemoryGuardRatio = cfg.PreparedPlanCache.MemoryGuardRatio
if plannercore.PreparedPlanCacheMemoryGuardRatio < 0.0 || plannercore.PreparedPlanCacheMemoryGuardRatio > 1.0 {
plannercore.PreparedPlanCacheMemoryGuardRatio = 0.1
}
plannercore.PreparedPlanCacheMaxMemory.Store(cfg.Performance.ServerMemoryQuota)
total, err := memory.MemTotal()
terror.MustNil(err)
if plannercore.PreparedPlanCacheMaxMemory.Load() > total || plannercore.PreparedPlanCacheMaxMemory.Load() <= 0 {
plannercore.PreparedPlanCacheMaxMemory.Store(total)
}
}
atomic.StoreUint64(&tikv.CommitMaxBackoff, uint64(parseDuration(cfg.TiKVClient.CommitTimeout).Seconds()*1000))
tikv.RegionCacheTTLSec = int64(cfg.TiKVClient.RegionCacheTTL)
domainutil.RepairInfo.SetRepairMode(cfg.RepairMode)
domainutil.RepairInfo.SetRepairTableList(cfg.RepairTableList)
c := config.GetGlobalConfig()
executor.GlobalDiskUsageTracker.SetBytesLimit(c.TempStorageQuota)
if c.Performance.ServerMemoryQuota < 1 {
// A ServerMemoryQuota of 0 means memory usage is unlimited
executor.GlobalMemoryUsageTracker.SetBytesLimit(-1)
} else {
executor.GlobalMemoryUsageTracker.SetBytesLimit(int64(c.Performance.ServerMemoryQuota))
}
kvcache.GlobalLRUMemUsageTracker.AttachToGlobalTracker(executor.GlobalMemoryUsageTracker)
t, err := time.ParseDuration(cfg.TiKVClient.StoreLivenessTimeout)
if err != nil {
logutil.BgLogger().Fatal("invalid duration value for store-liveness-timeout",
zap.String("currentValue", config.GetGlobalConfig().TiKVClient.StoreLivenessTimeout))
}
tikv.StoreLivenessTimeout = t
parsertypes.TiDBStrictIntegerDisplayWidth = config.GetGlobalConfig().DeprecateIntegerDisplayWidth
}
func setupLog() {
cfg := config.GetGlobalConfig()
err := logutil.InitZapLogger(cfg.Log.ToLogConfig())
terror.MustNil(err)
err = logutil.InitLogger(cfg.Log.ToLogConfig())
terror.MustNil(err)
if len(os.Getenv("GRPC_DEBUG")) > 0 {
grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 999))
} else {
grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, os.Stderr))
}
// trigger internal http(s) client init.
util.InternalHTTPClient()
}
func printInfo() {
// Make sure the TiDB info is always printed.
level := log.GetLevel()
log.SetLevel(zap.InfoLevel)
printer.PrintTiDBInfo()
log.SetLevel(level)
}
func createServer() {
cfg := config.GetGlobalConfig()
driver := server.NewTiDBDriver(storage)
var err error
svr, err = server.NewServer(cfg, driver)
// Both domain and storage have started, so we have to clean them up before exiting.
terror.MustNil(err, closeDomainAndStorage)
svr.SetDomain(dom)
go dom.ExpensiveQueryHandle().SetSessionManager(svr).Run()
dom.InfoSyncer().SetSessionManager(svr)
}
func serverShutdown(isgraceful bool) {
if isgraceful {
graceful = true
}
svr.Close()
}
func setupMetrics() {
cfg := config.GetGlobalConfig()
// Enable the mutex profile, sampling 1/10 of mutex blocking events.
runtime.SetMutexProfileFraction(10)
systimeErrHandler := func() {
metrics.TimeJumpBackCounter.Inc()
}
callBackCount := 0
successCallBack := func() {
callBackCount++
// The monitor calls this back once per second; we increase metrics.KeepAliveCounter every 5s.
if callBackCount >= 5 {
callBackCount = 0
metrics.KeepAliveCounter.Inc()
}
}
go systimemon.StartMonitor(time.Now, systimeErrHandler, successCallBack)
pushMetric(cfg.Status.MetricsAddr, time.Duration(cfg.Status.MetricsInterval)*time.Second)
}
func setupTracing() {
cfg := config.GetGlobalConfig()
tracingCfg := cfg.OpenTracing.ToTracingConfig()
tracingCfg.ServiceName = "TiDB"
tracer, _, err := tracingCfg.NewTracer()
if err != nil {
log.Fatal("setup jaeger tracer failed", zap.String("error message", err.Error()))
}
opentracing.SetGlobalTracer(tracer)
}
func runServer() {
err := svr.Run()
terror.MustNil(err)
}
func closeDomainAndStorage() {
atomic.StoreUint32(&tikv.ShuttingDown, 1)
dom.Close()
err := storage.Close()
terror.Log(errors.Trace(err))
}
func cleanup() {
if graceful {
svr.GracefulDown(context.Background(), nil)
} else {
svr.TryGracefulDown()
}
plugin.Shutdown(context.Background())
closeDomainAndStorage()
disk.CleanUp()
}
func stringToList(repairString string) []string {
if len(repairString) <= 0 {
return []string{}
}
if repairString[0] == '[' && repairString[len(repairString)-1] == ']' {
repairString = repairString[1 : len(repairString)-1]
}
return strings.FieldsFunc(repairString, func(r rune) bool {
return r == ',' || r == ' ' || r == '"'
})
}
|
[
"\"GRPC_DEBUG\""
] |
[] |
[
"GRPC_DEBUG"
] |
[]
|
["GRPC_DEBUG"]
|
go
| 1 | 0 | |
src/helper/index.py
|
# coding=utf-8
import requests
import os
def handler(event, context):
url = os.environ['KEEP_WARM_FC_URL']
method = os.environ['KEEP_WARM_FC_METHOD']
res = requests.request(method, url)
print(res.status_code)
|
[] |
[] |
[
"KEEP_WARM_FC_URL",
"KEEP_WARM_FC_METHOD"
] |
[]
|
["KEEP_WARM_FC_URL", "KEEP_WARM_FC_METHOD"]
|
python
| 2 | 0 | |
hyperopt/mongoexp.py
|
"""
Mongodb-based Trials Object
===========================
Components involved:
- mongo
e.g. mongod ...
- driver
e.g. hyperopt-mongo-search mongo://address bandit_json bandit_algo_json
- worker
e.g. hyperopt-mongo-worker --loop mongo://address
Mongo
=====
Mongo (daemon process mongod) is used for IPC between the driver and worker.
Configure it as you like, so that hyperopt-mongo-search can communicate with it.
I think there is some support in this file for an ssh+mongo connection type.
The experiment uses the following collections for IPC:
* jobs - documents of a standard form used to store suggested trials and their
results. These documents have keys:
* spec : subdocument returned by bandit_algo.suggest
* exp_key: an identifier of which driver suggested this trial
* cmd: a tuple (protocol, ...) identifying bandit.evaluate
* state: 0, 1, 2, 3 for job state (new, running, ok, fail)
* owner: None for new jobs, (hostname, pid) for started jobs
* book_time: time a job was reserved
* refresh_time: last time the process running the job checked in
* result: the subdocument returned by bandit.evaluate
* error: for jobs of state 3, a reason for failure.
* logs: a dict of sequences of strings received by ctrl object
* info: info messages
* warn: warning messages
* error: error messages
* fs - a gridfs storage collection (used for pickling)
* drivers - documents describing drivers. These are used to prevent two drivers
from using the same exp_key simultaneously, and to attach saved states.
* exp_key
* workdir: [optional] path where workers should chdir to
Attachments:
* pkl: [optional] saved state of experiment class
* bandit_args_kwargs: [optional] pickled (clsname, args, kwargs) to
reconstruct bandit in worker processes
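For illustration only (every value below is made up), a job document that a
worker has just reserved might look roughly like this:
    {'exp_key': 'my_exp', 'state': 1, 'owner': ('host1', 12345),
     'book_time': <utc datetime>, 'refresh_time': <utc datetime>,
     'spec': {...}, 'cmd': ('domain_attachment', 'domain'),
     'result': {}, 'logs': {'info': [], 'warn': [], 'error': []}}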
The MongoJobs, and CtrlObj classes as well as the main_worker
method form the abstraction barrier around this database layout.
Worker
======
A worker looks up a job in a mongo database, maps that job document to a
runnable python object, calls that object, and writes the return value back to
the database.
A worker *reserves* a job by atomically identifying a document in the jobs
collection whose owner is None and whose state is 0, and setting the state to
1. If it fails to identify such a job, it loops with a random sleep interval
of a few seconds and polls the database.
If hyperopt-mongo-worker is called with a --loop argument then it goes back to
the database after finishing a job to identify and perform another one.
CtrlObj
-------
The worker allocates a CtrlObj and passes it to bandit.evaluate in addition to
the subdocument found at job['spec']. A bandit can use ctrl.info, ctrl.warn,
ctrl.error and so on like logger methods, and those messages will be written
to the mongo database (to job['logs']). They are not written synchronously
though, they are written when the bandit.evaluate function calls
ctrl.checkpoint().
Ctrl.checkpoint does several things:
* flushes logging messages to the database
* updates the refresh_time
* optionally updates the result subdocument
The main_worker routine calls Ctrl.checkpoint(rval) once after the
bandit.evaluate function has returned, before setting the state to 2 or 3 to
finalize the job in the database.
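As a rough sketch (the function below is illustrative, not part of this module),
a bandit.evaluate implementation can use the ctrl object like this:
    def evaluate(spec, ctrl):
        ctrl.info('starting evaluation')
        # ... compute something from spec ...
        ctrl.checkpoint()   # flush log messages and update refresh_time
        return {'loss': 0.5, 'status': 'ok'}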
"""
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
import copy
# import hashlib
import logging
import optparse
import os
# import shutil
import signal
import socket
import subprocess
import sys
import time
import urllib.parse
import warnings
import numpy
import pymongo
import gridfs
from bson import SON
from .base import JOB_STATES
from .base import (JOB_STATE_NEW, JOB_STATE_RUNNING, JOB_STATE_DONE,
JOB_STATE_ERROR)
from .base import Trials
from .base import InvalidTrial
from .base import Ctrl
from .base import SONify
from .base import spec_from_misc
from .utils import coarse_utcnow
from .utils import fast_isin
from .utils import get_most_recent_inds
from .utils import json_call
from .utils import working_dir, temp_dir
import six
from six.moves import map
from six.moves import range
__authors__ = ["James Bergstra", "Dan Yamins"]
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
standard_library.install_aliases()
logger = logging.getLogger(__name__)
try:
import dill as pickler
except Exception as e:
logger.info('Failed to load dill, try installing dill via "pip install dill" for enhanced pickling support.')
import six.moves.cPickle as pickler
class OperationFailure(Exception):
"""Proxy that could be factored out if we also want to use CouchDB and
JobmanDB classes with this interface
"""
class Shutdown(Exception):
"""
Exception for telling mongo_worker loop to quit
"""
class WaitQuit(Exception):
"""
Exception for telling mongo_worker loop to quit
"""
class InvalidMongoTrial(InvalidTrial):
pass
class DomainSwapError(Exception):
"""Raised when the search program tries to change the bandit attached to
an experiment.
"""
class ReserveTimeout(Exception):
"""No job was reserved in the alotted time
"""
def read_pw():
return open(os.path.join(os.getenv('HOME'), ".hyperopt")).read()[:-1]
def parse_url(url, pwfile=None):
"""Unpacks a url of the form
protocol://[username[:pw]]@hostname[:port]/db/collection
:rtype: tuple of strings
:returns: protocol, username, password, hostname, port, dbname, collection
:note:
If the password is not given in the url but the username is, then
this function will read the password from file by calling
``open(pwfile).read()[:-1]``
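Example (hostname and credentials are hypothetical):
    parse_url('mongo://hyperopt:[email protected]:27017/foo_db/jobs')
    returns ('mongo', 'hyperopt', 'secret', 'db.example.com', 27017,
             'foo_db', 'jobs', None)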
"""
protocol = url[:url.find(':')]
ftp_url = 'ftp' + url[url.find(':'):]
# -- parse the string as if it were an ftp address
tmp = urllib.parse.urlparse(ftp_url)
query_params = urllib.parse.parse_qs(tmp.query)
logger.info('PROTOCOL %s' % protocol)
logger.info('USERNAME %s' % tmp.username)
logger.info('HOSTNAME %s' % tmp.hostname)
logger.info('PORT %s' % tmp.port)
logger.info('PATH %s' % tmp.path)
authdbname = None
if 'authSource' in query_params and len(query_params['authSource']):
authdbname = query_params['authSource'][-1]
logger.info('AUTH DB %s' % authdbname)
try:
_, dbname, collection = tmp.path.split('/')
except:
print("Failed to parse '%s'" % (str(tmp.path)), file=sys.stderr)
raise
logger.info('DB %s' % dbname)
logger.info('COLLECTION %s' % collection)
if tmp.password is None:
if (tmp.username is not None) and pwfile:
password = open(pwfile).read()[:-1]
else:
password = None
else:
password = tmp.password
if password is not None:
logger.info('PASS ***')
port = int(float(tmp.port)) # port has to be cast explicitly here.
return (protocol, tmp.username, password, tmp.hostname, port, dbname, collection, authdbname)
def connection_with_tunnel(dbname, host='localhost',
auth_dbname=None, port=27017,
ssh=False, user='hyperopt', pw=None):
if ssh:
local_port = numpy.random.randint(low=27500, high=28000)
# -- forward from local to remote machine
ssh_tunnel = subprocess.Popen(
['ssh', '-NTf', '-L',
'%i:%s:%i' % (local_port, '127.0.0.1', port),
host],
)
# -- give the subprocess time to set up
time.sleep(.5)
connection = pymongo.MongoClient('127.0.0.1', local_port,
document_class=SON, w=1, j=True)
else:
connection = pymongo.MongoClient(host, port, document_class=SON, w=1, j=True)
if user:
if not pw:
pw = read_pw()
if user == 'hyperopt' and not auth_dbname:
auth_dbname = 'admin'
connection[dbname].authenticate(user, pw, source=auth_dbname)
ssh_tunnel = None
# Note that the w=1 and j=True args to MongoClient above should:
# -- Ensure that changes are written to at least one server.
# -- Ensure that changes are written to the journal if there is one.
return connection, ssh_tunnel
def connection_from_string(s):
protocol, user, pw, host, port, db, collection, authdb = parse_url(s)
if protocol == 'mongo':
ssh = False
elif protocol in ('mongo+ssh', 'ssh+mongo'):
ssh = True
else:
raise ValueError('unrecognized protocol for MongoJobs', protocol)
connection, tunnel = connection_with_tunnel(
dbname=db,
ssh=ssh,
user=user,
pw=pw,
host=host,
port=port,
auth_dbname=authdb
)
return connection, tunnel, connection[db], connection[db][collection]
class MongoJobs(object):
"""
# Interface to a Jobs database structured like this
#
# Collections:
#
# db.jobs - structured {config_name, 'cmd', 'owner', 'book_time',
# 'refresh_time', 'state', 'exp_key', 'owner', 'result'}
# This is the collection that the worker nodes write to
#
# db.gfs - file storage via gridFS for all collections
#
"""
def __init__(self, db, jobs, gfs, conn, tunnel, config_name):
"""
Parameters
----------
db - Mongo Database (e.g. `Connection()[dbname]`)
database in which all job-related info is stored
jobs - Mongo Collection handle
collection within `db` to use for job arguments, return vals,
and various bookkeeping stuff and meta-data. Typically this is
`db['jobs']`
gfs - Mongo GridFS handle
GridFS is used to store attachments - binary blobs that don't fit
or are awkward to store in the `jobs` collection directly.
conn - Mongo Connection
Why we need to keep this, I'm not sure.
tunnel - something for ssh tunneling if you're doing that
See `connection_with_tunnel` for more info.
config_name - string
XXX: No idea what this is for, seems unimportant.
"""
self.db = db
self.jobs = jobs
self.gfs = gfs
self.conn = conn
self.tunnel = tunnel
self.config_name = config_name
# TODO: rename jobs -> coll throughout
coll = property(lambda s: s.jobs)
@classmethod
def alloc(cls, dbname, host='localhost',
auth_dbname='admin', port=27017,
jobs_coll='jobs', gfs_coll='fs', ssh=False, user=None, pw=None):
connection, tunnel = connection_with_tunnel(
dbname, host, auth_dbname, port, ssh, user, pw)
db = connection[dbname]
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, db[jobs_coll], gfs, connection, tunnel)
@classmethod
def new_from_connection_str(cls, conn_str, gfs_coll='fs', config_name='spec'):
connection, tunnel, db, coll = connection_from_string(conn_str)
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, coll, gfs, connection, tunnel, config_name)
def __iter__(self):
return self.jobs.find()
def __len__(self):
try:
return self.jobs.count()
except:
return 0
def create_jobs_indexes(self):
jobs = self.db.jobs
for k in ['exp_key', 'result.loss', 'book_time']:
jobs.create_index(k)
def create_drivers_indexes(self):
drivers = self.db.drivers
drivers.create_index('exp_key', unique=True)
def create_indexes(self):
self.create_jobs_indexes()
self.create_drivers_indexes()
def jobs_complete(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_DONE))
return c if cursor else list(c)
def jobs_error(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_ERROR))
return c if cursor else list(c)
def jobs_running(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(filter=dict(state=JOB_STATE_RUNNING)))
# TODO: mark some as MIA
rval = [r for r in rval if not r.get('MIA', False)]
return rval
def jobs_dead(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(filter=dict(state=JOB_STATE_RUNNING)))
# TODO: mark some as MIA
rval = [r for r in rval if r.get('MIA', False)]
return rval
def jobs_queued(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_NEW))
return c if cursor else list(c)
def insert(self, job):
"""Return a job dictionary by inserting the job dict into the database"""
try:
cpy = copy.deepcopy(job)
# -- this call adds an _id field to cpy
_id = self.jobs.insert(cpy, check_keys=True)
# -- so now we return the dict with the _id field
assert _id == cpy['_id']
return cpy
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# This was meant to make it easier to catch insertion errors
# in a generic way even if different databases were used.
# ... but there's just MongoDB so far, so kinda goofy.
raise OperationFailure(e)
def delete(self, job):
"""Delete job[s]"""
try:
self.jobs.remove(job)
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# see insert() code for rationale.
raise OperationFailure(e)
def delete_all(self, cond=None):
"""Delete all jobs and attachments"""
if cond is None:
cond = {}
try:
for d in self.jobs.find(filter=cond, projection=['_id', '_attachments']):
logger.info('deleting job %s' % d['_id'])
for name, file_id in d.get('_attachments', []):
try:
self.gfs.delete(file_id)
except gridfs.errors.NoFile:
logger.error('failed to remove attachment %s:%s' % (
name, file_id))
self.jobs.remove(d)
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# see insert() code for rationale.
raise OperationFailure(e)
def delete_all_error_jobs(self):
return self.delete_all(cond={'state': JOB_STATE_ERROR})
def reserve(self, host_id, cond=None, exp_key=None):
now = coarse_utcnow()
if cond is None:
cond = {}
else:
cond = copy.copy(cond) # copy is important, will be modified, but only the top-level
if exp_key is not None:
cond['exp_key'] = exp_key
# having an owner of None implies state==JOB_STATE_NEW, so this effectively
# acts as a filter to make sure that only new jobs get reserved.
if cond.get('owner') is not None:
raise ValueError('refusing to reserve owned job')
else:
cond['owner'] = None
cond['state'] = JOB_STATE_NEW # theoretically this is redundant, theoretically
try:
rval = self.jobs.find_and_modify(
cond,
{'$set':
{'owner': host_id,
'book_time': now,
'state': JOB_STATE_RUNNING,
'refresh_time': now,
}
},
new=True,
upsert=False)
except pymongo.errors.OperationFailure as e:
logger.error('Error during reserve_job: %s' % str(e))
rval = None
return rval
def refresh(self, doc):
self.update(doc, dict(refresh_time=coarse_utcnow()))
def update(self, doc, dct, collection=None, do_sanity_checks=True):
"""Return union of doc and dct, after making sure that dct has been
added to doc in `collection`.
This function does not modify either `doc` or `dct`.
"""
if collection is None:
collection = self.coll
dct = copy.deepcopy(dct)
if '_id' not in doc:
raise ValueError('doc must have an "_id" key to be updated')
if '_id' in dct:
if dct['_id'] != doc['_id']:
raise ValueError('cannot update the _id field')
del dct['_id']
if 'version' in dct:
if dct['version'] != doc['version']:
warnings.warn('Ignoring "version" field in update dictionary')
if 'version' in doc:
doc_query = dict(_id=doc['_id'], version=doc['version'])
dct['version'] = doc['version'] + 1
else:
doc_query = dict(_id=doc['_id'])
dct['version'] = 1
try:
# warning - if doc matches nothing then this function succeeds
# N.B. this matches *at most* one entry, and possibly zero
collection.update(
doc_query,
{'$set': dct},
upsert=False,
multi=False,)
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# see insert() code for rationale.
raise OperationFailure(e)
# update doc in-place to match what happened on the server side
doc.update(dct)
if do_sanity_checks:
server_doc = collection.find_one(
dict(_id=doc['_id'], version=doc['version']))
if server_doc is None:
raise OperationFailure('updated doc not found : %s'
% str(doc))
elif server_doc != doc:
if 0: # This is all commented out because it is tripping on the fact that
# str('a') != unicode('a').
# TODO: eliminate false alarms and catch real ones
mismatching_keys = []
for k, v in list(server_doc.items()):
if k in doc:
if doc[k] != v:
mismatching_keys.append((k, v, doc[k]))
else:
mismatching_keys.append((k, v, '<missing>'))
for k, v in list(doc.items()):
if k not in server_doc:
mismatching_keys.append((k, '<missing>', v))
raise OperationFailure('local and server doc documents are out of sync: %s' %
repr((doc, server_doc, mismatching_keys)))
return doc
def attachment_names(self, doc):
def as_str(name_id):
assert isinstance(name_id[0], six.string_types), name_id
return str(name_id[0])
return list(map(as_str, doc.get('_attachments', [])))
def set_attachment(self, doc, blob, name, collection=None):
"""Attach potentially large data string `blob` to `doc` by name `name`
blob must be a string
doc must have been saved in some collection (must have an _id), but not
necessarily the jobs collection.
name must be a string
Returns None
"""
# If there is already a file with the given name for this doc, then we will delete it
# after writing the new file
attachments = doc.get('_attachments', [])
name_matches = [a for a in attachments if a[0] == name]
# the filename is set to something so that fs.list() will display the file
new_file_id = self.gfs.put(blob, filename='%s_%s' % (doc['_id'], name))
logger.info('stored blob of %i bytes with id=%s and filename %s_%s' % (
len(blob), str(new_file_id), doc['_id'], name))
new_attachments = ([a for a in attachments if a[0] != name] +
[(name, new_file_id)])
try:
ii = 0
doc = self.update(doc, {'_attachments': new_attachments},
collection=collection)
# there is a database leak until we actually delete the files that
# are no longer pointed to by new_attachments
while ii < len(name_matches):
self.gfs.delete(name_matches[ii][1])
ii += 1
except:
while ii < len(name_matches):
logger.warning("Leak during set_attachment: old_file_id=%s" % (
name_matches[ii][1]))
ii += 1
raise
assert len([n for n in self.attachment_names(doc) if n == name]) == 1
# return new_file_id
def get_attachment(self, doc, name):
"""Retrieve data attached to `doc` by `attach_blob`.
Raises OperationFailure if `name` does not correspond to an attached blob.
Returns the blob as a string.
"""
attachments = doc.get('_attachments', [])
file_ids = [a[1] for a in attachments if a[0] == name]
if not file_ids:
raise OperationFailure('Attachment not found: %s' % name)
if len(file_ids) > 1:
raise OperationFailure('multiple name matches', (name, file_ids))
return self.gfs.get(file_ids[0]).read()
def delete_attachment(self, doc, name, collection=None):
attachments = doc.get('_attachments', [])
file_id = None
for i, a in enumerate(attachments):
if a[0] == name:
file_id = a[1]
break
if file_id is None:
raise OperationFailure('Attachment not found: %s' % name)
del attachments[i]
self.update(doc, {'_attachments': attachments}, collection=collection)
self.gfs.delete(file_id)
class MongoTrials(Trials):
"""Trials maps on to an entire mongo collection. It's basically a wrapper
around MongoJobs for now.
As a concession to performance, this object permits trial filtering based
on the exp_key, but I feel that's a hack. The case of `cmd` is similar--
the exp_key and cmd are semantically coupled.
WRITING TO THE DATABASE
-----------------------
The trials object is meant for *reading* a trials database. Writing
to a database is different enough from writing to an in-memory
collection that no attempt has been made to abstract away that
difference. If you want to update the documents within
a MongoTrials collection, then retrieve the `.handle` attribute (a
MongoJobs instance) and use lower-level methods, or pymongo's
interface directly. When you are done writing, call refresh() or
refresh_tids() to bring the MongoTrials up to date.
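A minimal usage sketch (connection string and exp_key are placeholders):
    trials = MongoTrials('mongo://localhost:27017/foo_db/jobs', exp_key='exp1')
    # low-level writes go through trials.handle (a MongoJobs) or pymongo directly
    trials.refresh()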
"""
asynchronous = True
def __init__(self, arg, exp_key=None, cmd=None, workdir=None,
refresh=True):
if isinstance(arg, MongoJobs):
self.handle = arg
else:
connection_string = arg
self.handle = MongoJobs.new_from_connection_str(connection_string)
self.handle.create_indexes()
self._exp_key = exp_key
self.cmd = cmd
self.workdir = workdir
if refresh:
self.refresh()
def view(self, exp_key=None, cmd=None, workdir=None, refresh=True):
rval = self.__class__(self.handle,
exp_key=self._exp_key if exp_key is None else exp_key,
cmd=self.cmd if cmd is None else cmd,
workdir=self.workdir if workdir is None else workdir,
refresh=refresh)
return rval
def refresh_tids(self, tids):
""" Sync documents with `['tid']` in the list of `tids` from the
database (not *to* the database).
Local trial documents whose tid is not in `tids` are not
affected by this call. Local trial documents whose tid is in `tids` may
be:
* *deleted* (if db no longer has corresponding document), or
* *updated* (if db has an updated document) or,
* *left alone* (if db document matches local one).
Additionally, if the db has a matching document, but there is no
local trial with a matching tid, then the db document will be
*inserted* into the local collection.
"""
exp_key = self._exp_key
if exp_key != None:
query = {'exp_key': exp_key}
else:
query = {}
t0 = time.time()
query['state'] = {'$ne': JOB_STATE_ERROR}
if tids is not None:
query['tid'] = {'$in': list(tids)}
orig_trials = getattr(self, '_trials', [])
_trials = orig_trials[:] # copy to make sure it doesn't get screwed up
if _trials:
db_data = list(self.handle.jobs.find(query,
projection=['_id', 'version']))
# -- pull down a fresh list of ids from mongo
if db_data:
# make numpy data arrays
db_data = numpy.rec.array([(x['_id'], int(x['version']))
for x in db_data],
names=['_id', 'version'])
db_data.sort(order=['_id', 'version'])
db_data = db_data[get_most_recent_inds(db_data)]
existing_data = numpy.rec.array([(x['_id'],
int(x['version'])) for x in _trials],
names=['_id', 'version'])
existing_data.sort(order=['_id', 'version'])
# which records are in db but not in existing, and vice versa
db_in_existing = fast_isin(db_data['_id'], existing_data['_id'])
existing_in_db = fast_isin(existing_data['_id'], db_data['_id'])
# filtering out out-of-date records
_trials = [_trials[_ind] for _ind in existing_in_db.nonzero()[0]]
# new data is what's in db that's not in existing
new_data = db_data[numpy.invert(db_in_existing)]
# having removed the new and out-of-date data,
# concentrating on data in db and existing for state changes
db_data = db_data[db_in_existing]
existing_data = existing_data[existing_in_db]
try:
assert len(db_data) == len(existing_data)
assert (existing_data['_id'] == db_data['_id']).all()
assert (existing_data['version'] <= db_data['version']).all()
except:
reportpath = os.path.join(os.getcwd(),
'hyperopt_refresh_crash_report_' +
str(numpy.random.randint(1e8)) + '.pkl')
logger.error('HYPEROPT REFRESH ERROR: writing error file to %s' % reportpath)
_file = open(reportpath, 'w')
pickler.dump({'db_data': db_data,
'existing_data': existing_data},
_file)
_file.close()
raise
same_version = existing_data['version'] == db_data['version']
_trials = [_trials[_ind] for _ind in same_version.nonzero()[0]]
version_changes = existing_data[numpy.invert(same_version)]
# actually get the updated records
update_ids = new_data['_id'].tolist() + version_changes['_id'].tolist()
num_new = len(update_ids)
update_query = copy.deepcopy(query)
update_query['_id'] = {'$in': update_ids}
updated_trials = list(self.handle.jobs.find(update_query))
_trials.extend(updated_trials)
else:
num_new = 0
_trials = []
else:
# this case is for performance, though should be able to be removed
# without breaking correctness.
_trials = list(self.handle.jobs.find(query))
if _trials:
_trials = [_trials[_i] for _i in get_most_recent_inds(_trials)]
num_new = len(_trials)
logger.debug('Refresh data download took %f seconds for %d ids' %
(time.time() - t0, num_new))
if tids is not None:
# -- If tids were given, then _trials only contains
# documents with matching tids. Here we augment these
# fresh matching documents, with our current ones whose
# tids don't match.
new_trials = _trials
tids_set = set(tids)
assert all(t['tid'] in tids_set for t in new_trials)
old_trials = [t for t in orig_trials if t['tid'] not in tids_set]
_trials = new_trials + old_trials
# -- reassign new trials to self, in order of increasing tid
jarray = numpy.array([j['_id'] for j in _trials])
jobsort = jarray.argsort()
self._trials = [_trials[_idx] for _idx in jobsort]
self._specs = [_trials[_idx]['spec'] for _idx in jobsort]
self._results = [_trials[_idx]['result'] for _idx in jobsort]
self._miscs = [_trials[_idx]['misc'] for _idx in jobsort]
def refresh(self):
self.refresh_tids(None)
def _insert_trial_docs(self, docs):
rval = []
for doc in docs:
rval.append(self.handle.jobs.insert(doc))
return rval
def count_by_state_unsynced(self, arg):
exp_key = self._exp_key
# TODO: consider searching by SON rather than dict
if isinstance(arg, int):
if arg not in JOB_STATES:
raise ValueError('invalid state', arg)
query = dict(state=arg)
else:
assert hasattr(arg, '__iter__')
states = list(arg)
assert all([x in JOB_STATES for x in states])
query = dict(state={'$in': states})
if exp_key != None:
query['exp_key'] = exp_key
rval = self.handle.jobs.find(query).count()
return rval
def delete_all(self, cond=None):
if cond is None:
cond = {}
else:
cond = dict(cond)
if self._exp_key:
cond['exp_key'] = self._exp_key
# -- remove all documents matching condition
self.handle.delete_all(cond)
gfs = self.handle.gfs
for filename in gfs.list():
try:
fdoc = gfs.get_last_version(filename=filename, **cond)
except gridfs.errors.NoFile:
continue
gfs.delete(fdoc._id)
self.refresh()
def new_trial_ids(self, N):
db = self.handle.db
# N.B. that the exp key is *not* used here. It was once, but it caused
# a nasty bug: tids were generated by a global experiment
# with exp_key=None, running a suggest() that introduced sub-experiments
# with exp_keys, which ran jobs that did result injection. The tids of
# injected jobs were sometimes unique within an experiment, and
# sometimes not. Hilarious!
#
# Solution: tids are generated to be unique across the db, not just
# within an exp_key.
#
# -- mongo docs say you can't upsert an empty document
query = {'a': 0}
doc = None
while doc is None:
doc = db.job_ids.find_and_modify(
query,
{'$inc': {'last_id': N}},
upsert=True)
if doc is None:
logger.warning('no last_id found, re-trying')
time.sleep(1.0)
lid = doc.get('last_id', 0)
return list(range(lid, lid + N))
def trial_attachments(self, trial):
"""
Attachments to a single trial (e.g. learned weights)
Returns a dictionary interface to the attachments.
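For example, given a MongoTrials instance `trials` and a trial document
`trial` (names and attachment key are illustrative):
    blob = trials.trial_attachments(trial)['weights']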
"""
# don't offer more here than in MongoCtrl
class Attachments(object):
def __contains__(_self, name):
return name in self.handle.attachment_names(doc=trial)
def __len__(_self):
return len(self.handle.attachment_names(doc=trial))
def __iter__(_self):
return iter(self.handle.attachment_names(doc=trial))
def __getitem__(_self, name):
try:
return self.handle.get_attachment(
doc=trial,
name=name)
except OperationFailure:
raise KeyError(name)
def __setitem__(_self, name, value):
self.handle.set_attachment(
doc=trial,
blob=value,
name=name,
collection=self.handle.db.jobs)
def __delitem__(_self, name):
raise NotImplementedError('delete trial_attachment')
def keys(self):
return [k for k in self]
def values(self):
return [self[k] for k in self]
def items(self):
return [(k, self[k]) for k in self]
return Attachments()
@property
def attachments(self):
"""
Attachments to a Trials set (such as bandit args).
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
gfs = self.handle.gfs
query = {}
if self._exp_key:
query['exp_key'] = self._exp_key
class Attachments(object):
def __iter__(_self):
if query:
# -- gfs.list does not accept query kwargs
# (at least, as of pymongo 2.4)
filenames = [fname
for fname in gfs.list()
if fname in _self]
else:
filenames = gfs.list()
return iter(filenames)
def __contains__(_self, name):
return gfs.exists(filename=name, **query)
def __getitem__(_self, name):
try:
rval = gfs.get_version(filename=name, **query).read()
return rval
except gridfs.NoFile:
raise KeyError(name)
def __setitem__(_self, name, value):
if gfs.exists(filename=name, **query):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
gfs.put(value, filename=name, encoding='utf-8', **query)
def __delitem__(_self, name):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
return Attachments()
class MongoWorker(object):
poll_interval = 3.0 # -- seconds
workdir = None
def __init__(self, mj,
poll_interval=poll_interval,
workdir=workdir,
exp_key=None,
logfilename='logfile.txt',
):
"""
mj - MongoJobs interface to jobs collection
poll_interval - seconds
workdir - string
exp_key - restrict reservations to this key
"""
self.mj = mj
self.poll_interval = poll_interval
self.workdir = workdir
self.exp_key = exp_key
self.logfilename = logfilename
def make_log_handler(self):
self.log_handler = logging.FileHandler(self.logfilename)
self.log_handler.setFormatter(
logging.Formatter(
fmt='%(levelname)s (%(name)s): %(message)s'))
self.log_handler.setLevel(logging.INFO)
def run_one(self,
host_id=None,
reserve_timeout=None,
erase_created_workdir=False,
):
if host_id == None:
host_id = '%s:%i' % (socket.gethostname(), os.getpid()),
job = None
start_time = time.time()
mj = self.mj
while job is None:
if (reserve_timeout and
(time.time() - start_time) > reserve_timeout):
raise ReserveTimeout()
job = mj.reserve(host_id, exp_key=self.exp_key)
if not job:
interval = (1 +
numpy.random.rand() *
(float(self.poll_interval) - 1.0))
logger.info('no job found, sleeping for %.1fs' % interval)
time.sleep(interval)
logger.debug('job found: %s' % str(job))
# -- don't let the cmd mess up our trial object
spec = spec_from_misc(job['misc'])
ctrl = MongoCtrl(
trials=MongoTrials(mj, exp_key=job['exp_key'], refresh=False),
read_only=False,
current_trial=job)
if self.workdir is None:
workdir = job['misc'].get('workdir', os.getcwd())
if workdir is None:
workdir = ''
workdir = os.path.join(workdir, str(job['_id']))
else:
workdir = self.workdir
workdir = os.path.abspath(os.path.expanduser(workdir))
try:
root_logger = logging.getLogger()
if self.logfilename:
self.make_log_handler()
root_logger.addHandler(self.log_handler)
cmd = job['misc']['cmd']
cmd_protocol = cmd[0]
try:
if cmd_protocol == 'cpickled fn':
worker_fn = pickler.loads(cmd[1])
elif cmd_protocol == 'call evaluate':
bandit = pickler.loads(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == 'token_load':
cmd_toks = cmd[1].split('.')
cmd_module = '.'.join(cmd_toks[:-1])
worker_fn = exec_import(cmd_module, cmd[1])
elif cmd_protocol == 'bandit_json evaluate':
bandit = json_call(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == 'driver_attachment':
# name = 'driver_attachment_%s' % job['exp_key']
blob = ctrl.trials.attachments[cmd[1]]
bandit_name, bandit_args, bandit_kwargs = pickler.loads(blob)
worker_fn = json_call(bandit_name,
args=bandit_args,
kwargs=bandit_kwargs).evaluate
elif cmd_protocol == 'domain_attachment':
blob = ctrl.trials.attachments[cmd[1]]
try:
domain = pickler.loads(blob)
except BaseException as e:
logger.info(
'Error while unpickling.')
raise
worker_fn = domain.evaluate
else:
raise ValueError('Unrecognized cmd protocol', cmd_protocol)
with temp_dir(workdir, erase_created_workdir), working_dir(workdir):
result = worker_fn(spec, ctrl)
result = SONify(result)
except BaseException as e:
# XXX: save exception to database, but if this fails, then
# at least raise the original traceback properly
logger.info('job exception: %s' % str(e))
ctrl.checkpoint()
mj.update(job,
{'state': JOB_STATE_ERROR,
'error': (str(type(e)), str(e))})
raise
finally:
if self.logfilename:
root_logger.removeHandler(self.log_handler)
logger.info('job finished: %s' % str(job['_id']))
attachments = result.pop('attachments', {})
for aname, aval in list(attachments.items()):
logger.info(
'mongoexp: saving attachment name=%s (%i bytes)' % (
aname, len(aval)))
ctrl.attachments[aname] = aval
ctrl.checkpoint(result)
mj.update(job, {'state': JOB_STATE_DONE})
class MongoCtrl(Ctrl):
"""
Attributes:
current_trial - current job document
jobs - MongoJobs object in which current_trial resides
read_only - True means don't change the db
"""
def __init__(self, trials, current_trial, read_only):
self.trials = trials
self.current_trial = current_trial
self.read_only = read_only
def debug(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.debug(*args, **kwargs)
def info(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.info(*args, **kwargs)
def warn(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.warn(*args, **kwargs)
def error(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.error(*args, **kwargs)
def checkpoint(self, result=None):
if not self.read_only:
handle = self.trials.handle
handle.refresh(self.current_trial)
if result is not None:
return handle.update(self.current_trial, dict(result=result))
@property
def attachments(self):
"""
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
return self.trials.trial_attachments(trial=self.current_trial)
@property
def set_attachment(self):
# XXX: Is there a better deprecation error?
raise RuntimeError(
'set_attachment deprecated. Use `self.attachments[name] = value`')
def exec_import(cmd_module, cmd):
worker_fn = None
exec('import %s; worker_fn = %s' % (cmd_module, cmd))
return worker_fn
def as_mongo_str(s):
if s.startswith('mongo://'):
return s
else:
return 'mongo://%s' % s
def main_worker_helper(options, args):
N = int(options.max_jobs)
if options.last_job_timeout is not None:
last_job_timeout = time.time() + float(options.last_job_timeout)
else:
last_job_timeout = None
def sighandler_shutdown(signum, frame):
logger.info('Caught signal %i, shutting down.' % signum)
raise Shutdown(signum)
def sighandler_wait_quit(signum, frame):
logger.info('Caught signal %i, shutting down.' % signum)
raise WaitQuit(signum)
is_windows = os.name == 'nt'
if not is_windows:
signal.signal(signal.SIGHUP, sighandler_shutdown)
signal.signal(signal.SIGUSR1, sighandler_wait_quit)
signal.signal(signal.SIGINT, sighandler_shutdown)
signal.signal(signal.SIGTERM, sighandler_shutdown)
if N > 1:
proc = None
cons_errs = 0
if last_job_timeout and time.time() > last_job_timeout:
logger.info("Exiting due to last_job_timeout")
return
while N and cons_errs < int(options.max_consecutive_failures):
try:
# recursive Popen, dropping N from the argv
# By using another process to run this job
# we protect ourselves from memory leaks, bad cleanup
# and other annoying details.
# The tradeoff is that a large dataset must be reloaded once for
# each subprocess.
sub_argv = [sys.argv[0],
'--poll-interval=%s' % options.poll_interval,
'--max-jobs=1',
'--mongo=%s' % options.mongo,
'--reserve-timeout=%s' % options.reserve_timeout]
if options.workdir is not None:
sub_argv.append('--workdir=%s' % options.workdir)
if options.exp_key is not None:
sub_argv.append('--exp-key=%s' % options.exp_key)
proc = subprocess.Popen(sub_argv)
retcode = proc.wait()
proc = None
except Shutdown:
# this is the normal way to stop the infinite loop (if originally N=-1)
if proc:
# proc.terminate() is only available as of 2.6
os.kill(proc.pid, signal.CTRL_C_EVENT if is_windows else signal.SIGTERM)
return proc.wait()
else:
return 0
except WaitQuit:
# -- sending SIGUSR1 to a looping process will cause it to
# break out of the loop after the current subprocess finishes
# normally.
if proc:
return proc.wait()
else:
return 0
if retcode != 0:
cons_errs += 1
else:
cons_errs = 0
N -= 1
logger.info("exiting with N=%i after %i consecutive exceptions" % (
N, cons_errs))
elif N == 1:
# XXX: the name of the jobs collection is a parameter elsewhere,
# so '/jobs' should not be hard-coded here
mj = MongoJobs.new_from_connection_str(
as_mongo_str(options.mongo) + '/jobs')
mworker = MongoWorker(mj,
float(options.poll_interval),
workdir=options.workdir,
exp_key=options.exp_key)
mworker.run_one(reserve_timeout=float(options.reserve_timeout))
else:
raise ValueError("N <= 0")
def main():
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
sys.exit(main_worker())
def main_worker():
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--exp-key",
dest='exp_key',
default=None,
metavar='str',
help="identifier for this workers's jobs")
parser.add_option("--last-job-timeout",
dest='last_job_timeout',
metavar='T',
default=None,
help="Do not reserve a job after T seconds have passed")
parser.add_option("--max-consecutive-failures",
dest="max_consecutive_failures",
metavar='N',
default=4,
help="stop if N consecutive jobs fail (default: 4)")
parser.add_option("--max-jobs",
dest='max_jobs',
default=sys.maxsize,
help="stop after running this many jobs (default: inf)")
parser.add_option("--mongo",
dest='mongo',
default='localhost/hyperopt',
help="<host>[:port]/<db> for IPC and job storage")
parser.add_option("--poll-interval",
dest='poll_interval',
metavar='N',
default=5,
help="check work queue every 1 < T < N seconds (default: 5")
parser.add_option("--reserve-timeout",
dest='reserve_timeout',
metavar='T',
default=120.0,
help="poll database for up to T seconds to reserve a job")
parser.add_option("--workdir",
dest="workdir",
default=None,
help="root workdir (default: load from mongo)",
metavar="DIR")
(options, args) = parser.parse_args()
if args:
parser.print_help()
return -1
return main_worker_helper(options, args)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
azure/src/test/java/com/microsoft/azure/management/TestKubernetesCluster.java
|
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*/
package com.microsoft.azure.management;
import com.google.common.base.Charsets;
import com.google.common.io.Files;
import com.microsoft.azure.management.containerservice.ContainerServiceVMSizeTypes;
import com.microsoft.azure.management.containerservice.KubernetesCluster;
import com.microsoft.azure.management.containerservice.KubernetesClusters;
import com.microsoft.azure.management.containerservice.KubernetesVersion;
import com.microsoft.azure.management.resources.fluentcore.arm.Region;
import com.microsoft.rest.serializer.JacksonAdapter;
import org.apache.commons.codec.binary.Base64;
import org.junit.Assert;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.interfaces.RSAPublicKey;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Properties;
import java.util.Set;
public class TestKubernetesCluster extends TestTemplate<KubernetesCluster, KubernetesClusters> {
@Override
public KubernetesCluster createResource(KubernetesClusters kubernetesClusters) throws Exception {
final String sshKeyData = this.getSshKey();
final String newName = "aks" + this.testId;
final String dnsPrefix = "dns" + newName;
final String agentPoolName = "ap" + newName;
String clientId = "clientId";
String secret = "secret";
// AKS can use a different Azure auth than the original client auth to access Azure services.
// Thus, set AZURE_AUTH_LOCATION_2 when you want to use one.
String envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION_2");
if (envSecondaryServicePrincipal == null || envSecondaryServicePrincipal.isEmpty() || !(new File(envSecondaryServicePrincipal).exists())) {
envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION");
}
try {
HashMap<String, String> credentialsMap = ParseAuthFile(envSecondaryServicePrincipal);
clientId = credentialsMap.get("clientId");
secret = credentialsMap.get("clientSecret");
} catch (Exception e) {}
KubernetesCluster resource = kubernetesClusters.define(newName)
.withRegion(Region.US_EAST)
.withNewResourceGroup()
.withLatestVersion()
.withRootUsername("aksadmin")
.withSshKey(sshKeyData)
.withServicePrincipalClientId(clientId)
.withServicePrincipalSecret(secret)
.defineAgentPool(agentPoolName)
.withVirtualMachineSize(ContainerServiceVMSizeTypes.STANDARD_D2_V2)
.withAgentPoolVirtualMachineCount(1)
.attach()
.withDnsPrefix(dnsPrefix)
.withTag("tag1", "value1")
.create();
Assert.assertNotNull("Container service not found.", resource.id());
Assert.assertEquals(Region.US_EAST, resource.region());
Assert.assertEquals("aksadmin", resource.linuxRootUsername());
Assert.assertEquals(1, resource.agentPools().size());
Assert.assertNotNull(resource.agentPools().get(agentPoolName));
Assert.assertEquals(1, resource.agentPools().get(agentPoolName).count());
Assert.assertEquals(ContainerServiceVMSizeTypes.STANDARD_D2_V2, resource.agentPools().get(agentPoolName).vmSize());
Assert.assertTrue(resource.tags().containsKey("tag1"));
resource = kubernetesClusters.getByResourceGroup(resource.resourceGroupName(), newName);
byte[] kubeConfigAdmin = resource.adminKubeConfigContent();
Assert.assertTrue(kubeConfigAdmin != null && kubeConfigAdmin.length > 0);
byte[] kubeConfigUser = resource.userKubeConfigContent();
Assert.assertTrue(kubeConfigUser != null && kubeConfigUser.length > 0);
return resource;
}
@Override
public KubernetesCluster updateResource(KubernetesCluster resource) throws Exception {
String agentPoolName = new ArrayList<>(resource.agentPools().keySet()).get(0);
// Modify existing container service
resource = resource.update()
.withAgentPoolVirtualMachineCount(agentPoolName, 5)
.withTag("tag2", "value2")
.withTag("tag3", "value3")
.withoutTag("tag1")
.apply();
Assert.assertEquals(1, resource.agentPools().size());
Assert.assertTrue("Agent pool count was not updated.", resource.agentPools().get(agentPoolName).count() == 5);
Assert.assertTrue(resource.tags().containsKey("tag2"));
Assert.assertTrue(!resource.tags().containsKey("tag1"));
return resource;
}
@Override
public void print(KubernetesCluster resource) {
System.out.println(new StringBuilder().append("Container Service: ").append(resource.id())
.append("Name: ").append(resource.name())
.append("\n\tResource group: ").append(resource.resourceGroupName())
.append("\n\tRegion: ").append(resource.region())
.append("\n\tTags: ").append(resource.tags())
.toString());
}
private String getSshKey() throws Exception {
KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA");
keyPairGenerator.initialize(2048);
KeyPair keyPair=keyPairGenerator.generateKeyPair();
RSAPublicKey publicKey=(RSAPublicKey)keyPair.getPublic();
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes().length);
dos.write("ssh-rsa".getBytes());
dos.writeInt(publicKey.getPublicExponent().toByteArray().length);
dos.write(publicKey.getPublicExponent().toByteArray());
dos.writeInt(publicKey.getModulus().toByteArray().length);
dos.write(publicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(
Base64.encodeBase64(byteOs.toByteArray()));
return "ssh-rsa " + publicKeyEncoded + " ";
}
/**
* Parse the Azure auth file into a hash map
* @param authFilename the Azure auth file location
* @return all fields in the Azure auth JSON
* @throws Exception exception
*/
private static HashMap<String, String> ParseAuthFile(String authFilename) throws Exception {
String content = Files.toString(new File(authFilename), Charsets.UTF_8).trim();
HashMap<String, String> auth = new HashMap<>();
if (isJsonBased(content)) {
auth = new JacksonAdapter().deserialize(content, auth.getClass());
} else {
Properties authSettings = new Properties();
FileInputStream credentialsFileStream = new FileInputStream(new File(authFilename));
authSettings.load(credentialsFileStream);
credentialsFileStream.close();
for (final String authName: authSettings.stringPropertyNames()) {
auth.put(authName, authSettings.getProperty(authName));
}
}
return auth;
}
private static boolean isJsonBased(String content) {
return content.startsWith("{");
}
}
|
[
"\"AZURE_AUTH_LOCATION_2\"",
"\"AZURE_AUTH_LOCATION\""
] |
[] |
[
"AZURE_AUTH_LOCATION_2",
"AZURE_AUTH_LOCATION"
] |
[]
|
["AZURE_AUTH_LOCATION_2", "AZURE_AUTH_LOCATION"]
|
java
| 2 | 0 | |
hood/wsgi.py
|
"""
WSGI config for hood project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hood.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/utils/utils.go
|
package utils
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"runtime"
"strings"
"time"
"github.com/containers/storage/pkg/parsers/kernel"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
)
var (
DefaultWaitTimeout = 90
OSReleasePath = "/etc/os-release"
ProcessOneCgroupPath = "/proc/1/cgroup"
)
// PodmanTestCommon contains common functions that will be updated later in
// the inheriting structs
type PodmanTestCommon interface {
MakeOptions(args []string, noEvents, noCache bool) []string
WaitForContainer() bool
WaitContainerReady(id string, expStr string, timeout int, step int) bool
}
// PodmanTest struct for command line options
type PodmanTest struct {
PodmanMakeOptions func(args []string, noEvents, noCache bool) []string
PodmanBinary string
ArtifactPath string
TempDir string
RemoteTest bool
RemotePodmanBinary string
RemoteSession *os.Process
RemoteSocket string
RemoteCommand *exec.Cmd
ImageCacheDir string
ImageCacheFS string
}
// PodmanSession wraps the gexec.session so we can extend it
type PodmanSession struct {
*Session
}
// HostOS is a simple struct for the test os
type HostOS struct {
Distribution string
Version string
Arch string
}
// MakeOptions assembles all podman options
func (p *PodmanTest) MakeOptions(args []string, noEvents, noCache bool) []string {
return p.PodmanMakeOptions(args, noEvents, noCache)
}
// PodmanAsUserBase execs podman as the given user. uid and gid are set for credentials usage. env is used
// to record the environment for debugging
func (p *PodmanTest) PodmanAsUserBase(args []string, uid, gid uint32, cwd string, env []string, noEvents, noCache bool, wrapper []string, extraFiles []*os.File) *PodmanSession {
var command *exec.Cmd
podmanOptions := p.MakeOptions(args, noEvents, noCache)
podmanBinary := p.PodmanBinary
if p.RemoteTest {
podmanBinary = p.RemotePodmanBinary
}
runCmd := append(wrapper, podmanBinary)
if p.RemoteTest {
podmanOptions = append([]string{"--remote", "--url", p.RemoteSocket}, podmanOptions...)
}
if env == nil {
fmt.Printf("Running: %s %s\n", strings.Join(runCmd, " "), strings.Join(podmanOptions, " "))
} else {
fmt.Printf("Running: (env: %v) %s %s\n", env, strings.Join(runCmd, " "), strings.Join(podmanOptions, " "))
}
if uid != 0 || gid != 0 {
pythonCmd := fmt.Sprintf("import os; import sys; uid = %d; gid = %d; cwd = '%s'; os.setgid(gid); os.setuid(uid); os.chdir(cwd) if len(cwd)>0 else True; os.execv(sys.argv[1], sys.argv[1:])", gid, uid, cwd)
runCmd = append(runCmd, podmanOptions...)
nsEnterOpts := append([]string{"-c", pythonCmd}, runCmd...)
command = exec.Command("python", nsEnterOpts...)
} else {
runCmd = append(runCmd, podmanOptions...)
command = exec.Command(runCmd[0], runCmd[1:]...)
}
if env != nil {
command.Env = env
}
if cwd != "" {
command.Dir = cwd
}
command.ExtraFiles = extraFiles
session, err := Start(command, GinkgoWriter, GinkgoWriter)
if err != nil {
Fail(fmt.Sprintf("unable to run podman command: %s\n%v", strings.Join(podmanOptions, " "), err))
}
return &PodmanSession{session}
}
// PodmanBase execs podman with the default environment.
func (p *PodmanTest) PodmanBase(args []string, noEvents, noCache bool) *PodmanSession {
return p.PodmanAsUserBase(args, 0, 0, "", nil, noEvents, noCache, nil, nil)
}
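// sketchRunAndWait is a hedged usage sketch, not part of the original test
// utilities: it only shows how the exec helpers above are typically combined
// with the wait helpers below. The image name and flags are illustrative
// assumptions.
func sketchRunAndWait(p *PodmanTest) bool {
	// Start a container in the background (arguments are hypothetical).
	run := p.PodmanBase([]string{"run", "-d", "alpine", "top"}, false, true)
	run.WaitWithDefaultTimeout()
	// Poll until at least one container is reported as running.
	return p.WaitForContainer()
}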
// WaitForContainer waits on a started container
func (p *PodmanTest) WaitForContainer() bool {
for i := 0; i < 10; i++ {
if p.NumberOfContainersRunning() > 0 {
return true
}
time.Sleep(1 * time.Second)
}
fmt.Printf("WaitForContainer(): timed out\n")
return false
}
// NumberOfContainersRunning returns an int of how many
// containers are currently running.
func (p *PodmanTest) NumberOfContainersRunning() int {
var containers []string
ps := p.PodmanBase([]string{"ps", "-q"}, false, true)
ps.WaitWithDefaultTimeout()
Expect(ps).Should(Exit(0))
for _, i := range ps.OutputToStringArray() {
if i != "" {
containers = append(containers, i)
}
}
return len(containers)
}
// NumberOfContainers returns an int of how many
// containers are currently defined.
func (p *PodmanTest) NumberOfContainers() int {
var containers []string
ps := p.PodmanBase([]string{"ps", "-aq"}, false, true)
ps.WaitWithDefaultTimeout()
Expect(ps.ExitCode()).To(Equal(0))
for _, i := range ps.OutputToStringArray() {
if i != "" {
containers = append(containers, i)
}
}
return len(containers)
}
// NumberOfPods returns an int of how many
// pods are currently defined.
func (p *PodmanTest) NumberOfPods() int {
var pods []string
ps := p.PodmanBase([]string{"pod", "ps", "-q"}, false, true)
ps.WaitWithDefaultTimeout()
Expect(ps.ExitCode()).To(Equal(0))
for _, i := range ps.OutputToStringArray() {
if i != "" {
pods = append(pods, i)
}
}
return len(pods)
}
// GetContainerStatus returns the containers state.
// This function assumes only one container is active.
func (p *PodmanTest) GetContainerStatus() string {
var podmanArgs = []string{"ps"}
podmanArgs = append(podmanArgs, "--all", "--format={{.Status}}")
session := p.PodmanBase(podmanArgs, false, true)
session.WaitWithDefaultTimeout()
return session.OutputToString()
}
// WaitContainerReady waits until the process or service inside the container has started and is ready to be used.
func (p *PodmanTest) WaitContainerReady(id string, expStr string, timeout int, step int) bool {
startTime := time.Now()
s := p.PodmanBase([]string{"logs", id}, false, true)
s.WaitWithDefaultTimeout()
for {
if time.Since(startTime) >= time.Duration(timeout)*time.Second {
fmt.Printf("Container %s is not ready in %ds", id, timeout)
return false
}
if strings.Contains(s.OutputToString(), expStr) {
return true
}
time.Sleep(time.Duration(step) * time.Second)
s = p.PodmanBase([]string{"logs", id}, false, true)
s.WaitWithDefaultTimeout()
}
}
// WaitForContainer is a wrapper function that accepts any struct inheriting from PodmanTest.
func WaitForContainer(p PodmanTestCommon) bool {
return p.WaitForContainer()
}
// WaitContainerReady is a wrapper function that accepts any struct inheriting from PodmanTest.
func WaitContainerReady(p PodmanTestCommon, id string, expStr string, timeout int, step int) bool {
return p.WaitContainerReady(id, expStr, timeout, step)
}
// OutputToString formats session output to string
func (s *PodmanSession) OutputToString() string {
if s == nil || s.Out == nil || s.Out.Contents() == nil {
return ""
}
fields := strings.Fields(string(s.Out.Contents()))
return strings.Join(fields, " ")
}
// OutputToStringArray returns the output as a []string
// where each array item is a line split by newline
func (s *PodmanSession) OutputToStringArray() []string {
var results []string
output := string(s.Out.Contents())
for _, line := range strings.Split(output, "\n") {
if line != "" {
results = append(results, line)
}
}
return results
}
// ErrorToString formats session stderr to string
func (s *PodmanSession) ErrorToString() string {
fields := strings.Fields(string(s.Err.Contents()))
return strings.Join(fields, " ")
}
// ErrorToStringArray returns the stderr output as a []string
// where each array item is a line split by newline
func (s *PodmanSession) ErrorToStringArray() []string {
output := string(s.Err.Contents())
return strings.Split(output, "\n")
}
// GrepString takes session output and behaves like grep. It returns a bool
// indicating whether the term matched and an array of the matching lines
func (s *PodmanSession) GrepString(term string) (bool, []string) {
var (
greps []string
matches bool
)
for _, line := range s.OutputToStringArray() {
if strings.Contains(line, term) {
matches = true
greps = append(greps, line)
}
}
return matches, greps
}
// ErrorGrepString takes session stderr output and behaves like grep. It returns a bool
// indicating whether the term matched and an array of the matching lines
func (s *PodmanSession) ErrorGrepString(term string) (bool, []string) {
var (
greps []string
matches bool
)
for _, line := range s.ErrorToStringArray() {
if strings.Contains(line, term) {
matches = true
greps = append(greps, line)
}
}
return matches, greps
}
// LineInOutputStartsWith returns true if a line in a
// session output starts with the supplied string
func (s *PodmanSession) LineInOutputStartsWith(term string) bool {
for _, i := range s.OutputToStringArray() {
if strings.HasPrefix(i, term) {
return true
}
}
return false
}
// LineInOutputContains returns true if a line in a
// session output contains the supplied string
func (s *PodmanSession) LineInOutputContains(term string) bool {
for _, i := range s.OutputToStringArray() {
if strings.Contains(i, term) {
return true
}
}
return false
}
// LineInOutputContainsTag returns true if a line in the
// session's output contains the repo-tag pair as returned
// by podman-images(1).
func (s *PodmanSession) LineInOutputContainsTag(repo, tag string) bool {
tagMap := tagOutputToMap(s.OutputToStringArray())
return tagMap[repo][tag]
}
// IsJSONOutputValid attempts to unmarshal the session buffer
// and if successful, returns true, else false
func (s *PodmanSession) IsJSONOutputValid() bool {
var i interface{}
if err := json.Unmarshal(s.Out.Contents(), &i); err != nil {
fmt.Println(err)
return false
}
return true
}
// WaitWithDefaultTimeout waits for the process to finish within DefaultWaitTimeout
func (s *PodmanSession) WaitWithDefaultTimeout() {
s.WaitWithTimeout(DefaultWaitTimeout)
}
// WaitWithTimeout waits for the process to finish within the given timeout (in seconds)
func (s *PodmanSession) WaitWithTimeout(timeout int) {
Eventually(s, timeout).Should(Exit())
os.Stdout.Sync()
os.Stderr.Sync()
fmt.Println("output:", s.OutputToString())
}
// CreateTempDirInTempDir creates a temp dir with the prefix podman_test
func CreateTempDirInTempDir() (string, error) {
return ioutil.TempDir("", "podman_test")
}
// SystemExec is used to exec a system command to check its exit code or output
func SystemExec(command string, args []string) *PodmanSession {
c := exec.Command(command, args...)
session, err := Start(c, GinkgoWriter, GinkgoWriter)
if err != nil {
Fail(fmt.Sprintf("unable to run command: %s %s", command, strings.Join(args, " ")))
}
session.Wait(DefaultWaitTimeout)
return &PodmanSession{session}
}
// StartSystemExec is used to start a system command without waiting for it to finish
func StartSystemExec(command string, args []string) *PodmanSession {
c := exec.Command(command, args...)
session, err := Start(c, GinkgoWriter, GinkgoWriter)
if err != nil {
Fail(fmt.Sprintf("unable to run command: %s %s", command, strings.Join(args, " ")))
}
return &PodmanSession{session}
}
// StringInSlice determines if a string is in a string slice, returns bool
func StringInSlice(s string, sl []string) bool {
for _, i := range sl {
if i == s {
return true
}
}
return false
}
// tagOutputToMap parses each string in imagesOutput and returns
// a map whose key is a repo, and value is another map whose keys
// are the tags found for that repo. Note that the first array item is
// skipped as it is considered to be the header.
func tagOutputToMap(imagesOutput []string) map[string]map[string]bool {
m := make(map[string]map[string]bool)
// iterate over output but skip the header
for _, i := range imagesOutput[1:] {
tmp := []string{}
for _, x := range strings.Split(i, " ") {
if x != "" {
tmp = append(tmp, x)
}
}
// podman-images(1) returns list-like output
// in the format of "Repository Tag [...]"
if len(tmp) < 2 {
continue
}
if m[tmp[0]] == nil {
m[tmp[0]] = map[string]bool{}
}
m[tmp[0]][tmp[1]] = true
}
return m
}
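// exampleTagOutputToMap is a hedged illustration, not part of the original
// helpers: the sample rows below are invented, but they show how the first
// line is treated as a header and each remaining line contributes a
// repository -> tag entry.
func exampleTagOutputToMap() map[string]map[string]bool {
	sample := []string{
		"REPOSITORY TAG IMAGE ID CREATED SIZE", // header, skipped
		"docker.io/library/alpine latest 961769676411 4 weeks ago 5.58 MB",
		"docker.io/library/busybox musl 733eb3059dce 5 weeks ago 1.41 MB",
	}
	return tagOutputToMap(sample)
}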
// GetHostDistributionInfo returns a struct with the host's distribution name, version and architecture
func GetHostDistributionInfo() HostOS {
	f, err := os.Open(OSReleasePath)
	if err != nil {
		return HostOS{}
	}
	defer f.Close()
l := bufio.NewScanner(f)
host := HostOS{}
host.Arch = runtime.GOARCH
for l.Scan() {
if strings.HasPrefix(l.Text(), "ID=") {
host.Distribution = strings.Replace(strings.TrimSpace(strings.Join(strings.Split(l.Text(), "=")[1:], "")), "\"", "", -1)
}
if strings.HasPrefix(l.Text(), "VERSION_ID=") {
host.Version = strings.Replace(strings.TrimSpace(strings.Join(strings.Split(l.Text(), "=")[1:], "")), "\"", "", -1)
}
}
return host
}
// IsKernelNewerThan compares the current kernel version to one provided. If
// the kernel is equal to or greater, returns true
func IsKernelNewerThan(version string) (bool, error) {
inputVersion, err := kernel.ParseRelease(version)
if err != nil {
return false, err
}
kv, err := kernel.GetKernelVersion()
if err != nil {
return false, err
}
// CompareKernelVersion compares two kernel.VersionInfo structs.
	// Returns -1 if a < b, 0 if a == b, 1 if a > b
result := kernel.CompareKernelVersion(*kv, *inputVersion)
if result >= 0 {
return true, nil
}
return false, nil
}
// IsCommandAvailable checks whether a command exists on the host
func IsCommandAvailable(command string) bool {
	check := exec.Command("bash", "-c", strings.Join([]string{"command -v", command}, " "))
	err := check.Run()
	return err == nil
}
// WriteJsonFile writes JSON data to a file, re-indented for readability
func WriteJsonFile(data []byte, filePath string) error {
	var jsonData map[string]interface{}
	if err := json.Unmarshal(data, &jsonData); err != nil {
		return err
	}
	formatJson, err := json.MarshalIndent(jsonData, "", " ")
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filePath, formatJson, 0644)
}
// Containerized checks whether the podman command is running inside a container
func Containerized() bool {
container := os.Getenv("container")
if container != "" {
return true
}
b, err := ioutil.ReadFile(ProcessOneCgroupPath)
if err != nil {
// shrug, if we cannot read that file, return false
return false
}
	if strings.Contains(string(b), "docker") {
return true
}
return false
}
func init() {
rand.Seed(GinkgoRandomSeed())
}
var randomLetters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
// RandomString returns a string of given length composed of random characters
func RandomString(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = randomLetters[rand.Intn(len(randomLetters))]
}
return string(b)
}
|
[
"\"container\""
] |
[] |
[
"container"
] |
[]
|
["container"]
|
go
| 1 | 0 | |
repository/coin.go
|
package repository
import (
"github.com/MinterTeam/explorer-genesis-uploader/domain"
"github.com/go-pg/pg/v10"
"os"
"sync"
)
type Coin struct {
cache *sync.Map
invCache *sync.Map
db *pg.DB
}
func NewCoinRepository(db *pg.DB) *Coin {
return &Coin{
cache: new(sync.Map),
invCache: new(sync.Map),
db: db,
}
}
func (r *Coin) SaveAll(coins []*domain.Coin) error {
_, err := r.db.Model(&coins).Insert()
for _, coin := range coins {
r.cache.Store(coin.Symbol, coin.ID)
r.invCache.Store(coin.ID, coin.Symbol)
}
return err
}
// FindIdBySymbol finds a coin id by its symbol, checking the in-memory cache first
func (r *Coin) FindIdBySymbol(symbol string) (uint64, error) {
//First look in the cache
id, ok := r.cache.Load(symbol)
if ok {
return id.(uint64), nil
}
coin := new(domain.Coin)
err := r.db.Model(coin).
Column("id").
Where("symbol = ?", symbol).
Select()
if err != nil {
return 0, err
}
return uint64(coin.ID), nil
}
// FindBySymbol finds a coin by its symbol
func (r *Coin) FindBySymbol(symbol string) (*domain.Coin, error) {
coin := new(domain.Coin)
err := r.db.Model(coin).
Where("symbol = ?", symbol).
Limit(1).
Select()
if err != nil {
return nil, err
}
return coin, nil
}
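// GetCoinsCount returns the number of coins, excluding the base coin configured via MINTER_BASE_COIN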
func (r *Coin) GetCoinsCount() (int, error) {
return r.db.Model((*domain.Coin)(nil)).Where("symbol != ?", os.Getenv("MINTER_BASE_COIN")).Count()
}
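// ChangeSequence restarts the coins_id_seq sequence so that newly inserted coins get ids starting at i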
func (r *Coin) ChangeSequence(i int) error {
_, err := r.db.Model().Exec(`
alter sequence coins_id_seq START WITH ?;
`, i)
return err
}
|
[
"\"MINTER_BASE_COIN\""
] |
[] |
[
"MINTER_BASE_COIN"
] |
[]
|
["MINTER_BASE_COIN"]
|
go
| 1 | 0 | |
src/main/java/dev/xframe/utils/XProcess.java
|
package dev.xframe.utils;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.List;
public class XProcess {
public static List<String> jprocesses() throws IOException {
return execCmd(jpsCmd());
}
public static String jpsCmd() {
String jhome = System.getenv("JAVA_HOME");
if(!XStrings.isEmpty(jhome)) {
return String.format("%s/bin/jps", jhome);
}
return "jps";
}
public static List<String> execCmd(String cmd) throws IOException {
BufferedReader reader = null;
try {
Process proc = Runtime.getRuntime().exec(cmd);
reader = new BufferedReader(new InputStreamReader(proc.getInputStream()));
List<String> lines = new ArrayList<String>();
String line;
while ((line = reader.readLine()) != null) {
lines.add(line);
}
return lines;
} finally {
if (reader != null) reader.close();
}
}
public static boolean isProcessRunning(String pidfile) {
try {
String pid = new String(Files.readAllBytes(Paths.get(pidfile)));
List<String> processes = jprocesses();
for (String process : processes) {
if(process.startsWith(pid)) return true;
}
} catch (IOException e) {
//ignore
}
return false;
}
public static void writeProcessIdFile(String pidfile) {
try {
Files.write(Paths.get(pidfile), currentProcessId().getBytes(), StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE);
} catch (IOException e) {
//ignore
}
}
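/** Returns the current JVM's process id, taken from the "pid@hostname" runtime name. */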
public static String currentProcessId() {
String name = ManagementFactory.getRuntimeMXBean().getName();
return name.substring(0, name.indexOf("@"));
}
}
|
[
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
java
| 1 | 0 | |
internal/config/config.go
|
package config
import (
"github.com/getsentry/sentry-go"
"github.com/spf13/viper"
"github.com/subosito/gotenv"
"os"
"time"
)
func InitMainConfig() {
setDefaultValuesForMainConfig()
// load config file
viper.SetConfigName(getMainConfigName()) // name of config file (without extension)
viper.SetConfigType("yaml")
viper.AddConfigPath(".")
_ = viper.ReadInConfig()
// Load env variables from .env
gotenv.Load()
if os.Getenv("ENV") != "test" {
setupSentry()
}
}
func setDefaultValuesForMainConfig() {
viper.SetDefault("stats.interval", 3600)
viper.SetDefault("stats.blocks.interval", 900)
viper.SetDefault("stats.uptime.interval", 3600)
viper.SetDefault("stats.balance.interval", 900)
viper.SetDefault("jsonrpc.lotus-miner.url", "http://localhost:2345/rpc/v0")
viper.SetDefault("jsonrpc.lotus-miner.token", "")
viper.SetDefault("jsonrpc.lotus-node.url", "http://localhost:1234/rpc/v0")
viper.SetDefault("jsonrpc.lotus-node.token", "")
viper.SetDefault("hactar.api-url", "https://api.hactar.app/api")
viper.SetDefault("log.level", "error")
viper.SetDefault("lotus.network-address", "t01")
}
// getMainConfigName builds the config file name depending on the ENV variable
func getMainConfigName() string {
configFileName := "config"
if env := os.Getenv("ENV"); env != "" {
configFileName = configFileName + "-" + env
}
return configFileName
}
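// exampleConfigName is a hedged sketch, not part of the original package: it
// only illustrates how getMainConfigName reacts to ENV. The "production"
// value is an assumption for demonstration purposes.
func exampleConfigName() string {
	os.Setenv("ENV", "production")
	defer os.Unsetenv("ENV")
	return getMainConfigName() // "config-production"
}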
func setupSentry() {
dsn := os.Getenv("SENTRY_DSN")
sentry.Init(sentry.ClientOptions{
Dsn: dsn,
Debug: false,
})
	// Note: this deferred Flush runs when setupSentry returns, flushing events buffered during initialization; call sentry.Flush again before the program terminates if needed.
defer sentry.Flush(2 * time.Second)
}
|
[
"\"ENV\"",
"\"ENV\"",
"\"SENTRY_DSN\""
] |
[] |
[
"ENV",
"SENTRY_DSN"
] |
[]
|
["ENV", "SENTRY_DSN"]
|
go
| 2 | 0 | |
tools/build_board_info.py
|
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2014 MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors)
#
# SPDX-License-Identifier: MIT
import json
import os
import subprocess
import sys
import sh
import base64
from datetime import date
from sh.contrib import git
sys.path.append("../docs")
import shared_bindings_matrix
sys.path.append("adabot")
import adabot.github_requests as github
SUPPORTED_PORTS = [
"atmel-samd",
"cxd56",
"esp32s2",
"litex",
"mimxrt10xx",
"nrf",
"raspberrypi",
"stm",
]
BIN = ("bin",)
UF2 = ("uf2",)
BIN_UF2 = ("bin", "uf2")
HEX = ("hex",)
HEX_UF2 = ("hex", "uf2")
SPK = ("spk",)
DFU = ("dfu",)
BIN_DFU = ("bin", "dfu")
# Default extensions
extension_by_port = {
"nrf": UF2,
"atmel-samd": UF2,
"stm": BIN,
"cxd56": SPK,
"mimxrt10xx": HEX_UF2,
"litex": DFU,
"esp32s2": BIN_UF2,
"raspberrypi": UF2,
}
# Per board overrides
extension_by_board = {
# samd
"arduino_mkr1300": BIN_UF2,
"arduino_mkrzero": BIN_UF2,
"arduino_nano_33_iot": BIN_UF2,
"arduino_zero": BIN_UF2,
"feather_m0_adalogger": BIN_UF2,
"feather_m0_basic": BIN_UF2,
"feather_m0_rfm69": BIN_UF2,
"feather_m0_rfm9x": BIN_UF2,
"uchip": BIN_UF2,
# nRF52840 dev kits that may not have UF2 bootloaders,
"makerdiary_nrf52840_mdk": HEX,
"makerdiary_nrf52840_mdk_usb_dongle": HEX_UF2,
"pca10056": BIN_UF2,
"pca10059": BIN_UF2,
"electronut_labs_blip": HEX,
# stm32
"meowbit_v121": UF2,
}
aliases_by_board = {
"circuitplayground_express": [
"circuitplayground_express_4h",
"circuitplayground_express_digikey_pycon2019",
],
"pybadge": ["edgebadge"],
"pyportal": ["pyportal_pynt"],
"gemma_m0": ["gemma_m0_pycon2018"],
"pewpew10": ["pewpew13"],
}
language_allow_list = set([
"ID",
"de_DE",
"en_US",
"en_x_pirate",
"es",
"fil",
"fr",
"it_IT",
"ja",
"nl",
"pl",
"pt_BR",
"sv",
"zh_Latn_pinyin",
])
def get_languages(list_all = False):
languages = set()
for f in os.scandir("../locale"):
if f.name.endswith(".po"):
languages.add(f.name[:-3])
if not list_all:
languages = languages & language_allow_list
return sorted(list(languages), key=str.casefold)
def get_board_mapping():
boards = {}
for port in SUPPORTED_PORTS:
board_path = os.path.join("../ports", port, "boards")
for board_path in os.scandir(board_path):
if board_path.is_dir():
board_files = os.listdir(board_path.path)
board_id = board_path.name
extensions = extension_by_port[port]
extensions = extension_by_board.get(board_path.name, extensions)
aliases = aliases_by_board.get(board_path.name, [])
boards[board_id] = {
"port": port,
"extensions": extensions,
"download_count": 0,
"aliases": aliases,
}
for alias in aliases:
boards[alias] = {
"port": port,
"extensions": extensions,
"download_count": 0,
"alias": True,
"aliases": [],
}
return boards
def get_version_info():
version = None
sha = git("rev-parse", "--short", "HEAD").stdout.decode("utf-8")
try:
version = git("describe", "--tags", "--exact-match").stdout.decode("utf-8").strip()
except sh.ErrorReturnCode_128:
# No exact match
pass
if "GITHUB_SHA" in os.environ:
sha = os.environ["GITHUB_SHA"]
if not version:
version = "{}-{}".format(date.today().strftime("%Y%m%d"), sha[:7])
return sha, version
def get_current_info():
response = github.get("/repos/adafruit/circuitpython-org/git/refs/heads/master")
if not response.ok:
print(response.text)
raise RuntimeError("cannot get master sha")
commit_sha = response.json()["object"]["sha"]
response = github.get(
"/repos/adafruit/circuitpython-org/contents/_data/files.json?ref=" + commit_sha
)
if not response.ok:
print(response.text)
raise RuntimeError("cannot get previous files.json")
response = response.json()
git_info = commit_sha, response["sha"]
current_list = json.loads(base64.b64decode(response["content"]).decode("utf-8"))
current_info = {}
for info in current_list:
current_info[info["id"]] = info
return git_info, current_info
def create_json(updated):
# Convert the dictionary to a list of boards. Liquid templates only handle arrays.
updated_list = []
all_ids = sorted(updated.keys())
for id in all_ids:
info = updated[id]
info["id"] = id
updated_list.append(info)
return json.dumps(updated_list, sort_keys=True, indent=1).encode("utf-8") + b"\n"
def create_pr(changes, updated, git_info, user):
commit_sha, original_blob_sha = git_info
branch_name = "new_release_" + changes["new_release"]
updated = create_json(updated)
# print(updated.decode("utf-8"))
pr_title = "Automated website update for release {}".format(changes["new_release"])
boards = ""
if changes["new_boards"]:
boards = "New boards:\n* " + "\n* ".join(changes["new_boards"])
languages = ""
if changes["new_languages"]:
languages = "New languages:\n* " + "\n* ".join(changes["new_languages"])
message = "Automated website update for release {} by Blinka.\n\n{}\n\n{}\n".format(
changes["new_release"], boards, languages
)
create_branch = {"ref": "refs/heads/" + branch_name, "sha": commit_sha}
response = github.post("/repos/{}/circuitpython-org/git/refs".format(user), json=create_branch)
if not response.ok and response.json()["message"] != "Reference already exists":
print("unable to create branch")
print(response.text)
return
update_file = {
"message": message,
"content": base64.b64encode(updated).decode("utf-8"),
"sha": original_blob_sha,
"branch": branch_name,
}
response = github.put(
"/repos/{}/circuitpython-org/contents/_data/files.json".format(user), json=update_file
)
if not response.ok:
print("unable to post new file")
print(response.text)
return
pr_info = {
"title": pr_title,
"head": user + ":" + branch_name,
"base": "master",
"body": message,
"maintainer_can_modify": True,
}
response = github.post("/repos/adafruit/circuitpython-org/pulls", json=pr_info)
if not response.ok:
print("unable to create pr")
print(response.text)
return
print(changes)
print(pr_info)
def print_active_user():
response = github.get("/user")
if response.ok:
user = response.json()["login"]
print("Logged in as {}".format(user))
return user
else:
print("Not logged in")
return None
def generate_download_info():
boards = {}
errors = []
new_tag = os.environ["RELEASE_TAG"]
changes = {"new_release": new_tag, "new_boards": [], "new_languages": []}
user = print_active_user()
sha, this_version = get_version_info()
git_info, current_info = get_current_info()
languages = get_languages()
support_matrix = shared_bindings_matrix.support_matrix_by_board(use_branded_name=False)
new_stable = "-" not in new_tag
previous_releases = set()
previous_languages = set()
# Delete the release we are replacing
for board in current_info:
info = current_info[board]
for version in info["versions"]:
previous_releases.add(version["version"])
previous_languages.update(version["languages"])
if version["stable"] == new_stable or (
new_stable and version["version"].startswith(this_version)
):
info["versions"].remove(version)
board_mapping = get_board_mapping()
for port in SUPPORTED_PORTS:
board_path = os.path.join("../ports", port, "boards")
for board_path in os.scandir(board_path):
if board_path.is_dir():
board_files = os.listdir(board_path.path)
board_id = board_path.name
board_info = board_mapping[board_id]
for alias in [board_id] + board_info["aliases"]:
alias_info = board_mapping[alias]
if alias not in current_info:
changes["new_boards"].append(alias)
current_info[alias] = {"downloads": 0, "versions": []}
new_version = {
"stable": new_stable,
"version": new_tag,
"modules": support_matrix[board_id],
"languages": languages,
"extensions": board_info["extensions"],
}
current_info[alias]["downloads"] = alias_info["download_count"]
current_info[alias]["versions"].append(new_version)
changes["new_languages"] = set(languages) - previous_languages
if changes["new_release"] and user:
create_pr(changes, current_info, git_info, user)
else:
print("No new release to update")
# print(create_json(current_info).decode("utf8"))
if __name__ == "__main__":
if "RELEASE_TAG" in os.environ and os.environ["RELEASE_TAG"]:
generate_download_info()
else:
print("skipping website update because this isn't a tag")
|
[] |
[] |
[
"RELEASE_TAG",
"GITHUB_SHA"
] |
[]
|
["RELEASE_TAG", "GITHUB_SHA"]
|
python
| 2 | 0 | |
app/routes/course.py
|
from fastapi import Depends, status, Request, HTTPException
from fastapi import APIRouter
from fastapi.security import HTTPBasic
from sqlalchemy.orm import Session
from typing import List
import os
from dotenv import load_dotenv
load_dotenv()
from ..auth.authentication import authenticate_admin, authenticate_webuser
security = HTTPBasic()
from ..schemas import course
from ..controllers import crud_courses
from ..config.database import get_db
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Course main server route listener
course_router = APIRouter(
prefix = os.getenv("API_URL") + '/courses',
tags=['courses']
)
@course_router.get("/", status_code = status.HTTP_200_OK)
async def get_courses(webuser: str = Depends(authenticate_webuser), skip: int = 0, limit: int = 200, db: Session = Depends(get_db)):
db_courses = await crud_courses.get_courses(db, skip=skip, limit=limit)
if db_courses:
return db_courses
raise HTTPException(
status_code=404,
detail="Courses not found",
headers={"WWW-Authenticate": "Basic"},
)
@course_router.get("/{id}", status_code = status.HTTP_200_OK)
async def get_course_by_id(id: int, webuser: str = Depends(authenticate_webuser), db: Session = Depends(get_db)):
db_course = await crud_courses.get_course_by_id(db, id=id)
if db_course is None:
raise HTTPException(
status_code=404,
detail="Course not found",
headers={"WWW-Authenticate": "Basic"},
)
return db_course
@course_router.get("/name/{name}", status_code = status.HTTP_200_OK)
async def get_course_by_filename(name: str, webuser: str = Depends(authenticate_webuser), db: Session = Depends(get_db)):
db_course = await crud_courses.get_course_by_name(db, name=name.upper())
if db_course is None:
raise HTTPException(
status_code=404,
detail="Course filename not found",
headers={"WWW-Authenticate": "Basic"},
)
return db_course
@course_router.post("/", status_code = status.HTTP_201_CREATED)
async def create_course(course: course.CourseCreate, administrator: str = Depends(authenticate_admin), db: Session = Depends(get_db)):
db_course = await crud_courses.get_course_by_name(db, name=course.name.upper())
if db_course:
raise HTTPException(
status_code=400,
detail="Course name already existing",
headers={"WWW-Authenticate": "Basic"},
)
return await crud_courses.create_course(db, course=course, creation_user=administrator)
@course_router.patch("/{id}", status_code = status.HTTP_202_ACCEPTED)
async def update_course(id: int, administrator: str = Depends(authenticate_admin), db: Session = Depends(get_db)):
return {'id': id}
@course_router.delete("/{id}", status_code = status.HTTP_205_RESET_CONTENT)
async def delete_course(id: int, administrator: str = Depends(authenticate_admin), db: Session = Depends(get_db)):
status = await crud_courses.delete_course_by_id(db, id=id)
db_course = await crud_courses.get_course_by_id(db, id=id)
if db_course is None:
return {'message': status}
raise HTTPException(
status_code=501,
detail="Course not deleted",
headers={"WWW-Authenticate": "Basic"},
)
@course_router.get("/{id}/sections", status_code = status.HTTP_200_OK)
async def get_course_sections_by_id(id: int, webuser: str = Depends(authenticate_webuser), db: Session = Depends(get_db)):
db_course = await crud_courses.get_course_by_id(db, id=id)
if db_course is None:
raise HTTPException(
status_code=404,
detail="Course not found",
headers={"WWW-Authenticate": "Basic"},
)
db_sections = await crud_courses.get_course_sections(db, id=db_course.id)
if db_sections:
return db_sections
raise HTTPException(
status_code=404,
detail="Course sections not found",
headers={"WWW-Authenticate": "Basic"},
)
|
[] |
[] |
[
"API_URL"
] |
[]
|
["API_URL"]
|
python
| 1 | 0 | |
x-pack/osquerybeat/ext/osquery-extension/pkg/tables/host_users.go
|
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.
package tables
import (
"context"
"github.com/osquery/osquery-go/plugin/table"
"github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/pkg/hostfs"
)
const (
passwdFile = "/etc/passwd"
)
func HostUsersColumns() []table.ColumnDefinition {
return []table.ColumnDefinition{
table.BigIntColumn("uid"),
table.BigIntColumn("gid"),
table.BigIntColumn("uid_signed"),
table.BigIntColumn("gid_signed"),
table.TextColumn("username"),
table.TextColumn("description"),
table.TextColumn("directory"),
table.TextColumn("shell"),
table.TextColumn("uuid"),
}
}
func GetHostUsersGenerateFunc() table.GenerateFunc {
fn := hostfs.GetPath(passwdFile)
return func(ctx context.Context, queryContext table.QueryContext) ([]map[string]string, error) {
return hostfs.ReadPasswd(fn)
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Craigslist_Clone.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
coap-gateway/service/client.go
|
package service
import (
"bytes"
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/plgd-dev/device/schema/resources"
"github.com/plgd-dev/go-coap/v2/message"
"github.com/plgd-dev/go-coap/v2/message/codes"
"github.com/plgd-dev/go-coap/v2/tcp"
"github.com/plgd-dev/go-coap/v2/tcp/message/pool"
"github.com/plgd-dev/hub/v2/coap-gateway/coapconv"
grpcClient "github.com/plgd-dev/hub/v2/grpc-gateway/client"
idEvents "github.com/plgd-dev/hub/v2/identity-store/events"
"github.com/plgd-dev/hub/v2/pkg/log"
kitNetGrpc "github.com/plgd-dev/hub/v2/pkg/net/grpc"
pkgJwt "github.com/plgd-dev/hub/v2/pkg/security/jwt"
"github.com/plgd-dev/hub/v2/pkg/sync/task/future"
"github.com/plgd-dev/hub/v2/resource-aggregate/commands"
"github.com/plgd-dev/hub/v2/resource-aggregate/events"
kitSync "github.com/plgd-dev/kit/v2/sync"
)
type authorizationContext struct {
DeviceID string
AccessToken string
UserID string
Expire time.Time
}
func (a *authorizationContext) GetUserID() string {
if a == nil {
return ""
}
return a.UserID
}
func (a *authorizationContext) GetDeviceID() string {
if a != nil {
return a.DeviceID
}
return ""
}
func (a *authorizationContext) GetAccessToken() string {
if a != nil {
return a.AccessToken
}
return ""
}
func (a *authorizationContext) IsValid() error {
if a == nil {
return fmt.Errorf("invalid authorization context")
}
if a.AccessToken == "" {
return fmt.Errorf("invalid access token")
}
if !a.Expire.IsZero() && time.Now().UnixNano() > a.Expire.UnixNano() {
return fmt.Errorf("token is expired")
}
return nil
}
func (a *authorizationContext) ToContext(ctx context.Context) context.Context {
return kitNetGrpc.CtxWithToken(ctx, a.GetAccessToken())
}
// Client holds the state of a single device connection
type Client struct {
server *Service
coapConn *tcp.ClientConn
tlsDeviceID string
resourceSubscriptions *kitSync.Map // [token]
exchangeCache *exchangeCache
refreshCache *refreshCache
mutex sync.Mutex
authCtx *authorizationContext
deviceSubscriber *grpcClient.DeviceSubscriber
deviceObserver *future.Future
closeEventSubscriptions func()
}
// newClient creates and initializes a client
func newClient(server *Service, coapConn *tcp.ClientConn, tlsDeviceID string) *Client {
return &Client{
server: server,
coapConn: coapConn,
tlsDeviceID: tlsDeviceID,
resourceSubscriptions: kitSync.NewMap(),
exchangeCache: NewExchangeCache(),
refreshCache: NewRefreshCache(),
}
}
func (client *Client) remoteAddrString() string {
return client.coapConn.RemoteAddr().String()
}
func (client *Client) Context() context.Context {
return client.coapConn.Context()
}
func (client *Client) cancelResourceSubscription(token string) (bool, error) {
s, ok := client.resourceSubscriptions.PullOut(token)
if !ok {
return false, nil
}
sub := s.(*resourceSubscription)
err := sub.Close()
if err != nil {
return false, err
}
return true, nil
}
// Callback executed when the Get response is received in the deviceObserver.
//
// This function is executed in the coap connection-goroutine; any operation on the connection (read, write, ...)
// would cause a deadlock. To avoid this problem the operation must be executed inside the taskQueue.
//
// The received notification is released by this function at the correct moment and must not be released
// by the caller.
func (client *Client) onGetResourceContent(ctx context.Context, deviceID, href string, notification *pool.Message) error {
cannotGetResourceContentError := func(deviceID, href string, err error) error {
return fmt.Errorf("cannot get resource /%v%v content: %w", deviceID, href, err)
}
notification.Hijack()
err := client.server.taskQueue.Submit(func() {
defer client.server.messagePool.ReleaseMessage(notification)
err2 := client.notifyContentChanged(deviceID, href, false, notification)
if err2 != nil {
		// cloud is unsynchronized against the device. To recover the cloud state, the client needs to reconnect to the cloud.
log.Error(cannotGetResourceContentError(deviceID, href, err2))
if err3 := client.Close(); err3 != nil {
log.Errorf("failed to close client connection on get resource /%v%v: %w", deviceID, href, err3)
}
}
if notification.Code() == codes.NotFound {
client.unpublishResourceLinks(client.getUserAuthorizedContext(ctx), []string{href}, nil)
}
})
if err != nil {
defer client.server.messagePool.ReleaseMessage(notification)
return cannotGetResourceContentError(deviceID, href, err)
}
return nil
}
// Callback executed when the Observe notification is received in the deviceObserver.
//
// This function is executed in the coap connection-goroutine; any operation on the connection (read, write, ...)
// would cause a deadlock. To avoid this problem the operation must be executed inside the taskQueue.
//
// The received notification is released by this function at the correct moment and must not be released
// by the caller.
func (client *Client) onObserveResource(ctx context.Context, deviceID, href string, batch bool, notification *pool.Message) error {
	cannotObserveResourceError := func(err error) error {
return fmt.Errorf("cannot handle resource observation: %w", err)
}
notification.Hijack()
err := client.server.taskQueue.Submit(func() {
defer client.server.messagePool.ReleaseMessage(notification)
err2 := client.notifyContentChanged(deviceID, href, batch, notification)
if err2 != nil {
		// cloud is unsynchronized against the device. To recover the cloud state, the client needs to reconnect to the cloud.
			log.Error(cannotObserveResourceError(err2))
if err3 := client.Close(); err3 != nil {
log.Errorf("failed to close client connection on resource /%v%v observation: %w", deviceID, href, err3)
}
}
if notification.Code() == codes.NotFound {
client.unpublishResourceLinks(client.getUserAuthorizedContext(notification.Context()), []string{href}, nil)
}
})
if err != nil {
defer client.server.messagePool.ReleaseMessage(notification)
		return cannotObserveResourceError(err)
}
return nil
}
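// submitNotification is a hedged sketch, not part of the original gateway
// code: it only restates the pattern described in the comments above. The
// pooled message is hijacked so the coap library does not release it, the
// work is handed to the task queue, and the message is released exactly once.
func (client *Client) submitNotification(notification *pool.Message, handle func(*pool.Message) error) error {
	notification.Hijack()
	err := client.server.taskQueue.Submit(func() {
		defer client.server.messagePool.ReleaseMessage(notification)
		if err2 := handle(notification); err2 != nil {
			log.Errorf("notification handler failed: %w", err2)
		}
	})
	if err != nil {
		// the queued task will never run, so release the hijacked message here
		client.server.messagePool.ReleaseMessage(notification)
		return err
	}
	return nil
}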
// Close closes coap connection
func (client *Client) Close() error {
err := client.coapConn.Close()
if err != nil {
return fmt.Errorf("cannot close client: %w", err)
}
return nil
}
func (client *Client) cancelResourceSubscriptions(wantWait bool) {
resourceSubscriptions := client.resourceSubscriptions.PullOutAll()
for _, v := range resourceSubscriptions {
o, ok := grpcClient.ToResourceSubscription(v, true)
if !ok {
continue
}
wait, err := o.Cancel()
if err != nil {
log.Errorf("cannot cancel resource subscription: %w", err)
} else if wantWait {
wait()
}
}
}
func (client *Client) CleanUp(resetAuthContext bool) *authorizationContext {
authCtx, _ := client.GetAuthorizationContext()
log.Debugf("cleanUp client %v for device %v", client.coapConn.RemoteAddr(), authCtx.GetDeviceID())
client.server.devicesStatusUpdater.Remove(client)
if err := client.closeDeviceObserver(client.Context()); err != nil {
log.Errorf("cleanUp error: failed to close observer for device %v: %w", authCtx.GetDeviceID(), err)
}
client.cancelResourceSubscriptions(false)
if err := client.closeDeviceSubscriber(); err != nil {
log.Errorf("cleanUp error: failed to close device %v subscription: %w", authCtx.GetDeviceID(), err)
}
client.unsubscribeFromDeviceEvents()
if resetAuthContext {
return client.SetAuthorizationContext(nil)
}
	// we cannot reset the authorizationContext because the token is still needed (e.g. for signOff)
return authCtx
}
// OnClose action when coap connection was closed.
func (client *Client) OnClose() {
authCtx, _ := client.GetAuthorizationContext()
log.Debugf("close client %v for device %v", client.coapConn.RemoteAddr(), authCtx.GetDeviceID())
oldAuthCtx := client.CleanUp(false)
if oldAuthCtx.GetDeviceID() != "" {
client.server.expirationClientCache.Delete(oldAuthCtx.GetDeviceID())
ctx, cancel := context.WithTimeout(context.Background(), client.server.config.APIs.COAP.KeepAlive.Timeout)
defer cancel()
_, err := client.server.raClient.UpdateDeviceMetadata(kitNetGrpc.CtxWithToken(ctx, oldAuthCtx.GetAccessToken()), &commands.UpdateDeviceMetadataRequest{
DeviceId: authCtx.GetDeviceID(),
Update: &commands.UpdateDeviceMetadataRequest_Status{
Status: &commands.ConnectionStatus{
Value: commands.ConnectionStatus_OFFLINE,
},
},
CommandMetadata: &commands.CommandMetadata{
Sequence: client.coapConn.Sequence(),
ConnectionId: client.remoteAddrString(),
},
})
if err != nil {
			// The device will still be reported as online; its state can be fixed by subsequent online/offline commands.
log.Errorf("DeviceID %v: cannot handle sign out: cannot update cloud device status: %w", oldAuthCtx.GetDeviceID(), err)
}
}
}
func (client *Client) SetAuthorizationContext(authCtx *authorizationContext) (oldDeviceID *authorizationContext) {
log.Debugf("Authorization context replaced for client %v, device %v, user %v", client.coapConn.RemoteAddr(), authCtx.GetDeviceID(), authCtx.GetUserID())
client.mutex.Lock()
defer client.mutex.Unlock()
oldAuthContext := client.authCtx
client.authCtx = authCtx
return oldAuthContext
}
func (client *Client) GetAuthorizationContext() (*authorizationContext, error) {
client.mutex.Lock()
defer client.mutex.Unlock()
return client.authCtx, client.authCtx.IsValid()
}
func (client *Client) notifyContentChanged(deviceID, href string, batch bool, notification *pool.Message) error {
notifyError := func(deviceID, href string, err error) error {
return fmt.Errorf("cannot notify resource /%v%v content changed: %w", deviceID, href, err)
}
authCtx, err := client.GetAuthorizationContext()
if err != nil {
return notifyError(deviceID, href, err)
}
decodeMsgToDebug(client, notification, "RECEIVED-NOTIFICATION")
var requests []*commands.NotifyResourceChangedRequest
if batch && href == resources.ResourceURI {
requests, err = coapconv.NewNotifyResourceChangedRequestsFromBatchResourceDiscovery(deviceID, client.remoteAddrString(), notification)
if err != nil {
return notifyError(deviceID, href, err)
}
} else {
requests = []*commands.NotifyResourceChangedRequest{coapconv.NewNotifyResourceChangedRequest(commands.NewResourceID(deviceID, href), client.remoteAddrString(), notification)}
}
ctx := kitNetGrpc.CtxWithToken(client.Context(), authCtx.GetAccessToken())
for _, request := range requests {
_, err = client.server.raClient.NotifyResourceChanged(ctx, request)
if err != nil {
return notifyError(request.GetResourceId().GetDeviceId(), request.GetResourceId().GetHref(), err)
}
}
return nil
}
func (client *Client) sendErrorConfirmResourceUpdate(ctx context.Context, deviceID, href, userID, correlationID string, code codes.Code, errToSend error) {
resp := client.server.messagePool.AcquireMessage(ctx)
defer client.server.messagePool.ReleaseMessage(resp)
resp.SetContentFormat(message.TextPlain)
resp.SetBody(bytes.NewReader([]byte(errToSend.Error())))
resp.SetCode(code)
request := coapconv.NewConfirmResourceUpdateRequest(commands.NewResourceID(deviceID, href), correlationID, client.remoteAddrString(), resp)
_, err := client.server.raClient.ConfirmResourceUpdate(ctx, request)
if err != nil {
log.Errorf("cannot send error via confirm resource update: %w", err)
}
}
func (client *Client) UpdateResource(ctx context.Context, event *events.ResourceUpdatePending) error {
authCtx, err := client.GetAuthorizationContext()
if err != nil {
err := fmt.Errorf("cannot update resource /%v%v: %w", event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), err)
if err2 := client.Close(); err2 != nil {
log.Errorf("failed to close client connection on update resource /%v%v: %w", event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), err2)
}
return err
}
sendConfirmCtx := authCtx.ToContext(ctx)
if event.GetResourceId().GetHref() == commands.StatusHref {
msg := client.server.messagePool.AcquireMessage(ctx)
msg.SetCode(codes.MethodNotAllowed)
msg.SetSequence(client.coapConn.Sequence())
defer client.server.messagePool.ReleaseMessage(msg)
request := coapconv.NewConfirmResourceUpdateRequest(event.GetResourceId(), event.GetAuditContext().GetCorrelationId(), client.remoteAddrString(), msg)
_, err = client.server.raClient.ConfirmResourceUpdate(sendConfirmCtx, request)
if err != nil {
return err
}
return nil
}
coapCtx, cancel := context.WithTimeout(ctx, client.server.config.APIs.COAP.KeepAlive.Timeout)
defer cancel()
req, err := coapconv.NewCoapResourceUpdateRequest(coapCtx, client.server.messagePool, event)
if err != nil {
client.sendErrorConfirmResourceUpdate(sendConfirmCtx, event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), authCtx.GetUserID(), event.GetAuditContext().GetCorrelationId(), codes.BadRequest, err)
return err
}
defer client.server.messagePool.ReleaseMessage(req)
decodeMsgToDebug(client, req, "RESOURCE-UPDATE-REQUEST")
resp, err := client.coapConn.Do(req)
if err != nil {
client.sendErrorConfirmResourceUpdate(sendConfirmCtx, event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), authCtx.GetUserID(), event.GetAuditContext().GetCorrelationId(), codes.ServiceUnavailable, err)
return err
}
defer client.server.messagePool.ReleaseMessage(resp)
decodeMsgToDebug(client, resp, "RESOURCE-UPDATE-RESPONSE")
if resp.Code() == codes.NotFound {
client.unpublishResourceLinks(client.getUserAuthorizedContext(ctx), []string{event.GetResourceId().GetHref()}, nil)
}
request := coapconv.NewConfirmResourceUpdateRequest(event.GetResourceId(), event.GetAuditContext().GetCorrelationId(), client.remoteAddrString(), resp)
_, err = client.server.raClient.ConfirmResourceUpdate(sendConfirmCtx, request)
if err != nil {
return err
}
return nil
}
func (client *Client) sendErrorConfirmResourceRetrieve(ctx context.Context, deviceID, href, userID, correlationID string, code codes.Code, errToSend error) {
resp := client.server.messagePool.AcquireMessage(ctx)
defer client.server.messagePool.ReleaseMessage(resp)
resp.SetContentFormat(message.TextPlain)
resp.SetBody(bytes.NewReader([]byte(errToSend.Error())))
resp.SetCode(code)
request := coapconv.NewConfirmResourceRetrieveRequest(commands.NewResourceID(deviceID, href), correlationID, client.remoteAddrString(), resp)
_, err := client.server.raClient.ConfirmResourceRetrieve(ctx, request)
if err != nil {
log.Errorf("cannot send error confirm resource retrieve: %w", err)
}
}
func (client *Client) RetrieveResource(ctx context.Context, event *events.ResourceRetrievePending) error {
authCtx, err := client.GetAuthorizationContext()
if err != nil {
err := fmt.Errorf("cannot retrieve resource /%v%v: %w", event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), err)
if err2 := client.Close(); err2 != nil {
log.Errorf("failed to close client connection on retrieve resource /%v%v: %w", event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), err2)
}
return err
}
sendConfirmCtx := authCtx.ToContext(ctx)
if event.GetResourceId().GetHref() == commands.StatusHref {
msg := client.server.messagePool.AcquireMessage(ctx)
msg.SetCode(codes.Content)
msg.SetSequence(client.coapConn.Sequence())
defer client.server.messagePool.ReleaseMessage(msg)
request := coapconv.NewConfirmResourceRetrieveRequest(event.GetResourceId(), event.GetAuditContext().GetCorrelationId(), client.remoteAddrString(), msg)
_, err = client.server.raClient.ConfirmResourceRetrieve(sendConfirmCtx, request)
if err != nil {
return err
}
return nil
}
coapCtx, cancel := context.WithTimeout(ctx, client.server.config.APIs.COAP.KeepAlive.Timeout)
defer cancel()
req, err := coapconv.NewCoapResourceRetrieveRequest(coapCtx, client.server.messagePool, event)
if err != nil {
client.sendErrorConfirmResourceRetrieve(sendConfirmCtx, event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), authCtx.GetUserID(), event.GetAuditContext().GetCorrelationId(), codes.BadRequest, err)
return err
}
defer client.server.messagePool.ReleaseMessage(req)
decodeMsgToDebug(client, req, "RESOURCE-RETRIEVE-REQUEST")
resp, err := client.coapConn.Do(req)
if err != nil {
client.sendErrorConfirmResourceRetrieve(sendConfirmCtx, event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), authCtx.GetUserID(), event.GetAuditContext().GetCorrelationId(), codes.ServiceUnavailable, err)
return err
}
defer client.server.messagePool.ReleaseMessage(resp)
decodeMsgToDebug(client, resp, "RESOURCE-RETRIEVE-RESPONSE")
if resp.Code() == codes.NotFound {
client.unpublishResourceLinks(client.getUserAuthorizedContext(ctx), []string{event.GetResourceId().GetHref()}, nil)
}
request := coapconv.NewConfirmResourceRetrieveRequest(event.GetResourceId(), event.GetAuditContext().GetCorrelationId(), client.remoteAddrString(), resp)
_, err = client.server.raClient.ConfirmResourceRetrieve(sendConfirmCtx, request)
if err != nil {
return err
}
return nil
}
func (client *Client) sendErrorConfirmResourceDelete(ctx context.Context, deviceID, href, userID, correlationID string, code codes.Code, errToSend error) {
resp := client.server.messagePool.AcquireMessage(ctx)
defer client.server.messagePool.ReleaseMessage(resp)
resp.SetContentFormat(message.TextPlain)
resp.SetBody(bytes.NewReader([]byte(errToSend.Error())))
resp.SetCode(code)
request := coapconv.NewConfirmResourceDeleteRequest(commands.NewResourceID(deviceID, href), correlationID, client.remoteAddrString(), resp)
_, err := client.server.raClient.ConfirmResourceDelete(ctx, request)
if err != nil {
log.Errorf("cannot send error via confirm resource delete: %w", err)
}
}
func (client *Client) DeleteResource(ctx context.Context, event *events.ResourceDeletePending) error {
authCtx, err := client.GetAuthorizationContext()
if err != nil {
err := fmt.Errorf("cannot delete resource /%v%v: %w", event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), err)
if err2 := client.Close(); err2 != nil {
log.Errorf("failed to close client connection on delete resource /%v%v: %w", event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), err2)
}
return err
}
sendConfirmCtx := authCtx.ToContext(ctx)
if event.GetResourceId().GetHref() == commands.StatusHref {
msg := client.server.messagePool.AcquireMessage(ctx)
msg.SetCode(codes.Forbidden)
msg.SetSequence(client.coapConn.Sequence())
defer client.server.messagePool.ReleaseMessage(msg)
request := coapconv.NewConfirmResourceDeleteRequest(event.GetResourceId(), event.GetAuditContext().GetCorrelationId(), client.remoteAddrString(), msg)
_, err = client.server.raClient.ConfirmResourceDelete(sendConfirmCtx, request)
if err != nil {
return err
}
return nil
}
coapCtx, cancel := context.WithTimeout(ctx, client.server.config.APIs.COAP.KeepAlive.Timeout)
defer cancel()
req, err := coapconv.NewCoapResourceDeleteRequest(coapCtx, client.server.messagePool, event)
if err != nil {
client.sendErrorConfirmResourceDelete(sendConfirmCtx, event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), authCtx.GetUserID(), event.GetAuditContext().GetCorrelationId(), codes.BadRequest, err)
return err
}
defer client.server.messagePool.ReleaseMessage(req)
decodeMsgToDebug(client, req, "RESOURCE-DELETE-REQUEST")
resp, err := client.coapConn.Do(req)
if err != nil {
client.sendErrorConfirmResourceDelete(sendConfirmCtx, event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), authCtx.GetUserID(), event.GetAuditContext().GetCorrelationId(), codes.ServiceUnavailable, err)
return err
}
defer client.server.messagePool.ReleaseMessage(resp)
decodeMsgToDebug(client, resp, "RESOURCE-DELETE-RESPONSE")
if resp.Code() == codes.NotFound {
client.unpublishResourceLinks(client.getUserAuthorizedContext(ctx), []string{event.GetResourceId().GetHref()}, nil)
}
request := coapconv.NewConfirmResourceDeleteRequest(event.GetResourceId(), event.GetAuditContext().GetCorrelationId(), client.remoteAddrString(), resp)
_, err = client.server.raClient.ConfirmResourceDelete(sendConfirmCtx, request)
if err != nil {
return err
}
return nil
}
func (client *Client) getUserAuthorizedContext(ctx context.Context) context.Context {
authCtx, err := client.GetAuthorizationContext()
if err != nil {
log.Errorf("unable to load authorization context: %w", err)
return nil
}
return authCtx.ToContext(ctx)
}
func (client *Client) unpublishResourceLinks(ctx context.Context, hrefs []string, instanceIDs []int64) []string {
authCtx, err := client.GetAuthorizationContext()
if err != nil {
log.Errorf("unable to load authorization context during resource links publish for device: %w", err)
return nil
}
logUnpublishError := func(err error) {
log.Errorf("error occurred during resource links unpublish for device %v: %w", authCtx.GetDeviceID(), err)
}
resp, err := client.server.raClient.UnpublishResourceLinks(ctx, &commands.UnpublishResourceLinksRequest{
Hrefs: hrefs,
InstanceIds: instanceIDs,
DeviceId: authCtx.GetDeviceID(),
CommandMetadata: &commands.CommandMetadata{
ConnectionId: client.remoteAddrString(),
Sequence: client.coapConn.Sequence(),
},
})
if err != nil {
		// unpublishing the resource is not critical -> the resource can still be accessible
		// the next resource update will return 'not found', which triggers a publish again
logUnpublishError(err)
return nil
}
if len(resp.UnpublishedHrefs) == 0 {
return nil
}
observer, err := client.getDeviceObserver(ctx)
if err != nil {
logUnpublishError(err)
return resp.UnpublishedHrefs
}
observer.RemovePublishedResources(ctx, resp.UnpublishedHrefs)
return resp.UnpublishedHrefs
}
func (client *Client) sendErrorConfirmResourceCreate(ctx context.Context, resourceID *commands.ResourceId, userID, correlationID string, code codes.Code, errToSend error) {
resp := client.server.messagePool.AcquireMessage(ctx)
defer client.server.messagePool.ReleaseMessage(resp)
resp.SetContentFormat(message.TextPlain)
resp.SetBody(bytes.NewReader([]byte(errToSend.Error())))
resp.SetCode(code)
request := coapconv.NewConfirmResourceCreateRequest(resourceID, correlationID, client.remoteAddrString(), resp)
_, err := client.server.raClient.ConfirmResourceCreate(ctx, request)
if err != nil {
log.Errorf("cannot send error via confirm resource create: %w", err)
}
}
func (client *Client) CreateResource(ctx context.Context, event *events.ResourceCreatePending) error {
authCtx, err := client.GetAuthorizationContext()
if err != nil {
err := fmt.Errorf("cannot create resource /%v%v: %w", event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), err)
if err2 := client.Close(); err2 != nil {
log.Errorf("failed to close client connection on create resource /%v%v: %w", event.GetResourceId().GetDeviceId(), event.GetResourceId().GetHref(), err2)
}
return err
}
sendConfirmCtx := authCtx.ToContext(ctx)
if event.GetResourceId().GetHref() == commands.StatusHref {
msg := client.server.messagePool.AcquireMessage(ctx)
msg.SetCode(codes.Forbidden)
msg.SetSequence(client.coapConn.Sequence())
defer client.server.messagePool.ReleaseMessage(msg)
request := coapconv.NewConfirmResourceCreateRequest(event.GetResourceId(), event.GetAuditContext().GetCorrelationId(), client.remoteAddrString(), msg)
_, err = client.server.raClient.ConfirmResourceCreate(sendConfirmCtx, request)
if err != nil {
return err
}
return nil
}
coapCtx, cancel := context.WithTimeout(ctx, client.server.config.APIs.COAP.KeepAlive.Timeout)
defer cancel()
req, err := coapconv.NewCoapResourceCreateRequest(coapCtx, client.server.messagePool, event)
if err != nil {
client.sendErrorConfirmResourceCreate(sendConfirmCtx, event.GetResourceId(), authCtx.GetUserID(), event.GetAuditContext().GetCorrelationId(), codes.BadRequest, err)
return err
}
defer client.server.messagePool.ReleaseMessage(req)
decodeMsgToDebug(client, req, "RESOURCE-CREATE-REQUEST")
resp, err := client.coapConn.Do(req)
if err != nil {
client.sendErrorConfirmResourceCreate(sendConfirmCtx, event.GetResourceId(), authCtx.GetUserID(), event.GetAuditContext().GetCorrelationId(), codes.ServiceUnavailable, err)
return err
}
defer client.server.messagePool.ReleaseMessage(resp)
decodeMsgToDebug(client, resp, "RESOURCE-CREATE-RESPONSE")
if resp.Code() == codes.NotFound {
client.unpublishResourceLinks(client.getUserAuthorizedContext(ctx), []string{event.GetResourceId().GetHref()}, nil)
}
request := coapconv.NewConfirmResourceCreateRequest(event.GetResourceId(), event.GetAuditContext().GetCorrelationId(), client.remoteAddrString(), resp)
_, err = client.server.raClient.ConfirmResourceCreate(sendConfirmCtx, request)
if err != nil {
return err
}
return nil
}
func (client *Client) OnDeviceSubscriberReconnectError(err error) {
auth, _ := client.GetAuthorizationContext()
deviceID := auth.GetDeviceID()
log.Errorf("cannot reconnect device %v subscriber to resource directory or eventbus - closing the device connection: %w", deviceID, err)
if err := client.Close(); err != nil {
log.Errorf("failed to close device %v connection : %w", deviceID, err)
}
logCloseDeviceSubscriberError := func(err error) {
log.Errorf("failed to close device %v subscription: %w", auth.GetDeviceID(), err)
}
if err := client.server.taskQueue.Submit(func() {
		if errSub := client.closeDeviceSubscriber(); errSub != nil {
logCloseDeviceSubscriberError(errSub)
}
}); err != nil {
logCloseDeviceSubscriberError(err)
}
}
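// GetContext returns the client's context, enriched with the access token when a valid authorization context is available.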
func (client *Client) GetContext() context.Context {
authCtx, err := client.GetAuthorizationContext()
if err != nil {
return client.Context()
}
return authCtx.ToContext(client.Context())
}
func (client *Client) UpdateDeviceMetadata(ctx context.Context, event *events.DeviceMetadataUpdatePending) error {
authCtx, err := client.GetAuthorizationContext()
if err != nil {
err := fmt.Errorf("cannot update device('%v') metadata: %w", event.GetDeviceId(), err)
if err2 := client.Close(); err2 != nil {
log.Errorf("failed to close client connection on update device('%v') metadata: %w", event.GetDeviceId(), err2)
}
return err
}
if event.GetShadowSynchronization() == commands.ShadowSynchronization_UNSET {
return nil
}
sendConfirmCtx := authCtx.ToContext(ctx)
previous, errObs := client.replaceDeviceObserverWithDeviceShadow(sendConfirmCtx, event.GetShadowSynchronization())
if errObs != nil {
log.Errorf("update device('%v') metadata error: %w", event.GetDeviceId(), errObs)
}
_, err = client.server.raClient.ConfirmDeviceMetadataUpdate(sendConfirmCtx, &commands.ConfirmDeviceMetadataUpdateRequest{
DeviceId: event.GetDeviceId(),
CorrelationId: event.GetAuditContext().GetCorrelationId(),
Confirm: &commands.ConfirmDeviceMetadataUpdateRequest_ShadowSynchronization{
ShadowSynchronization: event.GetShadowSynchronization(),
},
CommandMetadata: &commands.CommandMetadata{
ConnectionId: client.remoteAddrString(),
Sequence: client.coapConn.Sequence(),
},
Status: commands.Status_OK,
})
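	// If confirming the metadata update fails (and the context was not cancelled),
	// restore the previous device observer so the local shadow-synchronization state
	// matches what the resource aggregate last acknowledged.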
if err != nil && !errors.Is(err, context.Canceled) {
_, errObs := client.replaceDeviceObserverWithDeviceShadow(sendConfirmCtx, previous)
if errObs != nil {
log.Errorf("update device('%v') metadata error: %w", event.GetDeviceId(), errObs)
}
}
return err
}
func (c *Client) ValidateToken(ctx context.Context, token string) (pkgJwt.Claims, error) {
return c.server.ValidateToken(ctx, token)
}
func (c *Client) subscribeToDeviceEvents(owner string, onEvent func(e *idEvents.Event)) error {
close, err := c.server.ownerCache.Subscribe(owner, onEvent)
if err != nil {
return err
}
c.mutex.Lock()
defer c.mutex.Unlock()
c.closeEventSubscriptions = close
return nil
}
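// unsubscribeFromDeviceEvents swaps the stored close callback out under the mutex and
// invokes it after the lock is released, so the subscription is closed exactly once
// without holding the client mutex during the callback.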
func (c *Client) unsubscribeFromDeviceEvents() {
close := func() {
// default no-op
}
c.mutex.Lock()
if c.closeEventSubscriptions != nil {
close = c.closeEventSubscriptions
c.closeEventSubscriptions = nil
}
c.mutex.Unlock()
close()
}
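// ResolveDeviceID picks the device ID in priority order: the JWT claim configured via
// Authorization.DeviceIDClaim, then the TLS client-certificate identity when client
// certificates are required, and finally the caller-supplied paramDeviceID.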
func (c *Client) ResolveDeviceID(claim pkgJwt.Claims, paramDeviceID string) string {
if c.server.config.APIs.COAP.Authorization.DeviceIDClaim != "" {
return claim.DeviceID(c.server.config.APIs.COAP.Authorization.DeviceIDClaim)
}
if c.server.config.APIs.COAP.TLS.Enabled && c.server.config.APIs.COAP.TLS.Embedded.ClientCertificateRequired {
return c.tlsDeviceID
}
return paramDeviceID
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
pkg/monitor/elastic.go
|
package monitor
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"strings"
"time"
elastic "github.com/elastic/go-elasticsearch/v7"
"github.com/elastic/go-elasticsearch/v7/esapi"
)
const (
version = "v1"
mappingFormat = `
{
"index_patterns": "%s-%s-*",
"mappings" : {
"_source": {
"enabled": true
},
"dynamic_templates": [
{
"logtext": {
"match_mapping_type": "string",
"mapping": {
"type": "keyword"
}
}
}
],
"properties": {
"result": {
"properties": {
"timestamp": {
"type" : "date"
},
"interface": {
"properties": {
"internalIp": {
"type":"ip"
},
"externalIp": {
"type":"ip"
}
}
},
"server": {
"properties": {
"ip": {
"type":"ip"
},
"externalIp": {
"type":"ip"
}
}
}
}
},
"location": {
"properties": {
"ip": {
"type":"ip"
},
"geo_point": {
"type" : "geo_point"
}
}
}
}
}
}
`
clusterSettingsFormat = `{
"persistent": {
"action.auto_create_index": "%s-%s-*"
}
}`
)
type ElasticErrorCause struct {
Type string `json:"type"`
Reason string `json:"reason"`
}
type ElasticError struct {
RootCause []ElasticErrorCause `json:"root_cause"`
Status int `json:"status"`
}
func (e ElasticError) Error() string {
for _, e := range e.RootCause {
return e.Reason
}
return ""
}
type ElasticIndexer struct {
Client *elastic.Client
IndexPrefix string
}
type IndexResult struct {
Result string `json:"result"`
Error *ElasticError `json:"error"`
}
func NewElasticIndexer() (*ElasticIndexer, error) {
indexPrefix := os.Getenv("ELASTIC_INDEX_PREFIX")
if indexPrefix == "" {
indexPrefix = "speed-test"
}
config := elastic.Config{
Username: os.Getenv("ELASTIC_USER"),
Password: os.Getenv("ELASTIC_PASSWORD"),
Addresses: []string{os.Getenv("ELASTIC_HOST")},
}
	c, err := elastic.NewClient(config)
	if err != nil {
		return nil, err
	}
resp, err := esapi.IndicesPutTemplateRequest{
Name: "speed-test",
Body: strings.NewReader(fmt.Sprintf(mappingFormat, indexPrefix, version)),
}.Do(context.Background(), c)
if err != nil {
return nil, err
}
defer resp.Body.Close()
result := IndexResult{}
err = json.NewDecoder(resp.Body).Decode(&result)
if err != nil {
return nil, err
}
if result.Error != nil {
return nil, result.Error
}
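	// NOTE: pushing the action.auto_create_index cluster setting is currently disabled;
	// the block below is kept for reference and can be re-enabled if needed.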
if false {
settingsResp, err := esapi.ClusterPutSettingsRequest{
Body: strings.NewReader(fmt.Sprintf(clusterSettingsFormat, indexPrefix, version)),
}.Do(context.Background(), c)
if err != nil {
return nil, err
}
settingsResp.Body.Close()
}
return &ElasticIndexer{c, indexPrefix}, err
}
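// Minimal usage sketch, assuming ELASTIC_HOST, ELASTIC_USER and ELASTIC_PASSWORD are set
// in the environment and that `speed` is a populated *LocatedSpeed obtained elsewhere:
//
//	indexer, err := NewElasticIndexer()
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := indexer.Index(speed); err != nil {
//		log.Fatal(err)
//	}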
func (e *ElasticIndexer) Index(results *LocatedSpeed) error {
b := bytes.NewBuffer(nil)
err := json.NewEncoder(b).Encode(results)
if err != nil {
return err
}
ts, err := time.Parse("2006-01-02T15:04:05Z07", results.Result.Header.Timestamp)
index := fmt.Sprintf("%s-%s", e.IndexPrefix, version)
if err == nil {
//index += "-" + ts.Format("2006-01")
index += "-" + ts.Format("2006")
}
req := esapi.IndexRequest{
Index: index,
DocumentID: results.Result.Result.ID,
Body: b,
Refresh: "true",
}
resp, err := req.Do(context.Background(), e.Client)
if err != nil {
return err
}
defer resp.Body.Close()
result := IndexResult{}
err = json.NewDecoder(resp.Body).Decode(&result)
if err != nil {
return err
}
if result.Error != nil {
return result.Error
}
return nil
}
|
[
"\"ELASTIC_INDEX_PREFIX\"",
"\"ELASTIC_USER\"",
"\"ELASTIC_PASSWORD\"",
"\"ELASTIC_HOST\""
] |
[] |
[
"ELASTIC_USER",
"ELASTIC_PASSWORD",
"ELASTIC_HOST",
"ELASTIC_INDEX_PREFIX"
] |
[]
|
["ELASTIC_USER", "ELASTIC_PASSWORD", "ELASTIC_HOST", "ELASTIC_INDEX_PREFIX"]
|
go
| 4 | 0 | |
upup/pkg/fi/cloudup/apply_cluster.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudup
import (
"fmt"
"net/url"
"os"
"path"
"strings"
"k8s.io/kops/pkg/k8sversion"
"github.com/blang/semver"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
kopsbase "k8s.io/kops"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/client/simple/vfsclientset"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model"
"k8s.io/kops/pkg/model/alimodel"
"k8s.io/kops/pkg/model/awsmodel"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/model/components/etcdmanager"
"k8s.io/kops/pkg/model/domodel"
"k8s.io/kops/pkg/model/gcemodel"
"k8s.io/kops/pkg/model/openstackmodel"
"k8s.io/kops/pkg/model/spotinstmodel"
"k8s.io/kops/pkg/model/vspheremodel"
"k8s.io/kops/pkg/resources/digitalocean"
"k8s.io/kops/pkg/templates"
"k8s.io/kops/upup/models"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/alitasks"
"k8s.io/kops/upup/pkg/fi/cloudup/aliup"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/baremetal"
"k8s.io/kops/upup/pkg/fi/cloudup/cloudformation"
"k8s.io/kops/upup/pkg/fi/cloudup/do"
"k8s.io/kops/upup/pkg/fi/cloudup/dotasks"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
"k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks"
"k8s.io/kops/upup/pkg/fi/cloudup/spotinsttasks"
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
"k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks"
"k8s.io/kops/upup/pkg/fi/fitasks"
"k8s.io/kops/util/pkg/hashing"
"k8s.io/kops/util/pkg/vfs"
)
const (
starline = "*********************************************************************************\n"
)
var (
// AlphaAllowBareMetal is a feature flag that gates BareMetal support while it is alpha
AlphaAllowBareMetal = featureflag.New("AlphaAllowBareMetal", featureflag.Bool(false))
// AlphaAllowDO is a feature flag that gates DigitalOcean support while it is alpha
AlphaAllowDO = featureflag.New("AlphaAllowDO", featureflag.Bool(false))
// AlphaAllowGCE is a feature flag that gates GCE support while it is alpha
AlphaAllowGCE = featureflag.New("AlphaAllowGCE", featureflag.Bool(false))
// AlphaAllowOpenstack is a feature flag that gates OpenStack support while it is alpha
AlphaAllowOpenstack = featureflag.New("AlphaAllowOpenstack", featureflag.Bool(false))
// AlphaAllowVsphere is a feature flag that gates vsphere support while it is alpha
AlphaAllowVsphere = featureflag.New("AlphaAllowVsphere", featureflag.Bool(false))
// AlphaAllowALI is a feature flag that gates aliyun support while it is alpha
AlphaAllowALI = featureflag.New("AlphaAllowALI", featureflag.Bool(false))
// CloudupModels a list of supported models
CloudupModels = []string{"proto", "cloudup"}
)
type ApplyClusterCmd struct {
Cluster *kops.Cluster
InstanceGroups []*kops.InstanceGroup
// NodeUpSource is the location from which we download nodeup
NodeUpSource string
// NodeUpHash is the sha hash
NodeUpHash string
// Models is a list of cloudup models to apply
Models []string
// TargetName specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
TargetName string
// Target is the fi.Target we will operate against
Target fi.Target
// OutDir is a local directory in which we place output, can cache files etc
OutDir string
// Assets is a list of sources for files (primarily when not using everything containerized)
// Formats:
// raw url: http://... or https://...
// url with hash: <hex>@http://... or <hex>@https://...
Assets []*MirroredAsset
Clientset simple.Clientset
// DryRun is true if this is only a dry run
DryRun bool
// RunTasksOptions defines parameters for task execution, e.g. retry interval
RunTasksOptions *fi.RunTasksOptions
// The channel we are using
channel *kops.Channel
// Phase can be set to a Phase to run the specific subset of tasks, if we don't want to run everything
Phase Phase
// LifecycleOverrides is passed in to override the lifecycle for one of more tasks.
// The key value is the task name such as InternetGateway and the value is the fi.Lifecycle
// that is re-mapped.
LifecycleOverrides map[string]fi.Lifecycle
// TaskMap is the map of tasks that we built (output)
TaskMap map[string]fi.Task
}
func (c *ApplyClusterCmd) Run() error {
if c.InstanceGroups == nil {
list, err := c.Clientset.InstanceGroupsFor(c.Cluster).List(metav1.ListOptions{})
if err != nil {
return err
}
var instanceGroups []*kops.InstanceGroup
for i := range list.Items {
instanceGroups = append(instanceGroups, &list.Items[i])
}
c.InstanceGroups = instanceGroups
}
if c.Models == nil {
c.Models = CloudupModels
}
modelStore, err := findModelStore()
if err != nil {
return err
}
channel, err := ChannelForCluster(c.Cluster)
if err != nil {
return err
}
c.channel = channel
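	// Each group of tasks gets its own lifecycle; the Phase switch below downgrades the
	// groups that must not be touched in the requested phase to Ignore (or to the
	// ExistsAndWarnIfChanges / ExistsAndValidates checks).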
stageAssetsLifecycle := fi.LifecycleSync
securityLifecycle := fi.LifecycleSync
networkLifecycle := fi.LifecycleSync
clusterLifecycle := fi.LifecycleSync
switch c.Phase {
case Phase(""):
// Everything ... the default
// until we implement finding assets we need to Ignore them
stageAssetsLifecycle = fi.LifecycleIgnore
case PhaseStageAssets:
networkLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
case PhaseNetwork:
stageAssetsLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
case PhaseSecurity:
stageAssetsLifecycle = fi.LifecycleIgnore
networkLifecycle = fi.LifecycleExistsAndWarnIfChanges
clusterLifecycle = fi.LifecycleIgnore
case PhaseCluster:
if c.TargetName == TargetDryRun {
stageAssetsLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleExistsAndWarnIfChanges
networkLifecycle = fi.LifecycleExistsAndWarnIfChanges
} else {
stageAssetsLifecycle = fi.LifecycleIgnore
networkLifecycle = fi.LifecycleExistsAndValidates
securityLifecycle = fi.LifecycleExistsAndValidates
}
default:
return fmt.Errorf("unknown phase %q", c.Phase)
}
	// This is kind of a hack. Need to move phases out of fi. If we use Phase here we introduce a circular
// go dependency.
phase := string(c.Phase)
assetBuilder := assets.NewAssetBuilder(c.Cluster, phase)
err = c.upgradeSpecs(assetBuilder)
if err != nil {
return err
}
err = c.validateKopsVersion()
if err != nil {
return err
}
err = c.validateKubernetesVersion()
if err != nil {
return err
}
err = validation.DeepValidate(c.Cluster, c.InstanceGroups, true)
if err != nil {
return err
}
cluster := c.Cluster
if cluster.Spec.KubernetesVersion == "" {
return fmt.Errorf("KubernetesVersion not set")
}
if cluster.Spec.DNSZone == "" && !dns.IsGossipHostname(cluster.ObjectMeta.Name) {
return fmt.Errorf("DNSZone not set")
}
l := &Loader{}
l.Init()
l.Cluster = c.Cluster
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
if err != nil {
return fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
}
keyStore, err := c.Clientset.KeyStore(cluster)
if err != nil {
return err
}
sshCredentialStore, err := c.Clientset.SSHCredentialStore(cluster)
if err != nil {
return err
}
secretStore, err := c.Clientset.SecretStore(cluster)
if err != nil {
return err
}
// Normalize k8s version
versionWithoutV := strings.TrimSpace(cluster.Spec.KubernetesVersion)
if strings.HasPrefix(versionWithoutV, "v") {
versionWithoutV = versionWithoutV[1:]
}
if cluster.Spec.KubernetesVersion != versionWithoutV {
klog.Warningf("Normalizing kubernetes version: %q -> %q", cluster.Spec.KubernetesVersion, versionWithoutV)
cluster.Spec.KubernetesVersion = versionWithoutV
}
kv, err := k8sversion.Parse(cluster.Spec.KubernetesVersion)
if err != nil {
return err
}
	// check whether we should recommend turning off anonymousAuth on k8s versions >= 1.10
	// we start at 1.10 since this is a really critical issue and 1.10 is affected
if kv.IsGTE("1.10") {
		// we only warn here, because setting the value (i.e. modifying the kubelet object) would mess with the output
warn := false
if cluster.Spec.Kubelet == nil {
warn = true
} else if cluster.Spec.Kubelet.AnonymousAuth == nil {
warn = true
}
if warn {
fmt.Println("")
fmt.Printf(starline)
fmt.Println("")
fmt.Println("Kubelet anonymousAuth is currently turned on. This allows RBAC escalation and remote code execution possibilities.")
fmt.Println("It is highly recommended you turn it off by setting 'spec.kubelet.anonymousAuth' to 'false' via 'kops edit cluster'")
fmt.Println("")
fmt.Println("See https://github.com/kubernetes/kops/blob/master/docs/security.md#kubelet-api")
fmt.Println("")
fmt.Printf(starline)
fmt.Println("")
}
}
if err := c.AddFileAssets(assetBuilder); err != nil {
return err
}
// Only setup transfer of kops assets if using a FileRepository
if c.Cluster.Spec.Assets != nil && c.Cluster.Spec.Assets.FileRepository != nil {
if err := SetKopsAssetsLocations(assetBuilder); err != nil {
return err
}
}
checkExisting := true
l.AddTypes(map[string]interface{}{
"keypair": &fitasks.Keypair{},
"secret": &fitasks.Secret{},
"managedFile": &fitasks.ManagedFile{},
"mirrorKeystore": &fitasks.MirrorKeystore{},
"mirrorSecrets": &fitasks.MirrorSecrets{},
})
cloud, err := BuildCloud(cluster)
if err != nil {
return err
}
region := ""
project := ""
var sshPublicKeys [][]byte
{
keys, err := sshCredentialStore.FindSSHPublicKeys(fi.SecretNameSSHPrimary)
if err != nil {
return fmt.Errorf("error retrieving SSH public key %q: %v", fi.SecretNameSSHPrimary, err)
}
for _, k := range keys {
sshPublicKeys = append(sshPublicKeys, []byte(k.Spec.PublicKey))
}
}
modelContext := &model.KopsModelContext{
Cluster: cluster,
InstanceGroups: c.InstanceGroups,
}
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderGCE:
{
gceCloud := cloud.(gce.GCECloud)
region = gceCloud.Region()
project = gceCloud.Project()
if !AlphaAllowGCE.Enabled() {
return fmt.Errorf("GCE support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowGCE")
}
modelContext.SSHPublicKeys = sshPublicKeys
l.AddTypes(map[string]interface{}{
"Disk": &gcetasks.Disk{},
"Instance": &gcetasks.Instance{},
"InstanceTemplate": &gcetasks.InstanceTemplate{},
"Network": &gcetasks.Network{},
"InstanceGroupManager": &gcetasks.InstanceGroupManager{},
"FirewallRule": &gcetasks.FirewallRule{},
"Address": &gcetasks.Address{},
})
}
case kops.CloudProviderDO:
{
if !AlphaAllowDO.Enabled() {
return fmt.Errorf("DigitalOcean support is currently (very) alpha and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowDO to enable it")
}
modelContext.SSHPublicKeys = sshPublicKeys
l.AddTypes(map[string]interface{}{
"volume": &dotasks.Volume{},
"droplet": &dotasks.Droplet{},
})
}
case kops.CloudProviderAWS:
{
awsCloud := cloud.(awsup.AWSCloud)
region = awsCloud.Region()
l.AddTypes(map[string]interface{}{
// EC2
"elasticIP": &awstasks.ElasticIP{},
"instance": &awstasks.Instance{},
"instanceElasticIPAttachment": &awstasks.InstanceElasticIPAttachment{},
"instanceVolumeAttachment": &awstasks.InstanceVolumeAttachment{},
"ebsVolume": &awstasks.EBSVolume{},
"sshKey": &awstasks.SSHKey{},
// IAM
"iamInstanceProfile": &awstasks.IAMInstanceProfile{},
"iamInstanceProfileRole": &awstasks.IAMInstanceProfileRole{},
"iamRole": &awstasks.IAMRole{},
"iamRolePolicy": &awstasks.IAMRolePolicy{},
// VPC / Networking
"dhcpOptions": &awstasks.DHCPOptions{},
"internetGateway": &awstasks.InternetGateway{},
"route": &awstasks.Route{},
"routeTable": &awstasks.RouteTable{},
"routeTableAssociation": &awstasks.RouteTableAssociation{},
"securityGroup": &awstasks.SecurityGroup{},
"securityGroupRule": &awstasks.SecurityGroupRule{},
"subnet": &awstasks.Subnet{},
"vpc": &awstasks.VPC{},
"ngw": &awstasks.NatGateway{},
"vpcDHDCPOptionsAssociation": &awstasks.VPCDHCPOptionsAssociation{},
// ELB
"loadBalancer": &awstasks.LoadBalancer{},
"loadBalancerAttachment": &awstasks.LoadBalancerAttachment{},
// Autoscaling
"autoscalingGroup": &awstasks.AutoscalingGroup{},
"launchConfiguration": &awstasks.LaunchConfiguration{},
// Spotinst
"spotinstElastigroup": &spotinsttasks.Elastigroup{},
"spotinstOcean": &spotinsttasks.Ocean{},
"spotinstLaunchSpec": &spotinsttasks.LaunchSpec{},
})
if len(sshPublicKeys) == 0 && c.Cluster.Spec.SSHKeyName == "" {
return fmt.Errorf("SSH public key must be specified when running with AWS (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) > 1 {
return fmt.Errorf("Exactly one 'admin' SSH public key can be specified when running with AWS; please delete a key using `kops delete secret`")
}
l.TemplateFunctions["MachineTypeInfo"] = awsup.GetMachineTypeInfo
}
case kops.CloudProviderALI:
{
if !AlphaAllowALI.Enabled() {
return fmt.Errorf("Aliyun support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowALI")
}
aliCloud := cloud.(aliup.ALICloud)
region = aliCloud.Region()
l.AddTypes(map[string]interface{}{
"Vpc": &alitasks.VPC{},
"VSwitch": &alitasks.VSwitch{},
"Disk": &alitasks.Disk{},
"SecurityGroup": &alitasks.SecurityGroup{},
"SecurityGroupRule": &alitasks.SecurityGroupRule{},
"LoadBalancer": &alitasks.LoadBalancer{},
"LoadBalancerListener": &alitasks.LoadBalancerListener{},
"LoadBalancerWhiteList": &alitasks.LoadBalancerWhiteList{},
"AutoscalingGroup": &alitasks.ScalingGroup{},
"LaunchConfiguration": &alitasks.LaunchConfiguration{},
"RAMPolicy": &alitasks.RAMPolicy{},
"RAMRole": &alitasks.RAMRole{},
"SSHKey": &alitasks.SSHKey{},
})
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with ALICloud (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) != 1 {
return fmt.Errorf("Exactly one 'admin' SSH public key can be specified when running with ALICloud; please delete a key using `kops delete secret`")
}
}
case kops.CloudProviderVSphere:
{
if !AlphaAllowVsphere.Enabled() {
return fmt.Errorf("Vsphere support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowVsphere")
}
vsphereCloud := cloud.(*vsphere.VSphereCloud)
// TODO: map region with vCenter cluster, or datacenter, or datastore?
region = vsphereCloud.Cluster
l.AddTypes(map[string]interface{}{
"instance": &vspheretasks.VirtualMachine{},
})
}
case kops.CloudProviderBareMetal:
{
if !AlphaAllowBareMetal.Enabled() {
return fmt.Errorf("BareMetal support is currently (very) alpha and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowBareMetal to enable it")
}
// No additional tasks (yet)
}
case kops.CloudProviderOpenstack:
{
if !AlphaAllowOpenstack.Enabled() {
return fmt.Errorf("Openstack support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowOpenstack")
}
osCloud := cloud.(openstack.OpenstackCloud)
region = osCloud.Region()
l.AddTypes(map[string]interface{}{
// Compute
"sshKey": &openstacktasks.SSHKey{},
"serverGroup": &openstacktasks.ServerGroup{},
"instance": &openstacktasks.Instance{},
// Networking
"network": &openstacktasks.Network{},
"subnet": &openstacktasks.Subnet{},
"router": &openstacktasks.Router{},
"securityGroup": &openstacktasks.SecurityGroup{},
"securityGroupRule": &openstacktasks.SecurityGroupRule{},
// BlockStorage
"volume": &openstacktasks.Volume{},
// LB
"lb": &openstacktasks.LB{},
})
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with Openstack (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) != 1 {
return fmt.Errorf("Exactly one 'admin' SSH public key can be specified when running with Openstack; please delete a key using `kops delete secret`")
}
}
default:
return fmt.Errorf("unknown CloudProvider %q", cluster.Spec.CloudProvider)
}
modelContext.Region = region
if dns.IsGossipHostname(cluster.ObjectMeta.Name) {
klog.Infof("Gossip DNS: skipping DNS validation")
} else {
err = validateDNS(cluster, cloud)
if err != nil {
return err
}
}
clusterTags, err := buildCloudupTags(cluster)
if err != nil {
return err
}
tf := &TemplateFunctions{
cluster: cluster,
instanceGroups: c.InstanceGroups,
tags: clusterTags,
region: region,
modelContext: modelContext,
}
l.Tags = clusterTags
l.WorkDir = c.OutDir
l.ModelStore = modelStore
var fileModels []string
for _, m := range c.Models {
switch m {
case "proto":
// No proto code options; no file model
case "cloudup":
templates, err := templates.LoadTemplates(cluster, models.NewAssetPath("cloudup/resources"))
if err != nil {
return fmt.Errorf("error loading templates: %v", err)
}
err = tf.AddTo(templates.TemplateFunctions, secretStore)
if err != nil {
return err
}
l.Builders = append(l.Builders,
&BootstrapChannelBuilder{
Lifecycle: &clusterLifecycle,
assetBuilder: assetBuilder,
cluster: cluster,
templates: templates,
},
&model.PKIModelBuilder{
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&etcdmanager.EtcdManagerBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
)
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&awsmodel.APILoadBalancerBuilder{AWSModelContext: awsModelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.BastionModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.DNSModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&model.ExternalAccessModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.FirewallModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.SSHKeyModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
l.Builders = append(l.Builders,
&model.NetworkModelBuilder{KopsModelContext: modelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&model.IAMModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
case kops.CloudProviderDO:
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
)
case kops.CloudProviderGCE:
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
storageAclLifecycle := securityLifecycle
if storageAclLifecycle != fi.LifecycleIgnore {
// This is a best-effort permissions fix
storageAclLifecycle = fi.LifecycleWarnIfInsufficientAccess
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&gcemodel.APILoadBalancerBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.ExternalAccessModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.FirewallModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.NetworkModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&gcemodel.StorageAclBuilder{GCEModelContext: gceModelContext, Cloud: cloud.(gce.GCECloud), Lifecycle: &storageAclLifecycle},
)
case kops.CloudProviderALI:
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&alimodel.APILoadBalancerModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.NetworkModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.RAMModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.SSHKeyModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.FirewallModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.ExternalAccessModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
)
case kops.CloudProviderVSphere:
// No special settings (yet!)
case kops.CloudProviderBareMetal:
// No special settings (yet!)
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
// &openstackmodel.APILBModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &clusterLifecycle},
&openstackmodel.NetworkModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &networkLifecycle},
&openstackmodel.SSHKeyModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
&openstackmodel.FirewallModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
)
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
}
fileModels = append(fileModels, m)
default:
fileModels = append(fileModels, m)
}
}
l.TemplateFunctions["CA"] = func() fi.CAStore {
return keyStore
}
l.TemplateFunctions["Secrets"] = func() fi.SecretStore {
return secretStore
}
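	// The bootstrap script builder is shared by the per-cloud instance group builders
	// below; it renders the nodeup bootstrap script from the per-instance-group config.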
bootstrapScriptBuilder := &model.BootstrapScript{
NodeUpConfigBuilder: func(ig *kops.InstanceGroup) (*nodeup.Config, error) { return c.BuildNodeUpConfig(assetBuilder, ig) },
NodeUpSource: c.NodeUpSource,
NodeUpSourceHash: c.NodeUpHash,
}
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
if featureflag.Spotinst.Enabled() {
l.Builders = append(l.Builders, &spotinstmodel.InstanceGroupModelBuilder{
AWSModelContext: awsModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
SecurityLifecycle: &securityLifecycle,
})
} else {
l.Builders = append(l.Builders, &awsmodel.AutoscalingGroupModelBuilder{
AWSModelContext: awsModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
SecurityLifecycle: &securityLifecycle,
})
}
case kops.CloudProviderDO:
doModelContext := &domodel.DOModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &domodel.DropletBuilder{
DOModelContext: doModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
case kops.CloudProviderGCE:
{
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &gcemodel.AutoscalingGroupModelBuilder{
GCEModelContext: gceModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
}
case kops.CloudProviderALI:
{
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &alimodel.ScalingGroupModelBuilder{
ALIModelContext: aliModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
}
case kops.CloudProviderVSphere:
{
vsphereModelContext := &vspheremodel.VSphereModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &vspheremodel.AutoscalingGroupModelBuilder{
VSphereModelContext: vsphereModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
}
case kops.CloudProviderBareMetal:
// BareMetal tasks will go here
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &openstackmodel.ServerGroupModelBuilder{
OpenstackModelContext: openstackModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
}
l.TemplateFunctions["Masters"] = tf.modelContext.MasterInstanceGroups
err = tf.AddTo(l.TemplateFunctions, secretStore)
if err != nil {
return err
}
taskMap, err := l.BuildTasks(modelStore, fileModels, assetBuilder, &stageAssetsLifecycle, c.LifecycleOverrides)
if err != nil {
return fmt.Errorf("error building tasks: %v", err)
}
c.TaskMap = taskMap
var target fi.Target
dryRun := false
shouldPrecreateDNS := true
switch c.TargetName {
case TargetDirect:
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderGCE:
target = gce.NewGCEAPITarget(cloud.(gce.GCECloud))
case kops.CloudProviderAWS:
target = awsup.NewAWSAPITarget(cloud.(awsup.AWSCloud))
case kops.CloudProviderDO:
target = do.NewDOAPITarget(cloud.(*digitalocean.Cloud))
case kops.CloudProviderVSphere:
target = vsphere.NewVSphereAPITarget(cloud.(*vsphere.VSphereCloud))
case kops.CloudProviderBareMetal:
target = baremetal.NewTarget(cloud.(*baremetal.Cloud))
case kops.CloudProviderOpenstack:
target = openstack.NewOpenstackAPITarget(cloud.(openstack.OpenstackCloud))
case kops.CloudProviderALI:
target = aliup.NewALIAPITarget(cloud.(aliup.ALICloud))
default:
return fmt.Errorf("direct configuration not supported with CloudProvider:%q", cluster.Spec.CloudProvider)
}
case TargetTerraform:
checkExisting = false
outDir := c.OutDir
tf := terraform.NewTerraformTarget(cloud, region, project, outDir, cluster.Spec.Target)
// We include a few "util" variables in the TF output
if err := tf.AddOutputVariable("region", terraform.LiteralFromStringValue(region)); err != nil {
return err
}
if project != "" {
if err := tf.AddOutputVariable("project", terraform.LiteralFromStringValue(project)); err != nil {
return err
}
}
if err := tf.AddOutputVariable("cluster_name", terraform.LiteralFromStringValue(cluster.ObjectMeta.Name)); err != nil {
return err
}
target = tf
// Can cause conflicts with terraform management
shouldPrecreateDNS = false
case TargetCloudformation:
checkExisting = false
outDir := c.OutDir
target = cloudformation.NewCloudformationTarget(cloud, region, project, outDir)
// Can cause conflicts with cloudformation management
shouldPrecreateDNS = false
case TargetDryRun:
target = fi.NewDryRunTarget(assetBuilder, os.Stdout)
dryRun = true
// Avoid making changes on a dry-run
shouldPrecreateDNS = false
default:
return fmt.Errorf("unsupported target type %q", c.TargetName)
}
c.Target = target
if !dryRun {
err = registry.WriteConfigDeprecated(cluster, configBase.Join(registry.PathClusterCompleted), c.Cluster)
if err != nil {
return fmt.Errorf("error writing completed cluster spec: %v", err)
}
vfsMirror := vfsclientset.NewInstanceGroupMirror(cluster, configBase)
for _, g := range c.InstanceGroups {
// TODO: We need to update the mirror (below), but do we need to update the primary?
_, err := c.Clientset.InstanceGroupsFor(c.Cluster).Update(g)
if err != nil {
return fmt.Errorf("error writing InstanceGroup %q to registry: %v", g.ObjectMeta.Name, err)
}
// TODO: Don't write if vfsMirror == c.ClientSet
if err := vfsMirror.WriteMirror(g); err != nil {
return fmt.Errorf("error writing instance group spec to mirror: %v", err)
}
}
}
context, err := fi.NewContext(target, cluster, cloud, keyStore, secretStore, configBase, checkExisting, taskMap)
if err != nil {
return fmt.Errorf("error building context: %v", err)
}
defer context.Close()
var options fi.RunTasksOptions
if c.RunTasksOptions != nil {
options = *c.RunTasksOptions
} else {
options.InitDefaults()
}
err = context.RunTasks(options)
if err != nil {
return fmt.Errorf("error running tasks: %v", err)
}
if dns.IsGossipHostname(cluster.Name) {
shouldPrecreateDNS = false
}
if shouldPrecreateDNS {
if err := precreateDNS(cluster, cloud); err != nil {
klog.Warningf("unable to pre-create DNS records - cluster startup may be slower: %v", err)
}
}
err = target.Finish(taskMap) //This will finish the apply, and print the changes
if err != nil {
return fmt.Errorf("error closing target: %v", err)
}
return nil
}
// upgradeSpecs ensures that fields are fully populated / defaulted
func (c *ApplyClusterCmd) upgradeSpecs(assetBuilder *assets.AssetBuilder) error {
fullCluster, err := PopulateClusterSpec(c.Clientset, c.Cluster, assetBuilder)
if err != nil {
return err
}
c.Cluster = fullCluster
for i, g := range c.InstanceGroups {
fullGroup, err := PopulateInstanceGroupSpec(fullCluster, g, c.channel)
if err != nil {
return err
}
c.InstanceGroups[i] = fullGroup
}
return nil
}
// validateKopsVersion ensures that kops meet the version requirements / recommendations in the channel
func (c *ApplyClusterCmd) validateKopsVersion() error {
kopsVersion, err := semver.ParseTolerant(kopsbase.Version)
if err != nil {
klog.Warningf("unable to parse kops version %q", kopsbase.Version)
// Not a hard-error
return nil
}
versionInfo := kops.FindKopsVersionSpec(c.channel.Spec.KopsVersions, kopsVersion)
if versionInfo == nil {
klog.Warningf("unable to find version information for kops version %q in channel", kopsVersion)
// Not a hard-error
return nil
}
recommended, err := versionInfo.FindRecommendedUpgrade(kopsVersion)
if err != nil {
klog.Warningf("unable to parse version recommendation for kops version %q in channel", kopsVersion)
}
required, err := versionInfo.IsUpgradeRequired(kopsVersion)
if err != nil {
klog.Warningf("unable to parse version requirement for kops version %q in channel", kopsVersion)
}
if recommended != nil && !required {
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
fmt.Printf("A new kops version is available: %s\n", recommended)
fmt.Printf("\n")
fmt.Printf("Upgrading is recommended\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_kops", recommended.String()))
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
} else if required {
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
if recommended != nil {
fmt.Printf("A new kops version is available: %s\n", recommended)
}
fmt.Printf("\n")
fmt.Printf("This version of kops (%s) is no longer supported; upgrading is required\n", kopsbase.Version)
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_OBSOLETE_VERSION)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_kops", recommended.String()))
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
}
if required {
if os.Getenv("KOPS_RUN_OBSOLETE_VERSION") == "" {
return fmt.Errorf("kops upgrade is required")
}
}
return nil
}
// validateKubernetesVersion ensures that kubernetes meet the version requirements / recommendations in the channel
func (c *ApplyClusterCmd) validateKubernetesVersion() error {
parsed, err := util.ParseKubernetesVersion(c.Cluster.Spec.KubernetesVersion)
if err != nil {
klog.Warningf("unable to parse kubernetes version %q", c.Cluster.Spec.KubernetesVersion)
// Not a hard-error
return nil
}
// TODO: make util.ParseKubernetesVersion not return a pointer
kubernetesVersion := *parsed
versionInfo := kops.FindKubernetesVersionSpec(c.channel.Spec.KubernetesVersions, kubernetesVersion)
if versionInfo == nil {
klog.Warningf("unable to find version information for kubernetes version %q in channel", kubernetesVersion)
// Not a hard-error
return nil
}
recommended, err := versionInfo.FindRecommendedUpgrade(kubernetesVersion)
if err != nil {
klog.Warningf("unable to parse version recommendation for kubernetes version %q in channel", kubernetesVersion)
}
required, err := versionInfo.IsUpgradeRequired(kubernetesVersion)
if err != nil {
klog.Warningf("unable to parse version requirement for kubernetes version %q in channel", kubernetesVersion)
}
if recommended != nil && !required {
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
fmt.Printf("A new kubernetes version is available: %s\n", recommended)
fmt.Printf("Upgrading is recommended (try kops upgrade cluster)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", recommended.String()))
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
} else if required {
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
if recommended != nil {
fmt.Printf("A new kubernetes version is available: %s\n", recommended)
}
fmt.Printf("\n")
fmt.Printf("This version of kubernetes is no longer supported; upgrading is required\n")
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_OBSOLETE_VERSION)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", recommended.String()))
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
}
if required {
if os.Getenv("KOPS_RUN_OBSOLETE_VERSION") == "" {
return fmt.Errorf("kubernetes upgrade is required")
}
}
return nil
}
// AddFileAssets adds the file assets within the assetBuilder
func (c *ApplyClusterCmd) AddFileAssets(assetBuilder *assets.AssetBuilder) error {
var baseURL string
var err error
if components.IsBaseURL(c.Cluster.Spec.KubernetesVersion) {
baseURL = c.Cluster.Spec.KubernetesVersion
} else {
baseURL = "https://storage.googleapis.com/kubernetes-release/release/v" + c.Cluster.Spec.KubernetesVersion
}
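	// The kubelet/kubectl binaries (and, where needed, the mounter asset) are resolved
	// relative to baseURL and remapped through the asset builder so that mirrored
	// locations and their hashes are recorded on c.Assets.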
k8sAssetsNames := []string{
"/bin/linux/amd64/kubelet",
"/bin/linux/amd64/kubectl",
}
if needsMounterAsset(c.Cluster, c.InstanceGroups) {
k8sVersion, err := util.ParseKubernetesVersion(c.Cluster.Spec.KubernetesVersion)
if err != nil {
return fmt.Errorf("unable to determine kubernetes version from %q", c.Cluster.Spec.KubernetesVersion)
} else if util.IsKubernetesGTE("1.9", *k8sVersion) {
// Available directly
k8sAssetsNames = append(k8sAssetsNames, "/bin/linux/amd64/mounter")
} else {
// Only available in the kubernetes-manifests.tar.gz directory
k8sAssetsNames = append(k8sAssetsNames, "/kubernetes-manifests.tar.gz")
}
}
for _, a := range k8sAssetsNames {
k, err := url.Parse(baseURL)
if err != nil {
return err
}
k.Path = path.Join(k.Path, a)
u, hash, err := assetBuilder.RemapFileAndSHA(k)
if err != nil {
return err
}
c.Assets = append(c.Assets, BuildMirroredAsset(u, hash))
}
if usesCNI(c.Cluster) {
cniAsset, cniAssetHash, err := findCNIAssets(c.Cluster, assetBuilder)
if err != nil {
return err
}
c.Assets = append(c.Assets, BuildMirroredAsset(cniAsset, cniAssetHash))
}
if c.Cluster.Spec.Networking.LyftVPC != nil {
var hash *hashing.Hash
urlString := os.Getenv("LYFT_VPC_DOWNLOAD_URL")
if urlString == "" {
urlString = "https://github.com/lyft/cni-ipvlan-vpc-k8s/releases/download/v0.4.2/cni-ipvlan-vpc-k8s-v0.4.2.tar.gz"
hash, err = hashing.FromString("bfdc65028a3bf8ffe14388fca28ede3600e7e2dee4e781908b6a23f9e79f86ad")
if err != nil {
// Should be impossible
return fmt.Errorf("invalid hard-coded hash for lyft url")
}
} else {
klog.Warningf("Using url from LYFT_VPC_DOWNLOAD_URL env var: %q", urlString)
}
u, err := url.Parse(urlString)
if err != nil {
return fmt.Errorf("unable to parse lyft-vpc URL %q", urlString)
}
c.Assets = append(c.Assets, BuildMirroredAsset(u, hash))
}
// TODO figure out if we can only do this for CoreOS only and GCE Container OS
// TODO It is very difficult to pre-determine what OS an ami is, and if that OS needs socat
// At this time we just copy the socat and conntrack binaries to all distros.
// Most distros will have their own socat and conntrack binary.
// Container operating systems like CoreOS need to have socat and conntrack added to them.
{
utilsLocation, hash, err := KopsFileUrl("linux/amd64/utils.tar.gz", assetBuilder)
if err != nil {
return err
}
c.Assets = append(c.Assets, BuildMirroredAsset(utilsLocation, hash))
}
n, hash, err := NodeUpLocation(assetBuilder)
if err != nil {
return err
}
c.NodeUpSource = n.String()
c.NodeUpHash = hash.Hex()
// Explicitly add the protokube image,
// otherwise when the Target is DryRun this asset is not added
// Is there a better way to call this?
_, _, err = ProtokubeImageSource(assetBuilder)
if err != nil {
return err
}
return nil
}
// buildPermalink returns a link to our "permalink docs", to further explain an error message
func buildPermalink(key, anchor string) string {
url := "https://github.com/kubernetes/kops/blob/master/permalinks/" + key + ".md"
if anchor != "" {
url += "#" + anchor
}
return url
}
func ChannelForCluster(c *kops.Cluster) (*kops.Channel, error) {
channelLocation := c.Spec.Channel
if channelLocation == "" {
channelLocation = kops.DefaultChannel
}
return kops.LoadChannel(channelLocation)
}
// needsMounterAsset checks if we need the mounter program
// This is only needed currently on ContainerOS i.e. GCE, but we don't have a nice way to detect it yet
func needsMounterAsset(c *kops.Cluster, instanceGroups []*kops.InstanceGroup) bool {
// TODO: Do real detection of ContainerOS (but this has to work with image names, and maybe even forked images)
switch kops.CloudProviderID(c.Spec.CloudProvider) {
case kops.CloudProviderGCE:
return true
default:
return false
}
}
// BuildNodeUpConfig returns the NodeUp config, in YAML format
func (c *ApplyClusterCmd) BuildNodeUpConfig(assetBuilder *assets.AssetBuilder, ig *kops.InstanceGroup) (*nodeup.Config, error) {
if ig == nil {
return nil, fmt.Errorf("instanceGroup cannot be nil")
}
cluster := c.Cluster
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
if err != nil {
return nil, fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
}
// TODO: Remove
clusterTags, err := buildCloudupTags(cluster)
if err != nil {
return nil, err
}
channels := []string{
configBase.Join("addons", "bootstrap-channel.yaml").Path(),
}
for i := range c.Cluster.Spec.Addons {
channels = append(channels, c.Cluster.Spec.Addons[i].Manifest)
}
role := ig.Spec.Role
if role == "" {
return nil, fmt.Errorf("cannot determine role for instance group: %v", ig.ObjectMeta.Name)
}
nodeUpTags, err := buildNodeupTags(role, cluster, clusterTags)
if err != nil {
return nil, err
}
config := &nodeup.Config{}
for _, tag := range nodeUpTags.List() {
config.Tags = append(config.Tags, tag)
}
for _, a := range c.Assets {
config.Assets = append(config.Assets, a.CompactString())
}
config.ClusterName = cluster.ObjectMeta.Name
config.ConfigBase = fi.String(configBase.Path())
config.InstanceGroupName = ig.ObjectMeta.Name
var images []*nodeup.Image
if components.IsBaseURL(cluster.Spec.KubernetesVersion) {
// When using a custom version, we want to preload the images over http
components := []string{"kube-proxy"}
if role == kops.InstanceGroupRoleMaster {
components = append(components, "kube-apiserver", "kube-controller-manager", "kube-scheduler")
}
for _, component := range components {
baseURL, err := url.Parse(c.Cluster.Spec.KubernetesVersion)
if err != nil {
return nil, err
}
baseURL.Path = path.Join(baseURL.Path, "/bin/linux/amd64/", component+".tar")
u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
if err != nil {
return nil, err
}
image := &nodeup.Image{
Sources: []string{u.String()},
Hash: hash.Hex(),
}
images = append(images, image)
}
}
{
u, hash, err := ProtokubeImageSource(assetBuilder)
if err != nil {
return nil, err
}
asset := BuildMirroredAsset(u, hash)
config.ProtokubeImage = &nodeup.Image{
Name: kopsbase.DefaultProtokubeImageName(),
Sources: asset.Locations,
Hash: asset.Hash.Hex(),
}
}
if role == kops.InstanceGroupRoleMaster {
for _, etcdCluster := range cluster.Spec.EtcdClusters {
if etcdCluster.Provider == kops.EtcdProviderTypeManager {
p := configBase.Join("manifests/etcd/" + etcdCluster.Name + ".yaml").Path()
config.EtcdManifests = append(config.EtcdManifests, p)
}
}
}
config.Images = images
config.Channels = channels
return config, nil
}
|
[
"\"KOPS_RUN_OBSOLETE_VERSION\"",
"\"KOPS_RUN_OBSOLETE_VERSION\"",
"\"LYFT_VPC_DOWNLOAD_URL\""
] |
[] |
[
"KOPS_RUN_OBSOLETE_VERSION",
"LYFT_VPC_DOWNLOAD_URL"
] |
[]
|
["KOPS_RUN_OBSOLETE_VERSION", "LYFT_VPC_DOWNLOAD_URL"]
|
go
| 2 | 0 | |
test/unit/test_martkistd_data_shims.py
|
import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
import martkistlib
import gobject_json
# old format proposal hex w/multi-dimensional array
@pytest.fixture
def proposal_hex_old():
return "5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313534373138333939342c20226e616d65223a20226a61636b2d73706172726f772d6e65772d736869702d7573696e672d6d6172746b697374222c20227061796d656e745f61646472657373223a2022544663706d64766e613539567557646843515159734b45346a6d3674554d6575454a222c20227061796d656e745f616d6f756e74223a2034392c202273746172745f65706f6368223a20313532373730383531382c202274797065223a20312c202275726c223a202268747470733a2f2f776869746570617065722e6d6172746b6973742e6f7267227d5d5d"
# same proposal data as old, but streamlined format
@pytest.fixture
def proposal_hex_new():
return "7b22656e645f65706f6368223a20313534373138333939342c20226e616d65223a20226a61636b2d73706172726f772d6e65772d736869702d7573696e672d6d6172746b697374222c20227061796d656e745f61646472657373223a2022544663706d64766e613539567557646843515159734b45346a6d3674554d6575454a222c20227061796d656e745f616d6f756e74223a2034392c202273746172745f65706f6368223a20313532373730383531382c202274797065223a20312c202275726c223a202268747470733a2f2f776869746570617065722e6d6172746b6973742e6f7267227d"
# old format trigger hex w/multi-dimensional array
@pytest.fixture
def trigger_hex_old():
return "5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2034333830302c20227061796d656e745f616464726573736573223a202254535466654d65577751694344774d53545752616a39777756474e6a5a466676466b7c54456a4d6e6842356d41507270673752344355435347514e6e4a7150654146425448222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d"
# same data as new, but simpler format
@pytest.fixture
def trigger_hex_new():
return "7b226576656e745f626c6f636b5f686569676874223a2034333830302c20227061796d656e745f616464726573736573223a202254535466654d65577751694344774d53545752616a39777756474e6a5a466676466b7c54456a4d6e6842356d41507270673752344355435347514e6e4a7150654146425448222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d"
# TODO: remove fixtures below here once test_SHIM_serialise_for_martkistd removed
@pytest.fixture
def sentinel_proposal_hex():
return '7b22656e645f65706f6368223a20313439313032323830302c20226e616d65223a2022626565722d7265696d62757273656d656e742d37222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a20372e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f6461736863656e7472616c2e636f6d2f626565722d7265696d62757273656d656e742d37227d'
@pytest.fixture
def sentinel_superblock_hex():
return '7b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d'
@pytest.fixture
def martkistd_proposal_hex():
return '5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313336383430302c20226e616d65223a2022626565722d7265696d62757273656d656e742d39222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2034392e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f7777772e6461736863656e7472616c2e6f72672f702f626565722d7265696d62757273656d656e742d39227d5d5d'
@pytest.fixture
def martkistd_superblock_hex():
return '5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d'
# ========================================================================
def test_SHIM_serialise_for_martkistd(sentinel_proposal_hex, sentinel_superblock_hex):
assert martkistlib.SHIM_serialise_for_martkistd(sentinel_proposal_hex) == '5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313032323830302c20226e616d65223a2022626565722d7265696d62757273656d656e742d37222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a20372e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f6461736863656e7472616c2e636f6d2f626565722d7265696d62757273656d656e742d37227d5d5d'
assert martkistlib.SHIM_serialise_for_martkistd(sentinel_superblock_hex) == '5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d'
def test_valid_json():
import binascii
# test some valid JSON
assert gobject_json.valid_json("{}") is True
assert gobject_json.valid_json("null") is True
assert gobject_json.valid_json("true") is True
assert gobject_json.valid_json("false") is True
assert gobject_json.valid_json("\"rubbish\"") is True
assert gobject_json.valid_json(
binascii.unhexlify(proposal_hex_old())
) is True
assert gobject_json.valid_json(
binascii.unhexlify(proposal_hex_new())
) is True
assert gobject_json.valid_json(
binascii.unhexlify(trigger_hex_new())
) is True
assert gobject_json.valid_json(
binascii.unhexlify(trigger_hex_old())
) is True
# test some invalid/bad/not JSON
assert gobject_json.valid_json("False") is False
assert gobject_json.valid_json("True") is False
assert gobject_json.valid_json("Null") is False
assert gobject_json.valid_json("NULL") is False
assert gobject_json.valid_json("nil") is False
assert gobject_json.valid_json("rubbish") is False
assert gobject_json.valid_json("{{}") is False
assert gobject_json.valid_json("") is False
poorly_formatted = trigger_hex_old() + "7d"
assert gobject_json.valid_json(
binascii.unhexlify(poorly_formatted)
) is False
def test_extract_object():
from decimal import Decimal
import binascii
# jack sparrow needs a new ship - same expected proposal data for both new &
# old formats
expected = {
'type': 1,
'name': 'jack-sparrow-new-ship-using-martkist',
'url': 'https://whitepaper.martkist.org',
'start_epoch': 1527708518,
'end_epoch': 1547183994,
'payment_address': 'TFcpmdvna59VuWdhCQQYsKE4jm6tUMeuEJ',
'payment_amount': Decimal('49'),
}
# test proposal old format
json_str = binascii.unhexlify(proposal_hex_old()).decode('utf-8')
assert gobject_json.extract_object(json_str) == expected
# test proposal new format
json_str = binascii.unhexlify(proposal_hex_new()).decode('utf-8')
assert gobject_json.extract_object(json_str) == expected
# same expected trigger data for both new & old formats
expected = {
'type': 2,
'event_block_height': 43800,
'payment_addresses': 'TSTfeMeWwQiCDwMSTWRaj9wwVGNjZFfvFk|TEjMnhB5mAPrpg7R4CUCSGQNnJqPeAFBTH',
'payment_amounts': '5|3',
}
# test trigger old format
json_str = binascii.unhexlify(trigger_hex_old()).decode('utf-8')
assert gobject_json.extract_object(json_str) == expected
# test trigger new format
json_str = binascii.unhexlify(trigger_hex_new()).decode('utf-8')
assert gobject_json.extract_object(json_str) == expected
|
[] |
[] |
[
"SENTINEL_CONFIG"
] |
[]
|
["SENTINEL_CONFIG"]
|
python
| 1 | 0 | |
core/src/org/pentaho/di/core/util/EnvUtil.java
|
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.core.util;
import org.pentaho.di.core.Const;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.variables.Variables;
import org.pentaho.di.version.BuildVersion;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.StringTokenizer;
import java.util.TimeZone;
public class EnvUtil {
private static Properties env = null;
/**
   * Returns the properties from the user's kettle home directory.
   *
   * @param fileName
   *          the relative name of the properties file in the user's kettle directory.
* @return the map of properties.
*/
public static Properties readProperties( final String fileName ) throws KettleException {
if ( !new File( fileName ).exists() ) {
return readPropertiesByFullPath( Const.getKettleDirectory() + Const.FILE_SEPARATOR + fileName );
}
return readPropertiesByFullPath( fileName );
}
private static Properties readPropertiesByFullPath( final String fileName ) throws KettleException {
Properties props = new Properties();
InputStream is = null;
try {
is = new FileInputStream( fileName );
props.load( is );
} catch ( IOException ioe ) {
throw new KettleException( "Unable to read file '" + fileName + "'", ioe );
} finally {
if ( is != null ) {
try {
is.close();
} catch ( IOException e ) {
// ignore
}
}
}
return props;
}
/**
   * Adds the kettle properties to the global system properties.
*
* @throws KettleException
* in case the properties file can't be read.
*/
public static void environmentInit() throws KettleException {
// Workaround for a Mac OS/X Leopard issue where getContextClassLoader() is returning
// null when run from the eclipse IDE
// http://lists.apple.com/archives/java-dev/2007/Nov/msg00385.html - DM
// Moving this hack to the first place where the NPE is triggered so all entrypoints can be debugged in Mac Eclipse
if ( Thread.currentThread().getContextClassLoader() == null ) {
Thread.currentThread().setContextClassLoader( ClassLoader.getSystemClassLoader() );
}
Map<?, ?> kettleProperties = EnvUtil.readProperties( Const.KETTLE_PROPERTIES );
applyKettleProperties( kettleProperties );
// Also put some default values for obscure environment variables in there...
// Place-holders if you will.
//
System.getProperties().put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, "1" );
System.getProperties().put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER, "0" );
System.getProperties().put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NAME, "slave-trans-name" );
System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_COPYNR, "0" );
System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_NAME, "step-name" );
System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_PARTITION_ID, "partition-id" );
System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_PARTITION_NR, "0" );
System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_UNIQUE_COUNT, "1" );
System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_UNIQUE_NUMBER, "0" );
}
public static void applyKettleProperties( Map<?, ?> kettleProperties ) {
applyKettleProperties( kettleProperties, false );
}
public static void applyKettleProperties( Map<?, ?> kettleProperties, boolean override ) {
Variables variables = new Variables();
for ( Object key : kettleProperties.keySet() ) {
String variable = (String) key;
String value = variables.environmentSubstitute( (String) kettleProperties.get( key ) );
variables.setVariable( variable, value );
}
Properties systemProperties = System.getProperties();
// Copy the data over to the system properties...
//
for ( String variable : variables.listVariables() ) {
String value = variables.getVariable( variable );
// Too many developers bump into the issue of the kettle.properties editor setting
// an empty string in the kettle.properties file...
//
if ( variable.equals( Const.KETTLE_PLUGIN_CLASSES ) || variable.equals( Const.KETTLE_PLUGIN_PACKAGES ) ) {
String jvmValue = System.getProperty( variable );
if ( !Const.isEmpty( jvmValue ) ) {
if ( !Const.isEmpty( value ) ) {
value += "," + jvmValue;
} else {
value = jvmValue;
}
}
} else {
if ( !override && systemProperties.containsKey( variable ) ) {
continue;
}
}
System.setProperty( variable, value );
}
}
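  /**
   * Minimal usage sketch (illustrative only, not part of the original Pentaho API): shows how a
   * small, made-up map of variables would be pushed into the JVM system properties. The variable
   * name "SAMPLE_DIR" is purely hypothetical.
   */
  @SuppressWarnings( "unused" )
  private static void applyKettlePropertiesExample() {
    Properties sample = new Properties();
    sample.put( "SAMPLE_DIR", "${java.io.tmpdir}" ); // ${...} references are substituted first
    applyKettleProperties( sample, false ); // false: existing JVM system properties are kept
  }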
/**
* Add a number of internal variables to the Kettle Variables at the root.
*
   * @param prop the Properties object to add the internal variables to
*/
public static void addInternalVariables( Properties prop ) {
// Add a bunch of internal variables
// The Kettle version
prop.put( Const.INTERNAL_VARIABLE_KETTLE_VERSION, BuildVersion.getInstance().getVersion() );
// The Kettle build version
prop.put( Const.INTERNAL_VARIABLE_KETTLE_BUILD_VERSION, BuildVersion.getInstance().getVersion() );
// The Kettle build date
prop.put( Const.INTERNAL_VARIABLE_KETTLE_BUILD_DATE, BuildVersion.getInstance().getBuildDate() );
}
/**
   * Get System.getenv() in a reflection kind of way. The problem is that System.getenv() was deprecated in Java 1.4
   * and reinstated in 1.5. This method will get at getenv() using reflection and will return empty properties when
   * used in 1.4.
*
* @return Properties containing the environment. You're not meant to change any value in the returned Properties!
*
*/
@SuppressWarnings( { "unchecked" } )
private static final Properties getEnv() {
Class<?> system = System.class;
if ( env == null ) {
Map<String, String> returnMap = null;
try {
Method method = system.getMethod( "getenv" );
returnMap = (Map<String, String>) method.invoke( system );
} catch ( Exception ex ) {
returnMap = null;
}
env = new Properties();
if ( returnMap != null ) {
// We're on a VM with getenv() defined.
ArrayList<String> list = new ArrayList<String>( returnMap.keySet() );
for ( int i = 0; i < list.size(); i++ ) {
String var = list.get( i );
String val = returnMap.get( var );
env.setProperty( var, val );
}
}
}
return env;
}
/**
* @return an array of strings, made up of all the environment variables available in the VM, format var=value. To be
* used for Runtime.exec(cmd, envp)
*/
public static final String[] getEnvironmentVariablesForRuntimeExec() {
Properties sysprops = new Properties();
sysprops.putAll( getEnv() );
sysprops.putAll( System.getProperties() );
addInternalVariables( sysprops );
String[] envp = new String[sysprops.size()];
List<Object> list = new ArrayList<Object>( sysprops.keySet() );
for ( int i = 0; i < list.size(); i++ ) {
String var = (String) list.get( i );
String val = sysprops.getProperty( var );
envp[i] = var + "=" + val;
}
return envp;
}
/**
   * This method is written especially for weird JVMs like IBM's on AIX and OS/400. On these platforms, we notice that
   * environment variables have an extra double quote around them... This messes up the ability to specify things.
*
* @param key
* The key, the name of the environment variable to return
* @param def
* The default value to return in case the key can't be found
* @return The value of a System environment variable in the java virtual machine. If the key is not present, the
* variable is not defined and the default value is returned.
*/
public static final String getSystemPropertyStripQuotes( String key, String def ) {
String value = System.getProperty( key, def );
    if ( value.startsWith( "\"" ) && value.endsWith( "\"" ) && value.length() > 1 ) {
      // strip only the surrounding double quotes; substring's end index is exclusive
      return value.substring( 1, value.length() - 1 );
    }
return value;
}
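  /**
   * Minimal sketch (illustrative only, not part of the original API): for the hypothetical key
   * "SAMPLE_QUOTED_DIR" the quoted default below would come back as /home/pentaho, while values
   * without surrounding quotes are returned unchanged.
   */
  @SuppressWarnings( "unused" )
  private static String stripQuotesExample() {
    return getSystemPropertyStripQuotes( "SAMPLE_QUOTED_DIR", "\"/home/pentaho\"" );
  }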
/**
   * This method is written especially for weird JVM's like IBM's on AIX and OS/400.
*
* @param key
* The key, the name of the environment variable to return
* @param def
* The default value to return in case the key can't be found
* @return The value of a System environment variable in the java virtual machine. If the key is not present, the
* variable is not defined and the default value is returned.
*/
public static final String getSystemProperty( String key, String def ) {
String value = System.getProperty( key, def );
return value;
}
/**
* @param key
* The key, the name of the environment variable to return
* @return The value of a System environment variable in the java virtual machine. If the key is not present, the
* variable is not defined and null returned.
*/
public static final String getSystemProperty( String key ) {
return getSystemProperty( key, null );
}
/**
* Returns an available java.util.Locale object for the given localeCode.
*
* The localeCode code can be case insensitive, if it is available the method will find it and return it.
*
* @param localeCode
* @return java.util.Locale.
*/
public static Locale createLocale( String localeCode ) {
Locale resultLocale = null;
if ( localeCode != null ) {
StringTokenizer parser = new StringTokenizer( localeCode, "_" );
if ( parser.countTokens() == 2 ) {
resultLocale = new Locale( parser.nextToken(), parser.nextToken() );
} else {
resultLocale = new Locale( localeCode );
}
}
return resultLocale;
}
public static TimeZone createTimeZone( String timeZoneId ) {
TimeZone resultTimeZone = null;
if ( !Const.isEmpty( timeZoneId ) ) {
return TimeZone.getTimeZone( timeZoneId );
} else {
resultTimeZone = TimeZone.getDefault();
}
return resultTimeZone;
}
public static String[] getTimeZones() {
String[] timeZones = TimeZone.getAvailableIDs();
Arrays.sort( timeZones );
return timeZones;
}
public static String[] getLocaleList() {
Locale[] locales = Locale.getAvailableLocales();
String[] strings = new String[locales.length];
for ( int i = 0; i < strings.length; i++ ) {
strings[i] = locales[i].toString();
}
Arrays.sort( strings );
return strings;
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
pkg/analytics/uuid.go
|
/*
Copyright 2019 LitmusChaos Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package analytics
import (
"fmt"
"os"
clientset "github.com/litmuschaos/chaos-operator/pkg/kubernetes"
core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// ClientUUID contains clientUUID for analytics
var ClientUUID string
// getUID derives the UID of the chaos-operator deployment
// and uses it for the analytics
func getUID() (string, error) {
// creates kubernetes client
clients, err := clientset.CreateClientSet()
if err != nil {
return "", err
}
// deriving operator pod name & namespace
podName := os.Getenv("POD_NAME")
podNamespace := os.Getenv("POD_NAMESPACE")
if podName == "" || podNamespace == "" {
return podName, fmt.Errorf("POD_NAME or POD_NAMESPACE ENV not set")
}
// get operator pod details
pod, err := clients.CoreV1().Pods(podNamespace).Get(podName, v1.GetOptions{})
if err != nil {
return "", fmt.Errorf("unable to get %s pod in %s namespace", podName, podNamespace)
}
return getOperatorUID(pod, clients)
}
// getDeploymentName returns the deployment name, derived from the owner references
func getDeploymentName(pod *core_v1.Pod, clients *kubernetes.Clientset) (string, error) {
for _, own := range pod.OwnerReferences {
if own.Kind == "ReplicaSet" {
rs, err := clients.AppsV1().ReplicaSets(pod.Namespace).Get(own.Name, v1.GetOptions{})
if err != nil {
return "", err
}
for _, own := range rs.OwnerReferences {
if own.Kind == "Deployment" {
return own.Name, nil
}
}
}
}
return "", fmt.Errorf("no deployment found for %v pod", pod.Name)
}
// getOperatorUID returns the UID of the chaos-operator deployment
func getOperatorUID(pod *core_v1.Pod, clients *kubernetes.Clientset) (string, error) {
// derive the deployment name belongs to operator pod
deployName, err := getDeploymentName(pod, clients)
if err != nil {
return "", err
}
deploy, err := clients.AppsV1().Deployments(pod.Namespace).Get(deployName, v1.GetOptions{})
if err != nil {
return "", fmt.Errorf("unable to get %s deployment in %s namespace", deployName, pod.Namespace)
}
if string(deploy.UID) == "" {
return "", fmt.Errorf("unable to find the deployment uid")
}
return string(deploy.UID), nil
}
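// initClientUUID is an illustrative sketch (an assumption, not part of the original file):
// it shows how ClientUUID would typically be populated from getUID() at operator start-up,
// leaving it empty when the pod or deployment details cannot be resolved.
func initClientUUID() {
	uid, err := getUID()
	if err != nil {
		ClientUUID = ""
		return
	}
	ClientUUID = uid
}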
|
[
"\"POD_NAME\"",
"\"POD_NAMESPACE\""
] |
[] |
[
"POD_NAMESPACE",
"POD_NAME"
] |
[]
|
["POD_NAMESPACE", "POD_NAME"]
|
go
| 2 | 0 | |
mkn/downloader.py
|
import os
import hashlib
import logging
import requests
from tqdm import tqdm
from pathlib import Path
logger = logging.getLogger('mkn')
HOME_DIR = str(Path.home())
MNK_RESOURCE_DIR = os.getenv('mkn', os.path.join(HOME_DIR, 'mkn_resources'))
def download():
    resource_dir = MNK_RESOURCE_DIR
    Path(resource_dir).mkdir(parents=True, exist_ok=True)
    path = os.path.join(resource_dir, "mkn-resources.zip")
md5_value = "93b8e912d8a0172d0114a7e2de1114e1"
url = "https://github.com/mojikimino/mkn/releases/download/resources/mkn-resources.zip"
# data = open(path, "rb").read()
# _md5_value = hashlib.md5(data).hexdigest()
# print(value)
    if os.path.exists(path):
        logger.info("mkn resources already downloaded: %s", path)
else:
verbose = True
r = requests.get(url, stream=True)
with open(path, 'wb') as f:
            file_size = int(r.headers.get('content-length', 0))
default_chunk_size = 131072
desc = 'Downloading ' + url
with tqdm(
total=file_size,
unit='B',
unit_scale=True,
disable=not verbose,
desc=desc
) as pbar:
for chunk in r.iter_content(chunk_size=default_chunk_size):
if chunk:
f.write(chunk)
f.flush()
pbar.update(len(chunk))
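# Illustrative sketch (an assumption, not part of the original module): verifying the
# downloaded archive against the md5 recorded above, mirroring the commented-out
# checksum lines inside download().
def verify_md5(path, expected_md5):
    with open(path, "rb") as f:
        digest = hashlib.md5(f.read()).hexdigest()
    return digest == expected_md5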
if __name__ == "__main__":
download()
|
[] |
[] |
[
"mkn"
] |
[]
|
["mkn"]
|
python
| 1 | 0 | |
mpd-fzf.go
|
package main
import (
"bufio"
"compress/gzip"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
runewidth "github.com/mattn/go-runewidth"
)
// A forward slash is one of the very few characters not allowed in file names
const delimiter string = "////"
func fail(err error) {
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func failOn(b bool, message string) {
if b {
fail(errors.New(message))
}
}
func keyval(line string) (string, string) {
i := strings.Index(line, ":")
if i == -1 || i == len(line)-1 {
return line, ""
}
return line[:i], line[i+2:]
}
type Track struct {
Album string
Artist string
AlbumArtist string
Date string
Filename string
Genre string
Path string
Time string
Title string
}
func (t *Track) Set(key, value string) {
switch key {
case "Album":
t.Album = value
case "Artist":
// Sometimes Artist is a very long string of names,
// don't discard them completely
if len(value) > 40 {
t.Artist = value[:40]
} else {
t.Artist = value
}
case "AlbumArtist":
// Sometimes AlbumArtist is a very long string of names, discard those
if len(value) < 40 {
t.AlbumArtist = value
}
case "Date":
t.Date = value
case "Genre":
t.Genre = value
case "Time":
t.Time = formatDurationString(value)
case "Title":
t.Title = value
}
}
func formatDurationString(str string) string {
duration, err := time.ParseDuration(str + "s")
if err != nil {
return ""
}
zero := time.Time{}
format := zero.Add(duration).Format("04:05")
if duration > time.Hour {
format = fmt.Sprintf("%d:%s", int(duration.Hours()), format)
}
return "(" + format + ")"
}
func withoutExt(path string) string {
basename := filepath.Base(path)
return strings.TrimSuffix(basename, filepath.Ext(basename))
}
func truncateAndPad(s string, maxWidth int, suffix string) string {
if maxWidth < 0 {
panic("suffix length greater than maxWidth chars")
}
return runewidth.FillRight(runewidth.Truncate(s, maxWidth, suffix), maxWidth)
}
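// truncateAndPadExample is an illustrative sketch (not part of the original program):
// runewidth is used above so that double-width runes (for example CJK song titles) are
// truncated by display width rather than by byte or rune count.
func truncateAndPadExample() string {
	// "こんにちは" occupies 10 display cells, so it is cut down to fit an 8-cell column.
	return truncateAndPad("こんにちは", 8, "..")
}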
func trackFormatter() func(*Track) string {
var width, ignored int
// tmux pane_width > $COLUMNS > stty size > default 80
cmd := exec.Command("tmux", "display-message", "-p", "#{pane_width}")
out, err := cmd.Output()
if err == nil {
_, err = fmt.Sscanf(string(out), "%d\n", &width)
}
if err != nil {
width, err = strconv.Atoi(os.Getenv("COLUMNS"))
}
if err != nil {
cmd := exec.Command("stty", "size")
cmd.Stdin = os.Stdin
out, err := cmd.Output()
if err == nil {
fmt.Sscanf(string(out), "%d %d\n", &ignored, &width)
}
}
if width <= 20 {
// A sane enough default/fallback
width = 80
}
contentLen := width - 5 // remove 5 for fzf display
return func(t *Track) string {
name := t.Title
if t.Title == "" {
name = withoutExt(t.Filename)
}
str := name
// TODO -- Some kind of column formatting? If the terminal is wide?
if t.AlbumArtist != "" && t.Artist != "" && t.AlbumArtist != t.Artist {
str = t.AlbumArtist + " - " + name + " // " + t.Artist
} else if t.AlbumArtist != "" {
str = t.AlbumArtist + " - " + name
} else if t.Artist != "" {
str = t.Artist + " - " + name
}
if t.Album != "" {
str += " {" + t.Album + "}"
}
str = truncateAndPad(str, contentLen-len(t.Time), "..")
return str + t.Time + delimiter + t.Path
}
}
func groupByArtist(tracks []*Track) []*Track {
// group by artist, then shuffle to stop same order, but keep artist together
artists := map[string][]*Track{}
for _, t := range tracks {
artists[t.Artist] = append(artists[t.Artist], t)
}
shuffled := make([]*Track, len(tracks))
i := 0
for _, tracks := range artists {
for _, t := range tracks {
shuffled[i] = t
i += 1
}
}
return shuffled
}
func parse(scan *bufio.Scanner) []*Track {
tracks, track := []*Track{}, new(Track)
dirs := []string{}
for scan.Scan() {
key, value := keyval(scan.Text())
switch key {
case "directory":
dirs = append(dirs, value)
case "end":
failOn(len(dirs) <= 0, "Invalid directory state. Corrupted database?")
dirs = dirs[:len(dirs)-1]
case "Artist", "Album", "AlbumArtist", "Date", "Genre", "Time", "Title":
track.Set(key, value)
case "song_begin":
track.Filename = value
track.Path = filepath.Join(append(dirs, track.Filename)...)
case "song_end":
tracks = append(tracks, track)
track = new(Track)
}
}
fail(scan.Err())
return tracks
}
func expandUser(path, home string) string {
	if strings.HasPrefix(path, "~/") {
path = strings.Replace(path, "~", home, 1)
}
return path
}
func findDbFile() string {
usr, err := user.Current()
fail(err)
home := usr.HomeDir
paths := []string{
filepath.Join(os.Getenv("XDG_CONFIG_HOME"), "/mpd/mpd.conf"),
filepath.Join(home, ".config", "/mpd/mpd.conf"),
filepath.Join(home, ".mpdconf"),
"/etc/mpd.conf",
"/usr/local/etc/musicpd.conf",
}
var f *os.File
var confpath string
for _, path := range paths {
f, err = os.Open(path)
if err == nil {
confpath = path
break
}
}
failOn(f == nil, "No config file found")
expDb := regexp.MustCompile(`^\s*db_file\s*"([^"]+)"`)
scan := bufio.NewScanner(f)
var dbFile string
for scan.Scan() {
m := expDb.FindStringSubmatch(scan.Text())
if m != nil {
dbFile = expandUser(m[1], home)
}
}
fail(scan.Err())
fail(f.Close())
failOn(dbFile == "", fmt.Sprintf("Could not find 'db_file' in configuration file '%s'", confpath))
return dbFile
}
func fzfCheckExit(err error) {
if err != nil {
if exerr, ok := err.(*exec.ExitError); ok {
if status, ok := exerr.Sys().(syscall.WaitStatus); ok {
// FZF returns 130 when killed by ctrl+C
if status.ExitStatus() == 130 {
os.Exit(0)
} else {
fail(err)
}
} else {
fail(err)
}
} else {
fail(err)
}
}
}
func parseFzfOutput(output []byte) []string {
songs := strings.Split(string(output), "\n")
if len(songs) == 0 || songs[0] == "" {
return []string{}
}
if songs[len(songs)-1] == "" {
songs = songs[:len(songs)-1]
}
for i, s := range songs {
songs[i] = s[strings.LastIndex(s, delimiter)+len(delimiter):]
}
return songs
}
func fzfSongs(tracks []*Track) []string {
format := trackFormatter()
fzf := exec.Command("fzf-tmux", "--no-hscroll", "-m")
fzf.Stderr = os.Stderr
in, err := fzf.StdinPipe()
fail(err)
out, err := fzf.StdoutPipe()
fail(err)
fail(fzf.Start())
for _, t := range tracks {
fmt.Fprintln(in, format(t))
}
fail(in.Close())
fzfOutput, err := ioutil.ReadAll(out)
fail(err)
fzfCheckExit(fzf.Wait())
return parseFzfOutput(fzfOutput)
}
func removeSongs(songs []string) error {
fnames := make(map[string]struct{})
for _, s := range songs {
if s != "" {
fnames[s] = struct{}{}
}
}
mpc := exec.Command("mpc", "playlist", "-f", `%position% %file%`)
out, err := mpc.Output()
if err != nil {
return err
}
mpc = exec.Command("mpc", "del")
in, _ := mpc.StdinPipe()
if err = mpc.Start(); err != nil {
in.Close()
return err
}
for _, s := range strings.Split(string(out), "\n") {
posFname := strings.SplitN(s, " ", 2)
if len(posFname) == 1 {
continue
}
if _, ok := fnames[posFname[1]]; ok {
fmt.Fprintln(in, posFname[0])
}
}
if err = in.Close(); err != nil {
return err
}
return mpc.Wait()
}
func insertSongs(songs []string) error {
mpc := exec.Command("mpc", "insert")
in, _ := mpc.StdinPipe()
if err := mpc.Start(); err != nil {
in.Close()
return err
}
// Reverse order isn't required when adding a bunch of songs from stdin
for _, s := range songs {
fmt.Fprintln(in, s)
}
if err := in.Close(); err != nil {
return err
}
return mpc.Wait()
}
func readTracks() []*Track {
dbFile := findDbFile()
f, err := os.Open(dbFile)
fail(err)
gz, err := gzip.NewReader(f)
fail(err)
scan := bufio.NewScanner(gz)
tracks := groupByArtist(parse(scan))
fail(gz.Close())
fail(f.Close())
return tracks
}
func main() {
songs := fzfSongs(readTracks())
if len(songs) == 0 {
return
}
fail(removeSongs(songs))
fail(insertSongs(songs))
}
|
[
"\"COLUMNS\"",
"\"XDG_CONFIG_HOME\""
] |
[] |
[
"XDG_CONFIG_HOME",
"COLUMNS"
] |
[]
|
["XDG_CONFIG_HOME", "COLUMNS"]
|
go
| 2 | 0 | |
monai/config/deviceconfig.py
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from collections import OrderedDict
import monai
import numpy as np
import torch
try:
import ignite
ignite_version = ignite.__version__
del ignite
except (ImportError, AttributeError):
ignite_version = "NOT INSTALLED or UNKNOWN VERSION."
try:
import nibabel
nibabel_version = nibabel.__version__
del nibabel
except (ImportError, AttributeError):
nibabel_version = "NOT INSTALLED or UNKNOWN VERSION."
try:
import skimage
skimage_version = skimage.__version__
del skimage
except (ImportError, AttributeError):
skimage_version = "NOT INSTALLED or UNKNOWN VERSION."
try:
import PIL
PIL_version = PIL.__version__
del PIL
except (ImportError, AttributeError):
PIL_version = "NOT INSTALLED or UNKNOWN VERSION."
try:
import tensorboard
tensorboard_version = tensorboard.__version__
del tensorboard
except (ImportError, AttributeError):
tensorboard_version = "NOT INSTALLED or UNKNOWN VERSION."
def get_config_values():
"""
Read the package versions into a dictionary.
"""
output = OrderedDict()
output["MONAI"] = monai.__version__
output["Python"] = sys.version.replace("\n", " ")
output["Numpy"] = np.version.full_version
output["Pytorch"] = torch.__version__
return output
def get_optional_config_values():
"""
Read the optional package versions into a dictionary.
"""
output = OrderedDict()
output["Pytorch Ignite"] = ignite_version
output["Nibabel"] = nibabel_version
output["scikit-image"] = skimage_version
output["Pillow"] = PIL_version
output["Tensorboard"] = tensorboard_version
return output
def print_config(file=sys.stdout):
"""
Print the package versions to `file`.
Defaults to `sys.stdout`.
"""
for k, v in get_config_values().items():
print(f"{k} version: {v}", file=file, flush=True)
print("\nOptional dependencies:", file=file, flush=True)
for k, v in get_optional_config_values().items():
print(f"{k} version: {v}", file=file, flush=True)
print("\nFor details about installing the optional dependencies, please visit:", file=file, flush=True)
print(
" http://monai.rtfd.io/en/latest/installation.html#installing-the-recommended-dependencies",
file=file,
flush=True,
)
def set_visible_devices(*dev_inds):
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, dev_inds))
def get_torch_version_tuple():
"""
Returns:
tuple of ints represents the pytorch major/minor version.
"""
return tuple([int(x) for x in torch.__version__.split(".")[:2]])
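# Illustrative sketch (an assumption, not part of the original module): limiting the
# process to the first GPU and printing the collected version information.
def example_configuration():
    set_visible_devices(0)  # only device 0 stays visible via CUDA_VISIBLE_DEVICES
    print_config()  # report MONAI and dependency versions to stdout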
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
ext/context.py
|
import discord
from discord.ext import commands
import aiohttp
import asyncio
from colorthief import ColorThief
import io
import os
class CustomContext(commands.Context):
"""Custom Context class to provide utility."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def delete(self):
"""shortcut"""
return self.message.delete()
async def get_ban(self, name_or_id):
"""Helper function to retrieve a banned user"""
for ban in await self.guild.bans():
if name_or_id.isdigit():
if ban.user.id == int(name_or_id):
return ban
if name_or_id.lower() in str(ban.user).lower():
return ban
async def purge(self, *args, **kwargs):
"""Shortcut to channel.purge, preset for selfbots."""
kwargs.setdefault("bulk", False)
await self.channel.purge(*args, **kwargs)
async def get_dominant_color(self, url=None, quality=10):
"""Returns the dominant color of an image from a url"""
maybe_col = os.environ.get("COLOR")
url = url or self.author.avatar_url
if maybe_col:
raw = int(maybe_col.strip("#"), 16)
return discord.Color(value=raw)
try:
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
image = await resp.read()
except:
return discord.Color.default()
with io.BytesIO(image) as f:
try:
color = ColorThief(f).get_color(quality=quality)
except:
return discord.Color.dark_grey()
return discord.Color.from_rgb(*color)
async def success(self, msg=None, delete=False):
        if delete:
            await self.message.delete()
if msg:
await self.send(msg)
else:
await self.message.add_reaction("✔")
async def failure(self, msg=None):
if msg:
await self.send(msg)
else:
await self.message.add_reaction("❌")
    @staticmethod
    def paginate(text: str):
        """Split text into pages of at most 1980 characters each."""
        last = 0
        pages = []
        for curr in range(1980, len(text), 1980):
            pages.append(text[last:curr])
            last = curr
        pages.append(text[last:])
return list(filter(lambda a: a != "", pages))
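# Illustrative sketch (an assumption, not part of the original cog): sending long text
# as several messages, each kept below Discord's 2000-character limit via paginate().
async def send_paginated(ctx, text):
    for page in CustomContext.paginate(text):
        await ctx.send(page)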
|
[] |
[] |
[
"COLOR"
] |
[]
|
["COLOR"]
|
python
| 1 | 0 | |
config/parser.go
|
package config
import (
"fmt"
"os"
"path"
"path/filepath"
"gopkg.in/yaml.v2"
)
type PRSectionConfig struct {
Title string
Filters string
Limit *int `yaml:"limit,omitempty"`
}
type PreviewConfig struct {
Open bool
Width int
}
type Defaults struct {
Preview PreviewConfig
PrsLimit int `yaml:"prsLimit"`
}
type Config struct {
PRSections []PRSectionConfig `yaml:"prSections"`
Defaults Defaults
}
const PrsDir = "prs"
const ConfigFileName = "config.yml"
type configError struct {
configDir string
parser ConfigParser
err error
}
type ConfigParser struct{}
func (parser ConfigParser) getDefaultConfig() Config {
return Config{
Defaults: Defaults{
Preview: PreviewConfig{
Open: true,
Width: 50,
},
PrsLimit: 20,
},
PRSections: []PRSectionConfig{
{
Title: "My Pull Requests",
Filters: "is:open author:@me",
},
{
Title: "Needs My Review",
Filters: "is:open review-requested:@me",
},
{
Title: "Subscribed",
Filters: "is:open -author:@me repo:cli/cli repo:dlvhdr/gh-prs",
},
},
}
}
func (parser ConfigParser) getDefaultConfigYamlContents() string {
defaultConfig := parser.getDefaultConfig()
yaml, _ := yaml.Marshal(defaultConfig)
return string(yaml)
}
func (e configError) Error() string {
return fmt.Sprintf(
`Couldn't find a config.yml configuration file.
Create one under: %s
Example of a config.yml file:
%s
For more info, go to https://github.com/dlvhdr/gh-prs
press q to exit.
Original error: %v`,
path.Join(e.configDir, PrsDir, ConfigFileName),
string(e.parser.getDefaultConfigYamlContents()),
e.err,
)
}
func (parser ConfigParser) writeDefaultConfigContents(newConfigFile *os.File) error {
_, err := newConfigFile.WriteString(parser.getDefaultConfigYamlContents())
if err != nil {
return err
}
return nil
}
func (parser ConfigParser) createConfigFileIfMissing(configFilePath string) error {
if _, err := os.Stat(configFilePath); os.IsNotExist(err) {
newConfigFile, err := os.OpenFile(configFilePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
return err
}
defer newConfigFile.Close()
return parser.writeDefaultConfigContents(newConfigFile)
}
return nil
}
func (parser ConfigParser) getConfigFileOrCreateIfMissing() (*string, error) {
var err error
configDir := os.Getenv("XDG_CONFIG_HOME")
if configDir == "" {
configDir, err = os.UserConfigDir()
if err != nil {
return nil, configError{parser: parser, configDir: configDir, err: err}
}
}
prsConfigDir := filepath.Join(configDir, PrsDir)
err = os.MkdirAll(prsConfigDir, os.ModePerm)
if err != nil {
return nil, configError{parser: parser, configDir: configDir, err: err}
}
configFilePath := filepath.Join(prsConfigDir, ConfigFileName)
err = parser.createConfigFileIfMissing(configFilePath)
if err != nil {
return nil, configError{parser: parser, configDir: configDir, err: err}
}
return &configFilePath, nil
}
type parsingError struct {
err error
}
func (e parsingError) Error() string {
return fmt.Sprintf("failed parsing config.yml: %v", e.err)
}
func (parser ConfigParser) readConfigFile(path string) (Config, error) {
config := parser.getDefaultConfig()
data, err := os.ReadFile(path)
if err != nil {
return config, configError{parser: parser, configDir: path, err: err}
}
err = yaml.Unmarshal([]byte(data), &config)
return config, err
}
func initParser() ConfigParser {
return ConfigParser{}
}
func ParseConfig() (Config, error) {
parser := initParser()
var config Config
var err error
configFilePath, err := parser.getConfigFileOrCreateIfMissing()
if err != nil {
return config, parsingError{err: err}
}
config, err = parser.readConfigFile(*configFilePath)
if err != nil {
return config, parsingError{err: err}
}
return config, nil
}
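// loadOrExit is an illustrative usage sketch (an assumption, not part of the original file):
// callers would typically parse the configuration once at start-up and abort with the
// descriptive configError/parsingError message when that fails.
func loadOrExit() Config {
	cfg, err := ParseConfig()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	return cfg
}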
|
[
"\"XDG_CONFIG_HOME\""
] |
[] |
[
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_CONFIG_HOME"]
|
go
| 1 | 0 | |
framework/src/main/java/org/checkerframework/framework/util/CheckerMain.java
|
package org.checkerframework.framework.util;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.jar.JarInputStream;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.zip.ZipEntry;
import org.checkerframework.checker.signature.qual.FullyQualifiedName;
import org.checkerframework.javacutil.BugInCF;
import org.checkerframework.javacutil.SystemUtil;
/**
* This class behaves similarly to javac. CheckerMain does the following:
*
* <ul>
* <li>add the {@code javac.jar} to the runtime classpath of the process that runs the Checker
* Framework.
* <li>parse and implement any special options used by the Checker Framework, e.g., using
* "shortnames" for annotation processors
* <li>pass all remaining command-line arguments to the real javac
* </ul>
*
* To debug this class, use the {@code -AoutputArgsToFile=FILENAME} command-line argument or {@code
* -AoutputArgsToFile=-} to output to standard out.
*
* <p>"To run the Checker Framework" really means to run java, where the program being run is a
* special version of javac, and javac is passed a {@code -processor} command-line argument that
* mentions a Checker Framework checker. There are 5 relevant classpaths: The classpath and
* bootclasspath when running java, and the classpath, bootclasspath, and processorpath used by
* javac. The latter three are the only important ones.
*
* <p>Note for developers: Try to limit the work done (and options interpreted) by CheckerMain,
* because its functionality is not available to users who choose not to use the Checker Framework
* javac script.
*/
public class CheckerMain {
/** Any exception thrown by the Checker Framework escapes to the command line. */
public static void main(String[] args) {
final File pathToThisJar = new File(findPathTo(CheckerMain.class, false));
ArrayList<String> alargs = new ArrayList<>(args.length);
alargs.addAll(Arrays.asList(args));
final CheckerMain program = new CheckerMain(pathToThisJar, alargs);
final int exitStatus = program.invokeCompiler();
System.exit(exitStatus);
}
/** The path to the javacJar to use. */
protected final File javacJar;
/** The path to the jar containing CheckerMain.class (i.e. checker.jar). */
protected final File checkerJar;
/** The path to checker-qual.jar. */
protected final File checkerQualJar;
private final List<String> compilationBootclasspath;
private final List<String> runtimeClasspath;
private final List<String> jvmOpts;
/**
* Each element is either a classpath element (a directory or jar file) or is a classpath
* (containing elements separated by File.pathSeparator). To produce the final classpath,
* concatenate them all (separated by File.pathSeparator).
*/
private final List<String> cpOpts;
/** Processor path options. */
private final List<String> ppOpts;
/** Arguments to the Checker Framework. */
private final List<String> toolOpts;
/** Command-line argument files (specified with @ on the command line). */
private final List<File> argListFiles;
/**
* Option name for specifying an alternative checker-qual.jar location. The accompanying value
* MUST be the path to the jar file (NOT the path to its encompassing directory)
*/
public static final String CHECKER_QUAL_PATH_OPT = "-checkerQualJar";
/**
* Option name for specifying an alternative javac.jar location. The accompanying value MUST be
* the path to the jar file (NOT the path to its encompassing directory)
*/
public static final String JAVAC_PATH_OPT = "-javacJar";
/**
* Option name for specifying an alternative jdk.jar location. The accompanying value MUST be
* the path to the jar file (NOT the path to its encompassing directory)
*/
public static final String JDK_PATH_OPT = "-jdkJar";
/**
* Construct all the relevant file locations and Java version given the path to this jar and a
* set of directories in which to search for jars.
*/
public CheckerMain(final File checkerJar, final List<String> args) {
this.checkerJar = checkerJar;
final File searchPath = checkerJar.getParentFile();
replaceShorthandProcessor(args);
argListFiles = collectArgFiles(args);
this.checkerQualJar =
extractFileArg(
CHECKER_QUAL_PATH_OPT, new File(searchPath, "checker-qual.jar"), args);
this.javacJar = extractFileArg(JAVAC_PATH_OPT, new File(searchPath, "javac.jar"), args);
this.compilationBootclasspath = createCompilationBootclasspath(args);
this.runtimeClasspath = createRuntimeClasspath(args);
this.jvmOpts = extractJvmOpts(args);
this.cpOpts = createCpOpts(args);
this.ppOpts = createPpOpts(args);
this.toolOpts = args;
assertValidState();
}
/** Assert that required jars exist. */
protected void assertValidState() {
if (SystemUtil.getJreVersion() < 9) {
assertFilesExist(Arrays.asList(javacJar, checkerJar, checkerQualJar));
} else {
// TODO: once the jdk11 jars exist, check for them.
assertFilesExist(Arrays.asList(checkerJar, checkerQualJar));
}
}
public void addToClasspath(List<String> cpOpts) {
this.cpOpts.addAll(cpOpts);
}
public void addToProcessorpath(List<String> ppOpts) {
this.ppOpts.addAll(ppOpts);
}
public void addToRuntimeClasspath(List<String> runtimeClasspathOpts) {
this.runtimeClasspath.addAll(runtimeClasspathOpts);
}
protected List<String> createRuntimeClasspath(final List<String> argsList) {
return new ArrayList<>(Arrays.asList(javacJar.getAbsolutePath()));
}
/**
* Returns the compilation bootclasspath from {@code argsList}.
*
* @param argsList args to add
* @return the compilation bootclasspath from {@code argsList}
*/
protected List<String> createCompilationBootclasspath(final List<String> argsList) {
return extractBootClassPath(argsList);
}
protected List<String> createCpOpts(final List<String> argsList) {
final List<String> extractedOpts = extractCpOpts(argsList);
extractedOpts.add(0, this.checkerQualJar.getAbsolutePath());
return extractedOpts;
}
// Assumes that createCpOpts has already been run.
protected List<String> createPpOpts(final List<String> argsList) {
final List<String> extractedOpts = extractPpOpts(argsList);
if (extractedOpts.isEmpty()) {
// If processorpath is not provided, then javac uses the classpath.
// CheckerMain always supplies a processorpath, so if the user
// didn't specify a processorpath, then use the classpath.
extractedOpts.addAll(this.cpOpts);
}
extractedOpts.add(0, this.checkerJar.getAbsolutePath());
return extractedOpts;
}
/**
* Return the arguments that start with @ and therefore are files that contain javac arguments.
*
* @param args a list of command-line arguments; is not modified
* @return a List of files representing all arguments that started with @
*/
protected List<File> collectArgFiles(final List<String> args) {
final List<File> argListFiles = new ArrayList<>();
for (final String arg : args) {
if (arg.startsWith("@")) {
argListFiles.add(new File(arg.substring(1)));
}
}
return argListFiles;
}
/**
* Remove the argument given by argumentName and the subsequent value from the list args if
* present. Return the subsequent value.
*
* @param argumentName a command-line option name whose argument to extract
* @param alternative default value to return if argumentName does not appear in args
* @param args the current list of arguments
* @return the string that follows argumentName if argumentName is in args, or alternative if
* argumentName is not present in args
*/
protected static String extractArg(
final String argumentName, final String alternative, final List<String> args) {
int i = args.indexOf(argumentName);
if (i == -1) {
return alternative;
} else if (i == args.size() - 1) {
throw new BugInCF(
"Command line contains " + argumentName + " but no value following it");
} else {
args.remove(i);
return args.remove(i);
}
}
/**
* Remove the argument given by argumentName and the subsequent value from the list args if
* present. Return the subsequent value wrapped as a File.
*
* @param argumentName argument to extract
* @param alternative file to return if argumentName is not found in args
* @param args the current list of arguments
* @return the string that follows argumentName wrapped as a File if argumentName is in args or
* alternative if argumentName is not present in args
*/
protected static File extractFileArg(
final String argumentName, final File alternative, final List<String> args) {
final String filePath = extractArg(argumentName, null, args);
if (filePath == null) {
return alternative;
} else {
return new File(filePath);
}
}
/**
* Find all args that match the given pattern and extract their index 1 group. Add all the index
* 1 groups to the returned list. Remove all matching args from the input args list.
*
* @param pattern a pattern with at least one matching group
* @param allowEmpties whether or not to add empty group(1) matches to the returned list
* @param args the arguments to extract from
* @return a list of arguments from the first group that matched the pattern for each input args
* or the empty list if there were none
*/
protected static List<String> extractOptWithPattern(
final Pattern pattern, boolean allowEmpties, final List<String> args) {
final List<String> matchedArgs = new ArrayList<>();
int i = 0;
while (i < args.size()) {
final Matcher matcher = pattern.matcher(args.get(i));
if (matcher.matches()) {
final String arg = matcher.group(1).trim();
if (!arg.isEmpty() || allowEmpties) {
matchedArgs.add(arg);
}
args.remove(i);
} else {
i++;
}
}
return matchedArgs;
}
/**
* A pattern to match bootclasspath prepend entries, used to construct one {@code
* -Xbootclasspath/p:} command-line argument.
*/
protected static final Pattern BOOT_CLASS_PATH_REGEX =
Pattern.compile("^(?:-J)?-Xbootclasspath/p:(.*)$");
// TODO: Why does this treat -J and -J-X the same? They have different semantics, don't they?
/**
* Remove all {@code -Xbootclasspath/p:} or {@code -J-Xbootclasspath/p:} arguments from args and
* add them to the returned list.
*
* @param args the arguments to extract from
* @return all non-empty arguments matching BOOT_CLASS_PATH_REGEX or an empty list if there were
* none
*/
protected static List<String> extractBootClassPath(final List<String> args) {
return extractOptWithPattern(BOOT_CLASS_PATH_REGEX, false, args);
}
/** Matches all {@code -J} arguments. */
protected static final Pattern JVM_OPTS_REGEX = Pattern.compile("^(?:-J)(.*)$");
/**
* Remove all {@code -J} arguments from {@code args} and add them to the returned list (without
* the {@code -J} prefix).
*
* @param args the arguments to extract from
* @return all {@code -J} arguments (without the {@code -J} prefix) or an empty list if there
* were none
*/
protected static List<String> extractJvmOpts(final List<String> args) {
return extractOptWithPattern(JVM_OPTS_REGEX, false, args);
}
/**
* Return the last {@code -cp} or {@code -classpath} option. If no {@code -cp} or {@code
* -classpath} arguments were present, then return the CLASSPATH environment variable (if set)
* followed by the current directory.
*
* <p>Also removes all {@code -cp} and {@code -classpath} options from args.
*
* @param args a list of arguments to extract from; is side-effected by this
* @return collection of classpaths to concatenate to use when calling javac.jar
*/
protected static List<String> extractCpOpts(final List<String> args) {
List<String> actualArgs = new ArrayList<>();
String lastCpArg = null;
for (int i = 0; i < args.size(); i++) {
if ((args.get(i).equals("-cp") || args.get(i).equals("-classpath"))
&& (i + 1 < args.size())) {
args.remove(i);
// Every classpath entry overrides the one before it.
lastCpArg = args.remove(i);
// re-process whatever is currently at element i
i--;
}
}
// The logic below is exactly what the javac script does. If no command-line classpath is
// specified, use the "CLASSPATH" environment variable followed by the current directory.
if (lastCpArg == null) {
final String systemClassPath = System.getenv("CLASSPATH");
if (systemClassPath != null && !systemClassPath.trim().isEmpty()) {
actualArgs.add(systemClassPath.trim());
}
actualArgs.add(".");
} else {
actualArgs.add(lastCpArg);
}
return actualArgs;
}
/**
* Remove the {@code -processorpath} options and their arguments from args. Return the last
* argument.
*
* @param args a list of arguments to extract from
* @return the arguments that should be put on the processorpath when calling javac.jar
*/
protected static List<String> extractPpOpts(final List<String> args) {
List<String> actualArgs = new ArrayList<>();
String path = null;
for (int i = 0; i < args.size(); i++) {
if (args.get(i).equals("-processorpath") && (i + 1 < args.size())) {
args.remove(i);
path = args.remove(i);
// re-process whatever is currently at element i
i--;
}
}
if (path != null) {
actualArgs.add(path);
}
return actualArgs;
}
protected void addMainToArgs(final List<String> args) {
args.add("com.sun.tools.javac.Main");
}
/** Invoke the compiler with all relevant jars on its classpath and/or bootclasspath. */
public List<String> getExecArguments() {
List<String> args = new ArrayList<>(jvmOpts.size() + cpOpts.size() + toolOpts.size() + 7);
// TODO: do we need java.exe on Windows?
final String java = "java";
args.add(java);
if (SystemUtil.getJreVersion() == 8) {
args.add("-Xbootclasspath/p:" + String.join(File.pathSeparator, runtimeClasspath));
} else {
args.addAll(
Arrays.asList(
"--illegal-access=warn",
"--add-opens",
"jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED"));
}
args.add("-classpath");
args.add(String.join(File.pathSeparator, runtimeClasspath));
args.add("-ea");
// com.sun.tools needs to be enabled separately
args.add("-ea:com.sun.tools...");
args.addAll(jvmOpts);
addMainToArgs(args);
if (!argsListHasClassPath(argListFiles)) {
args.add("-classpath");
args.add(quote(concatenatePaths(cpOpts)));
}
if (!argsListHasProcessorPath(argListFiles)) {
args.add("-processorpath");
args.add(quote(concatenatePaths(ppOpts)));
}
if (SystemUtil.getJreVersion() == 8) {
// No classes on the compilation bootclasspath will be loaded
// during compilation, but the classes are read by the compiler
// without loading them. The compiler assumes that any class on
// this bootclasspath will be on the bootclasspath of the JVM used
// to later run the classfiles that Javac produces.
args.add(
"-Xbootclasspath/p:"
+ String.join(File.pathSeparator, compilationBootclasspath));
// We currently provide a Java 8 JDK and want to be runnable
// on a Java 8 JVM. So set source/target to 8.
args.add("-source");
args.add("8");
args.add("-target");
args.add("8");
}
args.addAll(toolOpts);
return args;
}
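    /**
     * Illustrative sketch (not part of the original class): the command assembled by {@link
     * #getExecArguments()} is conceptually "java -classpath RUNTIME_CLASSPATH
     * com.sun.tools.javac.Main -processorpath checker.jar ...", so printing it is a quick way to
     * debug classpath problems.
     */
    private void printExecArgumentsForDebugging() {
        System.out.println(String.join(" ", getExecArguments()));
    }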
/** Given a list of paths, concatenate them to form a single path. Also expand wildcards. */
private String concatenatePaths(List<String> paths) {
List<String> elements = new ArrayList<>();
for (String path : paths) {
for (String element : path.split(File.pathSeparator)) {
elements.addAll(expandWildcards(element));
}
}
return String.join(File.pathSeparator, elements);
}
/** The string "/*" (on Unix). */
private static final String FILESEP_STAR = File.separator + "*";
/**
* Given a path element that might be a wildcard, return a list of the elements it expands to.
* If the element isn't a wildcard, return a singleton list containing the argument.
*/
private List<String> expandWildcards(String pathElement) {
if (pathElement.equals("*")) {
return jarFiles(".");
} else if (pathElement.endsWith(FILESEP_STAR)) {
return jarFiles(pathElement.substring(0, pathElement.length() - 1));
} else if (pathElement.equals("")) {
return Collections.emptyList();
} else {
return Collections.singletonList(pathElement);
}
}
/** Return all the .jar and .JAR files in the given directory. */
private List<String> jarFiles(String directory) {
File dir = new File(directory);
File[] jarFiles =
dir.listFiles((d, name) -> name.endsWith(".jar") || name.endsWith(".JAR"));
List<String> result = new ArrayList<>(jarFiles.length);
for (File jarFile : jarFiles) {
result.add(jarFile.toString());
}
return result;
}
/** Invoke the compiler with all relevant jars on its classpath and/or bootclasspath. */
public int invokeCompiler() {
List<String> args = getExecArguments();
for (int i = 0; i < args.size(); i++) {
String arg = args.get(i);
if (arg.startsWith("-AoutputArgsToFile=")) {
String fileName = arg.substring(19);
args.remove(i);
outputArgumentsToFile(fileName, args);
break;
}
}
// Actually invoke the compiler
return ExecUtil.execute(args.toArray(new String[args.size()]), System.out, System.err);
}
private static void outputArgumentsToFile(String outputFilename, List<String> args) {
if (outputFilename != null) {
String errorMessage = null;
try {
PrintWriter writer =
(outputFilename.equals("-")
? new PrintWriter(System.out)
: new PrintWriter(outputFilename, "UTF-8"));
for (int i = 0; i < args.size(); i++) {
String arg = args.get(i);
// We would like to include the filename of the argfile instead of its contents.
// The problem is that the file will sometimes disappear by the time the user
// can look at or run the resulting script. Maven deletes the argfile very
// shortly after it has been handed off to javac, for example. Ideally we would
// print the argfile filename as a comment but the resulting file couldn't then
// be run as a script on Unix or Windows.
if (arg.startsWith("@")) {
// Read argfile and include its parameters in the output file.
String inputFilename = arg.substring(1);
BufferedReader br = new BufferedReader(new FileReader(inputFilename));
String line;
while ((line = br.readLine()) != null) {
writer.print(line);
writer.print(" ");
}
br.close();
} else {
writer.print(arg);
writer.print(" ");
}
}
writer.close();
} catch (IOException e) {
errorMessage = e.toString();
}
if (errorMessage != null) {
System.err.println(
"Failed to output command-line arguments to file "
+ outputFilename
+ " due to exception: "
+ errorMessage);
}
}
}
/**
* Returns true if some @arglist file sets the classpath.
*
* @param argListFiles command-line argument files (specified with @ on the command line)
*/
private static boolean argsListHasClassPath(final List<File> argListFiles) {
for (final String arg : expandArgFiles(argListFiles)) {
if (arg.contains("-classpath") || arg.contains("-cp")) {
return true;
}
}
return false;
}
/**
* Returns true if some @arglist file sets the processorpath.
*
* @param argListFiles command-line argument files (specified with @ on the command line)
*/
private static boolean argsListHasProcessorPath(final List<File> argListFiles) {
for (final String arg : expandArgFiles(argListFiles)) {
if (arg.contains("-processorpath")) {
return true;
}
}
return false;
}
/**
* Return all the lines in all the files.
*
* @param files a list of files
* @return a list of all the lines in all the files
*/
protected static List<String> expandArgFiles(final List<File> files) {
final List<String> content = new ArrayList<>();
for (final File file : files) {
try {
content.addAll(SystemUtil.readFile(file));
} catch (final IOException exc) {
throw new RuntimeException("Could not open file: " + file.getAbsolutePath(), exc);
}
}
return content;
}
/**
* Find the jar file or directory containing the .class file from which cls was loaded.
*
* @param cls the class whose .class file we wish to locate; if null, CheckerMain.class
* @param errIfFromDirectory if false, throw an exception if the file was loaded from a
* directory
*/
public static String findPathTo(Class<?> cls, boolean errIfFromDirectory)
throws IllegalStateException {
if (cls == null) {
cls = CheckerMain.class;
}
String name = cls.getName();
String classFileName;
/* name is something like pakkage.name.ContainingClass$ClassName. We need to turn this into ContainingClass$ClassName.class. */
{
int idx = name.lastIndexOf('.');
classFileName = (idx == -1 ? name : name.substring(idx + 1)) + ".class";
}
String uri = cls.getResource(classFileName).toString();
if (uri.startsWith("file:")) {
if (errIfFromDirectory) {
return uri;
} else {
throw new IllegalStateException(
"This class has been loaded from a directory and not from a jar file.");
}
}
if (!uri.startsWith("jar:file:")) {
int idx = uri.indexOf(':');
String protocol = idx == -1 ? "(unknown)" : uri.substring(0, idx);
throw new IllegalStateException(
"This class has been loaded remotely via the "
+ protocol
+ " protocol. Only loading from a jar on the local file system is supported.");
}
int idx = uri.indexOf('!');
// Sanity check
if (idx == -1) {
throw new IllegalStateException(
"You appear to have loaded this class from a local jar file, but I can't make sense of the URL!");
}
try {
String fileName =
URLDecoder.decode(
uri.substring("jar:file:".length(), idx),
Charset.defaultCharset().name());
return new File(fileName).getAbsolutePath();
} catch (UnsupportedEncodingException e) {
throw new BugInCF("Default charset doesn't exist. Your VM is borked.");
}
}
/**
* Assert that all files in the list exist and if they don't, throw a RuntimeException with a
* list of the files that do not exist.
*
* @param expectedFiles files that must exist
*/
private static void assertFilesExist(final List<File> expectedFiles) {
final List<File> missingFiles = new ArrayList<>();
for (final File file : expectedFiles) {
if (file == null) {
throw new RuntimeException("Null passed to assertFilesExist");
}
if (!file.exists()) {
missingFiles.add(file);
}
}
if (!missingFiles.isEmpty()) {
List<String> missingAbsoluteFilenames = new ArrayList<>(missingFiles.size());
for (File missingFile : missingFiles) {
missingAbsoluteFilenames.add(missingFile.getAbsolutePath());
}
throw new RuntimeException(
"The following files could not be located: "
+ String.join(", ", missingAbsoluteFilenames));
}
}
private static String quote(final String str) {
if (str.contains(" ")) {
if (str.contains("\"")) {
throw new BugInCF(
"Don't know how to quote a string containing a double-quote character "
+ str);
}
return "\"" + str + "\"";
}
return str;
}
///////////////////////////////////////////////////////////////////////////
/// Shorthand checker names
///
/** Processor shorthand is enabled for processors in this directory in checker.jar. */
protected static final String CHECKER_BASE_DIR_NAME = "org/checkerframework/checker/";
/** Processor shorthand is enabled for processors in this directory in checker.jar. */
protected static final String COMMON_BASE_DIR_NAME = "org/checkerframework/common/";
/**
* Returns true if processorString, once transformed into fully-qualified form, is present in
* fullyQualifiedCheckerNames. Used by SourceChecker to determine whether a class is annotated
* for any processor that is being run.
*
* @param processorString the name of a single processor, not a comma-separated list of
* processors
* @param fullyQualifiedCheckerNames a list of fully-qualified checker names
* @return true if the fully-qualified version of {@code processorString} is in {@code
* fullyQualifiedCheckerNames}
*/
public static boolean matchesCheckerOrSubcheckerFromList(
final String processorString,
List<@FullyQualifiedName String> fullyQualifiedCheckerNames) {
if (processorString.contains(",")) {
return false; // Do not process strings containing multiple processors.
}
return fullyQualifiedCheckerNames.contains(
unshorthandProcessorNames(processorString, fullyQualifiedCheckerNames, true));
}
/**
* For every "-processor" argument in args, replace its immediate successor argument using
* unabbreviateProcessorNames.
*/
protected void replaceShorthandProcessor(final List<String> args) {
for (int i = 0; i < args.size(); i++) {
final int nextIndex = i + 1;
if (args.size() > nextIndex) {
if (args.get(i).equals("-processor")) {
final String replacement =
unshorthandProcessorNames(
args.get(nextIndex), getAllCheckerClassNames(), false);
args.remove(nextIndex);
args.add(nextIndex, replacement);
}
}
}
}
/**
* Returns the list of fully qualified names of the checkers found in checker.jar. This covers
* only checkers with the name ending in "Checker". Checkers with a name ending in "Subchecker"
* are not included in the returned list. Note however that it is possible for a checker with
* the name ending in "Checker" to be used as a subchecker.
*
* @return fully qualified names of the checkers found in checker.jar
*/
private List<@FullyQualifiedName String> getAllCheckerClassNames() {
ArrayList<@FullyQualifiedName String> checkerClassNames = new ArrayList<>();
try {
final JarInputStream checkerJarIs = new JarInputStream(new FileInputStream(checkerJar));
ZipEntry entry;
while ((entry = checkerJarIs.getNextEntry()) != null) {
final String name = entry.getName();
// Checkers ending in "Subchecker" are not included in this list used by
// CheckerMain.
if ((name.startsWith(CHECKER_BASE_DIR_NAME)
|| name.startsWith(COMMON_BASE_DIR_NAME))
&& name.endsWith("Checker.class")) {
// Forward slash is used instead of File.separator because checker.jar uses / as
// the separator.
@SuppressWarnings("signature") // string manipulation
@FullyQualifiedName String fqName =
String.join(
".",
name.substring(0, name.length() - ".class".length())
.split("/"));
checkerClassNames.add(fqName);
}
}
checkerJarIs.close();
} catch (IOException e) {
// When using CheckerDevelMain we might not have a checker.jar file built yet.
// Issue a warning instead of aborting execution.
System.err.printf(
"Could not read %s. Shorthand processor names will not work.%n", checkerJar);
}
return checkerClassNames;
}
/**
* Takes a string of comma-separated processor names, and expands any shorthands to
* fully-qualified names from the fullyQualifiedCheckerNames list. For example:
*
* <pre>
* NullnessChecker → org.checkerframework.checker.nullness.NullnessChecker
* nullness → org.checkerframework.checker.nullness.NullnessChecker
* NullnessChecker,RegexChecker → org.checkerframework.checker.nullness.NullnessChecker,org.checkerframework.checker.regex.RegexChecker
* </pre>
*
* Note, a processor entry only gets replaced if it contains NO "." (i.e., it is not qualified
* by a package name) and can be found under the package org.checkerframework.checker in
* checker.jar.
*
* @param processorsString a comma-separated string identifying processors
* @param fullyQualifiedCheckerNames a list of fully-qualified checker names to match
* processorsString against
* @param allowSubcheckers whether to match against fully qualified checker names ending with
* "Subchecker"
* @return processorsString where all shorthand references to Checker Framework built-in
* checkers are replaced with fully-qualified references
*/
protected static String unshorthandProcessorNames(
final String processorsString,
List<@FullyQualifiedName String> fullyQualifiedCheckerNames,
boolean allowSubcheckers) {
final String[] processors = processorsString.split(",");
for (int i = 0; i < processors.length; i++) {
if (!processors[i].contains(".")) { // Not already fully qualified
processors[i] =
unshorthandProcessorName(
processors[i], fullyQualifiedCheckerNames, allowSubcheckers);
}
}
return String.join(",", processors);
}
/**
* Given a processor name, tries to expand it to a checker in the fullyQualifiedCheckerNames
* list. Returns that expansion, or the argument itself if the expansion fails.
*
* @param processorName a processor name, possibly in shorthand
* @param fullyQualifiedCheckerNames all checker names
* @param allowSubcheckers whether to match subcheckers as well as checkers
* @return the fully-qualified version of {@code processorName} in {@code
* fullyQualifiedCheckerNames}, or else {@code processorName} itself
*/
private static String unshorthandProcessorName(
final String processorName,
List<@FullyQualifiedName String> fullyQualifiedCheckerNames,
boolean allowSubcheckers) {
for (final String name : fullyQualifiedCheckerNames) {
boolean tryMatch = false;
String[] checkerPath =
name.substring(0, name.length() - "Checker".length()).split("\\.");
String checkerNameShort = checkerPath[checkerPath.length - 1];
String checkerName = checkerNameShort + "Checker";
if (name.endsWith("Checker")) {
checkerPath = name.substring(0, name.length() - "Checker".length()).split("\\.");
checkerNameShort = checkerPath[checkerPath.length - 1];
checkerName = checkerNameShort + "Checker";
tryMatch = true;
} else if (allowSubcheckers && name.endsWith("Subchecker")) {
checkerPath = name.substring(0, name.length() - "Subchecker".length()).split("\\.");
checkerNameShort = checkerPath[checkerPath.length - 1];
checkerName = checkerNameShort + "Subchecker";
tryMatch = true;
}
if (tryMatch) {
if (processorName.equalsIgnoreCase(checkerName)
|| processorName.equalsIgnoreCase(checkerNameShort)) {
return name;
}
}
}
return processorName; // If not matched, return the input string.
}
/**
* Given a shorthand processor name, returns true if it can be expanded to a checker in the
* fullyQualifiedCheckerNames list.
*
* @param processorName a string identifying one processor
* @param fullyQualifiedCheckerNames a list of fully-qualified checker names to match
* processorName against
* @param allowSubcheckers whether to match against fully qualified checker names ending with
* "Subchecker"
* @return true if the shorthand processor name can be expanded to a checker in {@code
* fullyQualifiedCheckerNames}
*/
public static boolean matchesFullyQualifiedProcessor(
final String processorName,
List<@FullyQualifiedName String> fullyQualifiedCheckerNames,
boolean allowSubcheckers) {
return !processorName.equals(
unshorthandProcessorName(
processorName, fullyQualifiedCheckerNames, allowSubcheckers));
}
}
|
[
"\"CLASSPATH\""
] |
[] |
[
"CLASSPATH"
] |
[]
|
["CLASSPATH"]
|
java
| 1 | 0 | |
src/azure-cli-core/azure/cli/core/_profile.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import collections
import errno
import json
import os
import os.path
import re
import string
from copy import deepcopy
from enum import Enum
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core._environment import get_config_dir
from azure.cli.core._session import ACCOUNT
from azure.cli.core.util import get_file_json, in_cloud_console, open_page_in_browser, can_launch_browser,\
is_windows, is_wsl
from azure.cli.core.cloud import get_active_cloud, set_cloud_subscription
logger = get_logger(__name__)
# Names below are used by azure-xplat-cli to persist account information into
# ~/.azure/azureProfile.json, the macOS keychain, or Windows secure storage,
# which azure-cli will share.
# Please do not rename them unless you know what you are doing.
_IS_DEFAULT_SUBSCRIPTION = 'isDefault'
_SUBSCRIPTION_ID = 'id'
_SUBSCRIPTION_NAME = 'name'
# Tenant of the token which is used to list the subscription
_TENANT_ID = 'tenantId'
# Home tenant of the subscription, which maps to tenantId in 'Subscriptions - List REST API'
# https://docs.microsoft.com/en-us/rest/api/resources/subscriptions/list
_HOME_TENANT_ID = 'homeTenantId'
_MANAGED_BY_TENANTS = 'managedByTenants'
_USER_ENTITY = 'user'
_USER_NAME = 'name'
_CLOUD_SHELL_ID = 'cloudShellID'
_SUBSCRIPTIONS = 'subscriptions'
_INSTALLATION_ID = 'installationId'
_ENVIRONMENT_NAME = 'environmentName'
_STATE = 'state'
_USER_TYPE = 'type'
_USER = 'user'
_SERVICE_PRINCIPAL = 'servicePrincipal'
_SERVICE_PRINCIPAL_ID = 'servicePrincipalId'
_SERVICE_PRINCIPAL_TENANT = 'servicePrincipalTenant'
_SERVICE_PRINCIPAL_CERT_FILE = 'certificateFile'
_SERVICE_PRINCIPAL_CERT_THUMBPRINT = 'thumbprint'
_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH = 'useCertSNIssuerAuth'
_TOKEN_ENTRY_USER_ID = 'userId'
_TOKEN_ENTRY_TOKEN_TYPE = 'tokenType'
# This could mean either a real access token or the client secret of a service principal.
# The naming is not ideal, but it can't change because xplat-cli uses it.
_ACCESS_TOKEN = 'accessToken'
_REFRESH_TOKEN = 'refreshToken'
TOKEN_FIELDS_EXCLUDED_FROM_PERSISTENCE = ['familyName',
'givenName',
'isUserIdDisplayable',
'tenantId']
_CLIENT_ID = '04b07795-8ddb-461a-bbee-02f9e1bf7b46'
_COMMON_TENANT = 'common'
_TENANT_LEVEL_ACCOUNT_NAME = 'N/A(tenant level account)'
_SYSTEM_ASSIGNED_IDENTITY = 'systemAssignedIdentity'
_USER_ASSIGNED_IDENTITY = 'userAssignedIdentity'
_ASSIGNED_IDENTITY_INFO = 'assignedIdentityInfo'
_AZ_LOGIN_MESSAGE = "Please run 'az login' to set up an account."
_USE_VENDORED_SUBSCRIPTION_SDK = True
def load_subscriptions(cli_ctx, all_clouds=False, refresh=False):
profile = Profile(cli_ctx=cli_ctx)
if refresh:
profile.refresh_accounts()
subscriptions = profile.load_cached_subscriptions(all_clouds)
return subscriptions
def _get_authority_url(cli_ctx, tenant):
authority_url = cli_ctx.cloud.endpoints.active_directory
is_adfs = bool(re.match('.+(/adfs|/adfs/)$', authority_url, re.I))
if is_adfs:
authority_url = authority_url.rstrip('/') # workaround: ADAL is known to reject auth urls with trailing /
else:
authority_url = authority_url.rstrip('/') + '/' + (tenant or _COMMON_TENANT)
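        # For the public Azure cloud this yields e.g. https://login.microsoftonline.com/<tenant>
        # (or .../common when no tenant is given).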
return authority_url, is_adfs
def _authentication_context_factory(cli_ctx, tenant, cache):
import adal
authority_url, is_adfs = _get_authority_url(cli_ctx, tenant)
return adal.AuthenticationContext(authority_url, cache=cache, api_version=None, validate_authority=(not is_adfs))
_AUTH_CTX_FACTORY = _authentication_context_factory
def _load_tokens_from_file(file_path):
if os.path.isfile(file_path):
try:
return get_file_json(file_path, throw_on_empty=False) or []
except (CLIError, ValueError) as ex:
raise CLIError("Failed to load token files. If you have a repro, please log an issue at "
"https://github.com/Azure/azure-cli/issues. At the same time, you can clean "
"up by running 'az account clear' and then 'az login'. (Inner Error: {})".format(ex))
return []
def _delete_file(file_path):
try:
os.remove(file_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def get_credential_types(cli_ctx):
class CredentialType(Enum): # pylint: disable=too-few-public-methods
cloud = get_active_cloud(cli_ctx)
management = cli_ctx.cloud.endpoints.management
rbac = cli_ctx.cloud.endpoints.active_directory_graph_resource_id
return CredentialType
def _get_cloud_console_token_endpoint():
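    # In Cloud Shell the local token endpoint is exposed through the MSI_ENDPOINT environment variable.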
return os.environ.get('MSI_ENDPOINT')
# pylint: disable=too-many-lines,too-many-instance-attributes
class Profile:
_global_creds_cache = None
def __init__(self, storage=None, auth_ctx_factory=None, use_global_creds_cache=True,
async_persist=True, cli_ctx=None):
from azure.cli.core import get_default_cli
self.cli_ctx = cli_ctx or get_default_cli()
self._storage = storage or ACCOUNT
self.auth_ctx_factory = auth_ctx_factory or _AUTH_CTX_FACTORY
if use_global_creds_cache:
# for perf, use global cache
if not Profile._global_creds_cache:
Profile._global_creds_cache = CredsCache(self.cli_ctx, self.auth_ctx_factory,
async_persist=async_persist)
self._creds_cache = Profile._global_creds_cache
else:
self._creds_cache = CredsCache(self.cli_ctx, self.auth_ctx_factory, async_persist=async_persist)
self._management_resource_uri = self.cli_ctx.cloud.endpoints.management
self._ad_resource_uri = self.cli_ctx.cloud.endpoints.active_directory_resource_id
self._ad = self.cli_ctx.cloud.endpoints.active_directory
self._msi_creds = None
def find_subscriptions_on_login(self,
interactive,
username,
password,
is_service_principal,
tenant,
use_device_code=False,
allow_no_subscriptions=False,
subscription_finder=None,
use_cert_sn_issuer=None):
from azure.cli.core._debug import allow_debug_adal_connection
allow_debug_adal_connection()
subscriptions = []
if not subscription_finder:
subscription_finder = SubscriptionFinder(self.cli_ctx,
self.auth_ctx_factory,
self._creds_cache.adal_token_cache)
if interactive:
if not use_device_code and (in_cloud_console() or not can_launch_browser()):
                logger.info('No GUI is available, falling back to device code')
use_device_code = True
if not use_device_code:
try:
authority_url, _ = _get_authority_url(self.cli_ctx, tenant)
subscriptions = subscription_finder.find_through_authorization_code_flow(
tenant, self._ad_resource_uri, authority_url)
except RuntimeError:
use_device_code = True
logger.warning('Not able to launch a browser to log you in, falling back to device code...')
if use_device_code:
subscriptions = subscription_finder.find_through_interactive_flow(
tenant, self._ad_resource_uri)
else:
if is_service_principal:
if not tenant:
raise CLIError('Please supply tenant using "--tenant"')
sp_auth = ServicePrincipalAuth(password, use_cert_sn_issuer)
subscriptions = subscription_finder.find_from_service_principal_id(
username, sp_auth, tenant, self._ad_resource_uri)
else:
subscriptions = subscription_finder.find_from_user_account(
username, password, tenant, self._ad_resource_uri)
if not allow_no_subscriptions and not subscriptions:
if username:
msg = "No subscriptions found for {}.".format(username)
else:
# Don't show username if bare 'az login' is used
msg = "No subscriptions found."
raise CLIError(msg)
if is_service_principal:
self._creds_cache.save_service_principal_cred(sp_auth.get_entry_to_persist(username,
tenant))
if self._creds_cache.adal_token_cache.has_state_changed:
self._creds_cache.persist_cached_creds()
if allow_no_subscriptions:
t_list = [s.tenant_id for s in subscriptions]
bare_tenants = [t for t in subscription_finder.tenants if t not in t_list]
profile = Profile(cli_ctx=self.cli_ctx)
tenant_accounts = profile._build_tenant_level_accounts(bare_tenants) # pylint: disable=protected-access
subscriptions.extend(tenant_accounts)
if not subscriptions:
return []
consolidated = self._normalize_properties(subscription_finder.user_id, subscriptions,
is_service_principal, bool(use_cert_sn_issuer))
self._set_subscriptions(consolidated)
# use deepcopy as we don't want to persist these changes to file.
return deepcopy(consolidated)
def _normalize_properties(self, user, subscriptions, is_service_principal, cert_sn_issuer_auth=None,
user_assigned_identity_id=None):
import sys
consolidated = []
for s in subscriptions:
display_name = s.display_name
if display_name is None:
display_name = ''
try:
display_name.encode(sys.getdefaultencoding())
except (UnicodeEncodeError, UnicodeDecodeError): # mainly for Python 2.7 with ascii as the default encoding
display_name = re.sub(r'[^\x00-\x7f]', lambda x: '?', display_name)
subscription_dict = {
_SUBSCRIPTION_ID: s.id.rpartition('/')[2],
_SUBSCRIPTION_NAME: display_name,
_STATE: s.state,
_USER_ENTITY: {
_USER_NAME: user,
_USER_TYPE: _SERVICE_PRINCIPAL if is_service_principal else _USER
},
_IS_DEFAULT_SUBSCRIPTION: False,
_TENANT_ID: s.tenant_id,
_ENVIRONMENT_NAME: self.cli_ctx.cloud.name
}
# For subscription account from Subscriptions - List 2019-06-01 and later.
if subscription_dict[_SUBSCRIPTION_NAME] != _TENANT_LEVEL_ACCOUNT_NAME:
if hasattr(s, 'home_tenant_id'):
subscription_dict[_HOME_TENANT_ID] = s.home_tenant_id
if hasattr(s, 'managed_by_tenants'):
if s.managed_by_tenants is None:
# managedByTenants is missing from the response. This is a known service issue:
# https://github.com/Azure/azure-rest-api-specs/issues/9567
# pylint: disable=line-too-long
raise CLIError("Invalid profile is used for cloud '{cloud_name}'. "
"To configure the cloud profile, run `az cloud set --name {cloud_name} --profile <profile>(e.g. 2019-03-01-hybrid)`. "
"For more information about using Azure CLI with Azure Stack, see "
"https://docs.microsoft.com/azure-stack/user/azure-stack-version-profiles-azurecli2"
.format(cloud_name=self.cli_ctx.cloud.name))
subscription_dict[_MANAGED_BY_TENANTS] = [{_TENANT_ID: t.tenant_id} for t in s.managed_by_tenants]
consolidated.append(subscription_dict)
if cert_sn_issuer_auth:
consolidated[-1][_USER_ENTITY][_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH] = True
if user_assigned_identity_id:
consolidated[-1][_USER_ENTITY][_ASSIGNED_IDENTITY_INFO] = user_assigned_identity_id
return consolidated
def _build_tenant_level_accounts(self, tenants):
result = []
for t in tenants:
s = self._new_account()
s.id = '/subscriptions/' + t
s.subscription = t
s.tenant_id = t
s.display_name = _TENANT_LEVEL_ACCOUNT_NAME
result.append(s)
return result
def _new_account(self):
"""Build an empty Subscription which will be used as a tenant account.
API version doesn't matter as only specified attributes are preserved by _normalize_properties."""
if _USE_VENDORED_SUBSCRIPTION_SDK:
from azure.cli.core.vendored_sdks.subscriptions.models import Subscription
SubscriptionType = Subscription
else:
from azure.cli.core.profiles import ResourceType, get_sdk
SubscriptionType = get_sdk(self.cli_ctx, ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS,
'Subscription', mod='models')
s = SubscriptionType()
s.state = 'Enabled'
return s
def find_subscriptions_in_vm_with_msi(self, identity_id=None, allow_no_subscriptions=None):
# pylint: disable=too-many-statements
import jwt
from msrestazure.tools import is_valid_resource_id
from azure.cli.core.adal_authentication import MSIAuthenticationWrapper
resource = self.cli_ctx.cloud.endpoints.active_directory_resource_id
if identity_id:
if is_valid_resource_id(identity_id):
msi_creds = MSIAuthenticationWrapper(resource=resource, msi_res_id=identity_id)
identity_type = MsiAccountTypes.user_assigned_resource_id
else:
authenticated = False
from azure.cli.core.azclierror import AzureResponseError
try:
msi_creds = MSIAuthenticationWrapper(resource=resource, client_id=identity_id)
identity_type = MsiAccountTypes.user_assigned_client_id
authenticated = True
except AzureResponseError as ex:
if 'http error: 400, reason: Bad Request' in ex.error_msg:
logger.info('Sniff: not an MSI client id')
else:
raise
if not authenticated:
try:
identity_type = MsiAccountTypes.user_assigned_object_id
msi_creds = MSIAuthenticationWrapper(resource=resource, object_id=identity_id)
authenticated = True
except AzureResponseError as ex:
if 'http error: 400, reason: Bad Request' in ex.error_msg:
logger.info('Sniff: not an MSI object id')
else:
raise
if not authenticated:
raise CLIError('Failed to connect to MSI, check your managed service identity id.')
else:
identity_type = MsiAccountTypes.system_assigned
msi_creds = MSIAuthenticationWrapper(resource=resource)
token_entry = msi_creds.token
token = token_entry['access_token']
logger.info('MSI: token was retrieved. Now trying to initialize local accounts...')
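        # The token is only decoded locally to read the tenant id ('tid') claim, so signature verification is skipped.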
decode = jwt.decode(token, verify=False, algorithms=['RS256'])
tenant = decode['tid']
subscription_finder = SubscriptionFinder(self.cli_ctx, self.auth_ctx_factory, None)
subscriptions = subscription_finder.find_from_raw_token(tenant, token)
base_name = ('{}-{}'.format(identity_type, identity_id) if identity_id else identity_type)
user = _USER_ASSIGNED_IDENTITY if identity_id else _SYSTEM_ASSIGNED_IDENTITY
if not subscriptions:
if allow_no_subscriptions:
subscriptions = self._build_tenant_level_accounts([tenant])
else:
raise CLIError('No access was configured for the VM, hence no subscriptions were found. '
"If this is expected, use '--allow-no-subscriptions' to have tenant level access.")
consolidated = self._normalize_properties(user, subscriptions, is_service_principal=True,
user_assigned_identity_id=base_name)
self._set_subscriptions(consolidated)
return deepcopy(consolidated)
def find_subscriptions_in_cloud_console(self):
import jwt
_, token, _ = self._get_token_from_cloud_shell(self.cli_ctx.cloud.endpoints.active_directory_resource_id)
logger.info('MSI: token was retrieved. Now trying to initialize local accounts...')
decode = jwt.decode(token, verify=False, algorithms=['RS256'])
tenant = decode['tid']
subscription_finder = SubscriptionFinder(self.cli_ctx, self.auth_ctx_factory, None)
subscriptions = subscription_finder.find_from_raw_token(tenant, token)
if not subscriptions:
raise CLIError('No subscriptions were found in the cloud shell')
user = decode.get('unique_name', 'N/A')
consolidated = self._normalize_properties(user, subscriptions, is_service_principal=False)
for s in consolidated:
s[_USER_ENTITY][_CLOUD_SHELL_ID] = True
self._set_subscriptions(consolidated)
return deepcopy(consolidated)
def _get_token_from_cloud_shell(self, resource): # pylint: disable=no-self-use
from azure.cli.core.adal_authentication import MSIAuthenticationWrapper
auth = MSIAuthenticationWrapper(resource=resource)
auth.set_token()
token_entry = auth.token
return (token_entry['token_type'], token_entry['access_token'], token_entry)
def _set_subscriptions(self, new_subscriptions, merge=True, secondary_key_name=None):
def _get_key_name(account, secondary_key_name):
return (account[_SUBSCRIPTION_ID] if secondary_key_name is None
else '{}-{}'.format(account[_SUBSCRIPTION_ID], account[secondary_key_name]))
def _match_account(account, subscription_id, secondary_key_name, secondary_key_val):
return (account[_SUBSCRIPTION_ID] == subscription_id and
(secondary_key_val is None or account[secondary_key_name] == secondary_key_val))
existing_ones = self.load_cached_subscriptions(all_clouds=True)
active_one = next((x for x in existing_ones if x.get(_IS_DEFAULT_SUBSCRIPTION)), None)
active_subscription_id = active_one[_SUBSCRIPTION_ID] if active_one else None
active_secondary_key_val = active_one[secondary_key_name] if (active_one and secondary_key_name) else None
active_cloud = self.cli_ctx.cloud
default_sub_id = None
# merge with existing ones
if merge:
dic = collections.OrderedDict((_get_key_name(x, secondary_key_name), x) for x in existing_ones)
else:
dic = collections.OrderedDict()
dic.update((_get_key_name(x, secondary_key_name), x) for x in new_subscriptions)
subscriptions = list(dic.values())
if subscriptions:
if active_one:
new_active_one = next(
(x for x in new_subscriptions if _match_account(x, active_subscription_id, secondary_key_name,
active_secondary_key_val)), None)
for s in subscriptions:
s[_IS_DEFAULT_SUBSCRIPTION] = False
if not new_active_one:
new_active_one = Profile._pick_working_subscription(new_subscriptions)
else:
new_active_one = Profile._pick_working_subscription(new_subscriptions)
new_active_one[_IS_DEFAULT_SUBSCRIPTION] = True
default_sub_id = new_active_one[_SUBSCRIPTION_ID]
set_cloud_subscription(self.cli_ctx, active_cloud.name, default_sub_id)
self._storage[_SUBSCRIPTIONS] = subscriptions
@staticmethod
def _pick_working_subscription(subscriptions):
s = next((x for x in subscriptions if x.get(_STATE) == 'Enabled'), None)
return s or subscriptions[0]
def is_tenant_level_account(self):
return self.get_subscription()[_SUBSCRIPTION_NAME] == _TENANT_LEVEL_ACCOUNT_NAME
def set_active_subscription(self, subscription): # take id or name
subscriptions = self.load_cached_subscriptions(all_clouds=True)
active_cloud = self.cli_ctx.cloud
subscription = subscription.lower()
result = [x for x in subscriptions
if subscription in [x[_SUBSCRIPTION_ID].lower(),
x[_SUBSCRIPTION_NAME].lower()] and
x[_ENVIRONMENT_NAME] == active_cloud.name]
if len(result) != 1:
raise CLIError("The subscription of '{}' {} in cloud '{}'.".format(
subscription, "doesn't exist" if not result else 'has more than one match', active_cloud.name))
for s in subscriptions:
s[_IS_DEFAULT_SUBSCRIPTION] = False
result[0][_IS_DEFAULT_SUBSCRIPTION] = True
set_cloud_subscription(self.cli_ctx, active_cloud.name, result[0][_SUBSCRIPTION_ID])
self._storage[_SUBSCRIPTIONS] = subscriptions
def logout(self, user_or_sp):
subscriptions = self.load_cached_subscriptions(all_clouds=True)
result = [x for x in subscriptions
if user_or_sp.lower() == x[_USER_ENTITY][_USER_NAME].lower()]
subscriptions = [x for x in subscriptions if x not in result]
self._storage[_SUBSCRIPTIONS] = subscriptions
self._creds_cache.remove_cached_creds(user_or_sp)
def logout_all(self):
self._storage[_SUBSCRIPTIONS] = []
self._creds_cache.remove_all_cached_creds()
def load_cached_subscriptions(self, all_clouds=False):
subscriptions = self._storage.get(_SUBSCRIPTIONS) or []
active_cloud = self.cli_ctx.cloud
cached_subscriptions = [sub for sub in subscriptions
if all_clouds or sub[_ENVIRONMENT_NAME] == active_cloud.name]
# use deepcopy as we don't want to persist these changes to file.
return deepcopy(cached_subscriptions)
def get_current_account_user(self):
try:
active_account = self.get_subscription()
except CLIError:
raise CLIError('There are no active accounts.')
return active_account[_USER_ENTITY][_USER_NAME]
def get_subscription(self, subscription=None): # take id or name
subscriptions = self.load_cached_subscriptions()
if not subscriptions:
raise CLIError(_AZ_LOGIN_MESSAGE)
result = [x for x in subscriptions if (
not subscription and x.get(_IS_DEFAULT_SUBSCRIPTION) or
subscription and subscription.lower() in [x[_SUBSCRIPTION_ID].lower(), x[
_SUBSCRIPTION_NAME].lower()])]
if not result and subscription:
raise CLIError("Subscription '{}' not found. "
"Check the spelling and casing and try again.".format(subscription))
if not result and not subscription:
raise CLIError("No subscription found. Run 'az account set' to select a subscription.")
if len(result) > 1:
raise CLIError("Multiple subscriptions with the name '{}' found. "
"Specify the subscription ID.".format(subscription))
return result[0]
def get_subscription_id(self, subscription=None): # take id or name
return self.get_subscription(subscription)[_SUBSCRIPTION_ID]
def get_access_token_for_resource(self, username, tenant, resource):
tenant = tenant or 'common'
_, access_token, _ = self._creds_cache.retrieve_token_for_user(
username, tenant, resource)
return access_token
@staticmethod
def _try_parse_msi_account_name(account):
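        # The persisted MSI account info has the form '<identity_type>' or '<identity_type>-<identity_id>',
        # e.g. 'MSIClient-<client_id>' for a user-assigned identity.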
msi_info, user = account[_USER_ENTITY].get(_ASSIGNED_IDENTITY_INFO), account[_USER_ENTITY].get(_USER_NAME)
if user in [_SYSTEM_ASSIGNED_IDENTITY, _USER_ASSIGNED_IDENTITY]:
if not msi_info:
msi_info = account[_SUBSCRIPTION_NAME] # fall back to old persisting way
parts = msi_info.split('-', 1)
if parts[0] in MsiAccountTypes.valid_msi_account_types():
return parts[0], (None if len(parts) <= 1 else parts[1])
return None, None
def get_login_credentials(self, resource=None, subscription_id=None, aux_subscriptions=None, aux_tenants=None):
if aux_tenants and aux_subscriptions:
raise CLIError("Please specify only one of aux_subscriptions and aux_tenants, not both")
account = self.get_subscription(subscription_id)
user_type = account[_USER_ENTITY][_USER_TYPE]
username_or_sp_id = account[_USER_ENTITY][_USER_NAME]
resource = resource or self.cli_ctx.cloud.endpoints.active_directory_resource_id
identity_type, identity_id = Profile._try_parse_msi_account_name(account)
external_tenants_info = []
if aux_tenants:
external_tenants_info = [tenant for tenant in aux_tenants if tenant != account[_TENANT_ID]]
if aux_subscriptions:
ext_subs = [aux_sub for aux_sub in aux_subscriptions if aux_sub != subscription_id]
for ext_sub in ext_subs:
sub = self.get_subscription(ext_sub)
if sub[_TENANT_ID] != account[_TENANT_ID]:
external_tenants_info.append(sub[_TENANT_ID])
if identity_type is None:
def _retrieve_token(sdk_resource=None):
# When called by
# - Track 1 SDK, use `resource` specified by CLI
# - Track 2 SDK, use `sdk_resource` specified by SDK and ignore `resource` specified by CLI
token_resource = sdk_resource or resource
logger.debug("Retrieving token from ADAL for resource %r", token_resource)
if in_cloud_console() and account[_USER_ENTITY].get(_CLOUD_SHELL_ID):
return self._get_token_from_cloud_shell(token_resource)
if user_type == _USER:
return self._creds_cache.retrieve_token_for_user(username_or_sp_id,
account[_TENANT_ID], token_resource)
use_cert_sn_issuer = account[_USER_ENTITY].get(_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH)
return self._creds_cache.retrieve_token_for_service_principal(username_or_sp_id, token_resource,
account[_TENANT_ID],
use_cert_sn_issuer)
def _retrieve_tokens_from_external_tenants(sdk_resource=None):
token_resource = sdk_resource or resource
logger.debug("Retrieving token from ADAL for external tenants and resource %r", token_resource)
external_tokens = []
for sub_tenant_id in external_tenants_info:
if user_type == _USER:
external_tokens.append(self._creds_cache.retrieve_token_for_user(
username_or_sp_id, sub_tenant_id, token_resource))
else:
external_tokens.append(self._creds_cache.retrieve_token_for_service_principal(
username_or_sp_id, token_resource, sub_tenant_id, token_resource))
return external_tokens
from azure.cli.core.adal_authentication import AdalAuthentication
auth_object = AdalAuthentication(_retrieve_token,
_retrieve_tokens_from_external_tenants if external_tenants_info else None)
else:
if self._msi_creds is None:
self._msi_creds = MsiAccountTypes.msi_auth_factory(identity_type, identity_id, resource)
auth_object = self._msi_creds
return (auth_object,
str(account[_SUBSCRIPTION_ID]),
str(account[_TENANT_ID]))
def get_msal_token(self, scopes, data):
"""
        This is added only for the vmssh feature.
        It is a temporary solution and will be deprecated once MSAL is fully adopted.
"""
from msal import ClientApplication
import posixpath
account = self.get_subscription()
username = account[_USER_ENTITY][_USER_NAME]
tenant = account[_TENANT_ID] or 'common'
_, refresh_token, _, _ = self.get_refresh_token()
authority = posixpath.join(self.cli_ctx.cloud.endpoints.active_directory, tenant)
app = ClientApplication(_CLIENT_ID, authority=authority)
result = app.acquire_token_by_refresh_token(refresh_token, scopes, data=data)
if 'error' in result:
from azure.cli.core.adal_authentication import aad_error_handler
aad_error_handler(result)
return username, result["access_token"]
def get_refresh_token(self, resource=None,
subscription=None):
account = self.get_subscription(subscription)
user_type = account[_USER_ENTITY][_USER_TYPE]
username_or_sp_id = account[_USER_ENTITY][_USER_NAME]
resource = resource or self.cli_ctx.cloud.endpoints.active_directory_resource_id
if user_type == _USER:
_, _, token_entry = self._creds_cache.retrieve_token_for_user(
username_or_sp_id, account[_TENANT_ID], resource)
return None, token_entry.get(_REFRESH_TOKEN), token_entry[_ACCESS_TOKEN], str(account[_TENANT_ID])
sp_secret = self._creds_cache.retrieve_cred_for_service_principal(username_or_sp_id)
return username_or_sp_id, sp_secret, None, str(account[_TENANT_ID])
def get_raw_token(self, resource=None, subscription=None, tenant=None):
logger.debug("Profile.get_raw_token invoked with resource=%r, subscription=%r, tenant=%r",
resource, subscription, tenant)
if subscription and tenant:
raise CLIError("Please specify only one of subscription and tenant, not both")
account = self.get_subscription(subscription)
user_type = account[_USER_ENTITY][_USER_TYPE]
username_or_sp_id = account[_USER_ENTITY][_USER_NAME]
resource = resource or self.cli_ctx.cloud.endpoints.active_directory_resource_id
identity_type, identity_id = Profile._try_parse_msi_account_name(account)
if identity_type:
# MSI
if tenant:
raise CLIError("Tenant shouldn't be specified for MSI account")
msi_creds = MsiAccountTypes.msi_auth_factory(identity_type, identity_id, resource)
msi_creds.set_token()
token_entry = msi_creds.token
creds = (token_entry['token_type'], token_entry['access_token'], token_entry)
elif in_cloud_console() and account[_USER_ENTITY].get(_CLOUD_SHELL_ID):
# Cloud Shell
if tenant:
raise CLIError("Tenant shouldn't be specified for Cloud Shell account")
creds = self._get_token_from_cloud_shell(resource)
else:
tenant_dest = tenant if tenant else account[_TENANT_ID]
import adal
try:
if user_type == _USER:
# User
creds = self._creds_cache.retrieve_token_for_user(username_or_sp_id,
tenant_dest, resource)
else:
# Service Principal
use_cert_sn_issuer = bool(account[_USER_ENTITY].get(_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH))
creds = self._creds_cache.retrieve_token_for_service_principal(username_or_sp_id,
resource,
tenant_dest,
use_cert_sn_issuer)
except adal.AdalError as ex:
from azure.cli.core.adal_authentication import adal_error_handler
adal_error_handler(ex)
return (creds,
None if tenant else str(account[_SUBSCRIPTION_ID]),
str(tenant if tenant else account[_TENANT_ID]))
def refresh_accounts(self, subscription_finder=None):
subscriptions = self.load_cached_subscriptions()
to_refresh = subscriptions
from azure.cli.core._debug import allow_debug_adal_connection
allow_debug_adal_connection()
subscription_finder = subscription_finder or SubscriptionFinder(self.cli_ctx,
self.auth_ctx_factory,
self._creds_cache.adal_token_cache)
refreshed_list = set()
result = []
for s in to_refresh:
user_name = s[_USER_ENTITY][_USER_NAME]
if user_name in refreshed_list:
continue
refreshed_list.add(user_name)
is_service_principal = (s[_USER_ENTITY][_USER_TYPE] == _SERVICE_PRINCIPAL)
tenant = s[_TENANT_ID]
subscriptions = []
try:
if is_service_principal:
sp_auth = ServicePrincipalAuth(self._creds_cache.retrieve_cred_for_service_principal(user_name))
subscriptions = subscription_finder.find_from_service_principal_id(user_name, sp_auth, tenant,
self._ad_resource_uri)
else:
subscriptions = subscription_finder.find_from_user_account(user_name, None, None,
self._ad_resource_uri)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Refreshing for '%s' failed with an error '%s'. The existing accounts were not "
"modified. You can run 'az login' later to explicitly refresh them", user_name, ex)
result += deepcopy([r for r in to_refresh if r[_USER_ENTITY][_USER_NAME] == user_name])
continue
if not subscriptions:
if s[_SUBSCRIPTION_NAME] == _TENANT_LEVEL_ACCOUNT_NAME:
subscriptions = self._build_tenant_level_accounts([s[_TENANT_ID]])
if not subscriptions:
continue
consolidated = self._normalize_properties(subscription_finder.user_id,
subscriptions,
is_service_principal)
result += consolidated
if self._creds_cache.adal_token_cache.has_state_changed:
self._creds_cache.persist_cached_creds()
self._set_subscriptions(result, merge=False)
def get_sp_auth_info(self, subscription_id=None, name=None, password=None, cert_file=None):
from collections import OrderedDict
account = self.get_subscription(subscription_id)
        # Was the credential created through a command like 'create-for-rbac'?
result = OrderedDict()
if name and (password or cert_file):
result['clientId'] = name
if password:
result['clientSecret'] = password
else:
result['clientCertificate'] = cert_file
result['subscriptionId'] = subscription_id or account[_SUBSCRIPTION_ID]
else: # has logged in through cli
user_type = account[_USER_ENTITY].get(_USER_TYPE)
if user_type == _SERVICE_PRINCIPAL:
result['clientId'] = account[_USER_ENTITY][_USER_NAME]
sp_auth = ServicePrincipalAuth(self._creds_cache.retrieve_cred_for_service_principal(
account[_USER_ENTITY][_USER_NAME]))
secret = getattr(sp_auth, 'secret', None)
if secret:
result['clientSecret'] = secret
else:
# we can output 'clientCertificateThumbprint' if asked
result['clientCertificate'] = sp_auth.certificate_file
result['subscriptionId'] = account[_SUBSCRIPTION_ID]
else:
raise CLIError('SDK Auth file is only applicable when authenticated using a service principal')
result[_TENANT_ID] = account[_TENANT_ID]
endpoint_mappings = OrderedDict() # use OrderedDict to control the output sequence
endpoint_mappings['active_directory'] = 'activeDirectoryEndpointUrl'
endpoint_mappings['resource_manager'] = 'resourceManagerEndpointUrl'
endpoint_mappings['active_directory_graph_resource_id'] = 'activeDirectoryGraphResourceId'
endpoint_mappings['sql_management'] = 'sqlManagementEndpointUrl'
endpoint_mappings['gallery'] = 'galleryEndpointUrl'
endpoint_mappings['management'] = 'managementEndpointUrl'
from azure.cli.core.cloud import CloudEndpointNotSetException
for e in endpoint_mappings:
try:
result[endpoint_mappings[e]] = getattr(get_active_cloud(self.cli_ctx).endpoints, e)
except CloudEndpointNotSetException:
result[endpoint_mappings[e]] = None
return result
def get_installation_id(self):
installation_id = self._storage.get(_INSTALLATION_ID)
if not installation_id:
import uuid
installation_id = str(uuid.uuid1())
self._storage[_INSTALLATION_ID] = installation_id
return installation_id
class MsiAccountTypes:
# pylint: disable=no-method-argument,no-self-argument
system_assigned = 'MSI'
user_assigned_client_id = 'MSIClient'
user_assigned_object_id = 'MSIObject'
user_assigned_resource_id = 'MSIResource'
@staticmethod
def valid_msi_account_types():
return [MsiAccountTypes.system_assigned, MsiAccountTypes.user_assigned_client_id,
MsiAccountTypes.user_assigned_object_id, MsiAccountTypes.user_assigned_resource_id]
@staticmethod
def msi_auth_factory(cli_account_name, identity, resource):
from azure.cli.core.adal_authentication import MSIAuthenticationWrapper
if cli_account_name == MsiAccountTypes.system_assigned:
return MSIAuthenticationWrapper(resource=resource)
if cli_account_name == MsiAccountTypes.user_assigned_client_id:
return MSIAuthenticationWrapper(resource=resource, client_id=identity)
if cli_account_name == MsiAccountTypes.user_assigned_object_id:
return MSIAuthenticationWrapper(resource=resource, object_id=identity)
if cli_account_name == MsiAccountTypes.user_assigned_resource_id:
return MSIAuthenticationWrapper(resource=resource, msi_res_id=identity)
raise ValueError("unrecognized msi account name '{}'".format(cli_account_name))
class SubscriptionFinder:
'''finds all subscriptions for a user or service principal'''
def __init__(self, cli_ctx, auth_context_factory, adal_token_cache, arm_client_factory=None):
self._adal_token_cache = adal_token_cache
self._auth_context_factory = auth_context_factory
self.user_id = None # will figure out after log user in
self.cli_ctx = cli_ctx
def create_arm_client_factory(credentials):
if arm_client_factory:
return arm_client_factory(credentials)
from azure.cli.core.profiles import ResourceType, get_api_version
from azure.cli.core.commands.client_factory import _prepare_client_kwargs_track2
client_type = self._get_subscription_client_class()
if client_type is None:
from azure.cli.core.azclierror import CLIInternalError
raise CLIInternalError("Unable to get '{}' in profile '{}'"
.format(ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS, cli_ctx.cloud.profile))
api_version = get_api_version(cli_ctx, ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS)
client_kwargs = _prepare_client_kwargs_track2(cli_ctx)
# We don't need to change credential_scopes as 'scopes' is ignored by BasicTokenCredential anyway
client = client_type(credentials, api_version=api_version,
base_url=self.cli_ctx.cloud.endpoints.resource_manager, **client_kwargs)
return client
self._arm_client_factory = create_arm_client_factory
self.tenants = []
def find_from_user_account(self, username, password, tenant, resource):
context = self._create_auth_context(tenant)
if password:
token_entry = context.acquire_token_with_username_password(resource, username, password, _CLIENT_ID)
        else:  # when refreshing accounts, we will leverage locally cached tokens
token_entry = context.acquire_token(resource, username, _CLIENT_ID)
if not token_entry:
return []
self.user_id = token_entry[_TOKEN_ENTRY_USER_ID]
if tenant is None:
result = self._find_using_common_tenant(token_entry[_ACCESS_TOKEN], resource)
else:
result = self._find_using_specific_tenant(tenant, token_entry[_ACCESS_TOKEN])
return result
def find_through_authorization_code_flow(self, tenant, resource, authority_url):
# launch browser and get the code
results = _get_authorization_code(resource, authority_url)
if not results.get('code'):
raise CLIError('Login failed') # error detail is already displayed through previous steps
# exchange the code for the token
context = self._create_auth_context(tenant)
token_entry = context.acquire_token_with_authorization_code(results['code'], results['reply_url'],
resource, _CLIENT_ID, None)
self.user_id = token_entry[_TOKEN_ENTRY_USER_ID]
logger.warning("You have logged in. Now let us find all the subscriptions to which you have access...")
if tenant is None:
result = self._find_using_common_tenant(token_entry[_ACCESS_TOKEN], resource)
else:
result = self._find_using_specific_tenant(tenant, token_entry[_ACCESS_TOKEN])
return result
def find_through_interactive_flow(self, tenant, resource):
context = self._create_auth_context(tenant)
code = context.acquire_user_code(resource, _CLIENT_ID)
logger.warning(code['message'])
token_entry = context.acquire_token_with_device_code(resource, code, _CLIENT_ID)
self.user_id = token_entry[_TOKEN_ENTRY_USER_ID]
if tenant is None:
result = self._find_using_common_tenant(token_entry[_ACCESS_TOKEN], resource)
else:
result = self._find_using_specific_tenant(tenant, token_entry[_ACCESS_TOKEN])
return result
def find_from_service_principal_id(self, client_id, sp_auth, tenant, resource):
context = self._create_auth_context(tenant, False)
token_entry = sp_auth.acquire_token(context, resource, client_id)
self.user_id = client_id
result = self._find_using_specific_tenant(tenant, token_entry[_ACCESS_TOKEN])
self.tenants = [tenant]
return result
    # This only occurs inside the cloud console or a VM with a managed identity.
def find_from_raw_token(self, tenant, token):
# decode the token, so we know the tenant
result = self._find_using_specific_tenant(tenant, token)
self.tenants = [tenant]
return result
def _create_auth_context(self, tenant, use_token_cache=True):
token_cache = self._adal_token_cache if use_token_cache else None
return self._auth_context_factory(self.cli_ctx, tenant, token_cache)
def _find_using_common_tenant(self, access_token, resource):
import adal
from azure.cli.core.adal_authentication import BasicTokenCredential
all_subscriptions = []
empty_tenants = []
mfa_tenants = []
token_credential = BasicTokenCredential(access_token)
client = self._arm_client_factory(token_credential)
tenants = client.tenants.list()
for t in tenants:
tenant_id = t.tenant_id
logger.debug("Finding subscriptions under tenant %s", tenant_id)
# display_name is available since /tenants?api-version=2018-06-01,
# not available in /tenants?api-version=2016-06-01
if not hasattr(t, 'display_name'):
t.display_name = None
temp_context = self._create_auth_context(tenant_id)
try:
logger.debug("Acquiring a token with tenant=%s, resource=%s", tenant_id, resource)
temp_credentials = temp_context.acquire_token(resource, self.user_id, _CLIENT_ID)
except adal.AdalError as ex:
# because user creds went through the 'common' tenant, the error here must be
# tenant specific, like the account was disabled. For such errors, we will continue
# with other tenants.
msg = (getattr(ex, 'error_response', None) or {}).get('error_description') or ''
if 'AADSTS50076' in msg:
# The tenant requires MFA and can't be accessed with home tenant's refresh token
mfa_tenants.append(t)
else:
logger.warning("Failed to authenticate '%s' due to error '%s'", t, ex)
continue
subscriptions = self._find_using_specific_tenant(
tenant_id,
temp_credentials[_ACCESS_TOKEN])
if not subscriptions:
empty_tenants.append(t)
# When a subscription can be listed by multiple tenants, only the first appearance is retained
for sub_to_add in subscriptions:
add_sub = True
for sub_to_compare in all_subscriptions:
if sub_to_add.subscription_id == sub_to_compare.subscription_id:
logger.warning("Subscription %s '%s' can be accessed from tenants %s(default) and %s. "
"To select a specific tenant when accessing this subscription, "
"use 'az login --tenant TENANT_ID'.",
sub_to_add.subscription_id, sub_to_add.display_name,
sub_to_compare.tenant_id, sub_to_add.tenant_id)
add_sub = False
break
if add_sub:
all_subscriptions.append(sub_to_add)
# Show warning for empty tenants
if empty_tenants:
logger.warning("The following tenants don't contain accessible subscriptions. "
"Use 'az login --allow-no-subscriptions' to have tenant level access.")
for t in empty_tenants:
if t.display_name:
logger.warning("%s '%s'", t.tenant_id, t.display_name)
else:
logger.warning("%s", t.tenant_id)
# Show warning for MFA tenants
if mfa_tenants:
logger.warning("The following tenants require Multi-Factor Authentication (MFA). "
"Use 'az login --tenant TENANT_ID' to explicitly login to a tenant.")
for t in mfa_tenants:
if t.display_name:
logger.warning("%s '%s'", t.tenant_id, t.display_name)
else:
logger.warning("%s", t.tenant_id)
return all_subscriptions
def _find_using_specific_tenant(self, tenant, access_token):
from azure.cli.core.adal_authentication import BasicTokenCredential
token_credential = BasicTokenCredential(access_token)
client = self._arm_client_factory(token_credential)
subscriptions = client.subscriptions.list()
all_subscriptions = []
for s in subscriptions:
# map tenantId from REST API to homeTenantId
if hasattr(s, "tenant_id"):
setattr(s, 'home_tenant_id', s.tenant_id)
setattr(s, 'tenant_id', tenant)
all_subscriptions.append(s)
self.tenants.append(tenant)
return all_subscriptions
def _get_subscription_client_class(self): # pylint: disable=no-self-use
"""Get the subscription client class. It can come from either the vendored SDK or public SDK, depending
on the design of architecture.
"""
if _USE_VENDORED_SUBSCRIPTION_SDK:
            # Use the vendored subscription SDK to decouple from the `resource` command module
from azure.cli.core.vendored_sdks.subscriptions import SubscriptionClient
client_type = SubscriptionClient
else:
# Use the public SDK
from azure.cli.core.profiles import ResourceType
from azure.cli.core.profiles._shared import get_client_class
client_type = get_client_class(ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS)
return client_type
class CredsCache:
    '''Caches AAD tokens and service principal secrets, and handles
    their persistence.
    '''
def __init__(self, cli_ctx, auth_ctx_factory=None, async_persist=True):
# AZURE_ACCESS_TOKEN_FILE is used by Cloud Console and not meant to be user configured
self._token_file = (os.environ.get('AZURE_ACCESS_TOKEN_FILE', None) or
os.path.join(get_config_dir(), 'accessTokens.json'))
self._service_principal_creds = []
self._auth_ctx_factory = auth_ctx_factory
self._adal_token_cache_attr = None
self._should_flush_to_disk = False
self._async_persist = async_persist
self._ctx = cli_ctx
if async_persist:
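            # With async persistence, writes are deferred and flushed once when the process exits.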
import atexit
atexit.register(self.flush_to_disk)
def persist_cached_creds(self):
self._should_flush_to_disk = True
if not self._async_persist:
self.flush_to_disk()
self.adal_token_cache.has_state_changed = False
def flush_to_disk(self):
if self._should_flush_to_disk:
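            # The token file holds credentials, so it is (re)created with owner-only (0600) permissions.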
with os.fdopen(os.open(self._token_file, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as cred_file:
items = self.adal_token_cache.read_items()
all_creds = [entry for _, entry in items]
# trim away useless fields (needed for cred sharing with xplat)
for i in all_creds:
for key in TOKEN_FIELDS_EXCLUDED_FROM_PERSISTENCE:
i.pop(key, None)
all_creds.extend(self._service_principal_creds)
cred_file.write(json.dumps(all_creds))
def retrieve_token_for_user(self, username, tenant, resource):
context = self._auth_ctx_factory(self._ctx, tenant, cache=self.adal_token_cache)
token_entry = context.acquire_token(resource, username, _CLIENT_ID)
if not token_entry:
raise CLIError("Could not retrieve token from local cache.{}".format(
" Please run 'az login'." if not in_cloud_console() else ''))
if self.adal_token_cache.has_state_changed:
self.persist_cached_creds()
return (token_entry[_TOKEN_ENTRY_TOKEN_TYPE], token_entry[_ACCESS_TOKEN], token_entry)
def retrieve_token_for_service_principal(self, sp_id, resource, tenant, use_cert_sn_issuer=False):
self.load_adal_token_cache()
matched = [x for x in self._service_principal_creds if sp_id == x[_SERVICE_PRINCIPAL_ID]]
if not matched:
raise CLIError("Could not retrieve credential from local cache for service principal {}. "
"Please run 'az login' for this service principal."
.format(sp_id))
matched_with_tenant = [x for x in matched if tenant == x[_SERVICE_PRINCIPAL_TENANT]]
if matched_with_tenant:
cred = matched_with_tenant[0]
else:
logger.warning("Could not retrieve credential from local cache for service principal %s under tenant %s. "
"Trying credential under tenant %s, assuming that is an app credential.",
sp_id, tenant, matched[0][_SERVICE_PRINCIPAL_TENANT])
cred = matched[0]
context = self._auth_ctx_factory(self._ctx, tenant, None)
sp_auth = ServicePrincipalAuth(cred.get(_ACCESS_TOKEN, None) or
cred.get(_SERVICE_PRINCIPAL_CERT_FILE, None),
use_cert_sn_issuer)
token_entry = sp_auth.acquire_token(context, resource, sp_id)
return (token_entry[_TOKEN_ENTRY_TOKEN_TYPE], token_entry[_ACCESS_TOKEN], token_entry)
def retrieve_cred_for_service_principal(self, sp_id):
"""Returns the secret or certificate of the specified service principal."""
self.load_adal_token_cache()
matched = [x for x in self._service_principal_creds if sp_id == x[_SERVICE_PRINCIPAL_ID]]
if not matched:
raise CLIError("No matched service principal found")
cred = matched[0]
return cred.get(_ACCESS_TOKEN) or cred.get(_SERVICE_PRINCIPAL_CERT_FILE)
@property
def adal_token_cache(self):
return self.load_adal_token_cache()
def load_adal_token_cache(self):
if self._adal_token_cache_attr is None:
import adal
all_entries = _load_tokens_from_file(self._token_file)
self._load_service_principal_creds(all_entries)
real_token = [x for x in all_entries if x not in self._service_principal_creds]
self._adal_token_cache_attr = adal.TokenCache(json.dumps(real_token))
return self._adal_token_cache_attr
def save_service_principal_cred(self, sp_entry):
self.load_adal_token_cache()
matched = [x for x in self._service_principal_creds
if sp_entry[_SERVICE_PRINCIPAL_ID] == x[_SERVICE_PRINCIPAL_ID] and
sp_entry[_SERVICE_PRINCIPAL_TENANT] == x[_SERVICE_PRINCIPAL_TENANT]]
state_changed = False
if matched:
# pylint: disable=line-too-long
if (sp_entry.get(_ACCESS_TOKEN, None) != matched[0].get(_ACCESS_TOKEN, None) or
sp_entry.get(_SERVICE_PRINCIPAL_CERT_FILE, None) != matched[0].get(_SERVICE_PRINCIPAL_CERT_FILE, None)):
self._service_principal_creds.remove(matched[0])
self._service_principal_creds.append(sp_entry)
state_changed = True
else:
self._service_principal_creds.append(sp_entry)
state_changed = True
if state_changed:
self.persist_cached_creds()
def _load_service_principal_creds(self, creds):
for c in creds:
if c.get(_SERVICE_PRINCIPAL_ID):
self._service_principal_creds.append(c)
return self._service_principal_creds
def remove_cached_creds(self, user_or_sp):
state_changed = False
# clear AAD tokens
tokens = self.adal_token_cache.find({_TOKEN_ENTRY_USER_ID: user_or_sp})
if tokens:
state_changed = True
self.adal_token_cache.remove(tokens)
# clear service principal creds
matched = [x for x in self._service_principal_creds
if x[_SERVICE_PRINCIPAL_ID] == user_or_sp]
if matched:
state_changed = True
self._service_principal_creds = [x for x in self._service_principal_creds
if x not in matched]
if state_changed:
self.persist_cached_creds()
def remove_all_cached_creds(self):
# we can clear file contents, but deleting it is simpler
_delete_file(self._token_file)
class ServicePrincipalAuth:
def __init__(self, password_arg_value, use_cert_sn_issuer=None):
if not password_arg_value:
raise CLIError('missing secret or certificate in order to '
'authenticate through a service principal')
if os.path.isfile(password_arg_value):
certificate_file = password_arg_value
from OpenSSL.crypto import load_certificate, FILETYPE_PEM, Error
self.certificate_file = certificate_file
self.public_certificate = None
try:
with open(certificate_file, 'r') as file_reader:
self.cert_file_string = file_reader.read()
cert = load_certificate(FILETYPE_PEM, self.cert_file_string)
self.thumbprint = cert.digest("sha1").decode()
if use_cert_sn_issuer:
# low-tech but safe parsing based on
# https://github.com/libressl-portable/openbsd/blob/master/src/lib/libcrypto/pem/pem.h
match = re.search(r'\-+BEGIN CERTIFICATE.+\-+(?P<public>[^-]+)\-+END CERTIFICATE.+\-+',
self.cert_file_string, re.I)
self.public_certificate = match.group('public').strip()
except (UnicodeDecodeError, Error):
raise CLIError('Invalid certificate, please use a valid PEM file.')
else:
self.secret = password_arg_value
def acquire_token(self, authentication_context, resource, client_id):
if hasattr(self, 'secret'):
return authentication_context.acquire_token_with_client_credentials(resource, client_id, self.secret)
return authentication_context.acquire_token_with_client_certificate(resource, client_id, self.cert_file_string,
self.thumbprint, self.public_certificate)
def get_entry_to_persist(self, sp_id, tenant):
entry = {
_SERVICE_PRINCIPAL_ID: sp_id,
_SERVICE_PRINCIPAL_TENANT: tenant,
}
if hasattr(self, 'secret'):
entry[_ACCESS_TOKEN] = self.secret
else:
entry[_SERVICE_PRINCIPAL_CERT_FILE] = self.certificate_file
entry[_SERVICE_PRINCIPAL_CERT_THUMBPRINT] = self.thumbprint
return entry
def _get_authorization_code_worker(authority_url, resource, results):
# pylint: disable=too-many-statements
import socket
import random
import http.server
class ClientRedirectServer(http.server.HTTPServer): # pylint: disable=too-few-public-methods
query_params = {}
class ClientRedirectHandler(http.server.BaseHTTPRequestHandler):
# pylint: disable=line-too-long
def do_GET(self):
try:
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs # pylint: disable=import-error
if self.path.endswith('/favicon.ico'): # deal with legacy IE
self.send_response(204)
return
query = self.path.split('?', 1)[-1]
query = parse_qs(query, keep_blank_values=True)
self.server.query_params = query
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
landing_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'auth_landing_pages',
'ok.html' if 'code' in query else 'fail.html')
with open(landing_file, 'rb') as html_file:
self.wfile.write(html_file.read())
def log_message(self, format, *args): # pylint: disable=redefined-builtin,unused-argument,no-self-use
            pass  # this prevents the HTTP server from dumping messages to stdout
reply_url = None
    # On Windows, HTTPServer by default doesn't throw an error if the port is in use
# https://github.com/Azure/azure-cli/issues/10578
if is_windows():
logger.debug('Windows is detected. Set HTTPServer.allow_reuse_address to False')
ClientRedirectServer.allow_reuse_address = False
elif is_wsl():
logger.debug('WSL is detected. Set HTTPServer.allow_reuse_address to False')
ClientRedirectServer.allow_reuse_address = False
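    # Probe ports 8400-8999 until a free one is found for the local redirect listener.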
for port in range(8400, 9000):
try:
web_server = ClientRedirectServer(('localhost', port), ClientRedirectHandler)
reply_url = "http://localhost:{}".format(port)
break
except socket.error as ex:
logger.warning("Port '%s' is taken with error '%s'. Trying with the next one", port, ex)
except UnicodeDecodeError:
logger.warning("Please make sure there is no international (Unicode) character in the computer name "
r"or C:\Windows\System32\drivers\etc\hosts file's 127.0.0.1 entries. "
"For more details, please see https://github.com/Azure/azure-cli/issues/12957")
break
if reply_url is None:
logger.warning("Error: can't reserve a port for authentication reply url")
return
try:
request_state = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(20))
except NotImplementedError:
request_state = 'code'
# launch browser:
url = ('{0}/oauth2/authorize?response_type=code&client_id={1}'
'&redirect_uri={2}&state={3}&resource={4}&prompt=select_account')
url = url.format(authority_url, _CLIENT_ID, reply_url, request_state, resource)
logger.info('Open browser with url: %s', url)
succ = open_page_in_browser(url)
if succ is False:
web_server.server_close()
results['no_browser'] = True
return
# Emit a warning to inform that a browser is opened.
# Only show the path part of the URL and hide the query string.
logger.warning("The default web browser has been opened at %s. Please continue the login in the web browser. "
"If no web browser is available or if the web browser fails to open, use device code flow "
"with `az login --use-device-code`.", url.split('?')[0])
# wait for callback from browser.
while True:
web_server.handle_request()
if 'error' in web_server.query_params or 'code' in web_server.query_params:
break
if 'error' in web_server.query_params:
logger.warning('Authentication Error: "%s". Description: "%s" ', web_server.query_params['error'],
web_server.query_params.get('error_description'))
return
if 'code' in web_server.query_params:
code = web_server.query_params['code']
else:
logger.warning('Authentication Error: Authorization code was not captured in query strings "%s"',
web_server.query_params)
return
if 'state' in web_server.query_params:
response_state = web_server.query_params['state'][0]
if response_state != request_state:
raise RuntimeError("mismatched OAuth state")
else:
raise RuntimeError("missing OAuth state")
results['code'] = code[0]
results['reply_url'] = reply_url
def _get_authorization_code(resource, authority_url):
import threading
import time
results = {}
t = threading.Thread(target=_get_authorization_code_worker,
args=(authority_url, resource, results))
t.daemon = True
t.start()
while True:
time.sleep(2) # so that ctrl+c can stop the command
if not t.is_alive():
break # done
if results.get('no_browser'):
raise RuntimeError()
return results
|
[] |
[] |
[
"MSI_ENDPOINT",
"AZURE_ACCESS_TOKEN_FILE"
] |
[]
|
["MSI_ENDPOINT", "AZURE_ACCESS_TOKEN_FILE"]
|
python
| 2 | 0 | |
pkg/testing/integration/program.go
|
// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
cryptorand "crypto/rand"
"encoding/hex"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
"time"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/pkg/v3/backend/filestate"
"github.com/pulumi/pulumi/pkg/v3/engine"
"github.com/pulumi/pulumi/pkg/v3/operations"
"github.com/pulumi/pulumi/pkg/v3/resource/stack"
"github.com/pulumi/pulumi/sdk/v3/go/common/apitype"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/config"
pulumi_testing "github.com/pulumi/pulumi/sdk/v3/go/common/testing"
"github.com/pulumi/pulumi/sdk/v3/go/common/tokens"
"github.com/pulumi/pulumi/sdk/v3/go/common/tools"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/retry"
"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
"github.com/stretchr/testify/assert"
user "github.com/tweekmonster/luser"
)
const PythonRuntime = "python"
const NodeJSRuntime = "nodejs"
const GoRuntime = "go"
const DotNetRuntime = "dotnet"
const windowsOS = "windows"
// RuntimeValidationStackInfo contains details related to the stack that runtime validation logic may want to use.
type RuntimeValidationStackInfo struct {
StackName tokens.QName
Deployment *apitype.DeploymentV3
RootResource apitype.ResourceV3
Outputs map[string]interface{}
Events []apitype.EngineEvent
}
// EditDir is an optional edit to apply to the example, as subsequent deployments.
type EditDir struct {
Dir string
ExtraRuntimeValidation func(t *testing.T, stack RuntimeValidationStackInfo)
// Additive is true if Dir should be copied *on top* of the test directory.
// Otherwise Dir *replaces* the test directory, except we keep .pulumi/ and Pulumi.yaml and Pulumi.<stack>.yaml.
Additive bool
// ExpectFailure is true if we expect this test to fail. This is very coarse grained, and will essentially
// tolerate *any* failure in the program (IDEA: in the future, offer a way to narrow this down more).
ExpectFailure bool
// ExpectNoChanges is true if the edit is expected to not propose any changes.
ExpectNoChanges bool
// Stdout is the writer to use for all stdout messages.
Stdout io.Writer
// Stderr is the writer to use for all stderr messages.
Stderr io.Writer
// Verbose may be set to true to print messages as they occur, rather than buffering and showing upon failure.
Verbose bool
// Run program directory in query mode.
QueryMode bool
}
// TestCommandStats is a collection of data related to running a single command during a test.
type TestCommandStats struct {
// StartTime is the time at which the command was started
StartTime string `json:"startTime"`
// EndTime is the time at which the command exited
EndTime string `json:"endTime"`
	// ElapsedSeconds is the number of seconds the command took to run
ElapsedSeconds float64 `json:"elapsedSeconds"`
// StackName is the name of the stack
StackName string `json:"stackName"`
// TestId is the unique ID of the test run
TestID string `json:"testId"`
	// StepName is the name of the step that was invoked
StepName string `json:"stepName"`
// CommandLine is the command line which was invoked
CommandLine string `json:"commandLine"`
// TestName is the name of the directory in which the test was executed
TestName string `json:"testName"`
// IsError is true if the command failed
IsError bool `json:"isError"`
// The Cloud that the test was run against, or empty for local deployments
CloudURL string `json:"cloudURL"`
}
// TestStatsReporter reports results and metadata from a test run.
type TestStatsReporter interface {
ReportCommand(stats TestCommandStats)
}
// ConfigValue is used to provide config values to a test program.
type ConfigValue struct {
// The config key to pass to `pulumi config`.
Key string
// The config value to pass to `pulumi config`.
Value string
// Secret indicates that the `--secret` flag should be specified when calling `pulumi config`.
Secret bool
// Path indicates that the `--path` flag should be specified when calling `pulumi config`.
Path bool
}
// ProgramTestOptions provides options for ProgramTest
type ProgramTestOptions struct {
// Dir is the program directory to test.
Dir string
// Array of NPM packages which must be `yarn linked` (e.g. {"pulumi", "@pulumi/aws"})
Dependencies []string
// Map of package names to versions. The test will use the specified versions of these packages instead of what
// is declared in `package.json`.
Overrides map[string]string
// Map of config keys and values to set (e.g. {"aws:region": "us-east-2"}).
Config map[string]string
// Map of secure config keys and values to set (e.g. {"aws:region": "us-east-2"}).
Secrets map[string]string
// List of config keys and values to set in order, including Secret and Path options.
OrderedConfig []ConfigValue
// SecretsProvider is the optional custom secrets provider to use instead of the default.
SecretsProvider string
// EditDirs is an optional list of edits to apply to the example, as subsequent deployments.
EditDirs []EditDir
// ExtraRuntimeValidation is an optional callback for additional validation, called before applying edits.
ExtraRuntimeValidation func(t *testing.T, stack RuntimeValidationStackInfo)
// RelativeWorkDir is an optional path relative to `Dir` which should be used as working directory during tests.
RelativeWorkDir string
// AllowEmptyPreviewChanges is true if we expect that this test's no-op preview may propose changes (e.g.
// because the test is sensitive to the exact contents of its working directory and those contents change
// incidentally between the initial update and the empty update).
AllowEmptyPreviewChanges bool
// AllowEmptyUpdateChanges is true if we expect that this test's no-op update may perform changes (e.g.
// because the test is sensitive to the exact contents of its working directory and those contents change
// incidentally between the initial update and the empty update).
AllowEmptyUpdateChanges bool
// ExpectFailure is true if we expect this test to fail. This is very coarse grained, and will essentially
// tolerate *any* failure in the program (IDEA: in the future, offer a way to narrow this down more).
ExpectFailure bool
// ExpectRefreshChanges may be set to true if a test is expected to have changes yielded by an immediate refresh.
// This could occur, for example, if a resource's state is constantly changing outside of Pulumi (e.g., timestamps).
ExpectRefreshChanges bool
// RetryFailedSteps indicates that failed updates, refreshes, and destroys should be retried after a brief
// intermission. A maximum of 3 retries will be attempted.
RetryFailedSteps bool
// SkipRefresh indicates that the refresh step should be skipped entirely.
SkipRefresh bool
// SkipPreview indicates that the preview step should be skipped entirely.
SkipPreview bool
// SkipUpdate indicates that the update step should be skipped entirely.
SkipUpdate bool
// SkipExportImport skips testing that exporting and importing the stack works properly.
SkipExportImport bool
// SkipEmptyPreviewUpdate skips the no-change preview/update that is performed that validates
// that no changes happen.
SkipEmptyPreviewUpdate bool
// SkipStackRemoval indicates that the stack should not be removed. (And so the test's results could be inspected
// in the Pulumi Service after the test has completed.)
SkipStackRemoval bool
// Quick implies SkipPreview, SkipExportImport and SkipEmptyPreviewUpdate
Quick bool
// PreviewCommandlineFlags specifies flags to add to the `pulumi preview` command line (e.g. "--color=raw")
PreviewCommandlineFlags []string
// UpdateCommandlineFlags specifies flags to add to the `pulumi up` command line (e.g. "--color=raw")
UpdateCommandlineFlags []string
// QueryCommandlineFlags specifies flags to add to the `pulumi query` command line (e.g. "--color=raw")
QueryCommandlineFlags []string
// RunBuild indicates that the build step should be run (e.g. run `yarn build` for `nodejs` programs)
RunBuild bool
// RunUpdateTest will ensure that updates to the package version can test for spurious diffs
RunUpdateTest bool
// DecryptSecretsInOutput will ensure that stack output is passed `--show-secrets` parameter
// Used in conjunction with ExtraRuntimeValidation
DecryptSecretsInOutput bool
// CloudURL is an optional URL to override the default Pulumi Service API (https://api.pulumi-staging.io). The
// PULUMI_ACCESS_TOKEN environment variable must also be set to a valid access token for the target cloud.
CloudURL string
// StackName allows the stack name to be explicitly provided instead of computed from the
// environment during tests.
StackName string
// If non-empty, specifies the value of the `--tracing` flag to pass
// to Pulumi CLI, which may be a Zipkin endpoint or a
// `file:./local.trace` style url for AppDash tracing.
//
// Template `{command}` syntax will be expanded to the current
// command name such as `pulumi-stack-rm`. This is useful for
// file-based tracing since `ProgramTest` performs multiple
// CLI invocations that can inadvertently overwrite the trace
// file.
Tracing string
// NoParallel will opt the test out of being run in parallel.
NoParallel bool
// PrePulumiCommand specifies a callback that will be executed before each `pulumi` invocation. This callback may
// optionally return another callback to be invoked after the `pulumi` invocation completes.
PrePulumiCommand func(verb string) (func(err error) error, error)
// ReportStats optionally specifies how to report results from the test for external collection.
ReportStats TestStatsReporter
// Stdout is the writer to use for all stdout messages.
Stdout io.Writer
// Stderr is the writer to use for all stderr messages.
Stderr io.Writer
// Verbose may be set to true to print messages as they occur, rather than buffering and showing upon failure.
Verbose bool
// DebugLogging may be set to anything >0 to enable excessively verbose debug logging from `pulumi`. This is
// equivalent to `--logtostderr -v=N`, where N is the value of DebugLogLevel. This may also be enabled by setting
// the environment variable PULUMI_TEST_DEBUG_LOG_LEVEL.
DebugLogLevel int
// DebugUpdates may be set to true to enable debug logging from `pulumi preview`, `pulumi up`, and
// `pulumi destroy`. This may also be enabled by setting the environment variable PULUMI_TEST_DEBUG_UPDATES.
DebugUpdates bool
// Bin is a location of a `pulumi` executable to be run. Taken from the $PATH if missing.
Bin string
// YarnBin is a location of a `yarn` executable to be run. Taken from the $PATH if missing.
YarnBin string
// GoBin is a location of a `go` executable to be run. Taken from the $PATH if missing.
GoBin string
// PythonBin is a location of a `python` executable to be run. Taken from the $PATH if missing.
PythonBin string
// PipenvBin is a location of a `pipenv` executable to run. Taken from the $PATH if missing.
PipenvBin string
// DotNetBin is a location of a `dotnet` executable to be run. Taken from the $PATH if missing.
DotNetBin string
// Additional environment variables to pass for each command we run.
Env []string
// Automatically create and use a virtual environment, rather than using the Pipenv tool. This is now the default
// behavior, so this option no longer has any effect. To go back to the old behavior use the `UsePipenv` option.
UseAutomaticVirtualEnv bool
// Use the Pipenv tool to manage the virtual environment.
UsePipenv bool
// If set, this hook is called after the `pulumi preview` command has completed.
PreviewCompletedHook func(dir string) error
// JSONOutput indicates that the `--json` flag should be passed to `up`, `preview`,
// `refresh` and `destroy` commands.
JSONOutput bool
}
func (opts *ProgramTestOptions) GetDebugLogLevel() int {
if opts.DebugLogLevel > 0 {
return opts.DebugLogLevel
}
if du := os.Getenv("PULUMI_TEST_DEBUG_LOG_LEVEL"); du != "" {
if n, e := strconv.Atoi(du); e != nil {
panic(e)
} else if n > 0 {
return n
}
}
return 0
}
func (opts *ProgramTestOptions) GetDebugUpdates() bool {
return opts.DebugUpdates || os.Getenv("PULUMI_TEST_DEBUG_UPDATES") != ""
}
// GetStackName returns a stack name to use for this test.
func (opts *ProgramTestOptions) GetStackName() tokens.QName {
if opts.StackName == "" {
// Fetch the host and test dir names, cleaned so as to contain just [a-zA-Z0-9-_] chars.
hostname, err := os.Hostname()
contract.AssertNoErrorf(err, "failure to fetch hostname for stack prefix")
var host string
for _, c := range hostname {
if len(host) >= 10 {
break
}
if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || c == '-' || c == '_' {
host += string(c)
}
}
var test string
for _, c := range filepath.Base(opts.Dir) {
if len(test) >= 10 {
break
}
if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || c == '-' || c == '_' {
test += string(c)
}
}
b := make([]byte, 4)
_, err = cryptorand.Read(b)
contract.AssertNoError(err)
opts.StackName = strings.ToLower("p-it-" + host + "-" + test + "-" + hex.EncodeToString(b))
}
return tokens.QName(opts.StackName)
}
// GetStackNameWithOwner gets the name of the stack prepended with an owner, if PULUMI_TEST_OWNER is set.
// We use this in CI to create test stacks in an organization that all developers have access to, for debugging.
func (opts *ProgramTestOptions) GetStackNameWithOwner() tokens.QName {
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner != "" {
return tokens.QName(fmt.Sprintf("%s/%s", owner, opts.GetStackName()))
}
return opts.GetStackName()
}
// With combines a source set of options with a set of overrides.
func (opts ProgramTestOptions) With(overrides ProgramTestOptions) ProgramTestOptions {
if overrides.Dir != "" {
opts.Dir = overrides.Dir
}
if overrides.Dependencies != nil {
opts.Dependencies = overrides.Dependencies
}
if overrides.Overrides != nil {
opts.Overrides = overrides.Overrides
}
for k, v := range overrides.Config {
if opts.Config == nil {
opts.Config = make(map[string]string)
}
opts.Config[k] = v
}
for k, v := range overrides.Secrets {
if opts.Secrets == nil {
opts.Secrets = make(map[string]string)
}
opts.Secrets[k] = v
}
if overrides.SecretsProvider != "" {
opts.SecretsProvider = overrides.SecretsProvider
}
if overrides.EditDirs != nil {
opts.EditDirs = overrides.EditDirs
}
if overrides.ExtraRuntimeValidation != nil {
opts.ExtraRuntimeValidation = overrides.ExtraRuntimeValidation
}
if overrides.RelativeWorkDir != "" {
opts.RelativeWorkDir = overrides.RelativeWorkDir
}
if overrides.AllowEmptyPreviewChanges {
opts.AllowEmptyPreviewChanges = overrides.AllowEmptyPreviewChanges
}
if overrides.AllowEmptyUpdateChanges {
opts.AllowEmptyUpdateChanges = overrides.AllowEmptyUpdateChanges
}
if overrides.ExpectFailure {
opts.ExpectFailure = overrides.ExpectFailure
}
if overrides.ExpectRefreshChanges {
opts.ExpectRefreshChanges = overrides.ExpectRefreshChanges
}
if overrides.RetryFailedSteps {
opts.RetryFailedSteps = overrides.RetryFailedSteps
}
if overrides.SkipRefresh {
opts.SkipRefresh = overrides.SkipRefresh
}
if overrides.SkipPreview {
opts.SkipPreview = overrides.SkipPreview
}
if overrides.SkipUpdate {
opts.SkipUpdate = overrides.SkipUpdate
}
if overrides.SkipExportImport {
opts.SkipExportImport = overrides.SkipExportImport
}
if overrides.SkipEmptyPreviewUpdate {
opts.SkipEmptyPreviewUpdate = overrides.SkipEmptyPreviewUpdate
}
if overrides.SkipStackRemoval {
opts.SkipStackRemoval = overrides.SkipStackRemoval
}
if overrides.Quick {
opts.Quick = overrides.Quick
}
if overrides.PreviewCommandlineFlags != nil {
opts.PreviewCommandlineFlags = append(opts.PreviewCommandlineFlags, overrides.PreviewCommandlineFlags...)
}
if overrides.UpdateCommandlineFlags != nil {
opts.UpdateCommandlineFlags = append(opts.UpdateCommandlineFlags, overrides.UpdateCommandlineFlags...)
}
if overrides.QueryCommandlineFlags != nil {
opts.QueryCommandlineFlags = append(opts.QueryCommandlineFlags, overrides.QueryCommandlineFlags...)
}
if overrides.RunBuild {
opts.RunBuild = overrides.RunBuild
}
if overrides.RunUpdateTest {
opts.RunUpdateTest = overrides.RunUpdateTest
}
if overrides.DecryptSecretsInOutput {
opts.DecryptSecretsInOutput = overrides.DecryptSecretsInOutput
}
if overrides.CloudURL != "" {
opts.CloudURL = overrides.CloudURL
}
if overrides.StackName != "" {
opts.StackName = overrides.StackName
}
if overrides.Tracing != "" {
opts.Tracing = overrides.Tracing
}
if overrides.NoParallel {
opts.NoParallel = overrides.NoParallel
}
if overrides.PrePulumiCommand != nil {
opts.PrePulumiCommand = overrides.PrePulumiCommand
}
if overrides.ReportStats != nil {
opts.ReportStats = overrides.ReportStats
}
if overrides.Stdout != nil {
opts.Stdout = overrides.Stdout
}
if overrides.Stderr != nil {
opts.Stderr = overrides.Stderr
}
if overrides.Verbose {
opts.Verbose = overrides.Verbose
}
if overrides.DebugLogLevel != 0 {
opts.DebugLogLevel = overrides.DebugLogLevel
}
if overrides.DebugUpdates {
opts.DebugUpdates = overrides.DebugUpdates
}
if overrides.Bin != "" {
opts.Bin = overrides.Bin
}
if overrides.YarnBin != "" {
opts.YarnBin = overrides.YarnBin
}
if overrides.GoBin != "" {
opts.GoBin = overrides.GoBin
}
if overrides.PipenvBin != "" {
opts.PipenvBin = overrides.PipenvBin
}
if overrides.Env != nil {
opts.Env = append(opts.Env, overrides.Env...)
}
if overrides.UsePipenv {
opts.UsePipenv = overrides.UsePipenv
}
return opts
}
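// Illustrative sketch: test suites commonly define a base ProgramTestOptions once and refine it per
// test with With. The directory, dependency, and config values below are hypothetical:
//
//	base := ProgramTestOptions{
//		Dependencies: []string{"@pulumi/pulumi"},
//		Quick:        true,
//	}
//	opts := base.With(ProgramTestOptions{
//		Dir:    filepath.Join("testdata", "minimal"),
//		Config: map[string]string{"aws:region": "us-west-2"},
//	})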
type regexFlag struct {
re *regexp.Regexp
}
func (rf *regexFlag) String() string {
if rf.re == nil {
return ""
}
return rf.re.String()
}
func (rf *regexFlag) Set(v string) error {
r, err := regexp.Compile(v)
if err != nil {
return err
}
rf.re = r
return nil
}
var directoryMatcher regexFlag
var listDirs bool
var pipMutex *fsutil.FileMutex
func init() {
flag.Var(&directoryMatcher, "dirs", "optional list of regexes to use to select integration tests to run")
flag.BoolVar(&listDirs, "list-dirs", false, "list available integration tests without running them")
mutexPath := filepath.Join(os.TempDir(), "pip-mutex.lock")
pipMutex = fsutil.NewFileMutex(mutexPath)
}
// GetLogs retrieves the logs for a given stack in a particular region making the query provided.
//
// [provider] should be one of "aws" or "azure"
func GetLogs(
t *testing.T,
provider, region string,
stackInfo RuntimeValidationStackInfo,
query operations.LogQuery) *[]operations.LogEntry {
snap, err := stack.DeserializeDeploymentV3(*stackInfo.Deployment, stack.DefaultSecretsProvider)
assert.NoError(t, err)
tree := operations.NewResourceTree(snap.Resources)
if !assert.NotNil(t, tree) {
return nil
}
cfg := map[config.Key]string{
config.MustMakeKey(provider, "region"): region,
}
ops := tree.OperationsProvider(cfg)
// Validate logs from example
logs, err := ops.GetLogs(query)
if !assert.NoError(t, err) {
return nil
}
return logs
}
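// Illustrative sketch: GetLogs is typically called from an ExtraRuntimeValidation callback once the
// stack is up. The provider, region, and assertions below are hypothetical:
//
//	ExtraRuntimeValidation: func(t *testing.T, stack RuntimeValidationStackInfo) {
//		logs := GetLogs(t, "aws", "us-west-2", stack, operations.LogQuery{})
//		if assert.NotNil(t, logs) {
//			assert.NotEmpty(t, *logs)
//		}
//	},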
func prepareProgram(t *testing.T, opts *ProgramTestOptions) {
// If we're just listing tests, simply print this test's directory.
if listDirs {
fmt.Printf("%s\n", opts.Dir)
}
// If we have a matcher, ensure that this test matches its pattern.
if directoryMatcher.re != nil && !directoryMatcher.re.Match([]byte(opts.Dir)) {
t.Skip(fmt.Sprintf("Skipping: '%v' does not match '%v'", opts.Dir, directoryMatcher.re))
}
// Disable stack backups for tests to avoid filling up ~/.pulumi/backups with unnecessary
// backups of test stacks.
if err := os.Setenv(filestate.DisableCheckpointBackupsEnvVar, "1"); err != nil {
t.Errorf("error setting env var '%s': %v", filestate.DisableCheckpointBackupsEnvVar, err)
}
// We want tests to default to being run in parallel, hence the odd double negative.
if !opts.NoParallel {
t.Parallel()
}
if ciutil.IsCI() && os.Getenv("PULUMI_ACCESS_TOKEN") == "" {
t.Skip("Skipping: PULUMI_ACCESS_TOKEN is not set")
}
// If the test panics, recover and log instead of letting the panic escape the test. Even though *this* test will
// have run deferred functions and cleaned up, if the panic reaches toplevel it will kill the process and prevent
// other tests running in parallel from cleaning up.
defer func() {
if failure := recover(); failure != nil {
t.Errorf("panic testing %v: %v", opts.Dir, failure)
}
}()
// Set up some default values for sending test reports and tracing data. We use environment variables to
// control these globally and set reasonable values for our own use in CI.
if opts.ReportStats == nil {
if v := os.Getenv("PULUMI_TEST_REPORT_CONFIG"); v != "" {
splits := strings.Split(v, ":")
if len(splits) != 3 {
t.Errorf("report config should be set to a value of the form: <aws-region>:<bucket-name>:<keyPrefix>")
}
opts.ReportStats = NewS3Reporter(splits[0], splits[1], splits[2])
}
}
if opts.Tracing == "" {
opts.Tracing = os.Getenv("PULUMI_TEST_TRACE_ENDPOINT")
}
}
// ProgramTest runs a lifecycle of Pulumi commands in a program working directory, using the `pulumi` and `yarn`
// binaries available on PATH. It essentially executes the following workflow:
//
// yarn install
// yarn link <each opts.Dependencies>
// (+) yarn run build
// pulumi init
// (*) pulumi login
// pulumi stack init integrationtesting
// pulumi config set <each opts.Config>
// pulumi config set --secret <each opts.Secrets>
// pulumi preview
// pulumi up
// pulumi stack export --file stack.json
// pulumi stack import --file stack.json
// pulumi preview (expected to be empty)
// pulumi up (expected to be empty)
// pulumi destroy --yes
// pulumi stack rm --yes integrationtesting
//
// (*) Only if PULUMI_ACCESS_TOKEN is set.
// (+) Only if `opts.RunBuild` is true.
//
// All commands must return success return codes for the test to succeed, unless ExpectFailure is true.
func ProgramTest(t *testing.T, opts *ProgramTestOptions) {
prepareProgram(t, opts)
pt := newProgramTester(t, opts)
err := pt.TestLifeCycleInitAndDestroy()
assert.NoError(t, err)
}
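// Illustrative sketch: a typical integration test wires a program directory, linked SDK packages,
// config, an edit step, and extra validation into ProgramTest. All paths and keys below are
// hypothetical:
//
//	func TestExampleMinimal(t *testing.T) {
//		ProgramTest(t, &ProgramTestOptions{
//			Dir:          filepath.Join("testdata", "minimal"),
//			Dependencies: []string{"@pulumi/pulumi"},
//			Config:       map[string]string{"name": "example"},
//			EditDirs: []EditDir{{
//				Dir:      filepath.Join("testdata", "minimal", "step2"),
//				Additive: true,
//			}},
//			ExtraRuntimeValidation: func(t *testing.T, stack RuntimeValidationStackInfo) {
//				assert.NotEmpty(t, stack.Outputs)
//			},
//		})
//	}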
// ProgramTestManualLifeCycle returns a ProgramTester than must be manually controlled in terms of its lifecycle
func ProgramTestManualLifeCycle(t *testing.T, opts *ProgramTestOptions) *ProgramTester {
prepareProgram(t, opts)
pt := newProgramTester(t, opts)
return pt
}
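// Illustrative sketch: with the manual lifecycle, the caller drives each phase itself and is
// responsible for cleanup; this mirrors what TestLifeCycleInitAndDestroy does internally. Error
// handling is elided:
//
//	pt := ProgramTestManualLifeCycle(t, &opts)
//	_ = pt.TestLifeCyclePrepare()
//	defer pt.TestCleanUp()
//	_ = pt.TestLifeCycleInitialize()
//	defer func() { _ = pt.TestLifeCycleDestroy() }()
//	_ = pt.TestPreviewUpdateAndEdits()
//	pt.TestFinished = true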
// ProgramTester contains state associated with running a single test pass.
type ProgramTester struct {
t *testing.T // the Go tester for this run.
opts *ProgramTestOptions // options that control this test run.
bin string // the `pulumi` binary we are using.
yarnBin string // the `yarn` binary we are using.
goBin string // the `go` binary we are using.
pythonBin string // the `python` binary we are using.
pipenvBin string // The `pipenv` binary we are using.
dotNetBin string // the `dotnet` binary we are using.
eventLog string // The path to the event log for this test.
maxStepTries int // The maximum number of times to retry a failed pulumi step.
tmpdir string // the temporary directory we use for our test environment
projdir string // the project directory we use for this run
TestFinished bool // whether or not the test is finished
}
func newProgramTester(t *testing.T, opts *ProgramTestOptions) *ProgramTester {
stackName := opts.GetStackName()
maxStepTries := 1
if opts.RetryFailedSteps {
maxStepTries = 3
}
if opts.Quick {
opts.SkipPreview = true
opts.SkipExportImport = true
opts.SkipEmptyPreviewUpdate = true
}
return &ProgramTester{
t: t,
opts: opts,
eventLog: filepath.Join(os.TempDir(), string(stackName)+"-events.json"),
maxStepTries: maxStepTries,
}
}
func (pt *ProgramTester) getBin() (string, error) {
return getCmdBin(&pt.bin, "pulumi", pt.opts.Bin)
}
func (pt *ProgramTester) getYarnBin() (string, error) {
return getCmdBin(&pt.yarnBin, "yarn", pt.opts.YarnBin)
}
func (pt *ProgramTester) getGoBin() (string, error) {
return getCmdBin(&pt.goBin, "go", pt.opts.GoBin)
}
// getPythonBin returns a path to the currently-installed `python` binary, or an error if it could not be found.
func (pt *ProgramTester) getPythonBin() (string, error) {
if pt.pythonBin == "" {
pt.pythonBin = pt.opts.PythonBin
if pt.opts.PythonBin == "" {
var err error
// Look for `python3` by default, but fallback to `python` if not found, except on Windows
// where we look for these in the reverse order because the default python.org Windows
// installation does not include a `python3` binary, and the existence of a `python3.exe`
// symlink to `python.exe` on some systems does not work correctly with the Python `venv`
// module.
pythonCmds := []string{"python3", "python"}
if runtime.GOOS == windowsOS {
pythonCmds = []string{"python", "python3"}
}
for _, bin := range pythonCmds {
pt.pythonBin, err = exec.LookPath(bin)
// Break on the first cmd we find on the path (if any).
if err == nil {
break
}
}
if err != nil {
return "", errors.Wrapf(err, "Expected to find one of %q on $PATH", pythonCmds)
}
}
}
return pt.pythonBin, nil
}
// getPipenvBin returns a path to the currently-installed Pipenv tool, or an error if the tool could not be found.
func (pt *ProgramTester) getPipenvBin() (string, error) {
return getCmdBin(&pt.pipenvBin, "pipenv", pt.opts.PipenvBin)
}
func (pt *ProgramTester) getDotNetBin() (string, error) {
return getCmdBin(&pt.dotNetBin, "dotnet", pt.opts.DotNetBin)
}
func (pt *ProgramTester) pulumiCmd(name string, args []string) ([]string, error) {
bin, err := pt.getBin()
if err != nil {
return nil, err
}
cmd := []string{bin}
if du := pt.opts.GetDebugLogLevel(); du > 0 {
cmd = append(cmd, "--logtostderr", "-v="+strconv.Itoa(du))
}
cmd = append(cmd, args...)
if tracing := pt.opts.Tracing; tracing != "" {
cmd = append(cmd, "--tracing", strings.ReplaceAll(tracing, "{command}", name))
}
return cmd, nil
}
func (pt *ProgramTester) yarnCmd(args []string) ([]string, error) {
bin, err := pt.getYarnBin()
if err != nil {
return nil, err
}
result := []string{bin}
result = append(result, args...)
return withOptionalYarnFlags(result), nil
}
func (pt *ProgramTester) pythonCmd(args []string) ([]string, error) {
bin, err := pt.getPythonBin()
if err != nil {
return nil, err
}
cmd := []string{bin}
return append(cmd, args...), nil
}
func (pt *ProgramTester) pipenvCmd(args []string) ([]string, error) {
bin, err := pt.getPipenvBin()
if err != nil {
return nil, err
}
cmd := []string{bin}
return append(cmd, args...), nil
}
func (pt *ProgramTester) runCommand(name string, args []string, wd string) error {
return RunCommand(pt.t, name, args, wd, pt.opts)
}
func (pt *ProgramTester) runPulumiCommand(name string, args []string, wd string, expectFailure bool) error {
cmd, err := pt.pulumiCmd(name, args)
if err != nil {
return err
}
var postFn func(error) error
if pt.opts.PrePulumiCommand != nil {
postFn, err = pt.opts.PrePulumiCommand(args[0])
if err != nil {
return err
}
}
isUpdate := args[0] == "preview" || args[0] == "up" || args[0] == "destroy" || args[0] == "refresh"
// If we're doing a preview or an update and this project is a Python project, we need to run
// the command in the context of the virtual environment that Pipenv created in order to pick up
// the correct version of Python. We also need to do this for destroy and refresh so that
// dynamic providers are run in the right virtual environment.
// This is only necessary when not using automatic virtual environment support.
if pt.opts.UsePipenv && isUpdate {
projinfo, err := pt.getProjinfo(wd)
if err != nil {
return err
}
if projinfo.Proj.Runtime.Name() == "python" {
pipenvBin, err := pt.getPipenvBin()
if err != nil {
return err
}
// "pipenv run" activates the current virtual environment and runs the remainder of the arguments as if it
// were a command.
cmd = append([]string{pipenvBin, "run"}, cmd...)
}
}
_, _, err = retry.Until(context.Background(), retry.Acceptor{
Accept: func(try int, nextRetryTime time.Duration) (bool, interface{}, error) {
runerr := pt.runCommand(name, cmd, wd)
if runerr == nil {
return true, nil, nil
} else if _, ok := runerr.(*exec.ExitError); ok && isUpdate && !expectFailure {
// the update command failed, let's try again, assuming we haven't failed a few times.
if try+1 >= pt.maxStepTries {
return false, nil, errors.Errorf("%v did not succeed after %v tries", cmd, try+1)
}
pt.t.Logf("%v failed: %v; retrying...", cmd, runerr)
return false, nil, nil
}
// some other error, fail
return false, nil, runerr
},
})
if postFn != nil {
if postErr := postFn(err); postErr != nil {
return multierror.Append(err, postErr)
}
}
return err
}
func (pt *ProgramTester) runYarnCommand(name string, args []string, wd string) error {
cmd, err := pt.yarnCmd(args)
if err != nil {
return err
}
_, _, err = retry.Until(context.Background(), retry.Acceptor{
Accept: func(try int, nextRetryTime time.Duration) (bool, interface{}, error) {
runerr := pt.runCommand(name, cmd, wd)
if runerr == nil {
return true, nil, nil
} else if _, ok := runerr.(*exec.ExitError); ok {
// yarn failed, let's try again, assuming we haven't failed a few times.
if try+1 >= 3 {
return false, nil, errors.Errorf("%v did not complete after %v tries", cmd, try+1)
}
return false, nil, nil
}
// some other error, fail
return false, nil, runerr
},
})
return err
}
func (pt *ProgramTester) runPythonCommand(name string, args []string, wd string) error {
cmd, err := pt.pythonCmd(args)
if err != nil {
return err
}
return pt.runCommand(name, cmd, wd)
}
func (pt *ProgramTester) runVirtualEnvCommand(name string, args []string, wd string) error {
// When installing with `pip install -e`, a PKG-INFO file is created. If two packages are being installed
// this way simultaneously (which happens often, when running tests), both installations will be writing the
// same file simultaneously. If one process catches "PKG-INFO" in a half-written state, the one process that
// observed the torn write will fail to install the package.
//
// To avoid this problem, we use pipMutex to explicitly serialize installation operations. Doing so avoids
// the problem of multiple processes stomping on the same files in the source tree. Note that pipMutex is a
// file mutex, so this strategy works even if the go test runner chooses to split up test execution across
// multiple processes. (Furthermore, each test gets an instance of ProgramTester and thus the mutex, so we'd
// need to be sharing the mutex globally in each test process if we weren't using the file system to lock.)
if name == "virtualenv-pip-install-package" {
if err := pipMutex.Lock(); err != nil {
panic(err)
}
if pt.opts.Verbose {
pt.t.Log("acquired pip install lock")
defer pt.t.Log("released pip install lock")
}
defer func() {
if err := pipMutex.Unlock(); err != nil {
panic(err)
}
}()
}
virtualenvBinPath, err := getVirtualenvBinPath(wd, args[0])
if err != nil {
return err
}
cmd := append([]string{virtualenvBinPath}, args[1:]...)
return pt.runCommand(name, cmd, wd)
}
func (pt *ProgramTester) runPipenvCommand(name string, args []string, wd string) error {
// Pipenv uses setuptools to install and uninstall packages. Setuptools has an installation mode called "develop"
// that we use to install the package being tested, since it is 1) lightweight and 2) not using it brings its own
// set of annoying problems.
//
// Setuptools develop does three things:
// 1. It invokes the "egg_info" command in the target package,
// 2. It creates a special `.egg-link` sentinel file in the current site-packages folder, pointing to the package
// being installed's path on disk
// 3. It updates easy-install.pth in site-packages so that pip understands that this package has been installed.
//
// Steps 2 and 3 operate entirely within the context of a virtualenv. The state that they mutate is fully contained
// within the current virtualenv. However, step 1 operates in the context of the package's source tree. Egg info
// is responsible for producing a minimal "egg" for a particular package, and its largest responsibility is creating
// a PKG-INFO file for a package. PKG-INFO contains, among other things, the version of the package being installed.
//
// If two packages are being installed in "develop" mode simultaneously (which happens often, when running tests),
// both installations will run "egg_info" on the source tree and both processes will be writing the same files
// simultaneously. If one process catches "PKG-INFO" in a half-written state, the one process that observed the
// torn write will fail to install the package (setuptools crashes).
//
// To avoid this problem, we use pipMutex to explicitly serialize installation operations. Doing so avoids the
// problem of multiple processes stomping on the same files in the source tree. Note that pipMutex is a file
// mutex, so this strategy works even if the go test runner chooses to split up test execution across multiple
// processes. (Furthermore, each test gets an instance of ProgramTester and thus the mutex, so we'd need to be
// sharing the mutex globally in each test process if we weren't using the file system to lock.)
if name == "pipenv-install-package" {
if err := pipMutex.Lock(); err != nil {
panic(err)
}
if pt.opts.Verbose {
pt.t.Log("acquired pip install lock")
defer pt.t.Log("released pip install lock")
}
defer func() {
if err := pipMutex.Unlock(); err != nil {
panic(err)
}
}()
}
cmd, err := pt.pipenvCmd(args)
if err != nil {
return err
}
return pt.runCommand(name, cmd, wd)
}
// TestLifeCyclePrepare prepares a test by creating a temporary directory
func (pt *ProgramTester) TestLifeCyclePrepare() error {
tmpdir, projdir, err := pt.copyTestToTemporaryDirectory()
pt.tmpdir = tmpdir
pt.projdir = projdir
return err
}
// TestCleanUp cleans up the temporary directory that a test used
func (pt *ProgramTester) TestCleanUp() {
testFinished := pt.TestFinished
if pt.tmpdir != "" {
if !testFinished || pt.t.Failed() {
// Test aborted or failed. Maybe copy to "failed tests" directory.
failedTestsDir := os.Getenv("PULUMI_FAILED_TESTS_DIR")
if failedTestsDir != "" {
dest := filepath.Join(failedTestsDir, pt.t.Name()+uniqueSuffix())
contract.IgnoreError(fsutil.CopyFile(dest, pt.tmpdir, nil))
}
} else {
contract.IgnoreError(os.RemoveAll(pt.tmpdir))
}
} else {
// When tmpdir is empty, we ran "in tree", which means we wrote output
// to the "command-output" folder in the projdir, and we should clean
// it up if the test passed
if testFinished && !pt.t.Failed() {
contract.IgnoreError(os.RemoveAll(filepath.Join(pt.projdir, commandOutputFolderName)))
}
}
}
// TestLifeCycleInitAndDestroy executes the test and cleans up
func (pt *ProgramTester) TestLifeCycleInitAndDestroy() error {
err := pt.TestLifeCyclePrepare()
if err != nil {
return errors.Wrapf(err, "copying test to temp dir %s", pt.tmpdir)
}
pt.TestFinished = false
defer pt.TestCleanUp()
err = pt.TestLifeCycleInitialize()
if err != nil {
return errors.Wrap(err, "initializing test project")
}
// Ensure that before we exit, we attempt to destroy and remove the stack.
defer func() {
destroyErr := pt.TestLifeCycleDestroy()
assert.NoError(pt.t, destroyErr)
}()
if err = pt.TestPreviewUpdateAndEdits(); err != nil {
return errors.Wrap(err, "running test preview, update, and edits")
}
if pt.opts.RunUpdateTest {
err = upgradeProjectDeps(pt.projdir, pt)
if err != nil {
return errors.Wrap(err, "upgrading project dependencies")
}
if err = pt.TestPreviewUpdateAndEdits(); err != nil {
return errors.Wrap(err, "running test preview, update, and edits")
}
}
pt.TestFinished = true
return nil
}
func upgradeProjectDeps(projectDir string, pt *ProgramTester) error {
projInfo, err := pt.getProjinfo(projectDir)
if err != nil {
return errors.Wrap(err, "getting project info")
}
switch rt := projInfo.Proj.Runtime.Name(); rt {
case NodeJSRuntime:
if err = pt.yarnLinkPackageDeps(projectDir); err != nil {
return err
}
case PythonRuntime:
if err = pt.installPipPackageDeps(projectDir); err != nil {
return err
}
default:
return errors.Errorf("unrecognized project runtime: %s", rt)
}
return nil
}
// TestLifeCycleInitialize initializes the project directory and stack along with any configuration
func (pt *ProgramTester) TestLifeCycleInitialize() error {
dir := pt.projdir
stackName := pt.opts.GetStackName()
// If RelativeWorkDir is specified, apply that relative to the temp folder for use as working directory during tests.
if pt.opts.RelativeWorkDir != "" {
dir = filepath.Join(dir, pt.opts.RelativeWorkDir)
}
// Set the default target Pulumi API if not overridden in options.
if pt.opts.CloudURL == "" {
pulumiAPI := os.Getenv("PULUMI_API")
if pulumiAPI != "" {
pt.opts.CloudURL = pulumiAPI
}
}
// Ensure all links are present, the stack is created, and all configs are applied.
pt.t.Logf("Initializing project (dir %s; stack %s)", dir, stackName)
// Login as needed.
stackInitName := string(pt.opts.GetStackNameWithOwner())
if os.Getenv("PULUMI_ACCESS_TOKEN") == "" && pt.opts.CloudURL == "" {
fmt.Printf("Using existing logged in user for tests. Set PULUMI_ACCESS_TOKEN and/or PULUMI_API to override.\n")
} else {
// Set PulumiCredentialsPathEnvVar to our CWD, so we use credentials specific to just this
// test.
pt.opts.Env = append(pt.opts.Env, fmt.Sprintf("%s=%s", workspace.PulumiCredentialsPathEnvVar, dir))
loginArgs := []string{"login"}
loginArgs = addFlagIfNonNil(loginArgs, "--cloud-url", pt.opts.CloudURL)
// If this is a local OR cloud login, then don't attach the owner to the stack-name.
if pt.opts.CloudURL != "" {
stackInitName = string(pt.opts.GetStackName())
}
if err := pt.runPulumiCommand("pulumi-login", loginArgs, dir, false); err != nil {
return err
}
}
// Stack init
stackInitArgs := []string{"stack", "init", stackInitName}
if pt.opts.SecretsProvider != "" {
stackInitArgs = append(stackInitArgs, "--secrets-provider", pt.opts.SecretsProvider)
}
if err := pt.runPulumiCommand("pulumi-stack-init", stackInitArgs, dir, false); err != nil {
return err
}
for key, value := range pt.opts.Config {
if err := pt.runPulumiCommand("pulumi-config",
[]string{"config", "set", key, value}, dir, false); err != nil {
return err
}
}
for key, value := range pt.opts.Secrets {
if err := pt.runPulumiCommand("pulumi-config",
[]string{"config", "set", "--secret", key, value}, dir, false); err != nil {
return err
}
}
for _, cv := range pt.opts.OrderedConfig {
configArgs := []string{"config", "set", cv.Key, cv.Value}
if cv.Secret {
configArgs = append(configArgs, "--secret")
}
if cv.Path {
configArgs = append(configArgs, "--path")
}
if err := pt.runPulumiCommand("pulumi-config", configArgs, dir, false); err != nil {
return err
}
}
return nil
}
// TestLifeCycleDestroy destroys a stack and removes it
func (pt *ProgramTester) TestLifeCycleDestroy() error {
if pt.projdir != "" {
// Destroy and remove the stack.
pt.t.Log("Destroying stack")
destroy := []string{"destroy", "--non-interactive", "--yes", "--skip-preview"}
if pt.opts.GetDebugUpdates() {
destroy = append(destroy, "-d")
}
if pt.opts.JSONOutput {
destroy = append(destroy, "--json")
}
if err := pt.runPulumiCommand("pulumi-destroy", destroy, pt.projdir, false); err != nil {
return err
}
if pt.t.Failed() {
pt.t.Logf("Test failed, retaining stack '%s'", pt.opts.GetStackNameWithOwner())
return nil
}
if !pt.opts.SkipStackRemoval {
return pt.runPulumiCommand("pulumi-stack-rm", []string{"stack", "rm", "--yes"}, pt.projdir, false)
}
}
return nil
}
// TestPreviewUpdateAndEdits runs the preview, update, and any relevant edits
func (pt *ProgramTester) TestPreviewUpdateAndEdits() error {
dir := pt.projdir
// Now preview and update the real changes.
pt.t.Log("Performing primary preview and update")
initErr := pt.PreviewAndUpdate(dir, "initial", pt.opts.ExpectFailure, false, false)
// If the initial preview/update failed, just exit without trying the rest (but make sure to destroy).
if initErr != nil {
return initErr
}
// Perform an empty preview and update; nothing is expected to happen here.
if !pt.opts.SkipExportImport {
pt.t.Log("Roundtripping checkpoint via stack export and stack import")
if err := pt.exportImport(dir); err != nil {
return err
}
}
if !pt.opts.SkipEmptyPreviewUpdate {
msg := ""
if !pt.opts.AllowEmptyUpdateChanges {
msg = "(no changes expected)"
}
pt.t.Logf("Performing empty preview and update%s", msg)
if err := pt.PreviewAndUpdate(
dir, "empty", false, !pt.opts.AllowEmptyPreviewChanges, !pt.opts.AllowEmptyUpdateChanges); err != nil {
return err
}
}
// Run additional validation provided by the test options, passing in the checkpoint info.
if err := pt.performExtraRuntimeValidation(pt.opts.ExtraRuntimeValidation, dir); err != nil {
return err
}
if !pt.opts.SkipRefresh {
// Perform a refresh and ensure it doesn't yield changes.
refresh := []string{"refresh", "--non-interactive", "--yes", "--skip-preview"}
if pt.opts.GetDebugUpdates() {
refresh = append(refresh, "-d")
}
if pt.opts.JSONOutput {
refresh = append(refresh, "--json")
}
if !pt.opts.ExpectRefreshChanges {
refresh = append(refresh, "--expect-no-changes")
}
if err := pt.runPulumiCommand("pulumi-refresh", refresh, dir, false); err != nil {
return err
}
}
// If there are any edits, apply them and run a preview and update for each one.
return pt.testEdits(dir)
}
func (pt *ProgramTester) exportImport(dir string) error {
exportCmd := []string{"stack", "export", "--file", "stack.json"}
importCmd := []string{"stack", "import", "--file", "stack.json"}
defer func() {
contract.IgnoreError(os.Remove(filepath.Join(dir, "stack.json")))
}()
if err := pt.runPulumiCommand("pulumi-stack-export", exportCmd, dir, false); err != nil {
return err
}
return pt.runPulumiCommand("pulumi-stack-import", importCmd, dir, false)
}
// PreviewAndUpdate runs pulumi preview followed by pulumi up
func (pt *ProgramTester) PreviewAndUpdate(dir string, name string, shouldFail, expectNopPreview,
expectNopUpdate bool) error {
preview := []string{"preview", "--non-interactive"}
update := []string{"up", "--non-interactive", "--yes", "--skip-preview", "--event-log", pt.eventLog}
if pt.opts.GetDebugUpdates() {
preview = append(preview, "-d")
update = append(update, "-d")
}
if pt.opts.JSONOutput {
preview = append(preview, "--json")
update = append(update, "--json")
}
if expectNopPreview {
preview = append(preview, "--expect-no-changes")
}
if expectNopUpdate {
update = append(update, "--expect-no-changes")
}
if pt.opts.PreviewCommandlineFlags != nil {
preview = append(preview, pt.opts.PreviewCommandlineFlags...)
}
if pt.opts.UpdateCommandlineFlags != nil {
update = append(update, pt.opts.UpdateCommandlineFlags...)
}
// If not in quick mode, run an explicit preview.
if !pt.opts.SkipPreview {
if err := pt.runPulumiCommand("pulumi-preview-"+name, preview, dir, shouldFail); err != nil {
if shouldFail {
pt.t.Log("Permitting failure (ExpectFailure=true for this preview)")
return nil
}
return err
}
if pt.opts.PreviewCompletedHook != nil {
if err := pt.opts.PreviewCompletedHook(dir); err != nil {
return err
}
}
}
// Now run an update.
if !pt.opts.SkipUpdate {
if err := pt.runPulumiCommand("pulumi-update-"+name, update, dir, shouldFail); err != nil {
if shouldFail {
pt.t.Log("Permitting failure (ExpectFailure=true for this update)")
return nil
}
return err
}
}
// If we expected a failure, but none occurred, return an error.
if shouldFail {
return errors.New("expected this step to fail, but it succeeded")
}
return nil
}
func (pt *ProgramTester) query(dir string, name string, shouldFail bool) error {
query := []string{"query", "--non-interactive"}
if pt.opts.GetDebugUpdates() {
query = append(query, "-d")
}
if pt.opts.QueryCommandlineFlags != nil {
query = append(query, pt.opts.QueryCommandlineFlags...)
}
// Now run a query.
if err := pt.runPulumiCommand("pulumi-query-"+name, query, dir, shouldFail); err != nil {
if shouldFail {
pt.t.Log("Permitting failure (ExpectFailure=true for this update)")
return nil
}
return err
}
// If we expected a failure, but none occurred, return an error.
if shouldFail {
return errors.New("expected this step to fail, but it succeeded")
}
return nil
}
func (pt *ProgramTester) testEdits(dir string) error {
for i, edit := range pt.opts.EditDirs {
var err error
if err = pt.testEdit(dir, i, edit); err != nil {
return err
}
}
return nil
}
func (pt *ProgramTester) testEdit(dir string, i int, edit EditDir) error {
pt.t.Logf("Applying edit '%v' and rerunning preview and update", edit.Dir)
if edit.Additive {
// Just copy new files into dir
if err := fsutil.CopyFile(dir, edit.Dir, nil); err != nil {
return errors.Wrapf(err, "Couldn't copy %v into %v", edit.Dir, dir)
}
} else {
// Create a new temporary directory
newDir, err := ioutil.TempDir("", pt.opts.StackName+"-")
if err != nil {
return errors.Wrapf(err, "Couldn't create new temporary directory")
}
// Delete whichever copy of the test is unused when we return
dirToDelete := newDir
defer func() {
contract.IgnoreError(os.RemoveAll(dirToDelete))
}()
// Copy everything except Pulumi.yaml, Pulumi.<stack-name>.yaml, and .pulumi from source into new directory
exclusions := make(map[string]bool)
projectYaml := workspace.ProjectFile + ".yaml"
configYaml := workspace.ProjectFile + "." + pt.opts.StackName + ".yaml"
exclusions[workspace.BookkeepingDir] = true
exclusions[projectYaml] = true
exclusions[configYaml] = true
if err := fsutil.CopyFile(newDir, edit.Dir, exclusions); err != nil {
return errors.Wrapf(err, "Couldn't copy %v into %v", edit.Dir, newDir)
}
// Copy Pulumi.yaml, Pulumi.<stack-name>.yaml, and .pulumi from old directory to new directory
oldProjectYaml := filepath.Join(dir, projectYaml)
newProjectYaml := filepath.Join(newDir, projectYaml)
oldConfigYaml := filepath.Join(dir, configYaml)
newConfigYaml := filepath.Join(newDir, configYaml)
oldProjectDir := filepath.Join(dir, workspace.BookkeepingDir)
newProjectDir := filepath.Join(newDir, workspace.BookkeepingDir)
if err := fsutil.CopyFile(newProjectYaml, oldProjectYaml, nil); err != nil {
return errors.Wrap(err, "Couldn't copy Pulumi.yaml")
}
if err := fsutil.CopyFile(newConfigYaml, oldConfigYaml, nil); err != nil {
return errors.Wrapf(err, "Couldn't copy Pulumi.%s.yaml", pt.opts.StackName)
}
if err := fsutil.CopyFile(newProjectDir, oldProjectDir, nil); err != nil {
return errors.Wrap(err, "Couldn't copy .pulumi")
}
// Finally, replace our current temp directory with the new one.
dirOld := dir + ".old"
if err := os.Rename(dir, dirOld); err != nil {
return errors.Wrapf(err, "Couldn't rename %v to %v", dir, dirOld)
}
// There's a brief window here where the old temp dir name could be taken from us.
if err := os.Rename(newDir, dir); err != nil {
return errors.Wrapf(err, "Couldn't rename %v to %v", newDir, dir)
}
// Keep dir, delete oldDir
dirToDelete = dirOld
}
err := pt.prepareProjectDir(dir)
if err != nil {
return errors.Wrapf(err, "Couldn't prepare project in %v", dir)
}
oldStdOut := pt.opts.Stdout
oldStderr := pt.opts.Stderr
oldVerbose := pt.opts.Verbose
if edit.Stdout != nil {
pt.opts.Stdout = edit.Stdout
}
if edit.Stderr != nil {
pt.opts.Stderr = edit.Stderr
}
if edit.Verbose {
pt.opts.Verbose = true
}
defer func() {
pt.opts.Stdout = oldStdOut
pt.opts.Stderr = oldStderr
pt.opts.Verbose = oldVerbose
}()
if !edit.QueryMode {
if err = pt.PreviewAndUpdate(dir, fmt.Sprintf("edit-%d", i),
edit.ExpectFailure, edit.ExpectNoChanges, edit.ExpectNoChanges); err != nil {
return err
}
} else {
if err = pt.query(dir, fmt.Sprintf("query-%d", i), edit.ExpectFailure); err != nil {
return err
}
}
return pt.performExtraRuntimeValidation(edit.ExtraRuntimeValidation, dir)
}
func (pt *ProgramTester) performExtraRuntimeValidation(
extraRuntimeValidation func(t *testing.T, stack RuntimeValidationStackInfo), dir string) error {
if extraRuntimeValidation == nil {
return nil
}
stackName := pt.opts.GetStackName()
// Create a temporary file name for the stack export
tempDir, err := ioutil.TempDir("", string(stackName))
if err != nil {
return err
}
fileName := filepath.Join(tempDir, "stack.json")
// Invoke `pulumi stack export`
// There are situations where we want to get access to the secrets in the validation
// this will allow us to get access to them as part of running ExtraRuntimeValidation
var pulumiCommand []string
if pt.opts.DecryptSecretsInOutput {
pulumiCommand = append(pulumiCommand, "stack", "export", "--show-secrets", "--file", fileName)
} else {
pulumiCommand = append(pulumiCommand, "stack", "export", "--file", fileName)
}
if err = pt.runPulumiCommand("pulumi-export",
pulumiCommand, dir, false); err != nil {
return errors.Wrapf(err, "expected to export stack to file: %s", fileName)
}
// Open the exported JSON file
f, err := os.Open(fileName)
if err != nil {
return errors.Wrapf(err, "expected to be able to open file with stack exports: %s", fileName)
}
defer func() {
contract.IgnoreClose(f)
contract.IgnoreError(os.RemoveAll(tempDir))
}()
// Unmarshal the Deployment
var untypedDeployment apitype.UntypedDeployment
if err = json.NewDecoder(f).Decode(&untypedDeployment); err != nil {
return err
}
var deployment apitype.DeploymentV3
if err = json.Unmarshal(untypedDeployment.Deployment, &deployment); err != nil {
return err
}
// Get the root resource and outputs from the deployment
var rootResource apitype.ResourceV3
var outputs map[string]interface{}
for _, res := range deployment.Resources {
if res.Type == resource.RootStackType {
rootResource = res
outputs = res.Outputs
}
}
// Read the event log.
eventsFile, err := os.Open(pt.eventLog)
if err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "expected to be able to open event log file %s", pt.eventLog)
}
defer contract.IgnoreClose(eventsFile)
decoder, events := json.NewDecoder(eventsFile), []apitype.EngineEvent{}
for {
var event apitype.EngineEvent
if err = decoder.Decode(&event); err != nil {
if err == io.EOF {
break
}
return errors.Wrapf(err, "decoding engine event")
}
events = append(events, event)
}
// Populate stack info object with all of this data to pass to the validation function
stackInfo := RuntimeValidationStackInfo{
StackName: pt.opts.GetStackName(),
Deployment: &deployment,
RootResource: rootResource,
Outputs: outputs,
Events: events,
}
pt.t.Log("Performing extra runtime validation.")
extraRuntimeValidation(pt.t, stackInfo)
pt.t.Log("Extra runtime validation complete.")
return nil
}
// copyTestToTemporaryDirectory creates a temporary directory to run the test in and copies the test to it.
func (pt *ProgramTester) copyTestToTemporaryDirectory() (string, string, error) {
// Get the source dir and project info.
sourceDir := pt.opts.Dir
projinfo, err := pt.getProjinfo(sourceDir)
if err != nil {
return "", "", err
}
if pt.opts.Stdout == nil {
pt.opts.Stdout = os.Stdout
}
if pt.opts.Stderr == nil {
pt.opts.Stderr = os.Stderr
}
pt.t.Logf("sample: %v", sourceDir)
bin, err := pt.getBin()
if err != nil {
return "", "", err
}
pt.t.Logf("pulumi: %v\n", bin)
stackName := string(pt.opts.GetStackName())
// For most projects, we will copy to a temporary directory. For Go projects, however, we must create
// a folder structure that adheres to GOPATH requirements
var tmpdir, projdir string
if projinfo.Proj.Runtime.Name() == "go" {
targetDir, err := tools.CreateTemporaryGoFolder(stackName)
if err != nil {
return "", "", errors.Wrap(err, "Couldn't create temporary directory")
}
tmpdir = targetDir
projdir = targetDir
} else {
targetDir, tempErr := ioutil.TempDir("", stackName+"-")
if tempErr != nil {
return "", "", errors.Wrap(tempErr, "Couldn't create temporary directory")
}
tmpdir = targetDir
projdir = targetDir
}
// Copy the source project.
if copyErr := fsutil.CopyFile(tmpdir, sourceDir, nil); copyErr != nil {
return "", "", copyErr
}
projinfo.Root = projdir
err = pt.prepareProject(projinfo)
if err != nil {
return "", "", errors.Wrapf(err, "Failed to prepare %v", projdir)
}
// TODO[pulumi/pulumi#5455]: Dynamic providers fail to load when used from multi-lang components.
// Until that's been fixed, this environment variable can be set by a test, which results in
// a package.json being emitted in the project directory and `yarn install && yarn link @pulumi/pulumi`
// being run.
// When the underlying issue has been fixed, the use of this environment variable should be removed.
var yarnLinkPulumi bool
for _, env := range pt.opts.Env {
if env == "PULUMI_TEST_YARN_LINK_PULUMI=true" {
yarnLinkPulumi = true
break
}
}
if yarnLinkPulumi {
const packageJSON = `{
"name": "test",
"peerDependencies": {
"@pulumi/pulumi": "latest"
}
}`
if err := ioutil.WriteFile(filepath.Join(projdir, "package.json"), []byte(packageJSON), 0600); err != nil {
return "", "", err
}
if err = pt.runYarnCommand("yarn-install", []string{"install"}, projdir); err != nil {
return "", "", err
}
if err := pt.runYarnCommand("yarn-link", []string{"link", "@pulumi/pulumi"}, projdir); err != nil {
return "", "", err
}
}
pt.t.Logf("projdir: %v", projdir)
return tmpdir, projdir, nil
}
func (pt *ProgramTester) getProjinfo(projectDir string) (*engine.Projinfo, error) {
// Load up the package so we know things like what language the project is.
projfile := filepath.Join(projectDir, workspace.ProjectFile+".yaml")
proj, err := workspace.LoadProject(projfile)
if err != nil {
return nil, err
}
return &engine.Projinfo{Proj: proj, Root: projectDir}, nil
}
// prepareProject runs setup necessary to get the project ready for `pulumi` commands.
func (pt *ProgramTester) prepareProject(projinfo *engine.Projinfo) error {
// Based on the language, invoke the right routine to prepare the target directory.
switch rt := projinfo.Proj.Runtime.Name(); rt {
case NodeJSRuntime:
return pt.prepareNodeJSProject(projinfo)
case PythonRuntime:
return pt.preparePythonProject(projinfo)
case GoRuntime:
return pt.prepareGoProject(projinfo)
case DotNetRuntime:
return pt.prepareDotNetProject(projinfo)
default:
return errors.Errorf("unrecognized project runtime: %s", rt)
}
}
// prepareProjectDir runs setup necessary to get the project ready for `pulumi` commands.
func (pt *ProgramTester) prepareProjectDir(projectDir string) error {
projinfo, err := pt.getProjinfo(projectDir)
if err != nil {
return err
}
return pt.prepareProject(projinfo)
}
// prepareNodeJSProject runs setup necessary to get a Node.js project ready for `pulumi` commands.
func (pt *ProgramTester) prepareNodeJSProject(projinfo *engine.Projinfo) error {
if err := pulumi_testing.WriteYarnRCForTest(projinfo.Root); err != nil {
return err
}
// Get the correct pwd to run Yarn in.
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
// If the test requested some packages to be overridden, we do two things. First, if the package is listed as a
// direct dependency of the project, we change the version constraint in the package.json. For transitive
// dependencies, we use yarn's "resolutions" feature to force them to a specific version.
if len(pt.opts.Overrides) > 0 {
packageJSON, err := readPackageJSON(cwd)
if err != nil {
return err
}
resolutions := make(map[string]interface{})
for packageName, packageVersion := range pt.opts.Overrides {
for _, section := range []string{"dependencies", "devDependencies"} {
if _, has := packageJSON[section]; has {
entry := packageJSON[section].(map[string]interface{})
if _, has := entry[packageName]; has {
entry[packageName] = packageVersion
}
}
}
pt.t.Logf("adding resolution for %s to version %s", packageName, packageVersion)
resolutions["**/"+packageName] = packageVersion
}
// Replace any existing resolutions section with our newly computed one.
packageJSON["resolutions"] = resolutions
if err := writePackageJSON(cwd, packageJSON); err != nil {
return err
}
}
// Now ensure dependencies are present.
if err = pt.runYarnCommand("yarn-install", []string{"install"}, cwd); err != nil {
return err
}
if !pt.opts.RunUpdateTest {
if err = pt.yarnLinkPackageDeps(cwd); err != nil {
return err
}
}
if pt.opts.RunBuild {
// And finally compile it using whatever build steps are in the package.json file.
if err = pt.runYarnCommand("yarn-build", []string{"run", "build"}, cwd); err != nil {
return err
}
}
return nil
}
// readPackageJSON unmarshals the package.json file located in pathToPackage.
func readPackageJSON(pathToPackage string) (map[string]interface{}, error) {
f, err := os.Open(filepath.Join(pathToPackage, "package.json"))
if err != nil {
return nil, errors.Wrap(err, "opening package.json")
}
defer contract.IgnoreClose(f)
var ret map[string]interface{}
if err := json.NewDecoder(f).Decode(&ret); err != nil {
return nil, errors.Wrap(err, "decoding package.json")
}
return ret, nil
}
func writePackageJSON(pathToPackage string, metadata map[string]interface{}) error {
// os.Create truncates the already existing file.
f, err := os.Create(filepath.Join(pathToPackage, "package.json"))
if err != nil {
return errors.Wrap(err, "opening package.json")
}
defer contract.IgnoreClose(f)
encoder := json.NewEncoder(f)
encoder.SetIndent("", " ")
return errors.Wrap(encoder.Encode(metadata), "writing package.json")
}
// preparePythonProject runs setup necessary to get a Python project ready for `pulumi` commands.
func (pt *ProgramTester) preparePythonProject(projinfo *engine.Projinfo) error {
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
if pt.opts.UsePipenv {
if err = pt.preparePythonProjectWithPipenv(cwd); err != nil {
return err
}
} else {
if err = pt.runPythonCommand("python-venv", []string{"-m", "venv", "venv"}, cwd); err != nil {
return err
}
projinfo.Proj.Runtime.SetOption("virtualenv", "venv")
projfile := filepath.Join(projinfo.Root, workspace.ProjectFile+".yaml")
if err = projinfo.Proj.Save(projfile); err != nil {
return errors.Wrap(err, "saving project")
}
if err := pt.runVirtualEnvCommand("virtualenv-pip-install",
[]string{"python", "-m", "pip", "install", "-r", "requirements.txt"}, cwd); err != nil {
return err
}
}
if !pt.opts.RunUpdateTest {
if err = pt.installPipPackageDeps(cwd); err != nil {
return err
}
}
return nil
}
func (pt *ProgramTester) preparePythonProjectWithPipenv(cwd string) error {
// Allow ENV var based overload of desired Python version for
// the Pipenv environment. This is useful in CI scenarios that
// need to pin a specific version such as 3.9.x vs 3.10.x.
pythonVersion := os.Getenv("PYTHON_VERSION")
if pythonVersion == "" {
pythonVersion = "3"
}
// Create a new Pipenv environment. This bootstraps a new virtual environment containing the version of Python that
// we requested. Note that this version of Python is sourced from the machine, so you must first install the version
// of Python that you are requesting on the host machine before building a virtualenv for it.
if err := pt.runPipenvCommand("pipenv-new", []string{"--python", pythonVersion}, cwd); err != nil {
return err
}
// Install the package's dependencies. We do this by running `pip` inside the virtualenv that `pipenv` has created.
// We don't use `pipenv install` because we don't want a lock file and prefer the similar model of `pip install`
// which matches what our customers do.
err := pt.runPipenvCommand("pipenv-install", []string{"run", "pip", "install", "-r", "requirements.txt"}, cwd)
if err != nil {
return err
}
return nil
}
// yarnLinkPackageDeps brings in package dependencies via yarn link
func (pt *ProgramTester) yarnLinkPackageDeps(cwd string) error {
for _, dependency := range pt.opts.Dependencies {
if err := pt.runYarnCommand("yarn-link", []string{"link", dependency}, cwd); err != nil {
return err
}
}
return nil
}
// installPipPackageDeps brings in package dependencies via pip install
func (pt *ProgramTester) installPipPackageDeps(cwd string) error {
var err error
for _, dep := range pt.opts.Dependencies {
// If the given filepath isn't absolute, make it absolute. We're about to pass it to pipenv and pipenv is
// operating inside of a random folder in /tmp.
if !filepath.IsAbs(dep) {
dep, err = filepath.Abs(dep)
if err != nil {
return err
}
}
if pt.opts.UsePipenv {
if err := pt.runPipenvCommand("pipenv-install-package",
[]string{"run", "pip", "install", "-e", dep}, cwd); err != nil {
return err
}
} else {
if err := pt.runVirtualEnvCommand("virtualenv-pip-install-package",
[]string{"python", "-m", "pip", "install", "-e", dep}, cwd); err != nil {
return err
}
}
}
return nil
}
func getVirtualenvBinPath(cwd, bin string) (string, error) {
virtualenvBinPath := filepath.Join(cwd, "venv", "bin", bin)
if runtime.GOOS == windowsOS {
virtualenvBinPath = filepath.Join(cwd, "venv", "Scripts", fmt.Sprintf("%s.exe", bin))
}
if info, err := os.Stat(virtualenvBinPath); err != nil || info.IsDir() {
return "", errors.Errorf("Expected %s to exist in virtual environment at %q", bin, virtualenvBinPath)
}
return virtualenvBinPath, nil
}
// getSanitizedModulePath strips the version string from a go dep
// Note: most of the pulumi modules don't use major version subdirectories for modules
func getSanitizedModulePath(pkg string) string {
re := regexp.MustCompile(`v\d`)
v := re.FindString(pkg)
if v != "" {
return strings.TrimSuffix(strings.Replace(pkg, v, "", -1), "/")
}
return pkg
}
func getRewritePath(pkg string, gopath string, depRoot string) string {
var depParts []string
sanitizedPkg := getSanitizedModulePath(pkg)
splitPkg := strings.Split(sanitizedPkg, "/")
if depRoot != "" {
// Get the repository name: the third segment of the module path, e.g. "bar" in "github.com/foo/bar".
repoName := splitPkg[2]
basePath := splitPkg[len(splitPkg)-1]
if basePath == repoName {
depParts = []string{depRoot, repoName}
} else {
depParts = []string{depRoot, repoName, basePath}
}
return filepath.Join(depParts...)
}
depParts = append([]string{gopath, "src"}, splitPkg...)
return filepath.Join(depParts...)
}
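// Illustrative sketch: with hypothetical inputs, the rewrite path resolves as follows.
//
//	getRewritePath("github.com/pulumi/pulumi/sdk/v3", "/home/user/go", "")
//	// => /home/user/go/src/github.com/pulumi/pulumi/sdk
//	getRewritePath("github.com/pulumi/pulumi/sdk/v3", "/home/user/go", "/work")
//	// => /work/pulumi/sdk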
// prepareGoProject runs setup necessary to get a Go project ready for `pulumi` commands.
func (pt *ProgramTester) prepareGoProject(projinfo *engine.Projinfo) error {
// Go programs are compiled, so we will compile the project first.
goBin, err := pt.getGoBin()
if err != nil {
return errors.Wrap(err, "locating `go` binary")
}
// Ensure GOPATH is known.
gopath := os.Getenv("GOPATH")
if gopath == "" {
usr, userErr := user.Current()
if userErr != nil {
return userErr
}
gopath = filepath.Join(usr.HomeDir, "go")
}
depRoot := os.Getenv("PULUMI_GO_DEP_ROOT")
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
// initialize a go.mod for dependency resolution if one doesn't exist
_, err = os.Stat(filepath.Join(cwd, "go.mod"))
if err != nil {
err = pt.runCommand("go-mod-init", []string{goBin, "mod", "init"}, cwd)
if err != nil {
return err
}
}
// link local dependencies
for _, pkg := range pt.opts.Dependencies {
dep := getRewritePath(pkg, gopath, depRoot)
editStr := fmt.Sprintf("%s=%s", pkg, dep)
err = pt.runCommand("go-mod-edit", []string{goBin, "mod", "edit", "-replace", editStr}, cwd)
if err != nil {
return err
}
}
// tidy to resolve all transitive dependencies including from local dependencies above
err = pt.runCommand("go-mod-tidy", []string{goBin, "mod", "tidy"}, cwd)
if err != nil {
return err
}
if pt.opts.RunBuild {
outBin := filepath.Join(gopath, "bin", string(projinfo.Proj.Name))
if runtime.GOOS == windowsOS {
outBin = fmt.Sprintf("%s.exe", outBin)
}
err = pt.runCommand("go-build", []string{goBin, "build", "-o", outBin, "."}, cwd)
if err != nil {
return err
}
_, err = os.Stat(outBin)
if err != nil {
return fmt.Errorf("error finding built application artifact: %w", err)
}
}
return nil
}
// prepareDotNetProject runs setup necessary to get a .NET project ready for `pulumi` commands.
func (pt *ProgramTester) prepareDotNetProject(projinfo *engine.Projinfo) error {
dotNetBin, err := pt.getDotNetBin()
if err != nil {
return errors.Wrap(err, "locating `dotnet` binary")
}
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
localNuget := os.Getenv("PULUMI_LOCAL_NUGET")
if localNuget == "" {
localNuget = "/opt/pulumi/nuget"
}
for _, dep := range pt.opts.Dependencies {
// dotnet add package requires a specific version in case of a pre-release, so we have to look it up.
matches, err := filepath.Glob(filepath.Join(localNuget, dep+".?.*.nupkg"))
if err != nil {
return errors.Wrap(err, "failed to find a local Pulumi NuGet package")
}
if len(matches) != 1 {
return errors.Errorf("attempting to find a local Pulumi NuGet package yielded %v results", matches)
}
file := filepath.Base(matches[0])
r := strings.NewReplacer(dep+".", "", ".nupkg", "")
version := r.Replace(file)
err = pt.runCommand("dotnet-add-package",
[]string{dotNetBin, "add", "package", dep, "-v", version}, cwd)
if err != nil {
return errors.Wrapf(err, "failed to add dependency on %s", dep)
}
}
return nil
}
|
[
"\"PULUMI_TEST_DEBUG_LOG_LEVEL\"",
"\"PULUMI_TEST_DEBUG_UPDATES\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_ACCESS_TOKEN\"",
"\"PULUMI_TEST_REPORT_CONFIG\"",
"\"PULUMI_TEST_TRACE_ENDPOINT\"",
"\"PULUMI_FAILED_TESTS_DIR\"",
"\"PULUMI_API\"",
"\"PULUMI_ACCESS_TOKEN\"",
"\"PYTHON_VERSION\"",
"\"GOPATH\"",
"\"PULUMI_GO_DEP_ROOT\"",
"\"PULUMI_LOCAL_NUGET\""
] |
[] |
[
"PULUMI_ACCESS_TOKEN",
"PULUMI_FAILED_TESTS_DIR",
"PULUMI_TEST_DEBUG_UPDATES",
"PULUMI_API",
"PULUMI_GO_DEP_ROOT",
"PULUMI_LOCAL_NUGET",
"PULUMI_TEST_OWNER",
"GOPATH",
"PYTHON_VERSION",
"PULUMI_TEST_DEBUG_LOG_LEVEL",
"PULUMI_TEST_REPORT_CONFIG",
"PULUMI_TEST_TRACE_ENDPOINT"
] |
[]
|
["PULUMI_ACCESS_TOKEN", "PULUMI_FAILED_TESTS_DIR", "PULUMI_TEST_DEBUG_UPDATES", "PULUMI_API", "PULUMI_GO_DEP_ROOT", "PULUMI_LOCAL_NUGET", "PULUMI_TEST_OWNER", "GOPATH", "PYTHON_VERSION", "PULUMI_TEST_DEBUG_LOG_LEVEL", "PULUMI_TEST_REPORT_CONFIG", "PULUMI_TEST_TRACE_ENDPOINT"]
|
go
| 12 | 0 | |
ugahacks6/ugahacks6/asgi.py
|
"""
ASGI config for ugahacks6 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ugahacks6.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
python/kfserving/kfserving/storage.py
|
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import logging
import tempfile
import os
import re
from urllib.parse import urlparse
from azure.storage.blob import BlockBlobService
from google.auth import exceptions
from google.cloud import storage
from minio import Minio
_GCS_PREFIX = "gs://"
_S3_PREFIX = "s3://"
_BLOB_RE = "https://(.+?).blob.core.windows.net/(.+)"
_LOCAL_PREFIX = "file://"
class Storage(object): # pylint: disable=too-few-public-methods
@staticmethod
def download(uri: str, out_dir: str = None) -> str:
logging.info("Copying contents of %s to local", uri)
is_local = False
if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):
is_local = True
if out_dir is None:
if is_local:
# noop if out_dir is not set and the path is local
return Storage._download_local(uri)
out_dir = tempfile.mkdtemp()
if uri.startswith(_GCS_PREFIX):
Storage._download_gcs(uri, out_dir)
elif uri.startswith(_S3_PREFIX):
Storage._download_s3(uri, out_dir)
elif re.search(_BLOB_RE, uri):
Storage._download_blob(uri, out_dir)
elif is_local:
return Storage._download_local(uri, out_dir)
else:
raise Exception("Cannot recognize storage type for " + uri +
"\n'%s', '%s', and '%s' are the current available storage type." %
(_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))
logging.info("Successfully copied %s to %s", uri, out_dir)
return out_dir
@staticmethod
def _download_s3(uri, temp_dir: str):
client = Storage._create_minio_client()
bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1)
bucket_name = bucket_args[0]
bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)
count = 0
for obj in objects:
# Replace any prefix from the object key with temp_dir
subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/")
# fget_object handles directory creation if does not exist
if not obj.is_dir:
if subdir_object_key == "":
subdir_object_key = obj.object_name
client.fget_object(bucket_name, obj.object_name,
os.path.join(temp_dir, subdir_object_key))
count = count + 1
if count == 0:
raise RuntimeError("Failed to fetch model. \
The path or model %s does not exist." % (uri))
@staticmethod
def _download_gcs(uri, temp_dir: str):
try:
storage_client = storage.Client()
except exceptions.DefaultCredentialsError:
storage_client = storage.Client.create_anonymous_client()
bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1)
bucket_name = bucket_args[0]
bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
bucket = storage_client.bucket(bucket_name)
prefix = bucket_path
if not prefix.endswith("/"):
prefix = prefix + "/"
blobs = bucket.list_blobs(prefix=prefix)
count = 0
for blob in blobs:
# Replace any prefix from the object key with temp_dir
subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/")
# Create necessary subdirectory to store the object locally
if "/" in subdir_object_key:
local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0])
if not os.path.isdir(local_object_dir):
os.makedirs(local_object_dir, exist_ok=True)
if subdir_object_key.strip() != "":
dest_path = os.path.join(temp_dir, subdir_object_key)
logging.info("Downloading: %s", dest_path)
blob.download_to_filename(dest_path)
count = count + 1
if count == 0:
raise RuntimeError("Failed to fetch model. \
The path or model %s does not exist." % (uri))
@staticmethod
def _download_blob(uri, out_dir: str): # pylint: disable=too-many-locals
match = re.search(_BLOB_RE, uri)
account_name = match.group(1)
storage_url = match.group(2)
container_name, prefix = storage_url.split("/", 1)
logging.info("Connecting to BLOB account: [%s], container: [%s], prefix: [%s]",
account_name,
container_name,
prefix)
try:
block_blob_service = BlockBlobService(account_name=account_name)
blobs = block_blob_service.list_blobs(container_name, prefix=prefix)
except Exception: # pylint: disable=broad-except
token = Storage._get_azure_storage_token()
if token is None:
logging.warning("Azure credentials not found, retrying anonymous access")
block_blob_service = BlockBlobService(account_name=account_name, token_credential=token)
blobs = block_blob_service.list_blobs(container_name, prefix=prefix)
count = 0
for blob in blobs:
dest_path = os.path.join(out_dir, blob.name)
if "/" in blob.name:
head, tail = os.path.split(blob.name)
if prefix is not None:
head = head[len(prefix):]
if head.startswith('/'):
head = head[1:]
dir_path = os.path.join(out_dir, head)
dest_path = os.path.join(dir_path, tail)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
logging.info("Downloading: %s to %s", blob.name, dest_path)
block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)
count = count + 1
if count == 0:
raise RuntimeError("Failed to fetch model. \
The path or model %s does not exist." % (uri))
@staticmethod
def _get_azure_storage_token():
tenant_id = os.getenv("AZ_TENANT_ID", "")
client_id = os.getenv("AZ_CLIENT_ID", "")
client_secret = os.getenv("AZ_CLIENT_SECRET", "")
subscription_id = os.getenv("AZ_SUBSCRIPTION_ID", "")
if tenant_id == "" or client_id == "" or client_secret == "" or subscription_id == "":
return None
# note the SP must have "Storage Blob Data Owner" perms for this to work
import adal
from azure.storage.common import TokenCredential
authority_url = "https://login.microsoftonline.com/" + tenant_id
context = adal.AuthenticationContext(authority_url)
token = context.acquire_token_with_client_credentials(
"https://storage.azure.com/",
client_id,
client_secret)
token_credential = TokenCredential(token["accessToken"])
logging.info("Retrieved SP token credential for client_id: %s", client_id)
return token_credential
@staticmethod
def _download_local(uri, out_dir=None):
local_path = uri.replace(_LOCAL_PREFIX, "", 1)
if not os.path.exists(local_path):
raise RuntimeError("Local path %s does not exist." % (uri))
if out_dir is None:
return local_path
elif not os.path.isdir(out_dir):
os.makedirs(out_dir)
if os.path.isdir(local_path):
local_path = os.path.join(local_path, "*")
for src in glob.glob(local_path):
_, tail = os.path.split(src)
dest_path = os.path.join(out_dir, tail)
logging.info("Linking: %s to %s", src, dest_path)
os.symlink(src, dest_path)
return out_dir
@staticmethod
def _create_minio_client():
# Remove possible http scheme for Minio
url = urlparse(os.getenv("AWS_ENDPOINT_URL", ""))
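# NOTE: S3_USE_HTTPS is read as a raw string, so any non-empty value (including "false") is truthy here.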
use_ssl = url.scheme == 'https' if url.scheme else bool(os.getenv("S3_USE_HTTPS", "true"))
return Minio(url.netloc,
access_key=os.getenv("AWS_ACCESS_KEY_ID", ""),
secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""),
secure=use_ssl)
|
[] |
[] |
[
"AZ_SUBSCRIPTION_ID",
"AWS_SECRET_ACCESS_KEY",
"AZ_TENANT_ID",
"AZ_CLIENT_SECRET",
"AWS_ENDPOINT_URL",
"AWS_ACCESS_KEY_ID",
"S3_USE_HTTPS",
"AZ_CLIENT_ID"
] |
[]
|
["AZ_SUBSCRIPTION_ID", "AWS_SECRET_ACCESS_KEY", "AZ_TENANT_ID", "AZ_CLIENT_SECRET", "AWS_ENDPOINT_URL", "AWS_ACCESS_KEY_ID", "S3_USE_HTTPS", "AZ_CLIENT_ID"]
|
python
| 8 | 0 | |
oase-root/libs/backyardlibs/monitoring_adapter/Grafana/Grafana_api.py
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[Overview]
Retrieve currently occurring incidents using the Grafana API.
"""
import json
import urllib
import requests
import os
import sys
import hashlib
import pprint
import django
import traceback
import base64
# Add the import search path
my_path = os.path.dirname(os.path.abspath(__file__))
tmp_path = my_path.split('oase-root')
root_dir_path = tmp_path[0] + 'oase-root'
sys.path.append(root_dir_path)
# Import OASE modules
# #LOCAL_PATCH#
os.environ['DJANGO_SETTINGS_MODULE'] = 'confs.frameworkconfs.settings'
django.setup()
from django.conf import settings
from libs.commonlibs.oase_logger import OaseLogger
from libs.commonlibs.aes_cipher import AESCipher
from web_app.models.Grafana_monitoring_models import GrafanaAdapter
logger = OaseLogger.get_instance()  # Initialize the logger
class GrafanaApi(object):
def __init__(self, request_rec):
"""
Return a Grafana API instance.
[Args]
request_rec: target Grafana monitoring master record
"""
self.request_id = 1
self.uri = request_rec.uri
self.auth_token = None
self.username = ''
self.passwd = ''
if request_rec.username and request_rec.password:
req_pw = request_rec.password
cipher = AESCipher(settings.AES_KEY)
decrypted_password = cipher.decrypt(req_pw)
self.passwd = decrypted_password
self.username = request_rec.username
def _request(self, params):
"""
Send a request to the Grafana API.
The id is not currently needed, so a simply incremented number is assigned to it.
[Args]
params: parameters for the Grafana API method
auth_token: authentication token for the Grafana API
[Returns]
the response converted to a dict
"""
data = params
basic_auth = None
headers = {"Content-Type": "application/x-www-form-urlencoded"}
if self.auth_token:
headers['Authorization'] = 'Bearer %s' % (self.auth_token)
elif self.username and self.passwd:
basic_auth = requests.auth.HTTPBasicAuth(self.username, self.passwd)
try:
if data:
response = requests.post(self.uri, data=data, headers=headers, auth=basic_auth)
else:
response = requests.get(self.uri, headers=headers, auth=basic_auth)
self.request_id += 1
except requests.exceptions.ConnectTimeout:
# "リトライについて検討すべき"
logger.system_log('LOSM30026', 'Grafana', 'Grafana Timeout error.')
raise
except requests.exceptions.RequestException:
logger.system_log('LOSM30026', 'Grafana', 'RequestException error.')
raise
if response.status_code != 200:
# Status codes other than 200 are not handled here; they are processed upstream
raise Exception('Failed to get response. sts_code=%s, reason=%s' % (response.status_code, getattr(response, 'text', '')))
try:
resp = json.loads(response.text)
except Exception as e:
logger.system_log('LOSM30026', 'Grafana', 'JSON decode error. response=%s' % response)
logger.logic_log('LOSI00005', traceback.format_exc())
raise
return resp
def get_active_triggers(self, last_change_since=None, now=None):
"""
Retrieve all currently occurring incidents.
Also request the hostid and host name of each occurrence.
[Returns]
result: information on the incidents in progress
"""
params = {}
try:
response = self._request(params)
except Exception as e:
raise
if self._has_error(response):
logger.system_log('LOSM30026', 'Grafana', 'GetResponse error.')
raise Exception('response error')
return response
def logout(self):
"""
Log out.
On success, the value of the 'result' key in the response is True.
[Returns]
bool
"""
try:
response = self._request({})
except:
raise
return response
def _has_error(self, response):
if response != None:
return True if 'error' in response else False
else:
return True
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"archive/zip"
"bytes"
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"strings"
"syscall"
"github.com/go-sql-driver/mysql"
)
func main() {
if err := download(); err != nil {
log.Fatalf("download latest version of kolide: %s\n", err)
}
if err := setEnv(); err != nil {
log.Fatalf("setting environment: %s\n", err)
}
if err := execBin(); err != nil {
log.Fatalf("exec kolide binary: %s\n", err)
}
}
func setEnv() error {
dsn := os.Getenv("JAWSDB_URL")
if dsn == "" {
return errors.New("required JAWSDB_URL env variable not found")
}
port := os.Getenv("PORT")
if port == "" {
return errors.New("required env variable PORT not set")
}
redisURL := os.Getenv("REDIS_URL")
if redisURL == "" {
return errors.New("required env variable REDIS_URL not set")
}
rc, err := parseRedisURL(redisURL)
if err != nil {
return fmt.Errorf("parsing redis url %s:", err)
}
cfg, err := parseDSN(dsn)
if err != nil {
return fmt.Errorf("parsing dsn: %s", err)
}
jwtKey, err := randomText(24)
if err != nil {
return fmt.Errorf("generating jwt key: %s", err)
}
os.Setenv("KOLIDE_MYSQL_ADDRESS", cfg.Addr)
os.Setenv("KOLIDE_MYSQL_PASSWORD", cfg.Passwd)
os.Setenv("KOLIDE_MYSQL_USERNAME", cfg.User)
os.Setenv("KOLIDE_MYSQL_DATABASE", cfg.DBName)
os.Setenv("KOLIDE_REDIS_ADDRESS", rc.addr)
os.Setenv("KOLIDE_REDIS_PASSWORD", rc.password)
os.Setenv("KOLIDE_SERVER_ADDRESS", "0.0.0.0:"+port)
os.Setenv("KOLIDE_SERVER_TLS", "false")
os.Setenv("KOLIDE_AUTH_JWT_KEY", jwtKey)
return nil
}
// parseDSN formats the JAWSDB_URL into mysql DSN and calls mysql.ParseDSN.
// in order for mysql.ParseDSN to correctly parse the JAWSDB_URL, the host part
// must be wrapped with `tcp()`
func parseDSN(dsn string) (*mysql.Config, error) {
dsn = strings.TrimPrefix(dsn, "mysql://")
pre := strings.SplitAfter(dsn, "@")
if len(pre) < 2 {
return nil, errors.New("unable to split mysql DSN")
}
pre[0] = pre[0] + "tcp("
pre[1] = strings.Replace(pre[1], "/", ")/", -1)
dsn = strings.Join(pre, "")
cfg, err := mysql.ParseDSN(dsn)
if err != nil {
return nil, fmt.Errorf("parsing jawsdb DSN %s", err)
}
return cfg, nil
}
func execBin() error {
cmd, err := exec.LookPath("bin/fleet")
if err != nil {
return fmt.Errorf("looking up kolide path: %s", err)
}
// run migrations
prepareCmd := exec.Command(cmd, "prepare", "db", "--no-prompt")
_, err = prepareCmd.CombinedOutput()
if err != nil {
return fmt.Errorf("run prepare db %s", err)
}
// exec kolide binary. The first arg to syscall.Exec is the
// path of the kolide binary, and the first element of args[] is
// also the kolide binary.
args := []string{"kolide", "serve"}
if err := syscall.Exec(cmd, args, os.Environ()); err != nil {
return fmt.Errorf("exec binary: %s", err)
}
return nil
}
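// redisConn holds the address and password parsed from the REDIS_URL environment variable.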
type redisConn struct {
addr string
password string
}
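// parseRedisURL splits a redis:// URL into the host:port address and the password that Kolide
// expects as separate configuration values.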
func parseRedisURL(redisURL string) (*redisConn, error) {
ur, err := url.Parse(redisURL)
if err != nil {
return nil, fmt.Errorf("parsing redis URL %s", err)
}
password, _ := ur.User.Password()
conn := &redisConn{
addr: ur.Host,
password: password,
}
return conn, nil
}
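// download fetches the latest Kolide Fleet release zip and extracts the Linux binary to bin/fleet.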
func download() error {
resp, err := http.Get("https://dl.kolide.co/bin/fleet_latest.zip")
if err != nil {
return fmt.Errorf("get latest fleet zip: %s", err)
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("reading response body: %s", err)
}
readerAt := bytes.NewReader(b)
zr, err := zip.NewReader(readerAt, int64(len(b)))
if err != nil {
return fmt.Errorf("create zip reader: %s", err)
}
// create the bin/fleet file with the executable flag.
out, err := os.OpenFile("bin/fleet", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
return fmt.Errorf("create bin/fleet file: %s", err)
}
defer out.Close()
// extract the linux binary from the zip and copy it to
// bin/fleet
for _, f := range zr.File {
if f.Name != "linux/fleet_linux_amd64" {
continue
}
src, err := f.Open()
if err != nil {
return fmt.Errorf("opening zipped file: %s", err)
}
defer src.Close()
if _, err := io.Copy(out, src); err != nil {
return fmt.Errorf("copying binary from zip: %s", err)
}
}
return nil
}
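// randomText returns keySize bytes of cryptographically secure random data, base64-encoded.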
func randomText(keySize int) (string, error) {
key := make([]byte, keySize)
_, err := rand.Read(key)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(key), nil
}
|
[
"\"JAWSDB_URL\"",
"\"PORT\"",
"\"REDIS_URL\""
] |
[] |
[
"PORT",
"JAWSDB_URL",
"REDIS_URL"
] |
[]
|
["PORT", "JAWSDB_URL", "REDIS_URL"]
|
go
| 3 | 0 | |
src/main.go
|
package main
import (
"flag"
"fmt"
"os"
"strconv"
"sync"
"time"
//_ "github.com/go-sql-driver/mysql"
_ "github.com/mattn/go-sqlite3"
"github.com/cloud-barista/cb-tumblebug/src/core/common"
"github.com/cloud-barista/cb-tumblebug/src/core/mcir"
"github.com/cloud-barista/cb-tumblebug/src/core/mcis"
grpcserver "github.com/cloud-barista/cb-tumblebug/src/api/grpc/server"
restapiserver "github.com/cloud-barista/cb-tumblebug/src/api/rest/server"
"xorm.io/xorm"
"xorm.io/xorm/names"
)
// Main Body
// @title CB-Tumblebug REST API
// @version latest
// @description CB-Tumblebug REST API
// @contact.name API Support
// @contact.url http://cloud-barista.github.io
// @contact.email [email protected]
// @license.name Apache 2.0
// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
// @host localhost:1323
// @BasePath /tumblebug
// @securityDefinitions.basic BasicAuth
func main() {
fmt.Println("")
// giving a default value of "1323"
port := flag.String("port", "1323", "port number for the restapiserver to listen to")
flag.Parse()
// validate arguments from flag
validationFlag := true
// validation: port
// set validationFlag to false if the port number is not in the [1-65535] range
if portInt, err := strconv.Atoi(*port); err == nil {
if portInt < 1 || portInt > 65535 {
validationFlag = false
}
} else {
validationFlag = false
}
if !validationFlag {
fmt.Printf("%s is not a valid port number.\n", *port)
fmt.Printf("Please retry with a valid port number (ex: -port=[1-65535]).\n")
os.Exit(1)
}
common.SpiderRestUrl = common.NVL(os.Getenv("SPIDER_REST_URL"), "http://localhost:1024/spider")
common.DragonflyRestUrl = common.NVL(os.Getenv("DRAGONFLY_REST_URL"), "http://localhost:9090/dragonfly")
common.DBUrl = common.NVL(os.Getenv("DB_URL"), "localhost:3306")
common.DBDatabase = common.NVL(os.Getenv("DB_DATABASE"), "cb_tumblebug")
common.DBUser = common.NVL(os.Getenv("DB_USER"), "cb_tumblebug")
common.DBPassword = common.NVL(os.Getenv("DB_PASSWORD"), "cb_tumblebug")
common.AutocontrolDurationMs = common.NVL(os.Getenv("AUTOCONTROL_DURATION_MS"), "10000")
// load the latest configuration from DB (if exist)
fmt.Println("")
fmt.Println("[Update system environment]")
common.UpdateGlobalVariable(common.StrDragonflyRestUrl)
common.UpdateGlobalVariable(common.StrSpiderRestUrl)
common.UpdateGlobalVariable(common.StrAutocontrolDurationMs)
// load config
//Setup database (meta_db/dat/cbtumblebug.s3db)
fmt.Println("")
fmt.Println("[Setup SQL Database]")
err := os.MkdirAll("../meta_db/dat/", os.ModePerm)
if err != nil {
fmt.Println(err.Error())
}
common.ORM, err = xorm.NewEngine("sqlite3", "../meta_db/dat/cbtumblebug.s3db")
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Database access info set successfully")
}
common.ORM.SetTableMapper(names.SameMapper{})
common.ORM.SetColumnMapper(names.SameMapper{})
// "CREATE Table IF NOT EXISTS spec(...)"
err = common.ORM.Sync2(new(mcir.TbSpecInfo))
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Table spec set successfully..")
}
// "CREATE Table IF NOT EXISTS image(...)"
err = common.ORM.Sync2(new(mcir.TbImageInfo))
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Table image set successfully..")
}
//Ticker for MCIS Orchestration Policy
fmt.Println("")
fmt.Println("[Initiate Multi-Cloud Orchestration]")
autoControlDuration, _ := strconv.Atoi(common.AutocontrolDurationMs) //ms
ticker := time.NewTicker(time.Millisecond * time.Duration(autoControlDuration))
go func() {
for t := range ticker.C {
//display ticker if you need (remove '_ = t')
_ = t
mcis.OrchestrationController()
}
}()
defer ticker.Stop()
// Launch API servers (REST and gRPC)
wg := new(sync.WaitGroup)
wg.Add(2)
// Start REST Server
go func() {
restapiserver.ApiServer(*port)
wg.Done()
}()
// Start gRPC Server
go func() {
grpcserver.RunServer()
wg.Done()
}()
wg.Wait()
}
|
[
"\"SPIDER_REST_URL\"",
"\"DRAGONFLY_REST_URL\"",
"\"DB_URL\"",
"\"DB_DATABASE\"",
"\"DB_USER\"",
"\"DB_PASSWORD\"",
"\"AUTOCONTROL_DURATION_MS\""
] |
[] |
[
"DB_PASSWORD",
"AUTOCONTROL_DURATION_MS",
"DB_DATABASE",
"DRAGONFLY_REST_URL",
"DB_USER",
"DB_URL",
"SPIDER_REST_URL"
] |
[]
|
["DB_PASSWORD", "AUTOCONTROL_DURATION_MS", "DB_DATABASE", "DRAGONFLY_REST_URL", "DB_USER", "DB_URL", "SPIDER_REST_URL"]
|
go
| 7 | 0 | |
Godeps/_workspace/src/github.com/IBM-Bluemix/cf_deployment_tracker_client_go/cf_deployment_tracker.go
|
package cf_deployment_tracker
import (
"encoding/json"
"fmt"
"github.com/parnurzeal/gorequest"
"io/ioutil"
"os"
"time"
)
var deploymentTrackerURL = "https://deployment-tracker.mybluemix.net/api/v1/track"
type Repository struct {
Url string
}
type Package struct {
Name string
Version string
Repository Repository
}
type Event struct {
DateSent string `json:"date_sent"`
CodeVersion string `json:"code_version"`
RepositoryURL string `json:"repository_url"`
ApplicationName string `json:"application_name"`
SpaceID string `json:"space_id"`
ApplicationVersion string `json:"application_version"`
ApplicatonURIs []string `json:"application_uris"`
}
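// Track reads deployment metadata from package.json and the VCAP_APPLICATION environment variable
// and posts a tracking event to the Bluemix deployment tracker, returning early if either source
// is unavailable.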
func Track() (errs []error) {
content, err := ioutil.ReadFile("package.json")
// exit early if we can't read the file
if err != nil {
return
}
var info Package
err = json.Unmarshal(content, &info)
//exit early if we can't parse the file
if err != nil {
return
}
vcapApplication := os.Getenv("VCAP_APPLICATION")
var event Event
err = json.Unmarshal([]byte(vcapApplication), &event)
//exit early if we are not running in CF
if err != nil {
return
}
if info.Repository.Url != "" {
event.RepositoryURL = info.Repository.Url
}
if info.Name != "" {
event.ApplicationName = info.Name
}
if info.Version != "" {
event.CodeVersion = info.Version
}
now := time.Now()
event.DateSent = now.UTC().Format("2006-01-02T15:04:05.999Z")
request := gorequest.New()
resp, _, errs := request.Post(deploymentTrackerURL).
Send(event).
End()
if errs != nil {
return errs
}
fmt.Println(resp.Status)
return nil
}
|
[
"\"VCAP_APPLICATION\""
] |
[] |
[
"VCAP_APPLICATION"
] |
[]
|
["VCAP_APPLICATION"]
|
go
| 1 | 0 | |
kedro/framework/session/session.py
|
# pylint: disable=invalid-name,global-statement
"""This module implements Kedro session responsible for project lifecycle."""
import getpass
import logging
import logging.config
import os
import subprocess
import traceback
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Union
import click
from kedro import __version__ as kedro_version
from kedro.framework.context import KedroContext
from kedro.framework.context.context import (
KedroContextError,
_convert_paths_to_absolute_posix,
)
from kedro.framework.hooks import get_hook_manager
from kedro.framework.project import (
configure_project,
pipelines,
settings,
validate_settings,
)
from kedro.framework.session.store import BaseSessionStore
from kedro.io.core import generate_timestamp
from kedro.runner import AbstractRunner, SequentialRunner
_active_session = None
def get_current_session(silent: bool = False) -> Optional["KedroSession"]:
"""Fetch the active ``KedroSession`` instance.
Args:
silent: Indicates to suppress the error if no active session was found.
Raises:
RuntimeError: If no active session was found and `silent` is False.
Returns:
KedroSession instance.
"""
if not _active_session and not silent:
raise RuntimeError("There is no active Kedro session.")
return _active_session
def _activate_session(session: "KedroSession", force: bool = False) -> None:
global _active_session
if _active_session and not force and session is not _active_session:
raise RuntimeError(
"Cannot activate the session as another active session already exists."
)
_active_session = session
def _deactivate_session() -> None:
global _active_session
_active_session = None
def _describe_git(project_path: Path) -> Dict[str, Dict[str, str]]:
project_path = str(project_path)
try:
res = subprocess.check_output(
["git", "rev-parse", "--short", "HEAD"], cwd=project_path
)
# `subprocess.check_output()` raises `NotADirectoryError` on Windows
except (subprocess.CalledProcessError, FileNotFoundError, NotADirectoryError):
logging.getLogger(__name__).warning("Unable to git describe %s", project_path)
return {}
git_data = {"commit_sha": res.decode().strip()}
res = subprocess.check_output(["git", "status", "--short"], cwd=project_path)
git_data["dirty"] = bool(res.decode().strip())
return {"git": git_data}
def _jsonify_cli_context(ctx: click.core.Context) -> Dict[str, Any]:
return {
"args": ctx.args,
"params": ctx.params,
"command_name": ctx.command.name,
"command_path": ctx.command_path,
}
class KedroSession:
"""``KedroSession`` is the object that is responsible for managing the lifecycle
of a Kedro run.
- Use `KedroSession.create("<your-kedro-project-package-name>")` as
a context manager to construct a new KedroSession with session data
provided (see the example below).
- Use `KedroSession(session_id=<id>)` to instantiate an existing session with a given
ID.
Example:
::
>>> from kedro.framework.session import KedroSession
>>>
>>> with KedroSession.create("<your-kedro-project-package-name>") as session:
>>> session.run()
>>>
"""
def __init__(
self,
session_id: str,
package_name: str = None,
project_path: Union[Path, str] = None,
save_on_close: bool = False,
):
self._project_path = Path(project_path or Path.cwd()).resolve()
self.session_id = session_id
self.save_on_close = save_on_close
self._package_name = package_name
self._store = self._init_store()
@classmethod
def create( # pylint: disable=too-many-arguments
cls,
package_name: str = None,
project_path: Union[Path, str] = None,
save_on_close: bool = True,
env: str = None,
extra_params: Dict[str, Any] = None,
) -> "KedroSession":
"""Create a new instance of ``KedroSession`` with the session data.
Args:
package_name: Package name for the Kedro project the session is
created for.
project_path: Path to the project root directory. Default is
current working directory Path.cwd().
save_on_close: Whether or not to save the session when it's closed.
env: Environment for the KedroContext.
extra_params: Optional dictionary containing extra project parameters
for underlying KedroContext. If specified, will update (and therefore
take precedence over) the parameters retrieved from the project
configuration.
Returns:
A new ``KedroSession`` instance.
"""
# This is to make sure that for workflows that manually create session
# without going through one of our known entrypoints, e.g. some plugins
# like kedro-airflow, the project is still properly configured. This
# is for backward compatibility and should be removed in 0.18.
if package_name is not None:
configure_project(package_name)
validate_settings()
session = cls(
package_name=package_name,
project_path=project_path,
session_id=generate_timestamp(),
save_on_close=save_on_close,
)
# have to explicitly type session_data otherwise mypy will complain
# possibly related to this: https://github.com/python/mypy/issues/1430
session_data: Dict[str, Any] = {
"package_name": session._package_name,
"project_path": session._project_path,
"session_id": session.session_id,
**_describe_git(session._project_path),
}
ctx = click.get_current_context(silent=True)
if ctx:
session_data["cli"] = _jsonify_cli_context(ctx)
env = env or os.getenv("KEDRO_ENV")
if env:
session_data["env"] = env
if extra_params:
session_data["extra_params"] = extra_params
session_data["username"] = getpass.getuser()
session._store.update(session_data)
# we need a ConfigLoader registered in order to be able to set up logging
session._setup_logging()
return session
def _get_logging_config(self) -> Dict[str, Any]:
context = self.load_context()
conf_logging = context.config_loader.get(
"logging*", "logging*/**", "**/logging*"
)
# turn relative paths in logging config into absolute path
# before initialising loggers
conf_logging = _convert_paths_to_absolute_posix(
project_path=self._project_path, conf_dictionary=conf_logging
)
return conf_logging
def _setup_logging(self) -> None:
"""Register logging specified in logging directory."""
conf_logging = self._get_logging_config()
logging.config.dictConfig(conf_logging)
def _init_store(self) -> BaseSessionStore:
store_class = settings.SESSION_STORE_CLASS
classpath = f"{store_class.__module__}.{store_class.__qualname__}"
store_args = deepcopy(settings.SESSION_STORE_ARGS)
store_args.setdefault("path", (self._project_path / "sessions").as_posix())
store_args["session_id"] = self.session_id
try:
return store_class(**store_args)
except TypeError as err:
raise ValueError(
f"\n{err}.\nStore config must only contain arguments valid "
f"for the constructor of `{classpath}`."
) from err
except Exception as err:
raise ValueError(
f"\n{err}.\nFailed to instantiate session store of type `{classpath}`."
) from err
def _log_exception(self, exc_type, exc_value, exc_tb):
type_ = [] if exc_type.__module__ == "builtins" else [exc_type.__module__]
type_.append(exc_type.__qualname__)
exc_data = {
"type": ".".join(type_),
"value": str(exc_value),
"traceback": traceback.format_tb(exc_tb),
}
self._store["exception"] = exc_data
@property
def _logger(self) -> logging.Logger:
return logging.getLogger(__name__)
@property
def store(self) -> Dict[str, Any]:
"""Return a copy of internal store."""
return dict(self._store)
def load_context(self) -> KedroContext:
"""An instance of the project context."""
env = self.store.get("env")
extra_params = self.store.get("extra_params")
context_class = settings.CONTEXT_CLASS
context = context_class(
package_name=self._package_name,
project_path=self._project_path,
env=env,
extra_params=extra_params,
)
return context
def close(self):
"""Close the current session and save its store to disk
if `save_on_close` attribute is True.
"""
if self.save_on_close:
self._store.save()
if get_current_session(silent=True) is self:
_deactivate_session()
def __enter__(self):
if get_current_session(silent=True) is not self:
_activate_session(self)
return self
def __exit__(self, exc_type, exc_value, tb_):
if exc_type:
self._log_exception(exc_type, exc_value, tb_)
self.close()
def run( # pylint: disable=too-many-arguments,too-many-locals
self,
pipeline_name: str = None,
tags: Iterable[str] = None,
runner: AbstractRunner = None,
node_names: Iterable[str] = None,
from_nodes: Iterable[str] = None,
to_nodes: Iterable[str] = None,
from_inputs: Iterable[str] = None,
to_outputs: Iterable[str] = None,
load_versions: Dict[str, str] = None,
) -> Dict[str, Any]:
"""Runs the pipeline with a specified runner.
Args:
pipeline_name: Name of the pipeline that is being run.
tags: An optional list of node tags which should be used to
filter the nodes of the ``Pipeline``. If specified, only the nodes
containing *any* of these tags will be run.
runner: An optional parameter specifying the runner that you want to run
the pipeline with.
node_names: An optional list of node names which should be used to
filter the nodes of the ``Pipeline``. If specified, only the nodes
with these names will be run.
from_nodes: An optional list of node names which should be used as a
starting point of the new ``Pipeline``.
to_nodes: An optional list of node names which should be used as an
end point of the new ``Pipeline``.
from_inputs: An optional list of input datasets which should be
used as a starting point of the new ``Pipeline``.
to_outputs: An optional list of output datasets which should be
used as an end point of the new ``Pipeline``.
load_versions: An optional flag to specify a particular dataset
version timestamp to load.
Raises:
KedroContextError: If the named or `__default__` pipeline is not
defined by `register_pipelines`.
Exception: Any uncaught exception during the run will be re-raised
after being passed to ``on_pipeline_error`` hook.
Returns:
Any node outputs that cannot be processed by the ``DataCatalog``.
These are returned in a dictionary, where the keys are defined
by the node outputs.
"""
# pylint: disable=protected-access,no-member
# Report project name
self._logger.info("** Kedro project %s", self._project_path.name)
save_version = run_id = self.store["session_id"]
extra_params = self.store.get("extra_params") or {}
context = self.load_context()
name = pipeline_name or "__default__"
try:
pipeline = pipelines[name]
except KeyError as exc:
raise KedroContextError(
f"Failed to find the pipeline named '{name}'. "
f"It needs to be generated and returned "
f"by the 'register_pipelines' function."
) from exc
filtered_pipeline = context._filter_pipeline(
pipeline=pipeline,
tags=tags,
from_nodes=from_nodes,
to_nodes=to_nodes,
node_names=node_names,
from_inputs=from_inputs,
to_outputs=to_outputs,
)
record_data = {
"run_id": run_id,
"project_path": self._project_path.as_posix(),
"env": context.env,
"kedro_version": kedro_version,
"tags": tags,
"from_nodes": from_nodes,
"to_nodes": to_nodes,
"node_names": node_names,
"from_inputs": from_inputs,
"to_outputs": to_outputs,
"load_versions": load_versions,
"extra_params": extra_params,
"pipeline_name": pipeline_name,
}
catalog = context._get_catalog(
save_version=save_version, load_versions=load_versions
)
# Run the runner
runner = runner or SequentialRunner()
hook_manager = get_hook_manager()
hook_manager.hook.before_pipeline_run( # pylint: disable=no-member
run_params=record_data, pipeline=filtered_pipeline, catalog=catalog
)
try:
run_result = runner.run(filtered_pipeline, catalog, run_id)
except Exception as error:
hook_manager.hook.on_pipeline_error(
error=error,
run_params=record_data,
pipeline=filtered_pipeline,
catalog=catalog,
)
raise
hook_manager.hook.after_pipeline_run(
run_params=record_data,
run_result=run_result,
pipeline=filtered_pipeline,
catalog=catalog,
)
return run_result
|
[] |
[] |
[
"KEDRO_ENV"
] |
[]
|
["KEDRO_ENV"]
|
python
| 1 | 0 | |
test/functional/test_framework/test_framework.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
"""Base class for a AVC test script.
Individual AVC test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave AVCds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop AVCds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing AVCd/AVC-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: AVCds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a AVCd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple AVCds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a AVCd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple AVCd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'AVCd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "AVCd should have exited with an error"
else:
assert_msg = "AVCd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1454124732 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as AVCd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BITCOIND", "AVCd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some AVCd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "AVCd"),
help="AVCd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "AVCd"),
help="AVCd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
|
[] |
[] |
[
"PATH",
"BITCOIND"
] |
[]
|
["PATH", "BITCOIND"]
|
python
| 2 | 0 | |
lib/kb_jorg/kb_jorgImpl.py
|
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import os
import json
from kb_jorg.Utils.JorgUtil import JorgUtil
#END_HEADER
class kb_jorg:
'''
Module Name:
kb_jorg
Module Description:
A KBase module: kb_jorg
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "1.0.0"
GIT_URL = "https://github.com/jungbluth/kb_jorg"
GIT_COMMIT_HASH = "a0b04ac1561ebbed05152b2a9b4fdf7d8c019940"
#BEGIN_CLASS_HEADER
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.config = config
self.config['SDK_CALLBACK_URL'] = os.environ['SDK_CALLBACK_URL']
self.config['KB_AUTH_TOKEN'] = os.environ['KB_AUTH_TOKEN']
#END_CONSTRUCTOR
pass
def run_kb_jorg(self, ctx, params):
"""
:param params: instance of type "JorgInputParams" (required
params: assembly_ref: Genome assembly object reference
binned_contig_name: BinnedContig object name and output file
header workspace_name: the name of the workspace it gets saved to.
reads_list: list of reads object
(PairedEndLibrary/SingleEndLibrary) upon which CONCOCT will be run
optional params: thread: number of threads; default 1
min_contig_length: minimum contig length; default 1000
contig_split_length: length to split long contigs; default 10000
ref: https://github.com/BinPro/CONCOCT) -> structure: parameter
"assembly_ref" of type "obj_ref" (An X/Y/Z style reference),
parameter "binned_contig_name" of String, parameter
"workspace_name" of String, parameter "reads_list" of list of type
"obj_ref" (An X/Y/Z style reference), parameter "thread" of Long,
parameter "min_contig_length" of Long, parameter
"contig_split_length" of Long
:returns: instance of type "JorgResult" (result_folder: folder
path that holds all files generated by run_kb_jorg report_name:
report name generated by KBaseReport report_ref: report reference
generated by KBaseReport) -> structure: parameter
"result_directory" of String, parameter "binned_contig_obj_ref" of
type "obj_ref" (An X/Y/Z style reference), parameter "report_name"
of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN run_kb_jorg
print('--->\nRunning kb_jorg.kb_jorg\nparams:')
print(json.dumps(params, indent=1))
for key, value in params.items():
if isinstance(value, str):
params[key] = value.strip()
jorg_runner = JorgUtil(self.config)
returnVal = jorg_runner.run_jorg(params)
#END run_kb_jorg
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method run_kb_jorg return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK",
'message': "",
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
|
[] |
[] |
[
"SDK_CALLBACK_URL",
"KB_AUTH_TOKEN"
] |
[]
|
["SDK_CALLBACK_URL", "KB_AUTH_TOKEN"]
|
python
| 2 | 0 | |
localtest/main.go
|
package main
import (
"fmt"
"log"
"github.com/mhewedy/ews"
)
func main() {
c := ews.NewClient(
"https://outlook.office365.com/EWS/Exchange.asmx",
"[email protected]",
"examplepassword",
&ews.Config{Dump: true, NTLM: false},
)
folders, err := ews.FindFolders(c, "")
if err != nil {
log.Fatal("err>: ", err.Error())
}
for _, folder := range folders {
fmt.Printf("%s %s\n", folder.DisplayName, folder.FolderId.Id)
}
fmt.Println("--- success ---")
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |