filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 distinct value) | constargjson (string, 2-3.9k chars) | lang (string, 3 distinct values) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0, nullable) | sentence (string, 1 distinct value) |
---|---|---|---|---|---|---|---|---|---|---|
cmd/main.go | package main
import (
"context"
"database/sql"
"log"
"net"
"net/http"
"os"
"github.com/Asymmetriq/url_shortener/internal/service"
"github.com/Asymmetriq/url_shortener/internal/storage"
"github.com/Asymmetriq/url_shortener/pkg/api"
gw "github.com/Asymmetriq/url_shortener/pkg/pb/api"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
_ "github.com/lib/pq"
"google.golang.org/grpc"
)
func main() {
dsn := os.Getenv("DATABASE_URL")
db, err := sql.Open("postgres", dsn)
if err != nil {
log.Fatal(err)
}
storageDB := &storage.StorageDB{DB: db}
// server
srv := &service.Service{
Storage: storageDB,
}
// GRPC
listener, err := net.Listen("tcp", ":5000")
if err != nil {
log.Fatal(err)
}
grpcOpts := []grpc.ServerOption{}
grpcServer := grpc.NewServer(grpcOpts...)
api.RegisterServiceServer(grpcServer, srv)
log.Println("gRPC server listening on :5000")
go grpcServer.Serve(listener)
// HTTP
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Register gRPC server endpoint
// Note: Make sure the gRPC server is running properly and accessible
gwmux := runtime.NewServeMux()
err = gw.RegisterServiceHandlerServer(ctx, gwmux, srv)
if err != nil {
log.Fatal(err)
}
// Create a gRPC Gateway server
httpServer := &http.Server{
Addr: ":8000",
Handler: gwmux,
}
log.Println("HTTP gateway listening on :8000")
log.Fatal(httpServer.ListenAndServe())
}
| ["\"DATABASE_URL\""] | [] | ["DATABASE_URL"] | [] | ["DATABASE_URL"] | go | 1 | 0 | |
editregions/admin/forms.py | # -*- coding: utf-8 -*-
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import F
from django.forms import Form, Media
try:
from django.forms.utils import ErrorList
except ImportError: # < Django 1.7 ... pragma: no cover
from django.forms.util import ErrorList
from django.forms.fields import IntegerField, CharField
try:
from django.utils.encoding import force_text
except ImportError: # pragma: no cover ... < Django 1.5
from django.utils.encoding import force_unicode as force_text
from editregions.utils.data import attach_configuration, get_configuration
from editregions.models import EditRegionChunk, EditRegionConfiguration
from editregions.admin.utils import shared_media
from editregions.utils.regions import validate_region_name
logger = logging.getLogger(__name__)
class EditRegionInlineForm(object):
"""
Used by EditRegionInlineFormSet
"""
media = Media()
class EditRegionInlineFormSet(object):
"""
A minimal representation of a FormSet as called by the Django inline
admin code. The most important bit is our media definition; everything else
is literally just to appear correct.
"""
initial_forms = []
extra_forms = []
media = shared_media
empty_form = EditRegionInlineForm()
errors = {}
# used for constructing change messages
new_objects = []
changed_objects = []
deleted_objects = []
@classmethod
def get_default_prefix(cls):
return 'edit_region_chunk_formset'
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def get_queryset(self, *args, **kwargs):
return self.kwargs['queryset']
def is_valid(self, *args, **kwargs):
return True
def save(self, *args, **kwargs):
return True
def non_form_errors(self):
return ErrorList()
class MovementForm(Form):
"""
Move a chunk from one place to another.
"""
pk = IntegerField(min_value=1)
position = IntegerField(min_value=0)
#: if region is set, then we're probably in an inline'd changelist, and
#: we may be wanting to move region ...
region = CharField(required=False, validators=[validate_region_name])
def __init__(self, *args, **kwargs):
super(MovementForm, self).__init__(*args, **kwargs)
self.fields['pk'].max_value = self.get_model().objects.all().count()
def get_model(self):
return EditRegionChunk
def clean(self):
cd = super(MovementForm, self).clean()
pk = cd.get('pk', None)
model = self.get_model()
try:
if pk is None:
raise model.DoesNotExist("Don't even bother querying")
cd['pk'] = model.objects.get(pk=pk)
except model.DoesNotExist as e:
cd['pk'] = None
name = force_text(self.get_model()._meta.verbose_name)
msg = '{0} does not exist'.format(name)
self._errors['pk'] = msg
# rather than raise an error for an invalid region, just set it
# back to whatever the region says it should be. Trust no-one.
if 'region' in cd and cd['pk'] is not None:
attach_configuration(cd['pk'].content_object,
EditRegionConfiguration)
erc = get_configuration(cd['pk'].content_object)
if cd['region'] not in erc.config:
msg = '{0} is not a valid region'.format(cd['region'])
self._errors['region'] = msg
return cd
def save(self):
"""
Updates the current object, and all other objects in the same region.
"""
obj = self.cleaned_data['pk']
old_position = obj.position
old_region = obj.region
new_position = max(self.cleaned_data['position'], 0)
new_region = self.cleaned_data.get('region', old_region)
return self.get_model().objects.move(
obj=obj, from_position=old_position, to_position=new_position,
from_region=old_region, to_region=new_region)
def change_message(self):
obj = self.cleaned_data['pk']
msg = 'Moved to position {obj.position} in region "{obj.region}"'.format(obj=obj)
logger.info(msg)
return obj, msg
def parent_change_message(self):
obj = self.cleaned_data['pk']
msg = 'Moved {vname} (pk: {obj.pk}) to position {obj.position} in ' \
'region "{obj.region}"'.format(vname=force_text(obj._meta.verbose_name),
obj=obj)
logger.info(msg)
return obj.content_object, msg
| [] | [] | [] | [] | [] | python | null | null | null |
py38-win/Scripts/waf3-2.0.22-fee054444594956e012048d7248f6b4b/waflib/Utils.py | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
from __future__ import with_statement
import atexit,os,sys,errno,inspect,re,datetime,platform,base64,signal,functools,time,shlex
try:
import cPickle
except ImportError:
import pickle as cPickle
if os.name=='posix'and sys.version_info[0]<3:
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
else:
import subprocess
try:
TimeoutExpired=subprocess.TimeoutExpired
except AttributeError:
class TimeoutExpired(Exception):
pass
from collections import deque,defaultdict
try:
import _winreg as winreg
except ImportError:
try:
import winreg
except ImportError:
winreg=None
from waflib import Errors
try:
from hashlib import md5
except ImportError:
try:
from hashlib import sha1 as md5
except ImportError:
pass
else:
try:
md5().digest()
except ValueError:
from hashlib import sha1 as md5
try:
import threading
except ImportError:
if not'JOBS'in os.environ:
os.environ['JOBS']='1'
class threading(object):
pass
class Lock(object):
def acquire(self):
pass
def release(self):
pass
threading.Lock=threading.Thread=Lock
SIG_NIL='SIG_NIL_SIG_NIL_'.encode()
O644=420
O755=493
rot_chr=['\\','|','/','-']
rot_idx=0
class ordered_iter_dict(dict):
def __init__(self,*k,**kw):
self.lst=deque()
dict.__init__(self,*k,**kw)
def clear(self):
dict.clear(self)
self.lst=deque()
def __setitem__(self,key,value):
if key in dict.keys(self):
self.lst.remove(key)
dict.__setitem__(self,key,value)
self.lst.append(key)
def __delitem__(self,key):
dict.__delitem__(self,key)
try:
self.lst.remove(key)
except ValueError:
pass
def __iter__(self):
return reversed(self.lst)
def keys(self):
return reversed(self.lst)
class lru_node(object):
__slots__=('next','prev','key','val')
def __init__(self):
self.next=self
self.prev=self
self.key=None
self.val=None
class lru_cache(object):
__slots__=('maxlen','table','head')
def __init__(self,maxlen=100):
self.maxlen=maxlen
self.table={}
self.head=lru_node()
self.head.next=self.head
self.head.prev=self.head
def __getitem__(self,key):
node=self.table[key]
if node is self.head:
return node.val
node.prev.next=node.next
node.next.prev=node.prev
node.next=self.head.next
node.prev=self.head
self.head=node.next.prev=node.prev.next=node
return node.val
def __setitem__(self,key,val):
if key in self.table:
node=self.table[key]
node.val=val
self.__getitem__(key)
else:
if len(self.table)<self.maxlen:
node=lru_node()
node.prev=self.head
node.next=self.head.next
node.prev.next=node.next.prev=node
else:
node=self.head=self.head.next
try:
del self.table[node.key]
except KeyError:
pass
node.key=key
node.val=val
self.table[key]=node
class lazy_generator(object):
def __init__(self,fun,params):
self.fun=fun
self.params=params
def __iter__(self):
return self
def __next__(self):
try:
it=self.it
except AttributeError:
it=self.it=self.fun(*self.params)
return next(it)
next=__next__
is_win32=os.sep=='\\'or sys.platform=='win32'or os.name=='nt'
def readf(fname,m='r',encoding='latin-1'):
if sys.hexversion>0x3000000 and not'b'in m:
m+='b'
with open(fname,m)as f:
txt=f.read()
if encoding:
txt=txt.decode(encoding)
else:
txt=txt.decode()
else:
with open(fname,m)as f:
txt=f.read()
return txt
def writef(fname,data,m='w',encoding='latin-1'):
if sys.hexversion>0x3000000 and not'b'in m:
data=data.encode(encoding)
m+='b'
with open(fname,m)as f:
f.write(data)
def h_file(fname):
m=md5()
with open(fname,'rb')as f:
while fname:
fname=f.read(200000)
m.update(fname)
return m.digest()
def readf_win32(f,m='r',encoding='latin-1'):
flags=os.O_NOINHERIT|os.O_RDONLY
if'b'in m:
flags|=os.O_BINARY
if'+'in m:
flags|=os.O_RDWR
try:
fd=os.open(f,flags)
except OSError:
raise IOError('Cannot read from %r'%f)
if sys.hexversion>0x3000000 and not'b'in m:
m+='b'
with os.fdopen(fd,m)as f:
txt=f.read()
if encoding:
txt=txt.decode(encoding)
else:
txt=txt.decode()
else:
with os.fdopen(fd,m)as f:
txt=f.read()
return txt
def writef_win32(f,data,m='w',encoding='latin-1'):
if sys.hexversion>0x3000000 and not'b'in m:
data=data.encode(encoding)
m+='b'
flags=os.O_CREAT|os.O_TRUNC|os.O_WRONLY|os.O_NOINHERIT
if'b'in m:
flags|=os.O_BINARY
if'+'in m:
flags|=os.O_RDWR
try:
fd=os.open(f,flags)
except OSError:
raise OSError('Cannot write to %r'%f)
with os.fdopen(fd,m)as f:
f.write(data)
def h_file_win32(fname):
try:
fd=os.open(fname,os.O_BINARY|os.O_RDONLY|os.O_NOINHERIT)
except OSError:
raise OSError('Cannot read from %r'%fname)
m=md5()
with os.fdopen(fd,'rb')as f:
while fname:
fname=f.read(200000)
m.update(fname)
return m.digest()
readf_unix=readf
writef_unix=writef
h_file_unix=h_file
if hasattr(os,'O_NOINHERIT')and sys.hexversion<0x3040000:
readf=readf_win32
writef=writef_win32
h_file=h_file_win32
try:
x=''.encode('hex')
except LookupError:
import binascii
def to_hex(s):
ret=binascii.hexlify(s)
if not isinstance(ret,str):
ret=ret.decode('utf-8')
return ret
else:
def to_hex(s):
return s.encode('hex')
to_hex.__doc__="""
Return the hexadecimal representation of a string
:param s: string to convert
:type s: string
"""
def listdir_win32(s):
if not s:
try:
import ctypes
except ImportError:
return[x+':\\'for x in'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
else:
dlen=4
maxdrives=26
buf=ctypes.create_string_buffer(maxdrives*dlen)
ndrives=ctypes.windll.kernel32.GetLogicalDriveStringsA(maxdrives*dlen,ctypes.byref(buf))
return[str(buf.raw[4*i:4*i+2].decode('ascii'))for i in range(int(ndrives/dlen))]
if len(s)==2 and s[1]==":":
s+=os.sep
if not os.path.isdir(s):
e=OSError('%s is not a directory'%s)
e.errno=errno.ENOENT
raise e
return os.listdir(s)
listdir=os.listdir
if is_win32:
listdir=listdir_win32
def num2ver(ver):
if isinstance(ver,str):
ver=tuple(ver.split('.'))
if isinstance(ver,tuple):
ret=0
for i in range(4):
if i<len(ver):
ret+=256**(3-i)*int(ver[i])
return ret
return ver
def to_list(val):
if isinstance(val,str):
return val.split()
else:
return val
def console_encoding():
try:
import ctypes
except ImportError:
pass
else:
try:
codepage=ctypes.windll.kernel32.GetConsoleCP()
except AttributeError:
pass
else:
if codepage:
return'cp%d'%codepage
return sys.stdout.encoding or('cp1252'if is_win32 else'latin-1')
def split_path_unix(path):
return path.split('/')
def split_path_cygwin(path):
if path.startswith('//'):
ret=path.split('/')[2:]
ret[0]='/'+ret[0]
return ret
return path.split('/')
re_sp=re.compile('[/\\\\]+')
def split_path_win32(path):
if path.startswith('\\\\'):
ret=re_sp.split(path)[1:]
ret[0]='\\\\'+ret[0]
if ret[0]=='\\\\?':
return ret[1:]
return ret
return re_sp.split(path)
msysroot=None
def split_path_msys(path):
if path.startswith(('/','\\'))and not path.startswith(('//','\\\\')):
global msysroot
if not msysroot:
msysroot=subprocess.check_output(['cygpath','-w','/']).decode(sys.stdout.encoding or'latin-1')
msysroot=msysroot.strip()
path=os.path.normpath(msysroot+os.sep+path)
return split_path_win32(path)
if sys.platform=='cygwin':
split_path=split_path_cygwin
elif is_win32:
if os.environ.get('MSYSTEM')and sys.executable.startswith('/'):
split_path=split_path_msys
else:
split_path=split_path_win32
else:
split_path=split_path_unix
split_path.__doc__="""
Splits a path by / or \\; do not confuse this function with ``os.path.split``
:type path: string
:param path: path to split
:return: list of string
"""
def check_dir(path):
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as e:
if not os.path.isdir(path):
raise Errors.WafError('Cannot create the folder %r'%path,ex=e)
def check_exe(name,env=None):
if not name:
raise ValueError('Cannot execute an empty string!')
def is_exe(fpath):
return os.path.isfile(fpath)and os.access(fpath,os.X_OK)
fpath,fname=os.path.split(name)
if fpath and is_exe(name):
return os.path.abspath(name)
else:
env=env or os.environ
for path in env['PATH'].split(os.pathsep):
path=path.strip('"')
exe_file=os.path.join(path,name)
if is_exe(exe_file):
return os.path.abspath(exe_file)
return None
def def_attrs(cls,**kw):
for k,v in kw.items():
if not hasattr(cls,k):
setattr(cls,k,v)
def quote_define_name(s):
fu=re.sub('[^a-zA-Z0-9]','_',s)
fu=re.sub('_+','_',fu)
fu=fu.upper()
return fu
try:
shell_quote=shlex.quote
except AttributeError:
import pipes
shell_quote=pipes.quote
def shell_escape(cmd):
if isinstance(cmd,str):
return cmd
return' '.join(shell_quote(x)for x in cmd)
def h_list(lst):
return md5(repr(lst).encode()).digest()
if sys.hexversion<0x3000000:
def h_list_python2(lst):
return md5(repr(lst)).digest()
h_list_python2.__doc__=h_list.__doc__
h_list=h_list_python2
def h_fun(fun):
try:
return fun.code
except AttributeError:
if isinstance(fun,functools.partial):
code=list(fun.args)
code.extend(sorted(fun.keywords.items()))
code.append(h_fun(fun.func))
fun.code=h_list(code)
return fun.code
try:
h=inspect.getsource(fun)
except EnvironmentError:
h='nocode'
try:
fun.code=h
except AttributeError:
pass
return h
def h_cmd(ins):
if isinstance(ins,str):
ret=ins
elif isinstance(ins,list)or isinstance(ins,tuple):
ret=str([h_cmd(x)for x in ins])
else:
ret=str(h_fun(ins))
if sys.hexversion>0x3000000:
ret=ret.encode('latin-1','xmlcharrefreplace')
return ret
reg_subst=re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
def subst_vars(expr,params):
def repl_var(m):
if m.group(1):
return'\\'
if m.group(2):
return'$'
try:
return params.get_flat(m.group(3))
except AttributeError:
return params[m.group(3)]
return reg_subst.sub(repl_var,expr)
def destos_to_binfmt(key):
if key=='darwin':
return'mac-o'
elif key in('win32','cygwin','uwin','msys'):
return'pe'
return'elf'
def unversioned_sys_platform():
s=sys.platform
if s.startswith('java'):
from java.lang import System
s=System.getProperty('os.name')
if s=='Mac OS X':
return'darwin'
elif s.startswith('Windows '):
return'win32'
elif s=='OS/2':
return'os2'
elif s=='HP-UX':
return'hp-ux'
elif s in('SunOS','Solaris'):
return'sunos'
else:s=s.lower()
if s=='powerpc':
return'darwin'
if s=='win32'or s=='os2':
return s
if s=='cli'and os.name=='nt':
return'win32'
return re.split(r'\d+$',s)[0]
def nada(*k,**kw):
pass
class Timer(object):
def __init__(self):
self.start_time=self.now()
def __str__(self):
delta=self.now()-self.start_time
if not isinstance(delta,datetime.timedelta):
delta=datetime.timedelta(seconds=delta)
days=delta.days
hours,rem=divmod(delta.seconds,3600)
minutes,seconds=divmod(rem,60)
seconds+=delta.microseconds*1e-6
result=''
if days:
result+='%dd'%days
if days or hours:
result+='%dh'%hours
if days or hours or minutes:
result+='%dm'%minutes
return'%s%.3fs'%(result,seconds)
def now(self):
return datetime.datetime.utcnow()
if hasattr(time,'perf_counter'):
def now(self):
return time.perf_counter()
def read_la_file(path):
sp=re.compile(r'^([^=]+)=\'(.*)\'$')
dc={}
for line in readf(path).splitlines():
try:
_,left,right,_=sp.split(line.strip())
dc[left]=right
except ValueError:
pass
return dc
def run_once(fun):
cache={}
def wrap(*k):
try:
return cache[k]
except KeyError:
ret=fun(*k)
cache[k]=ret
return ret
wrap.__cache__=cache
wrap.__name__=fun.__name__
return wrap
def get_registry_app_path(key,filename):
if not winreg:
return None
try:
result=winreg.QueryValue(key,"Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\%s.exe"%filename[0])
except OSError:
pass
else:
if os.path.isfile(result):
return result
def lib64():
if os.sep=='/':
if platform.architecture()[0]=='64bit':
if os.path.exists('/usr/lib64')and not os.path.exists('/usr/lib32'):
return'64'
return''
def sane_path(p):
return os.path.abspath(os.path.expanduser(p))
process_pool=[]
def get_process():
try:
return process_pool.pop()
except IndexError:
filepath=os.path.dirname(os.path.abspath(__file__))+os.sep+'processor.py'
cmd=[sys.executable,'-c',readf(filepath)]
return subprocess.Popen(cmd,stdout=subprocess.PIPE,stdin=subprocess.PIPE,bufsize=0,close_fds=not is_win32)
def run_prefork_process(cmd,kwargs,cargs):
if not kwargs.get('env'):
kwargs['env']=dict(os.environ)
try:
obj=base64.b64encode(cPickle.dumps([cmd,kwargs,cargs]))
except(TypeError,AttributeError):
return run_regular_process(cmd,kwargs,cargs)
proc=get_process()
if not proc:
return run_regular_process(cmd,kwargs,cargs)
proc.stdin.write(obj)
proc.stdin.write('\n'.encode())
proc.stdin.flush()
obj=proc.stdout.readline()
if not obj:
raise OSError('Preforked sub-process %r died'%proc.pid)
process_pool.append(proc)
lst=cPickle.loads(base64.b64decode(obj))
assert len(lst)==5
ret,out,err,ex,trace=lst
if ex:
if ex=='OSError':
raise OSError(trace)
elif ex=='ValueError':
raise ValueError(trace)
elif ex=='TimeoutExpired':
exc=TimeoutExpired(cmd,timeout=cargs['timeout'],output=out)
exc.stderr=err
raise exc
else:
raise Exception(trace)
return ret,out,err
def lchown(path,user=-1,group=-1):
if isinstance(user,str):
import pwd
entry=pwd.getpwnam(user)
if not entry:
raise OSError('Unknown user %r'%user)
user=entry[2]
if isinstance(group,str):
import grp
entry=grp.getgrnam(group)
if not entry:
raise OSError('Unknown group %r'%group)
group=entry[2]
return os.lchown(path,user,group)
def run_regular_process(cmd,kwargs,cargs={}):
proc=subprocess.Popen(cmd,**kwargs)
if kwargs.get('stdout')or kwargs.get('stderr'):
try:
out,err=proc.communicate(**cargs)
except TimeoutExpired:
if kwargs.get('start_new_session')and hasattr(os,'killpg'):
os.killpg(proc.pid,signal.SIGKILL)
else:
proc.kill()
out,err=proc.communicate()
exc=TimeoutExpired(proc.args,timeout=cargs['timeout'],output=out)
exc.stderr=err
raise exc
status=proc.returncode
else:
out,err=(None,None)
try:
status=proc.wait(**cargs)
except TimeoutExpired as e:
if kwargs.get('start_new_session')and hasattr(os,'killpg'):
os.killpg(proc.pid,signal.SIGKILL)
else:
proc.kill()
proc.wait()
raise e
return status,out,err
def run_process(cmd,kwargs,cargs={}):
if kwargs.get('stdout')and kwargs.get('stderr'):
return run_prefork_process(cmd,kwargs,cargs)
else:
return run_regular_process(cmd,kwargs,cargs)
def alloc_process_pool(n,force=False):
global run_process,get_process,alloc_process_pool
if not force:
n=max(n-len(process_pool),0)
try:
lst=[get_process()for x in range(n)]
except OSError:
run_process=run_regular_process
get_process=alloc_process_pool=nada
else:
for x in lst:
process_pool.append(x)
def atexit_pool():
for k in process_pool:
try:
os.kill(k.pid,9)
except OSError:
pass
else:
k.wait()
if(sys.hexversion<0x207000f and not is_win32)or sys.hexversion>=0x306000f:
atexit.register(atexit_pool)
if os.environ.get('WAF_NO_PREFORK')or sys.platform=='cli'or not sys.executable:
run_process=run_regular_process
get_process=alloc_process_pool=nada
| [] | [] | ["MSYSTEM", "WAF_NO_PREFORK", "JOBS"] | [] | ["MSYSTEM", "WAF_NO_PREFORK", "JOBS"] | python | 3 | 0 | |
jira/examples/issue-type-screen-schemes/create/create.go | package main
import (
"context"
"github.com/ctreminiom/go-atlassian/jira"
"log"
"os"
)
func main() {
/*
----------- Set an environment variable in git bash -----------
export HOST="https://ctreminiom.atlassian.net/"
export MAIL="MAIL_ADDRESS"
export TOKEN="TOKEN_API"
Docs: https://stackoverflow.com/questions/34169721/set-an-environment-variable-in-git-bash
*/
var (
host = os.Getenv("HOST")
mail = os.Getenv("MAIL")
token = os.Getenv("TOKEN")
)
atlassian, err := jira.New(nil, host)
if err != nil {
log.Fatal(err)
}
atlassian.Auth.SetBasicAuth(mail, token)
payload := jira.IssueTypeScreenSchemePayloadScheme{
Name: "FX 2 Issue Type Screen Scheme",
IssueTypeMappings: []*jira.IssueTypeScreenSchemeMappingPayloadScheme{
{
IssueTypeID: "default",
ScreenSchemeID: "10000",
},
{
IssueTypeID: "10004", // Bug
ScreenSchemeID: "10002",
},
},
}
issueTypeScreenSchemeID, response, err := atlassian.Issue.Type.ScreenScheme.Create(context.Background(), &payload)
if err != nil {
if response != nil {
log.Println("Response HTTP Response", string(response.BodyAsBytes))
}
log.Fatal(err)
}
log.Println("Response HTTP Code", response.StatusCode)
log.Println("HTTP Endpoint Used", response.Endpoint)
log.Println(issueTypeScreenSchemeID)
}
| ["\"HOST\"", "\"MAIL\"", "\"TOKEN\""] | [] | ["MAIL", "HOST", "TOKEN"] | [] | ["MAIL", "HOST", "TOKEN"] | go | 3 | 0 | |
source/api/controlplane/profile/infrastructure/lambda/WorkflowErrorHandler/lambda_function.py | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
##############################################################################
#
# PURPOSE:
# Handle exceptions caught by the AWS Step Function workflow
# and optionally update the execution status of the Classifier plugin
#
##############################################################################
import os
import json
import traceback
import boto3
from MediaReplayEngineWorkflowHelper import ControlPlane
from MediaReplayEnginePluginHelper import MREExecutionError
eb_client = boto3.client("events")
def lambda_handler(event, context):
print("Lambda got the following event:\n", event)
program = event["Event"]["Program"]
p_event = event["Event"]["Name"]
key = event["Input"]["Media"]["S3Key"]
filename = os.path.split(key)[-1]
classifier_plugin_name = event["Profile"]["Classifier"]["Name"]
try:
print(f"Checking the execution status of '{classifier_plugin_name}' plugin in the current workflow execution for program '{program}' and event '{p_event}'")
controlplane = ControlPlane()
status = controlplane.get_plugin_execution_status(p_event, program, filename, classifier_plugin_name)
if not status or status in ["Waiting", "In Progress"]:
print(f"Updating the execution status of '{classifier_plugin_name}' from '{status}' to 'Error' to unblock upcoming workflow executions")
controlplane.put_plugin_execution_status(p_event, program, filename, classifier_plugin_name, "Error")
else:
print(f"No update is needed as the execution status of '{classifier_plugin_name}' is already '{status}'")
print(f"Notifying the workflow failure to MRE Event Bus with the caught exception message")
detail = {
"State": "WORKFLOW_FAILED",
"Payload": event
}
eb_client.put_events(
Entries=[
{
"Source": "awsmre",
"DetailType": "Workflow Status",
"Detail": json.dumps(detail),
"EventBusName": os.environ["EB_EVENT_BUS_NAME"]
}
]
)
print("Raising an exception back to the Step Function with the caught exception message to mark it as failed")
raise MREExecutionError(event["Error"])
except MREExecutionError:
raise
except Exception as e:
print(traceback.format_exc())
raise
| [] | [] | ["EB_EVENT_BUS_NAME"] | [] | ["EB_EVENT_BUS_NAME"] | python | 1 | 0 | |
cmd/controller-manager/main.go | /*
* Copyright 2018-2019, EnMasse authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package main
import (
"context"
"flag"
"fmt"
"strings"
"time"
"github.com/enmasseproject/enmasse/pkg/monitoring"
"os"
"runtime"
"github.com/enmasseproject/enmasse/pkg/util"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
"github.com/enmasseproject/enmasse/pkg/cache"
"github.com/enmasseproject/enmasse/pkg/controller"
"github.com/enmasseproject/enmasse/version"
"github.com/operator-framework/operator-sdk/pkg/k8sutil"
kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics"
"github.com/operator-framework/operator-sdk/pkg/leader"
"github.com/operator-framework/operator-sdk/pkg/log/zap"
"github.com/operator-framework/operator-sdk/pkg/metrics"
sdkVersion "github.com/operator-framework/operator-sdk/version"
"github.com/spf13/pflag"
v1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/intstr"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
monitoringv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/openshift/api"
enmassescheme "github.com/enmasseproject/enmasse/pkg/client/clientset/versioned/scheme"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Change the variables below to serve metrics on a different host or port.
var (
metricsHost = "0.0.0.0"
metricsPort int32 = 8383
operatorMetricsPort int32 = 8686
)
var log = logf.Log.WithName("cmd")
func printVersion() {
log.Info(fmt.Sprintf("Operator Version: %s", version.Version))
log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
}
func main() {
// Add the zap logger flag set to the CLI. The flag set must
// be added before calling pflag.Parse().
pflag.CommandLine.AddFlagSet(zap.FlagSet())
// Add flags registered by imported packages (e.g. glog and
// controller-runtime)
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
// Use a zap logr.Logger implementation. If none of the zap
// flags are configured (or if the zap flag set is not being
// used), this defaults to a production zap logger.
//
// The logger instantiated here can be changed to any logger
// implementing the logr.Logger interface. This logger will
// be propagated through the whole operator, generating
// uniform and structured logs.
logf.SetLogger(zap.Logger())
printVersion()
namespace := os.Getenv("NAMESPACE")
log.Info("Watching on namespace", "namespace", namespace)
// Get a config to talk to the apiserver
cfg, err := config.GetConfig()
if err != nil {
log.Error(err, "Failed to get configuration")
os.Exit(1)
}
ctx := context.TODO()
// Become the leader before proceeding
err = leader.Become(ctx, "enmasse-lock")
if err != nil {
log.Error(err, "")
os.Exit(1)
}
// prime cache with isOpenShift information
log.Info("Running on OpenShift", "result", util.IsOpenshift())
log.Info("Running on OpenShift 4", "result", util.IsOpenshift4())
monitoringEnabled := monitoring.IsEnabled()
// Install monitoring resources
if monitoringEnabled {
// Attempt to install the monitoring resources when the operator starts, and every 5 minutes thereafter
go func() {
ticker := time.NewTicker(5 * time.Minute)
for ; true; <-ticker.C {
serverClient, err := client.New(cfg, client.Options{})
if err != nil {
log.Info(fmt.Sprintf("Failed to install monitoring resources: %s", err))
} else {
err = installMonitoring(ctx, serverClient)
if err != nil {
log.Info(fmt.Sprintf("Failed to install monitoring resources: %s", err))
} else {
log.Info("Successfully installed monitoring resources")
ticker.Stop()
}
}
}
}()
}
globalGvks := make([]schema.GroupVersionKind, 0)
// IoTInfrastructure is only processed in the namespace of the operator
// FIXME: this may change in the future
/*
if util.IsModuleEnabled("IOT_CONFIG") {
globalGvks = append(globalGvks,
schema.GroupVersionKind{
Group: "iot.enmasse.io",
Version: "v1",
Kind: "IoTInfrastructure",
},
schema.GroupVersionKind{
Group: "iot.enmasse.io",
Version: "v1",
Kind: "IoTInfrastructureList",
})
}
*/
if util.IsModuleEnabled("IOT_TENANT") {
globalGvks = append(globalGvks,
schema.GroupVersionKind{
Group: "iot.enmasse.io",
Version: "v1",
Kind: "IoTTenant",
},
schema.GroupVersionKind{
Group: "iot.enmasse.io",
Version: "v1",
Kind: "IoTTenantList",
})
}
if util.IsModuleEnabled("MESSAGING_INFRASTRUCTURE") {
globalGvks = append(globalGvks,
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingInfrastructure",
},
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingInfrastructureList",
},
)
}
if util.IsModuleEnabled("MESSAGING_PROJECT") {
globalGvks = append(globalGvks,
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingProject",
},
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingProjectList",
},
)
}
if util.IsModuleEnabled("MESSAGING_ADDRESS") {
globalGvks = append(globalGvks,
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingAddress",
},
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingAddressList",
},
)
}
if util.IsModuleEnabled("MESSAGING_ENDPOINT") {
globalGvks = append(globalGvks,
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingEndpoint",
},
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingEndpointList",
},
)
}
if util.IsModuleEnabled("MESSAGING_PLAN") {
globalGvks = append(globalGvks,
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingPlan",
},
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingPlanList",
},
)
}
if util.IsModuleEnabled("MESSAGING_ADDRESS_PLAN") {
globalGvks = append(globalGvks,
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingAddressPlan",
},
schema.GroupVersionKind{
Group: "enmasse.io",
Version: "v1",
Kind: "MessagingAddressPlanList",
},
)
}
// Create a new Cmd to provide shared dependencies and start components
mgr, err := manager.New(cfg, manager.Options{
Namespace: namespace,
MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
NewCache: cache.NewDelegateCacheBuilder(namespace, globalGvks...),
})
if err != nil {
log.Error(err, "Failed to create manager")
os.Exit(1)
}
log.Info("Registering components...")
// register APIs
if err := api.Install(mgr.GetScheme()); err != nil {
log.Error(err, "Failed to register OpenShift schema")
os.Exit(1)
}
if err := enmassescheme.AddToScheme(mgr.GetScheme()); err != nil {
log.Error(err, "Failed to register EnMasse schema")
os.Exit(1)
}
if err := monitoringv1.AddToScheme(mgr.GetScheme()); err != nil {
log.Error(err, "Failed to register monitoring schema")
os.Exit(1)
}
if err := controller.CheckUpgrade(mgr); err != nil {
log.Error(err, "Failed to upgrade")
os.Exit(1)
}
// register controller
if err := controller.AddToManager(mgr); err != nil {
log.Error(err, "Failed to register controller")
os.Exit(1)
}
// Add the Metrics Service
if monitoringEnabled {
monitoring.StartIoTMetrics(mgr)
addMetrics(ctx, cfg, namespace)
}
log.Info("Starting the operator")
// Start the Cmd
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
log.Error(err, "manager exited non-zero")
os.Exit(1)
}
}
// addMetrics creates the Service and ServiceMonitor objects that expose the operator metrics to the Prometheus operator.
func addMetrics(ctx context.Context, cfg *rest.Config, namespace string) {
if err := serveCRMetrics(cfg); err != nil {
log.Info("Could not generate and serve custom resource metrics", "error", err.Error())
}
// Add to the below struct any other metrics ports you want to expose.
servicePorts := []v1.ServicePort{
{Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}},
{Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}},
}
// Create Service object to expose the metrics port(s).
service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts)
if err != nil {
log.Info("Could not create metrics Service", "error", err.Error())
}
// Adding the monitoring-key:middleware to the operator service which will get propagated to the serviceMonitor
err = addMonitoringKeyLabelToOperatorService(ctx, cfg, service)
if err != nil {
log.Error(err, "Could not add monitoring-key label to operator metrics Service")
}
// CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources
// necessary to configure Prometheus to scrape metrics from this operator.
services := []*v1.Service{service}
_, err = metrics.CreateServiceMonitors(cfg, namespace, services, func(monitor *monitoringv1.ServiceMonitor) error {
for i := range monitor.Spec.Endpoints {
monitor.Spec.Endpoints[i].MetricRelabelConfigs = []*monitoringv1.RelabelConfig{
&monitoringv1.RelabelConfig{
SourceLabels: []string{
"__name__",
},
TargetLabel: "__name__",
Replacement: "enmasse_${1}",
},
}
}
return nil
})
if err != nil {
log.Info("Could not create ServiceMonitor object", "error", err.Error())
// If this operator is deployed to a cluster without the prometheus-operator running, it will return
// ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation.
if err == metrics.ErrServiceMonitorNotPresent {
log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error())
}
}
}
// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types.
// It serves those metrics on "http://metricsHost:operatorMetricsPort".
func serveCRMetrics(cfg *rest.Config) error {
// Below function returns all GVKs for EnMasse.
allGVK, err := k8sutil.GetGVKsFromAddToScheme(enmassescheme.AddToScheme)
if err != nil {
return err
}
filteredGVK := make([]schema.GroupVersionKind, 0)
for _, gvk := range allGVK {
if (!util.IsModuleEnabled("MESSAGING_INFRASTRUCTURE") && strings.HasPrefix(gvk.Kind, "MessagingInfrastructure")) ||
(!util.IsModuleEnabled("MESSAGING_PROJECT") && strings.HasPrefix(gvk.Kind, "MessagingProject")) ||
(!util.IsModuleEnabled("MESSAGING_ENDPOINT") && strings.HasPrefix(gvk.Kind, "MessagingEndpoint")) ||
(!util.IsModuleEnabled("MESSAGING_ADDRESS") && strings.HasPrefix(gvk.Kind, "MessagingAddress")) ||
(!util.IsModuleEnabled("MESSAGING_PLAN") && strings.HasPrefix(gvk.Kind, "MessagingPlan")) ||
(!util.IsModuleEnabled("MESSAGING_ADDRESS_PLAN") && strings.HasPrefix(gvk.Kind, "MessagingAddressPlan")) {
log.Info("Skipping adding metric because module is not enabled", "gvk", gvk)
} else {
filteredGVK = append(filteredGVK, gvk)
}
}
// Get the namespace the operator is currently deployed in.
operatorNs, err := k8sutil.GetOperatorNamespace()
if err != nil {
return err
}
// To generate metrics in other namespaces, add the values below.
ns := []string{operatorNs}
// Generate and serve custom resource specific metrics.
err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort)
if err != nil {
return err
}
return nil
}
func addMonitoringKeyLabelToOperatorService(ctx context.Context, cfg *rest.Config, service *v1.Service) error {
if service == nil {
return fmt.Errorf("service doesn't exist")
}
kclient, err := client.New(cfg, client.Options{})
if err != nil {
return err
}
updatedLabels := map[string]string{"monitoring-key": "middleware"}
for k, v := range service.ObjectMeta.Labels {
updatedLabels[k] = v
}
service.ObjectMeta.Labels = updatedLabels
err = kclient.Update(ctx, service)
if err != nil {
return err
}
return nil
}
func installMonitoring(ctx context.Context, client client.Client) error {
log.Info("Installing monitoring resources")
params := map[string]string{"Namespace": os.Getenv("NAMESPACE")}
templateHelper := util.NewTemplateHelper(params)
for _, template := range templateHelper.TemplateList {
resource, err := templateHelper.CreateResource(template)
if err != nil {
return fmt.Errorf("createResource failed: %s", err)
}
err = client.Create(ctx, resource)
if err != nil {
if !kerrors.IsAlreadyExists(err) {
return fmt.Errorf("error creating resource: %s", err)
}
}
}
return nil
}
| ["\"NAMESPACE\"", "\"NAMESPACE\""] | [] | ["NAMESPACE"] | [] | ["NAMESPACE"] | go | 1 | 0 | |
tests/test_multiplexer.py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the Multiplexer."""
import asyncio
import logging
import os
import shutil
import sys
import tempfile
import time
import unittest.mock
from pathlib import Path
from threading import Thread
from unittest import mock
from unittest.mock import MagicMock, Mock, call, patch
import pytest
from pexpect.exceptions import EOF # type: ignore
import aea
from aea.cli.core import cli
from aea.configurations.constants import DEFAULT_LEDGER
from aea.connections.base import ConnectionStates
from aea.helpers.exception_policy import ExceptionPolicyEnum
from aea.identity.base import Identity
from aea.mail.base import AEAConnectionError, Envelope, EnvelopeContext
from aea.multiplexer import AsyncMultiplexer, InBox, Multiplexer, OutBox
from aea.test_tools.click_testing import CliRunner
from packages.fetchai.connections.local.connection import LocalNode
from packages.fetchai.connections.p2p_libp2p.connection import (
PUBLIC_ID as P2P_PUBLIC_ID,
)
from packages.fetchai.connections.stub.connection import PUBLIC_ID as STUB_CONNECTION_ID
from packages.fetchai.protocols.default.message import DefaultMessage
from packages.fetchai.protocols.fipa.message import FipaMessage
from .conftest import (
AUTHOR,
CLI_LOG_OPTION,
ROOT_DIR,
UNKNOWN_CONNECTION_PUBLIC_ID,
UNKNOWN_PROTOCOL_PUBLIC_ID,
_make_dummy_connection,
_make_local_connection,
_make_stub_connection,
logger,
)
from tests.common.pexpect_popen import PexpectWrapper
from tests.common.utils import wait_for_condition
UnknownProtocolMock = Mock()
UnknownProtocolMock.protocol_id = UNKNOWN_PROTOCOL_PUBLIC_ID
UnknownProtocolMock.protocol_specification_id = UNKNOWN_PROTOCOL_PUBLIC_ID
@pytest.mark.asyncio
async def test_receiving_loop_terminated():
"""Test that connecting twice the multiplexer behaves correctly."""
multiplexer = Multiplexer([_make_dummy_connection()])
multiplexer.connect()
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
multiplexer.connection_status.set(ConnectionStates.disconnected)
await multiplexer._receiving_loop()
mock_logger_debug.assert_called_with("Receiving loop terminated.")
multiplexer.connection_status.set(ConnectionStates.connected)
multiplexer.disconnect()
def test_connect_twice():
"""Test that connecting twice the multiplexer behaves correctly."""
multiplexer = Multiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.disconnect()
def test_disconnect_twice():
"""Test that connecting twice the multiplexer behaves correctly."""
multiplexer = Multiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.disconnect()
multiplexer.disconnect()
def test_connect_twice_with_loop():
"""Test that connecting twice the multiplexer behaves correctly."""
running_loop = asyncio.new_event_loop()
thread_loop = Thread(target=running_loop.run_forever)
thread_loop.start()
try:
multiplexer = Multiplexer([_make_dummy_connection()], loop=running_loop)
with unittest.mock.patch.object(
multiplexer.logger, "debug"
) as mock_logger_debug:
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
mock_logger_debug.assert_called_with("Multiplexer already connected.")
multiplexer.disconnect()
running_loop.call_soon_threadsafe(running_loop.stop)
finally:
thread_loop.join()
@pytest.mark.asyncio
async def test_connect_twice_a_single_connection():
"""Test that connecting twice a single connection behaves correctly."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
assert not multiplexer.connection_status.is_connected
await multiplexer._connect_one(connection.connection_id)
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
await multiplexer._connect_one(connection.connection_id)
mock_logger_debug.assert_called_with(
"Connection fetchai/dummy:0.1.0 already established."
)
await multiplexer._disconnect_one(connection.connection_id)
@pytest.mark.asyncio
async def test_run_bad_connect():
"""Test that run() raises an error when the multiplexer is not connected properly."""
connection = _make_dummy_connection()
multiplexer = AsyncMultiplexer([connection])
f = asyncio.Future()
f.set_result(None)
with unittest.mock.patch.object(multiplexer, "connect", return_value=f):
with pytest.raises(ValueError, match="Multiplexer is not connected properly."):
await multiplexer.run()
def test_multiplexer_connect_all_raises_error():
"""Test the case when the multiplexer raises an exception while connecting."""
multiplexer = Multiplexer([_make_dummy_connection()])
with unittest.mock.patch.object(multiplexer, "_connect_all", side_effect=Exception):
with pytest.raises(
AEAConnectionError, match="Failed to connect the multiplexer."
):
multiplexer.connect()
multiplexer.disconnect()
def test_multiplexer_connect_one_raises_error_many_connections():
"""Test the case when the multiplexer raises an exception while attempting the connection of one connection."""
node = LocalNode()
tmpdir = Path(tempfile.mkdtemp())
d = tmpdir / "test_stub"
d.mkdir(parents=True)
input_file_path = d / "input_file.csv"
output_file_path = d / "input_file.csv"
connection_1 = _make_local_connection("my_addr", node)
connection_2 = _make_stub_connection(input_file_path, output_file_path)
connection_3 = _make_dummy_connection()
multiplexer = Multiplexer([connection_1, connection_2, connection_3])
assert not connection_1.is_connected
assert not connection_2.is_connected
assert not connection_3.is_connected
with unittest.mock.patch.object(connection_3, "connect", side_effect=Exception):
with pytest.raises(
AEAConnectionError, match="Failed to connect the multiplexer."
):
multiplexer.connect()
assert not connection_1.is_connected
assert not connection_2.is_connected
assert not connection_3.is_connected
multiplexer.disconnect()
try:
shutil.rmtree(tmpdir)
except OSError as e:
logger.warning("Couldn't delete {}".format(tmpdir))
logger.exception(e)
@pytest.mark.asyncio
async def test_disconnect_twice_a_single_connection():
"""Test that connecting twice a single connection behaves correctly."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
await multiplexer._disconnect_one(connection.connection_id)
mock_logger_debug.assert_called_with(
"Connection fetchai/dummy:0.1.0 already disconnected."
)
def test_multiplexer_disconnect_all_raises_error():
"""Test the case when the multiplexer raises an exception while disconnecting."""
multiplexer = Multiplexer([_make_dummy_connection()])
multiplexer.connect()
assert multiplexer.connection_status.is_connected
with unittest.mock.patch.object(
multiplexer, "_disconnect_all", side_effect=Exception
):
with pytest.raises(
AEAConnectionError, match="Failed to disconnect the multiplexer."
):
multiplexer.disconnect()
# do the real disconnection to clean up after the test
assert multiplexer.connection_status.is_disconnecting
multiplexer.disconnect()
assert multiplexer.connection_status.is_disconnected
@pytest.mark.asyncio
async def test_multiplexer_disconnect_one_raises_error_many_connections():
"""Test the case when the multiplexer raises an exception while attempting the disconnection of one connection."""
with LocalNode() as node:
tmpdir = Path(tempfile.mkdtemp())
d = tmpdir / "test_stub"
d.mkdir(parents=True)
input_file_path = d / "input_file.csv"
output_file_path = d / "input_file.csv"
connection_1 = _make_local_connection("my_addr", node)
connection_2 = _make_stub_connection(input_file_path, output_file_path)
connection_3 = _make_dummy_connection()
multiplexer = Multiplexer([connection_1, connection_2, connection_3])
assert not connection_1.is_connected
assert not connection_2.is_connected
assert not connection_3.is_connected
multiplexer.connect()
assert connection_1.is_connected
assert connection_2.is_connected
assert connection_3.is_connected
with unittest.mock.patch.object(
connection_3, "disconnect", side_effect=Exception
):
with pytest.raises(
AEAConnectionError, match="Failed to disconnect the multiplexer."
):
multiplexer.disconnect()
assert not connection_1.is_connected
assert not connection_2.is_connected
assert connection_3.is_connected
# clean the test up.
await connection_3.disconnect()
multiplexer.disconnect()
try:
shutil.rmtree(tmpdir)
except OSError as e:
logger.warning("Couldn't delete {}".format(tmpdir))
logger.exception(e)
@pytest.mark.asyncio
async def test_sending_loop_does_not_start_if_multiplexer_not_connected():
"""Test that the sending loop is stopped does not start if the multiplexer is not connected."""
multiplexer = Multiplexer([_make_dummy_connection()])
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
await multiplexer._send_loop()
mock_logger_debug.assert_called_with(
"Sending loop not started. The multiplexer is not connected."
)
@pytest.mark.asyncio
async def test_sending_loop_cancelled():
"""Test the case when the sending loop is cancelled."""
multiplexer = Multiplexer([_make_dummy_connection()])
multiplexer.connect()
await asyncio.sleep(0.1)
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
multiplexer.disconnect()
mock_logger_debug.assert_any_call("Sending loop cancelled.")
@pytest.mark.asyncio
async def test_receiving_loop_raises_exception():
"""Test the case when an error occurs when a receive is started."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
with unittest.mock.patch("asyncio.wait", side_effect=Exception("a weird error.")):
with unittest.mock.patch.object(
multiplexer.logger, "error"
) as mock_logger_error:
multiplexer.connect()
time.sleep(0.1)
mock_logger_error.assert_called_with(
"Error in the receiving loop: a weird error.", exc_info=True
)
multiplexer.disconnect()
@pytest.mark.asyncio
async def test_send_envelope_with_non_registered_connection():
"""Test that sending an envelope with an unregistered connection raises an exception."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection], protocols=[DefaultProtocolMock])
multiplexer.connect()
envelope = Envelope(
to="",
sender="",
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=b"",
context=EnvelopeContext(connection_id=UNKNOWN_CONNECTION_PUBLIC_ID),
)
with unittest.mock.patch.object(
multiplexer.logger, "warning"
) as mock_logger_warning:
await multiplexer._send(envelope)
mock_logger_warning.assert_called_with(
f"Dropping envelope, no connection available for sending: {envelope}"
)
multiplexer.disconnect()
@pytest.mark.asyncio
async def test_send_envelope_when_no_connection():
"""Test that sending an envelope with no connection logs a warning."""
multiplexer = Multiplexer([], protocols=[DefaultProtocolMock])
multiplexer.connect()
envelope = Envelope(
to="",
sender="",
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=b"",
)
with unittest.mock.patch.object(
multiplexer.logger, "warning"
) as mock_logger_warning:
await multiplexer._send(envelope)
mock_logger_warning.assert_called_with(
f"Dropping envelope, no connection available for sending: {envelope}"
)
multiplexer.disconnect()
def test_send_envelope_error_is_logged_by_send_loop():
"""Test that the AEAConnectionError in the '_send' method is logged by the '_send_loop'."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection], protocols=[DefaultProtocolMock])
multiplexer.connect()
fake_connection_id = UNKNOWN_CONNECTION_PUBLIC_ID
envelope = Envelope(
to="",
sender="",
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=b"",
context=EnvelopeContext(connection_id=fake_connection_id),
)
with unittest.mock.patch.object(multiplexer.logger, "error") as mock_logger_error:
multiplexer.put(envelope)
time.sleep(0.1)
mock_logger_error.assert_called_with(
"No connection registered with id: {}".format(fake_connection_id)
)
multiplexer.disconnect()
def test_get_from_multiplexer_when_empty():
"""Test that getting an envelope from the multiplexer when the input queue is empty raises an exception."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
with pytest.raises(aea.mail.base.Empty):
multiplexer.get()
def test_send_message_no_supported_protocol():
"""Test the case when we send an envelope with a specific connection that does not support the protocol."""
with LocalNode() as node:
identity_1 = Identity("identity", address="address_1")
connection_1 = _make_local_connection(
identity_1.address,
node,
restricted_to_protocols={DefaultMessage.protocol_id},
excluded_protocols={FipaMessage.protocol_id},
)
multiplexer = Multiplexer(
[connection_1], protocols=[DefaultMessage, FipaMessage, UnknownProtocolMock]
)
multiplexer.connect()
with mock.patch.object(multiplexer.logger, "warning") as mock_logger_warning:
envelope = Envelope(
to=identity_1.address,
sender=identity_1.address,
protocol_specification_id=FipaMessage.protocol_specification_id,
message=b"some bytes",
)
multiplexer.put(envelope)
time.sleep(0.5)
mock_logger_warning.assert_called_with(
"Connection {} does not support protocol {}. It is explicitly excluded.".format(
connection_1.connection_id, FipaMessage.protocol_id
)
)
with mock.patch.object(multiplexer.logger, "warning") as mock_logger_warning:
envelope = Envelope(
to=identity_1.address,
sender=identity_1.address,
protocol_specification_id=UnknownProtocolMock.protocol_specification_id,
message=b"some bytes",
)
multiplexer.put(envelope)
time.sleep(0.5)
mock_logger_warning.assert_called_with(
"Connection {} does not support protocol {}. The connection is restricted to protocols in {}.".format(
connection_1.connection_id,
UnknownProtocolMock.protocol_id,
connection_1.restricted_to_protocols,
)
)
multiplexer.disconnect()
def test_protocol_not_resolved():
"""Test multiplexer raises ValueError on protocol not resolved."""
multiplexer = Multiplexer([Mock()])
envelope = Envelope(
to="1",
sender="2",
protocol_specification_id=FipaMessage.protocol_specification_id,
message=b"some bytes",
)
with pytest.raises(ValueError):
multiplexer._get_protocol_id_for_envelope(envelope)
def test_autoset_default_connection():
"""Set default connection automatically."""
connection_1 = _make_dummy_connection()
connection_2 = _make_dummy_connection()
connections = [connection_1, connection_2]
multiplexer = Multiplexer(connections)
multiplexer._default_connection = None
multiplexer._set_default_connection_if_none()
assert multiplexer._default_connection == connections[0]
def test__get_connection():
"""Test the method _get_connection."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = Multiplexer(connections)
conn_ = multiplexer._get_connection(connection_1.connection_id.to_any())
assert conn_ == connection_1
@pytest.mark.asyncio
async def test_disconnect_when_not_connected():
"""Test disconnect when not connected."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections)
with patch.object(multiplexer, "_disconnect_all") as disconnect_all_mocked:
await multiplexer.disconnect()
disconnect_all_mocked.assert_not_called()
@pytest.mark.asyncio
async def test_exit_on_none_envelope():
"""Test sending task exit on None envelope."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
try:
await multiplexer.connect()
assert multiplexer.is_connected
multiplexer.put(None)
await asyncio.sleep(0.5)
assert multiplexer._send_loop_task.done()
finally:
await multiplexer.disconnect()
@pytest.mark.asyncio
async def test_inbox_outbox():
"""Test InBox OutBox objects."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",)
msg.to = "to"
msg.sender = "sender"
envelope = Envelope(to="to", sender="sender", message=msg,)
try:
await multiplexer.connect()
inbox = InBox(multiplexer)
outbox = OutBox(multiplexer)
assert inbox.empty()
assert outbox.empty()
outbox.put(envelope)
received = await inbox.async_get()
assert received == envelope
assert inbox.empty()
assert outbox.empty()
outbox.put_message(msg)
await inbox.async_wait()
received = inbox.get_nowait()
assert received == envelope
finally:
await multiplexer.disconnect()
@pytest.mark.asyncio
async def test_threaded_mode():
"""Test InBox OutBox objects in threaded mode."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, threaded=True)
msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",)
msg.to = "to"
msg.sender = "sender"
envelope = Envelope(to="to", sender="sender", message=msg)
try:
await multiplexer.connect()
await asyncio.sleep(0.5)
inbox = InBox(multiplexer)
outbox = OutBox(multiplexer)
assert inbox.empty()
assert outbox.empty()
outbox.put(envelope)
received = await inbox.async_get()
assert received == envelope
assert inbox.empty()
assert outbox.empty()
outbox.put_message(msg)
await inbox.async_wait()
received = inbox.get_nowait()
assert received == envelope
finally:
await multiplexer.disconnect()
@pytest.mark.asyncio
async def test_outbox_negative():
"""Test InBox OutBox objects."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",)
context = EnvelopeContext(connection_id=connection_1.connection_id)
envelope = Envelope(
to="to",
sender="sender",
protocol_specification_id=msg.protocol_specification_id,
message=b"",
context=context,
)
try:
await multiplexer.connect()
outbox = OutBox(multiplexer)
assert outbox.empty()
with pytest.raises(ValueError) as execinfo:
outbox.put(envelope)
assert (
str(execinfo.value)
== "Only Message type allowed in envelope message field when putting into outbox."
)
assert outbox.empty()
with pytest.raises(ValueError) as execinfo:
outbox.put_message("")
assert str(execinfo.value) == "Provided message not of type Message."
assert outbox.empty()
with pytest.raises(ValueError) as execinfo:
outbox.put_message(msg)
assert str(execinfo.value) == "Provided message has message.to not set."
assert outbox.empty()
msg.to = "to"
with pytest.raises(ValueError) as execinfo:
outbox.put_message(msg)
assert str(execinfo.value) == "Provided message has message.sender not set."
finally:
await multiplexer.disconnect()
DefaultProtocolMock = Mock()
DefaultProtocolMock.protocol_id = DefaultMessage.protocol_id
DefaultProtocolMock.protocol_specification_id = DefaultMessage.protocol_specification_id
@pytest.mark.asyncio
async def test_default_route_applied(caplog):
"""Test default route is selected automatically."""
logger = logging.getLogger("aea.multiplexer")
with caplog.at_level(logging.DEBUG, logger="aea.multiplexer"):
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(
connections, loop=asyncio.get_event_loop(), protocols=[DefaultProtocolMock]
)
multiplexer.logger = logger
envelope = Envelope(
to="",
sender="",
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=b"",
context=EnvelopeContext(),
)
multiplexer.default_routing = {
DefaultMessage.protocol_id: connection_1.connection_id
}
try:
await multiplexer.connect()
inbox = InBox(multiplexer)
outbox = InBox(multiplexer)
assert inbox.empty()
assert outbox.empty()
multiplexer.put(envelope)
await outbox.async_get()
finally:
await multiplexer.disconnect()
assert "Using default routing:" in caplog.text
@pytest.mark.asyncio
async def test_connection_id_in_to_field_detected(caplog):
"""Test to field is parsed correctly and used for routing."""
logger = logging.getLogger("aea.multiplexer")
with caplog.at_level(logging.DEBUG, logger="aea.multiplexer"):
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(
connections, loop=asyncio.get_event_loop(), protocols=[DefaultProtocolMock]
)
multiplexer.logger = logger
envelope = Envelope(
to=str(connection_1.connection_id),
sender="some_author/some_skill:0.1.0",
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=b"",
)
try:
await multiplexer.connect()
inbox = InBox(multiplexer)
outbox = InBox(multiplexer)
assert inbox.empty()
assert outbox.empty()
multiplexer.put(envelope)
await outbox.async_get()
finally:
await multiplexer.disconnect()
assert "Using envelope `to` field as connection_id:" in caplog.text
@pytest.mark.asyncio
async def test_routing_helper_applied(caplog):
"""Test the routing helper is used for routing."""
logger = logging.getLogger("aea.multiplexer")
with caplog.at_level(logging.DEBUG, logger="aea.multiplexer"):
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(
connections, loop=asyncio.get_event_loop(), protocols=[DefaultProtocolMock]
)
multiplexer.logger = logger
envelope = Envelope(
to="test",
sender="",
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=b"",
)
multiplexer._routing_helper[envelope.to] = connection_1.connection_id
try:
await multiplexer.connect()
inbox = InBox(multiplexer)
outbox = InBox(multiplexer)
assert inbox.empty()
assert outbox.empty()
multiplexer.put(envelope)
await outbox.async_get()
finally:
await multiplexer.disconnect()
assert (
f"Using routing helper with connection_id: {connection_1.connection_id}"
in caplog.text
)
def test_multiplexer_setup():
"""Test multiplexer setup to set connections."""
node = LocalNode()
tmpdir = Path(tempfile.mkdtemp())
d = tmpdir / "test_stub"
d.mkdir(parents=True)
input_file_path = d / "input_file.csv"
output_file_path = d / "input_file.csv"
connection_1 = _make_local_connection("my_addr", node)
connection_2 = _make_stub_connection(input_file_path, output_file_path)
connection_3 = _make_dummy_connection()
connections = [connection_1, connection_2, connection_3]
multiplexer = Multiplexer([])
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
multiplexer._connection_consistency_checks()
mock_logger_debug.assert_called_with("List of connections is empty.")
multiplexer._setup(connections, default_routing=None)
multiplexer._connection_consistency_checks()
class TestExceptionHandlingOnConnectionSend:
"""Test exception handling policy on connection.send."""
def setup(self):
"""Set up test case."""
self.connection = _make_dummy_connection()
self.multiplexer = Multiplexer(
[self.connection], protocols=[DefaultProtocolMock]
)
self.multiplexer.connect()
self.envelope = Envelope(
to="",
sender="",
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=b"",
context=EnvelopeContext(connection_id=self.connection.connection_id),
)
self.exception = ValueError("expected")
def teardown(self):
"""Tear down test case."""
self.multiplexer.disconnect()
def test_log_policy(self):
"""Test just log exception."""
with patch.object(self.connection, "send", side_effect=self.exception):
self.multiplexer._exception_policy = ExceptionPolicyEnum.just_log
self.multiplexer.put(self.envelope)
time.sleep(1)
assert not self.multiplexer._send_loop_task.done()
def test_propagate_policy(self):
"""Test propagate exception."""
assert self.multiplexer._exception_policy == ExceptionPolicyEnum.propagate
with patch.object(self.connection, "send", side_effect=self.exception):
self.multiplexer.put(self.envelope)
time.sleep(1)
wait_for_condition(
lambda: self.multiplexer._send_loop_task.done(), timeout=5
)
assert self.multiplexer._send_loop_task.exception() == self.exception
def test_stop_policy(self):
"""Test stop multiplexer on exception."""
with patch.object(self.connection, "send", side_effect=self.exception):
self.multiplexer._exception_policy = ExceptionPolicyEnum.stop_and_exit
self.multiplexer.put(self.envelope)
time.sleep(1)
wait_for_condition(
lambda: self.multiplexer.connection_status.is_disconnected, timeout=5
)
def test_disconnect_order(self):
"""Test disconnect order: tasks first, disconnect_all next."""
parent = MagicMock()
async def fn():
return
with patch.object(
self.multiplexer, "_stop_receive_send_loops", return_value=fn()
) as stop_loops, patch.object(
self.multiplexer, "_disconnect_all", return_value=fn()
) as disconnect_all, patch.object(
self.multiplexer, "_check_and_set_disconnected_state"
) as check_and_set_disconnected_state:
parent.attach_mock(stop_loops, "stop_loops")
parent.attach_mock(disconnect_all, "disconnect_all")
parent.attach_mock(
check_and_set_disconnected_state, "check_and_set_disconnected_state"
)
self.multiplexer.disconnect()
assert parent.mock_calls == [
call.stop_loops(),
call.disconnect_all(),
call.check_and_set_disconnected_state(),
]
class TestMultiplexerDisconnectsOnTermination: # pylint: disable=attribute-defined-outside-init
"""Test multiplexer disconnects on agent process keyboard interrupted."""
def setup(self):
"""Set the test up."""
self.proc = None
self.runner = CliRunner()
self.agent_name = "myagent"
self.cwd = os.getcwd()
self.t = tempfile.mkdtemp()
shutil.copytree(Path(ROOT_DIR, "packages"), Path(self.t, "packages"))
os.chdir(self.t)
self.key_path = os.path.join(self.t, "fetchai_private_key.txt")
self.conn_key_path = os.path.join(self.t, "conn_private_key.txt")
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR]
)
assert result.exit_code == 0
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "create", "--local", self.agent_name]
)
assert result.exit_code == 0
os.chdir(Path(self.t, self.agent_name))
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "generate-key", DEFAULT_LEDGER, self.key_path]
)
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "add-key", DEFAULT_LEDGER, self.key_path]
)
assert result.exit_code == 0, result.stdout_bytes
def test_multiplexer_disconnected_on_early_interruption(self):
"""Test multiplexer disconnected properly on termination before connected."""
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "add", "--local", "connection", str(P2P_PUBLIC_ID)]
)
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "build"])
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "generate-key", DEFAULT_LEDGER, self.conn_key_path]
)
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"add-key",
DEFAULT_LEDGER,
self.conn_key_path,
"--connection",
],
)
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "issue-certificates"])
assert result.exit_code == 0, result.stdout_bytes
self.proc = PexpectWrapper( # nosec
[sys.executable, "-m", "aea.cli", "-v", "DEBUG", "run"],
env=os.environ,
maxread=10000,
encoding="utf-8",
logfile=sys.stdout,
)
self.proc.expect_all(
["Starting libp2p node..."], timeout=50,
)
self.proc.control_c()
self.proc.expect_all(
["Multiplexer .*disconnected."], timeout=20, strict=False,
)
self.proc.expect_all(
[EOF], timeout=20,
)
def test_multiplexer_disconnected_on_termination_after_connected_no_connection(
self,
):
"""Test multiplexer disconnected properly on termination after connected."""
self.proc = PexpectWrapper( # nosec
[sys.executable, "-m", "aea.cli", "-v", "DEBUG", "run"],
env=os.environ,
maxread=10000,
encoding="utf-8",
logfile=sys.stdout,
)
self.proc.expect_all(
["Start processing messages..."], timeout=20,
)
self.proc.control_c()
self.proc.expect_all(
["Multiplexer disconnecting...", "Multiplexer disconnected.", EOF],
timeout=20,
)
def test_multiplexer_disconnected_on_termination_after_connected_one_connection(
self,
):
"""Test multiplexer disconnected properly on termination after connected."""
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "add", "--local", "connection", str(STUB_CONNECTION_ID)],
)
assert result.exit_code == 0, result.stdout_bytes
self.proc = PexpectWrapper( # nosec
[sys.executable, "-m", "aea.cli", "-v", "DEBUG", "run"],
env=os.environ,
maxread=10000,
encoding="utf-8",
logfile=sys.stdout,
)
self.proc.expect_all(
["Start processing messages..."], timeout=20,
)
self.proc.control_c()
self.proc.expect_all(
["Multiplexer disconnecting...", "Multiplexer disconnected.", EOF],
timeout=20,
)
def teardown(self):
"""Tear the test down."""
if self.proc:
self.proc.wait_to_complete(10)
os.chdir(self.cwd)
try:
shutil.rmtree(self.t)
except (OSError, IOError):
pass
def test_multiplexer_setup_replaces_connections():
"""Test proper connections reset on setup call."""
m = AsyncMultiplexer([MagicMock(), MagicMock(), MagicMock()])
assert len(m._id_to_connection) == 3
assert len(m._connections) == 3
m._setup([MagicMock()], MagicMock())
assert len(m._id_to_connection) == 1
assert len(m._connections) == 1
def test_connect_after_disconnect_sync():
"""Test connect-disconnect-connect again for threaded multiplexer."""
multiplexer = Multiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.disconnect()
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.disconnect()
assert not multiplexer.connection_status.is_connected
@pytest.mark.asyncio
async def test_connect_after_disconnect_async():
"""Test connect-disconnect-connect again for async multiplexer."""
multiplexer = AsyncMultiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
await multiplexer.connect()
assert multiplexer.connection_status.is_connected
await multiplexer.disconnect()
assert not multiplexer.connection_status.is_connected
await multiplexer.connect()
assert multiplexer.connection_status.is_connected
await multiplexer.disconnect()
assert not multiplexer.connection_status.is_connected
@pytest.mark.asyncio
async def test_connection_timeouts():
"""Test connect,send, disconnect timeouts for connections."""
    async def slow_fn(*args, **kwargs):
await asyncio.sleep(100)
connection = _make_dummy_connection()
envelope = Envelope(
to="",
sender="",
message=DefaultMessage(performative=DefaultMessage.Performative.BYTES),
context=EnvelopeContext(connection_id=connection.connection_id),
)
connection = _make_dummy_connection()
connection.connect = slow_fn
multiplexer = AsyncMultiplexer([connection])
multiplexer.CONNECT_TIMEOUT = 0.1
with pytest.raises(AEAConnectionError, match=r"TimeoutError"):
await multiplexer.connect()
connection = _make_dummy_connection()
connection.send = slow_fn
multiplexer = AsyncMultiplexer([connection])
multiplexer.SEND_TIMEOUT = 0.1
await multiplexer.connect()
with pytest.raises(asyncio.TimeoutError):
await multiplexer._send(envelope)
await multiplexer.disconnect()
connection = _make_dummy_connection()
connection.disconnect = slow_fn
multiplexer = AsyncMultiplexer([connection])
multiplexer.DISCONNECT_TIMEOUT = 0.1
await multiplexer.connect()
with pytest.raises(
AEAConnectionError,
match=f"Failed to disconnect multiplexer, some connections are not disconnected.*{str(connection.connection_id)}",
):
await multiplexer.disconnect()
@pytest.mark.asyncio
async def test_stops_on_connectionerror_during_connect():
"""Test multiplexer stopped and reraise exception on connect fails on conection.connect with AEAConnectionError."""
connection = _make_dummy_connection()
multiplexer = AsyncMultiplexer([connection])
with patch.object(
connection, "connect", side_effect=AEAConnectionError("expected")
):
with pytest.raises(AEAConnectionError, match=r"expected"):
await multiplexer.connect()
assert multiplexer.connection_status.is_disconnected
| [] | [] | [] | [] | [] | python | 0 | 0 | |
lib-src/lv2/suil/waflib/Tools/c_osx.py | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy 2008-2018 (ita)
"""
MacOSX related tools
"""
import os, shutil, platform
from waflib import Task, Utils
from waflib.TaskGen import taskgen_method, feature, after_method, before_method
app_info = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>{app_name}</string>
</dict>
</plist>
'''
"""
plist template
"""
@feature('c', 'cxx')
def set_macosx_deployment_target(self):
"""
	see WAF issue 285 and also http://trac.macports.org/ticket/17059
"""
if self.env.MACOSX_DEPLOYMENT_TARGET:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env.MACOSX_DEPLOYMENT_TARGET
elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
if Utils.unversioned_sys_platform() == 'darwin':
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])
@taskgen_method
def create_bundle_dirs(self, name, out):
"""
Creates bundle folders, used by :py:func:`create_task_macplist` and :py:func:`create_task_macapp`
"""
dir = out.parent.find_or_declare(name)
dir.mkdir()
macos = dir.find_or_declare(['Contents', 'MacOS'])
macos.mkdir()
return dir
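# Derives the bundle name from the build output's file name,
# e.g. "foo.bin" -> "foo.app" and "foo" -> "foo.app".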
def bundle_name_for_output(out):
name = out.name
k = name.rfind('.')
if k >= 0:
name = name[:k] + '.app'
else:
name = name + '.app'
return name
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macapp(self):
"""
To compile an executable into a Mac application (a .app), set its *mac_app* attribute::
def build(bld):
bld.shlib(source='a.c', target='foo', mac_app=True)
To force *all* executables to be transformed into Mac applications::
def build(bld):
bld.env.MACAPP = True
bld.shlib(source='a.c', target='foo')
"""
if self.env.MACAPP or getattr(self, 'mac_app', False):
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'MacOS', out.name])
self.apptask = self.create_task('macapp', self.link_task.outputs, n1)
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name
self.add_install_files(install_to=inst_to, install_from=n1, chmod=Utils.O755)
if getattr(self, 'mac_files', None):
# this only accepts files; they will be installed as seen from mac_files_root
mac_files_root = getattr(self, 'mac_files_root', None)
if isinstance(mac_files_root, str):
mac_files_root = self.path.find_node(mac_files_root)
if not mac_files_root:
self.bld.fatal('Invalid mac_files_root %r' % self.mac_files_root)
res_dir = n1.parent.parent.make_node('Resources')
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name
for node in self.to_nodes(self.mac_files):
relpath = node.path_from(mac_files_root or node.parent)
self.create_task('macapp', node, res_dir.make_node(relpath))
self.add_install_as(install_to=os.path.join(inst_to, relpath), install_from=node)
if getattr(self.bld, 'is_install', None):
# disable regular binary installation
self.install_task.hasrun = Task.SKIP_ME
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macplist(self):
"""
Creates a :py:class:`waflib.Tools.c_osx.macplist` instance.
"""
if self.env.MACAPP or getattr(self, 'mac_app', False):
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'Info.plist'])
self.plisttask = plisttask = self.create_task('macplist', [], n1)
plisttask.context = {
'app_name': self.link_task.outputs[0].name,
'env': self.env
}
plist_ctx = getattr(self, 'plist_context', None)
if (plist_ctx):
plisttask.context.update(plist_ctx)
if getattr(self, 'mac_plist', False):
node = self.path.find_resource(self.mac_plist)
if node:
plisttask.inputs.append(node)
else:
plisttask.code = self.mac_plist
else:
plisttask.code = app_info
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name
self.add_install_files(install_to=inst_to, install_from=n1)
@feature('cshlib', 'cxxshlib')
@before_method('apply_link', 'propagate_uselib_vars')
def apply_bundle(self):
"""
To make a bundled shared library (a ``.bundle``), set the *mac_bundle* attribute::
def build(bld):
bld.shlib(source='a.c', target='foo', mac_bundle = True)
To force *all* executables to be transformed into bundles::
def build(bld):
bld.env.MACBUNDLE = True
bld.shlib(source='a.c', target='foo')
"""
if self.env.MACBUNDLE or getattr(self, 'mac_bundle', False):
self.env.LINKFLAGS_cshlib = self.env.LINKFLAGS_cxxshlib = [] # disable the '-dynamiclib' flag
self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN
use = self.use = self.to_list(getattr(self, 'use', []))
if not 'MACBUNDLE' in use:
use.append('MACBUNDLE')
app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources']
class macapp(Task.Task):
"""
Creates mac applications
"""
color = 'PINK'
def run(self):
self.outputs[0].parent.mkdir()
shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath())
class macplist(Task.Task):
"""
Creates plist files
"""
color = 'PINK'
ext_in = ['.bin']
def run(self):
if getattr(self, 'code', None):
txt = self.code
else:
txt = self.inputs[0].read()
context = getattr(self, 'context', {})
txt = txt.format(**context)
self.outputs[0].write(txt)
| [] | [] | [
"MACOSX_DEPLOYMENT_TARGET"
] | [] | ["MACOSX_DEPLOYMENT_TARGET"] | python | 1 | 0 | |
src/coreclr/scripts/superpmi_aspnet.py | #!/usr/bin/env python3
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
#
#
# Title: superpmi_aspnet.py
#
# Notes:
#
# Script to perform the superpmi collection for Techempower Benchmarks
# via "crank" (https://github.com/dotnet/crank)
import argparse
import logging
import shutil
import sys
import zipfile
from os import path
from coreclr_arguments import *
from superpmi import TempDir, determine_mcs_tool_path, determine_superpmi_tool_path, is_nonzero_length_file
from jitutil import run_command
# Start of parser object creation.
is_windows = platform.system() == "Windows"
parser = argparse.ArgumentParser(description="description")
parser.add_argument("-source_directory", help="path to source directory")
parser.add_argument("-output_mch_path", help="Absolute path to the mch file to produce")
parser.add_argument("-arch", help="Architecture")
def setup_args(args):
""" Setup the args for SuperPMI to use.
Args:
args (ArgParse): args parsed by arg parser
Returns:
args (CoreclrArguments)
"""
coreclr_args = CoreclrArguments(args, require_built_core_root=True, require_built_product_dir=True,
require_built_test_dir=False, default_build_type="Checked")
coreclr_args.verify(args,
"source_directory",
lambda source_directory: os.path.isdir(source_directory),
"source_directory doesn't exist")
coreclr_args.verify(args,
"output_mch_path",
lambda output_mch_path: not os.path.isfile(output_mch_path),
"output_mch_path already exist")
coreclr_args.verify(args,
"arch",
lambda arch: arch.lower() in ["x64", "arm64"],
"Unable to set arch")
return coreclr_args
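# For example, determine_native_name(coreclr_args, "clrjit", "Linux") returns
# "libclrjit.so", while on "windows" it returns "clrjit.dll".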
def determine_native_name(coreclr_args, base_lib_name, target_os):
""" Determine the name of the native lib based on the OS.
Args:
coreclr_args (CoreclrArguments): parsed args
base_lib_name (str) : root name of the lib
target_os (str) : os to run tests on
Return:
(str) : name of the native lib for this OS
"""
if target_os == "OSX":
return "lib" + base_lib_name + ".dylib"
elif target_os == "Linux":
return "lib" + base_lib_name + ".so"
elif target_os == "windows":
return base_lib_name + ".dll"
else:
raise RuntimeError("Unknown OS.")
# Where there is an option, we generally target the less performant machines
# See https://github.com/aspnet/Benchmarks/tree/master/scenarios
#
def determine_benchmark_machine(coreclr_args):
""" Determine the name of the benchmark machine to use
Args:
coreclr_args (CoreclrArguments): parsed args
Return:
        (str) : name of the benchmark machine
"""
if coreclr_args.arch == "x64":
if coreclr_args.host_os == "windows":
# return "aspnet-perf-win"
return "aspnet-citrine-win"
elif coreclr_args.host_os == "Linux":
return "aspnet-perf-lin"
else:
raise RuntimeError("Invalid OS for x64.")
elif coreclr_args.arch == "arm64":
if coreclr_args.host_os == "Linux":
return "aspnet-citrine-arm"
else:
raise RuntimeError("Invalid OS for arm64.")
else:
raise RuntimeError("Invalid arch.")
def build_and_run(coreclr_args):
"""Run perf scenarios under crank and collect data with SPMI"
Args:
coreclr_args (CoreClrArguments): Arguments use to drive
output_mch_name (string): Name of output mch file name
"""
source_directory = coreclr_args.source_directory
target_arch = coreclr_args.arch
target_os = coreclr_args.host_os
checked_root = path.join(source_directory, "artifacts", "bin", "coreclr", target_os + "." + coreclr_args.arch + ".Checked")
release_root = path.join(source_directory, "artifacts", "bin", "coreclr", target_os + "." + coreclr_args.arch + ".Release")
# We'll use repo script to install dotnet
dotnet_install_script_name = "dotnet-install.cmd" if is_windows else "dotnet-install.sh"
dotnet_install_script_path = path.join(source_directory, "eng", "common", dotnet_install_script_name)
with TempDir(skip_cleanup=True) as temp_location:
print ("Executing in " + temp_location)
# install dotnet 6.0
run_command([dotnet_install_script_path, "-Version", "6.0.4"], temp_location, _exit_on_fail=True)
os.environ['DOTNET_MULTILEVEL_LOOKUP'] = '0'
os.environ['DOTNET_SKIP_FIRST_TIME_EXPERIENCE'] = '1'
dotnet_path = path.join(source_directory, ".dotnet")
dotnet_exe = path.join(dotnet_path, "dotnet.exe") if is_windows else path.join(dotnet_path, "dotnet")
run_command([dotnet_exe, "--info"], temp_location, _exit_on_fail=True)
os.environ['DOTNET_ROOT'] = dotnet_path
## install crank as local tool
run_command(
[dotnet_exe, "tool", "install", "Microsoft.Crank.Controller", "--version", "0.2.0-*", "--tool-path", temp_location], _exit_on_fail=True)
## ideally just do sparse clone, but this doesn't work locally
## git clone --filter=blob:none --no-checkout https://github.com/aspnet/benchmarks
## cd benchmarks
## git sparse-checkout init --cone
## git sparse-checkout set scenarios
## could probably just pass a URL and avoid this
run_command(
["git", "clone", "--quiet", "--depth", "1", "https://github.com/aspnet/benchmarks"], temp_location, _exit_on_fail=True)
crank_app = path.join(temp_location, "crank")
mcs_path = determine_mcs_tool_path(coreclr_args)
superpmi_path = determine_superpmi_tool_path(coreclr_args)
# todo: add grpc/signalr, perhaps
configname_scenario_list = [("platform", "plaintext"),
("json", "json"),
("plaintext", "mvc"),
("database", "fortunes_dapper"),
("database", "fortunes_ef_mvc_https"),
("proxy", "proxy-yarp"),
("staticfiles", "static")]
# configname_scenario_list = [("platform", "plaintext")]
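        # Each (config, scenario) pair above selects "<config>.benchmarks.yml" under
        # benchmarks/scenarios and the crank --scenario to run from it.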
        # note: the trailing commas are needed to form one-element tuples
runtime_options_list = [("Dummy=0",), ("TieredCompilation=0", ), ("TieredPGO=1", "TC_QuickJitForLoops=1"), ("TieredPGO=1", "TC_QuickJitForLoops=1", "ReadyToRun=0"),
("TC_QuickJitForLoops=1", "ReadyToRun=0", "TC_OnStackReplacement=1", "OSR_HitLimit=0", "TC_OnStackReplacement_InitialCounter=0"),
("TieredPGO=1", "TC_QuickJitForLoops=1", "ReadyToRun=0", "TC_OnStackReplacement=1", "OSR_HitLimit=0", "TC_OnStackReplacement_InitialCounter=100")]
# runtime_options_list = [("TieredCompilation=0", )]
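        # Each tuple above is one collection configuration; every entry is passed to
        # the benchmark application as a COMPlus_* environment variable via crank.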
mch_file = path.join(coreclr_args.output_mch_path, "aspnet.run." + target_os + "." + target_arch + ".checked.mch")
benchmark_machine = determine_benchmark_machine(coreclr_args)
jitname = determine_native_name(coreclr_args, "clrjit", target_os)
coreclrname = determine_native_name(coreclr_args, "coreclr", target_os)
spminame = determine_native_name(coreclr_args, "superpmi-shim-collector", target_os)
corelibname = "System.Private.CoreLib.dll"
jitpath = path.join(".", jitname)
jitlib = path.join(checked_root, jitname)
coreclr = path.join(release_root, coreclrname)
corelib = path.join(release_root, corelibname)
spmilib = path.join(checked_root, spminame)
for (configName, scenario) in configname_scenario_list:
configYml = configName + ".benchmarks.yml"
configFile = path.join(temp_location, "benchmarks", "scenarios", configYml)
crank_arguments = ["--config", configFile,
"--profile", benchmark_machine,
"--scenario", scenario,
"--application.framework", "net7.0",
"--application.channel", "edge",
"--application.sdkVersion", "latest",
"--application.environmentVariables", "COMPlus_JitName=" + spminame,
"--application.environmentVariables", "SuperPMIShimLogPath=.",
"--application.environmentVariables", "SuperPMIShimPath=" + jitpath,
"--application.environmentVariables", "COMPlus_EnableExtraSuperPmiQueries=1",
"--application.options.downloadFiles", "*.mc",
"--application.options.displayOutput", "true",
# "--application.options.dumpType", "full",
# "--application.options.fetch", "true",
"--application.options.outputFiles", spmilib,
"--application.options.outputFiles", jitlib,
"--application.options.outputFiles", coreclr,
"--application.options.outputFiles", corelib]
for runtime_options in runtime_options_list:
runtime_arguments = []
for runtime_option in runtime_options:
runtime_arguments.append("--application.environmentVariables")
runtime_arguments.append("COMPlus_" + runtime_option)
print("")
print("================================")
print("Config: " + configName + " scenario: " + scenario + " options: " + " ".join(runtime_options))
print("================================")
print("")
description = ["--description", configName + "-" + scenario + "-" + "-".join(runtime_options)]
subprocess.run([crank_app] + crank_arguments + description + runtime_arguments, cwd=temp_location)
# merge
command = [mcs_path, "-merge", "temp.mch", "*.mc", "-dedup", "-thin"]
run_command(command, temp_location)
# clean
command = [superpmi_path, "-v", "ewmi", "-f", "fail.mcl", jitlib, "temp.mch"]
run_command(command, temp_location)
# strip
if is_nonzero_length_file("fail.mcl"):
print("Replay had failures, cleaning...");
fail_file = path.join(coreclr_args.output_mch_path, "fail.mcl");
command = [mcs_path, "-strip", "fail.mcl", "temp.mch", mch_file]
run_command(command, temp_location)
else:
print("Replay was clean...");
shutil.copy2("temp.mch", mch_file)
# index
command = [mcs_path, "-toc", mch_file]
run_command(command, temp_location)
# overall summary
print("Merged summary for " + mch_file)
command = [mcs_path, "-jitflags", mch_file]
run_command(command, temp_location)
def main(main_args):
""" Main entry point
Args:
main_args ([type]): Arguments to the script
"""
print (sys.version)
coreclr_args = setup_args(main_args)
build_and_run(coreclr_args)
if __name__ == "__main__":
args = parser.parse_args()
sys.exit(main(args))
| [] | [] | [
"DOTNET_MULTILEVEL_LOOKUP",
"DOTNET_ROOT",
"DOTNET_SKIP_FIRST_TIME_EXPERIENCE"
] | [] | ["DOTNET_MULTILEVEL_LOOKUP", "DOTNET_ROOT", "DOTNET_SKIP_FIRST_TIME_EXPERIENCE"] | python | 3 | 0 | |
pkg/installation/install.go | // Copyright 2019 The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package installation
import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/golang/glog"
"github.com/pkg/errors"
"sigs.k8s.io/krew/pkg/constants"
"sigs.k8s.io/krew/pkg/download"
"sigs.k8s.io/krew/pkg/environment"
"sigs.k8s.io/krew/pkg/index"
"sigs.k8s.io/krew/pkg/installation/receipt"
"sigs.k8s.io/krew/pkg/pathutil"
)
// InstallOpts specifies options for plugin installation operation.
type InstallOpts struct {
ArchiveFileOverride string
}
type installOperation struct {
pluginName string
platform index.Platform
downloadStagingDir string
installDir string
binDir string
}
// Plugin lifecycle errors
var (
ErrIsAlreadyInstalled = errors.New("can't install, the newest version is already installed")
ErrIsNotInstalled = errors.New("plugin is not installed")
ErrIsAlreadyUpgraded = errors.New("can't upgrade, the newest version is already installed")
)
// Install will download and install a plugin. The operation tries
// to not get the plugin dir in a bad state if it fails during the process.
func Install(p environment.Paths, plugin index.Plugin, opts InstallOpts) error {
glog.V(2).Infof("Looking for installed versions")
_, err := receipt.Load(p.PluginInstallReceiptPath(plugin.Name))
if err == nil {
return ErrIsAlreadyInstalled
} else if !os.IsNotExist(err) {
return errors.Wrap(err, "failed to look up plugin receipt")
}
// Find available installation candidate
candidate, ok, err := GetMatchingPlatform(plugin.Spec.Platforms)
if err != nil {
return errors.Wrap(err, "failed trying to find a matching platform in plugin spec")
}
if !ok {
return errors.Errorf("plugin %q does not offer installation for this platform", plugin.Name)
}
// The actual install should be the last action so that a failure during receipt
// saving does not result in an installed plugin without receipt.
glog.V(3).Infof("Install plugin %s at version=%s", plugin.Name, plugin.Spec.Version)
if err := install(installOperation{
pluginName: plugin.Name,
platform: candidate,
downloadStagingDir: filepath.Join(p.DownloadPath(), plugin.Name),
binDir: p.BinPath(),
installDir: p.PluginVersionInstallPath(plugin.Name, plugin.Spec.Version),
}, opts); err != nil {
return errors.Wrap(err, "install failed")
}
glog.V(3).Infof("Storing install receipt for plugin %s", plugin.Name)
err = receipt.Store(plugin, p.PluginInstallReceiptPath(plugin.Name))
return errors.Wrap(err, "installation receipt could not be stored, uninstall may fail")
}
func install(op installOperation, opts InstallOpts) error {
// Download and extract
glog.V(3).Infof("Creating download staging directory %q", op.downloadStagingDir)
if err := os.MkdirAll(op.downloadStagingDir, 0755); err != nil {
return errors.Wrapf(err, "could not create download path %q", op.downloadStagingDir)
}
defer func() {
glog.V(3).Infof("Deleting the download staging directory %s", op.downloadStagingDir)
if err := os.RemoveAll(op.downloadStagingDir); err != nil {
glog.Warningf("failed to clean up download staging directory: %s", err)
}
}()
if err := downloadAndExtract(op.downloadStagingDir, op.platform.URI, op.platform.Sha256, opts.ArchiveFileOverride); err != nil {
return errors.Wrap(err, "failed to download and extract")
}
applyDefaults(&op.platform)
if err := moveToInstallDir(op.downloadStagingDir, op.installDir, op.platform.Files); err != nil {
return errors.Wrap(err, "failed while moving files to the installation directory")
}
subPathAbs, err := filepath.Abs(op.installDir)
if err != nil {
return errors.Wrapf(err, "failed to get the absolute fullPath of %q", op.installDir)
}
fullPath := filepath.Join(op.installDir, filepath.FromSlash(op.platform.Bin))
pathAbs, err := filepath.Abs(fullPath)
if err != nil {
return errors.Wrapf(err, "failed to get the absolute fullPath of %q", fullPath)
}
if _, ok := pathutil.IsSubPath(subPathAbs, pathAbs); !ok {
return errors.Wrapf(err, "the fullPath %q does not extend the sub-fullPath %q", fullPath, op.installDir)
}
err = createOrUpdateLink(op.binDir, fullPath, op.pluginName)
return errors.Wrap(err, "failed to link installed plugin")
}
func applyDefaults(platform *index.Platform) {
if platform.Files == nil {
platform.Files = []index.FileOperation{{From: "*", To: "."}}
glog.V(4).Infof("file operation not specified, assuming %v", platform.Files)
}
}
// downloadAndExtract downloads the specified archive uri (or uses the provided overrideFile, if a non-empty value)
// while validating its checksum with the provided sha256sum, and extracts its contents to extractDir, which must
// already be created.
func downloadAndExtract(extractDir, uri, sha256sum, overrideFile string) error {
var fetcher download.Fetcher = download.HTTPFetcher{}
if overrideFile != "" {
fetcher = download.NewFileFetcher(overrideFile)
}
verifier := download.NewSha256Verifier(sha256sum)
err := download.NewDownloader(verifier, fetcher).Get(uri, extractDir)
return errors.Wrap(err, "failed to download and verify file")
}
// Uninstall will uninstall a plugin.
func Uninstall(p environment.Paths, name string) error {
if name == constants.KrewPluginName {
glog.Errorf("Removing krew through krew is not supported.")
if !IsWindows() { // assume POSIX-like
glog.Errorf("If you’d like to uninstall krew altogether, run:\n\trm -rf -- %q", p.BasePath())
}
return errors.New("self-uninstall not allowed")
}
glog.V(3).Infof("Finding installed version to delete")
if _, err := receipt.Load(p.PluginInstallReceiptPath(name)); err != nil {
if os.IsNotExist(err) {
return ErrIsNotInstalled
}
return errors.Wrapf(err, "failed to look up install receipt for plugin %q", name)
}
glog.V(1).Infof("Deleting plugin %s", name)
symlinkPath := filepath.Join(p.BinPath(), pluginNameToBin(name, IsWindows()))
glog.V(3).Infof("Unlink %q", symlinkPath)
if err := removeLink(symlinkPath); err != nil {
return errors.Wrap(err, "could not uninstall symlink of plugin")
}
pluginInstallPath := p.PluginInstallPath(name)
glog.V(3).Infof("Deleting path %q", pluginInstallPath)
if err := os.RemoveAll(pluginInstallPath); err != nil {
return errors.Wrapf(err, "could not remove plugin directory %q", pluginInstallPath)
}
pluginReceiptPath := p.PluginInstallReceiptPath(name)
glog.V(3).Infof("Deleting plugin receipt %q", pluginReceiptPath)
err := os.Remove(pluginReceiptPath)
return errors.Wrapf(err, "could not remove plugin receipt %q", pluginReceiptPath)
}
func createOrUpdateLink(binDir string, binary string, plugin string) error {
dst := filepath.Join(binDir, pluginNameToBin(plugin, IsWindows()))
if err := removeLink(dst); err != nil {
return errors.Wrap(err, "failed to remove old symlink")
}
if _, err := os.Stat(binary); os.IsNotExist(err) {
return errors.Wrapf(err, "can't create symbolic link, source binary (%q) cannot be found in extracted archive", binary)
}
// Create new
glog.V(2).Infof("Creating symlink to %q at %q", binary, dst)
if err := os.Symlink(binary, dst); err != nil {
return errors.Wrapf(err, "failed to create a symlink form %q to %q", binDir, dst)
}
glog.V(2).Infof("Created symlink at %q", dst)
return nil
}
// removeLink removes a symlink reference if exists.
func removeLink(path string) error {
fi, err := os.Lstat(path)
if os.IsNotExist(err) {
glog.V(3).Infof("No file found at %q", path)
return nil
} else if err != nil {
return errors.Wrapf(err, "failed to read the symlink in %q", path)
}
if fi.Mode()&os.ModeSymlink == 0 {
return errors.Errorf("file %q is not a symlink (mode=%s)", path, fi.Mode())
}
if err := os.Remove(path); err != nil {
return errors.Wrapf(err, "failed to remove the symlink in %q", path)
}
glog.V(3).Infof("Removed symlink from %q", path)
return nil
}
// IsWindows checks KREW_OS, falling back to runtime.GOOS, to determine whether the current execution mode is Windows.
func IsWindows() bool {
goos := runtime.GOOS
if env := os.Getenv("KREW_OS"); env != "" {
goos = env
}
return goos == "windows"
}
// pluginNameToBin creates the name of the symlink file for the plugin name.
// It converts dashes to underscores.
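// For example, "view-secret" becomes "kubectl-view_secret" (with ".exe" appended on Windows).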
func pluginNameToBin(name string, isWindows bool) string {
name = strings.Replace(name, "-", "_", -1)
name = "kubectl-" + name
if isWindows {
name += ".exe"
}
return name
}
// CleanupStaleKrewInstallations removes the versions that aren't the current version.
func CleanupStaleKrewInstallations(dir string, currentVersion string) error {
ls, err := ioutil.ReadDir(dir)
if err != nil {
return errors.Wrap(err, "failed to read krew store directory")
}
glog.V(2).Infof("Found %d entries in krew store directory", len(ls))
for _, d := range ls {
glog.V(2).Infof("Found a krew installation: %s (%s)", d.Name(), d.Mode())
if d.IsDir() && d.Name() != currentVersion {
glog.V(1).Infof("Deleting stale krew install directory: %s", d.Name())
p := filepath.Join(dir, d.Name())
if err := os.RemoveAll(p); err != nil {
return errors.Wrapf(err, "failed to remove stale krew version at path '%s'", p)
}
glog.V(1).Infof("Stale installation directory removed")
}
}
return nil
}
| [
"\"KREW_OS\""
] | [] | [
"KREW_OS"
] | [] | ["KREW_OS"] | go | 1 | 0 | |
examples/k8petstore/web-server/PetStoreBook.go | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"fmt"
"net/http"
"os"
"strings"
"github.com/codegangsta/negroni"
"github.com/gorilla/mux"
"github.com/xyproto/simpleredis"
)
//return the path to static assets (i.e. index.html)
func pathToStaticContents() string {
var static_content = os.Getenv("STATIC_FILES")
// Take a wild guess. This will work in dev environment.
if static_content == "" {
println("*********** WARNING: DIDNT FIND ENV VAR 'STATIC_FILES', guessing your running in dev.")
static_content = "../../static/"
} else {
println("=========== Read ENV 'STATIC_FILES', path to assets : " + static_content)
}
//Die if no the static files are missing.
_, err := os.Stat(static_content)
if err != nil {
println("*********** os.Stat failed on " + static_content + " This means no static files are available. Dying...")
os.Exit(2)
}
return static_content
}
func main() {
var connection = os.Getenv("REDISMASTER_SERVICE_HOST") + ":" + os.Getenv("REDISMASTER_SERVICE_PORT")
if connection == ":" {
print("WARNING ::: If in kube, this is a failure: Missing env variable REDISMASTER_SERVICE_HOST")
print("WARNING ::: Attempting to connect redis localhost.")
connection = "127.0.0.1:6379"
} else {
print("Found redis master host " + os.Getenv("REDISMASTER_SERVICE_PORT"))
connection = os.Getenv("REDISMASTER_SERVICE_HOST") + ":" + os.Getenv("REDISMASTER_SERVICE_PORT")
}
println("Now connecting to : " + connection)
/**
	 * Create a connection pool. The pool pointer will otherwise
	 * not be of any use. See https://gist.github.com/jayunit100/1d00e6d343056401ef00
*/
pool = simpleredis.NewConnectionPoolHost(connection)
println("Connection pool established : " + connection)
defer pool.Close()
r := mux.NewRouter()
println("Router created ")
/**
* Define a REST path.
* - The parameters (key) can be accessed via mux.Vars.
* - The Methods (GET) will be bound to a handler function.
*/
r.Path("/info").Methods("GET").HandlerFunc(InfoHandler)
r.Path("/lrange/{key}").Methods("GET").HandlerFunc(ListRangeHandler)
r.Path("/rpush/{key}/{value}").Methods("GET").HandlerFunc(ListPushHandler)
r.Path("/llen").Methods("GET").HandlerFunc(LLENHandler)
//for dev environment, the site is one level up...
r.PathPrefix("/").Handler(http.FileServer(http.Dir(pathToStaticContents())))
r.Path("/env").Methods("GET").HandlerFunc(EnvHandler)
list := simpleredis.NewList(pool, "k8petstore")
HandleError(nil, list.Add("jayunit100"))
HandleError(nil, list.Add("tstclaire"))
HandleError(nil, list.Add("rsquared"))
// Verify that this is 3 on startup.
infoL := HandleError(pool.Get(0).Do("LLEN", "k8petstore")).(int64)
fmt.Printf("\n=========== Starting DB has %d elements \n", infoL)
if infoL < 3 {
print("Not enough entries in DB. something is wrong w/ redis querying")
print(infoL)
panic("Failed ... ")
}
println("=========== Now launching negroni...this might take a second...")
n := negroni.Classic()
n.UseHandler(r)
n.Run(":3000")
println("Done ! Web app is now running.")
}
/**
* the Pool will be populated on startup,
* it will be an instance of a connection pool.
* Hence, we reference its address rather than copying.
*/
var pool *simpleredis.ConnectionPool
/**
* REST
* input: key
*
* Writes all members to JSON.
*/
func ListRangeHandler(rw http.ResponseWriter, req *http.Request) {
println("ListRangeHandler")
key := mux.Vars(req)["key"]
list := simpleredis.NewList(pool, key)
//members := HandleError(list.GetAll()).([]string)
members := HandleError(list.GetLastN(4)).([]string)
print(members)
membersJSON := HandleError(json.MarshalIndent(members, "", " ")).([]byte)
print("RETURN MEMBERS = " + string(membersJSON))
rw.Write(membersJSON)
}
func LLENHandler(rw http.ResponseWriter, req *http.Request) {
println("=========== LLEN HANDLER")
infoL := HandleError(pool.Get(0).Do("LLEN", "k8petstore")).(int64)
fmt.Printf("=========== LLEN is %d ", infoL)
lengthJSON := HandleError(json.MarshalIndent(infoL, "", " ")).([]byte)
fmt.Printf("================ LLEN json is %s", infoL)
print("RETURN LEN = " + string(lengthJSON))
rw.Write(lengthJSON)
}
func ListPushHandler(rw http.ResponseWriter, req *http.Request) {
println("ListPushHandler")
/**
* Expect a key and value as input.
*
*/
key := mux.Vars(req)["key"]
value := mux.Vars(req)["value"]
println("New list " + key + " " + value)
list := simpleredis.NewList(pool, key)
HandleError(nil, list.Add(value))
ListRangeHandler(rw, req)
}
func InfoHandler(rw http.ResponseWriter, req *http.Request) {
println("InfoHandler")
info := HandleError(pool.Get(0).Do("INFO")).([]byte)
rw.Write(info)
}
func EnvHandler(rw http.ResponseWriter, req *http.Request) {
println("EnvHandler")
environment := make(map[string]string)
for _, item := range os.Environ() {
splits := strings.Split(item, "=")
key := splits[0]
val := strings.Join(splits[1:], "=")
environment[key] = val
}
envJSON := HandleError(json.MarshalIndent(environment, "", " ")).([]byte)
rw.Write(envJSON)
}
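// HandleError logs the error, if any, and returns the result unchanged so that
// calls can stay inline; errors are deliberately not fatal here.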
func HandleError(result interface{}, err error) (r interface{}) {
if err != nil {
print("ERROR : " + err.Error())
//panic(err)
}
return result
}
| [
"\"STATIC_FILES\"",
"\"REDISMASTER_SERVICE_HOST\"",
"\"REDISMASTER_SERVICE_PORT\"",
"\"REDISMASTER_SERVICE_PORT\"",
"\"REDISMASTER_SERVICE_HOST\"",
"\"REDISMASTER_SERVICE_PORT\""
] | [] | [
"STATIC_FILES",
"REDISMASTER_SERVICE_HOST",
"REDISMASTER_SERVICE_PORT"
] | [] | ["STATIC_FILES", "REDISMASTER_SERVICE_HOST", "REDISMASTER_SERVICE_PORT"] | go | 3 | 0 | |
jh-Search/src/main/java/com/mycompany/myapp/Application.java | package com.mycompany.myapp;
import com.mycompany.myapp.config.Constants;
import com.mycompany.myapp.config.JHipsterProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.actuate.autoconfigure.MetricFilterAutoConfiguration;
import org.springframework.boot.actuate.autoconfigure.MetricRepositoryAutoConfiguration;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.liquibase.LiquibaseProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.core.env.Environment;
import org.springframework.core.env.SimpleCommandLinePropertySource;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Collection;
@ComponentScan
@EnableAutoConfiguration(exclude = { MetricFilterAutoConfiguration.class, MetricRepositoryAutoConfiguration.class })
@EnableConfigurationProperties({ JHipsterProperties.class, LiquibaseProperties.class })
public class Application {
private static final Logger log = LoggerFactory.getLogger(Application.class);
@Inject
private Environment env;
/**
* Initializes jhipster.
* <p/>
     * Spring profiles can be configured with the program argument --spring.profiles.active=your-active-profile
* <p/>
* <p>
* You can find more information on how profiles work with JHipster on <a href="http://jhipster.github.io/profiles.html">http://jhipster.github.io/profiles.html</a>.
* </p>
*/
@PostConstruct
public void initApplication() throws IOException {
if (env.getActiveProfiles().length == 0) {
log.warn("No Spring profile configured, running with default configuration");
} else {
log.info("Running with Spring profile(s) : {}", Arrays.toString(env.getActiveProfiles()));
Collection<String> activeProfiles = Arrays.asList(env.getActiveProfiles());
if (activeProfiles.contains(Constants.SPRING_PROFILE_DEVELOPMENT) && activeProfiles.contains(Constants.SPRING_PROFILE_PRODUCTION)) {
log.error("You have misconfigured your application! " +
"It should not run with both the 'dev' and 'prod' profiles at the same time.");
}
if (activeProfiles.contains(Constants.SPRING_PROFILE_PRODUCTION) && activeProfiles.contains(Constants.SPRING_PROFILE_FAST)) {
log.error("You have misconfigured your application! " +
"It should not run with both the 'prod' and 'fast' profiles at the same time.");
}
if (activeProfiles.contains(Constants.SPRING_PROFILE_DEVELOPMENT) && activeProfiles.contains(Constants.SPRING_PROFILE_CLOUD)) {
log.error("You have misconfigured your application! " +
"It should not run with both the 'dev' and 'cloud' profiles at the same time.");
}
}
}
/**
* Main method, used to run the application.
*/
public static void main(String[] args) throws UnknownHostException {
SpringApplication app = new SpringApplication(Application.class);
SimpleCommandLinePropertySource source = new SimpleCommandLinePropertySource(args);
addDefaultProfile(app, source);
Environment env = app.run(args).getEnvironment();
log.info("Access URLs:\n----------------------------------------------------------\n\t" +
"Local: \t\thttp://127.0.0.1:{}\n\t" +
"External: \thttp://{}:{}\n----------------------------------------------------------",
env.getProperty("server.port"),
InetAddress.getLocalHost().getHostAddress(),
env.getProperty("server.port"));
}
/**
* If no profile has been configured, set by default the "dev" profile.
*/
private static void addDefaultProfile(SpringApplication app, SimpleCommandLinePropertySource source) {
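        // Fall back to the "dev" profile only when neither the --spring.profiles.active
        // argument nor the SPRING_PROFILES_ACTIVE environment variable is set.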
if (!source.containsProperty("spring.profiles.active") &&
!System.getenv().containsKey("SPRING_PROFILES_ACTIVE")) {
app.setAdditionalProfiles(Constants.SPRING_PROFILE_DEVELOPMENT);
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
cli/cmd/root.go | package cmd
import (
"bytes"
"errors"
"fmt"
"net"
"os"
"regexp"
"strings"
"time"
"github.com/spf13/pflag"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/clientcmd"
"github.com/fatih/color"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
k8sResource "k8s.io/apimachinery/pkg/api/resource"
)
const (
defaultLinkerdNamespace = "linkerd"
defaultCNINamespace = "linkerd-cni"
defaultClusterDomain = "cluster.local"
defaultDockerRegistry = "gcr.io/linkerd-io"
jsonOutput = "json"
tableOutput = "table"
wideOutput = "wide"
maxRps = 100.0
)
var (
// special handling for Windows, on all other platforms these resolve to
// os.Stdout and os.Stderr, thanks to https://github.com/mattn/go-colorable
stdout = color.Output
stderr = color.Error
okStatus = color.New(color.FgGreen, color.Bold).SprintFunc()("\u221A") // √
warnStatus = color.New(color.FgYellow, color.Bold).SprintFunc()("\u203C") // ‼
failStatus = color.New(color.FgRed, color.Bold).SprintFunc()("\u00D7") // ×
controlPlaneNamespace string
cniNamespace string
apiAddr string // An empty value means "use the Kubernetes configuration"
kubeconfigPath string
kubeContext string
defaultNamespace string // Default namespace taken from current kubectl context
impersonate string
impersonateGroup []string
verbose bool
	// These regexes are not as strict as they could be, but are a quick and dirty
// sanity check against illegal characters.
alphaNumDash = regexp.MustCompile(`^[a-zA-Z0-9-]+$`)
alphaNumDashDot = regexp.MustCompile(`^[\.a-zA-Z0-9-]+$`)
alphaNumDashDotSlashColon = regexp.MustCompile(`^[\./a-zA-Z0-9-:]+$`)
alphaNumDashDotSlashColonUnderscore = regexp.MustCompile(`^[\./a-zA-Z0-9-:_]+$`)
// Full Rust log level syntax at
// https://docs.rs/env_logger/0.6.0/env_logger/#enabling-logging
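	// For example, "warn", "linkerd2_proxy=debug" and "info,linkerd2_proxy::http=debug" all match.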
r = strings.NewReplacer("\t", "", "\n", "")
validProxyLogLevel = regexp.MustCompile(r.Replace(`
^(
(
(trace|debug|warn|info|error)|
(\w|::)+|
((\w|::)+=(trace|debug|warn|info|error))
)(?:,|$)
)+$`))
)
// RootCmd represents the root Cobra command
var RootCmd = &cobra.Command{
Use: "linkerd",
Short: "linkerd manages the Linkerd service mesh",
Long: `linkerd manages the Linkerd service mesh.`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
// enable / disable logging
if verbose {
log.SetLevel(log.DebugLevel)
} else {
log.SetLevel(log.PanicLevel)
}
controlPlaneNamespaceFromEnv := os.Getenv("LINKERD_NAMESPACE")
if controlPlaneNamespace == defaultLinkerdNamespace && controlPlaneNamespaceFromEnv != "" {
controlPlaneNamespace = controlPlaneNamespaceFromEnv
}
if !alphaNumDash.MatchString(controlPlaneNamespace) {
return fmt.Errorf("%s is not a valid namespace", controlPlaneNamespace)
}
return nil
},
}
func init() {
defaultNamespace = getDefaultNamespace()
RootCmd.PersistentFlags().StringVarP(&controlPlaneNamespace, "linkerd-namespace", "L", defaultLinkerdNamespace, "Namespace in which Linkerd is installed [$LINKERD_NAMESPACE]")
RootCmd.PersistentFlags().StringVarP(&cniNamespace, "cni-namespace", "", defaultCNINamespace, "Namespace in which the Linkerd CNI plugin is installed")
RootCmd.PersistentFlags().StringVar(&kubeconfigPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests")
RootCmd.PersistentFlags().StringVar(&kubeContext, "context", "", "Name of the kubeconfig context to use")
RootCmd.PersistentFlags().StringVar(&impersonate, "as", "", "Username to impersonate for Kubernetes operations")
RootCmd.PersistentFlags().StringArrayVar(&impersonateGroup, "as-group", []string{}, "Group to impersonate for Kubernetes operations")
RootCmd.PersistentFlags().StringVar(&apiAddr, "api-addr", "", "Override kubeconfig and communicate directly with the control plane at host:port (mostly for testing)")
RootCmd.PersistentFlags().BoolVar(&verbose, "verbose", false, "Turn on debug logging")
RootCmd.AddCommand(newCmdAlpha())
RootCmd.AddCommand(newCmdCheck())
RootCmd.AddCommand(newCmdCompletion())
RootCmd.AddCommand(newCmdDashboard())
RootCmd.AddCommand(newCmdDiagnostics())
RootCmd.AddCommand(newCmdDoc())
RootCmd.AddCommand(newCmdEdges())
RootCmd.AddCommand(newCmdEndpoints())
RootCmd.AddCommand(newCmdGet())
RootCmd.AddCommand(newCmdInject())
RootCmd.AddCommand(newCmdInstall())
RootCmd.AddCommand(newCmdInstallCNIPlugin())
RootCmd.AddCommand(newCmdInstallSP())
RootCmd.AddCommand(newCmdLogs())
RootCmd.AddCommand(newCmdMetrics())
RootCmd.AddCommand(newCmdProfile())
RootCmd.AddCommand(newCmdRoutes())
RootCmd.AddCommand(newCmdStat())
RootCmd.AddCommand(newCmdTap())
RootCmd.AddCommand(newCmdTop())
RootCmd.AddCommand(newCmdUninject())
RootCmd.AddCommand(newCmdUpgrade())
RootCmd.AddCommand(newCmdVersion())
RootCmd.AddCommand(newCmdMulticluster())
RootCmd.AddCommand(newCmdUninstall())
}
type statOptionsBase struct {
namespace string
timeWindow string
outputFormat string
}
func newStatOptionsBase() *statOptionsBase {
return &statOptionsBase{
namespace: defaultNamespace,
timeWindow: "1m",
outputFormat: tableOutput,
}
}
func (o *statOptionsBase) validateOutputFormat() error {
switch o.outputFormat {
case tableOutput, jsonOutput, wideOutput:
return nil
default:
return fmt.Errorf("--output currently only supports %s, %s and %s", tableOutput, jsonOutput, wideOutput)
}
}
func renderStats(buffer bytes.Buffer, options *statOptionsBase) string {
var out string
switch options.outputFormat {
case jsonOutput:
out = buffer.String()
default:
// strip left padding on the first column
b := buffer.Bytes()
if len(b) > padding {
out = string(b[padding:])
}
out = strings.Replace(out, "\n"+strings.Repeat(" ", padding), "\n", -1)
}
return out
}
// getRequestRate calculates request rate from Public API BasicStats.
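// For example, 30 successes and 10 failures over a "1m" window yield (30+10)/60 ~0.67 RPS.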
func getRequestRate(success, failure uint64, timeWindow string) float64 {
windowLength, err := time.ParseDuration(timeWindow)
if err != nil {
log.Error(err.Error())
return 0.0
}
return float64(success+failure) / windowLength.Seconds()
}
// getSuccessRate calculates success rate from Public API BasicStats.
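// For example, 45 successes and 5 failures yield a success rate of 0.9.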
func getSuccessRate(success, failure uint64) float64 {
if success+failure == 0 {
return 0.0
}
return float64(success) / float64(success+failure)
}
// getDefaultNamespace fetches the default namespace
// used in the current KubeConfig context
func getDefaultNamespace() string {
rules := clientcmd.NewDefaultClientConfigLoadingRules()
if kubeconfigPath != "" {
rules.ExplicitPath = kubeconfigPath
}
overrides := &clientcmd.ConfigOverrides{CurrentContext: kubeContext}
kubeCfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
ns, _, err := kubeCfg.Namespace()
if err != nil {
log.Errorf("could not set namespace from kubectl context: ensure a valid KUBECONFIG path has been set")
return corev1.NamespaceDefault
}
return ns
}
// proxyConfigOptions holds values for command line flags that apply to both the
// install and inject commands. All fields in this struct should have
// corresponding flags added in the addProxyConfigFlags func later in this file.
type proxyConfigOptions struct {
proxyVersion string
proxyImage string
initImage string
initImageVersion string
debugImage string
debugImageVersion string
dockerRegistry string
imagePullPolicy string
destinationGetNetworks []string
ignoreInboundPorts []string
ignoreOutboundPorts []string
proxyUID int64
proxyLogLevel string
proxyLogFormat string
proxyInboundPort uint
proxyOutboundPort uint
proxyControlPort uint
proxyAdminPort uint
proxyCPURequest string
proxyMemoryRequest string
proxyCPULimit string
proxyMemoryLimit string
enableExternalProfiles bool
traceCollector string
traceCollectorSvcAccount string
waitBeforeExitSeconds uint64
ignoreCluster bool // not validated by validate()
disableIdentity bool
requireIdentityOnInboundPorts []string
disableTap bool
}
func (options *proxyConfigOptions) validate() error {
for _, network := range options.destinationGetNetworks {
if _, _, err := net.ParseCIDR(network); err != nil {
return fmt.Errorf("cannot parse destination get networks: %s", err)
}
}
if options.disableIdentity && len(options.requireIdentityOnInboundPorts) > 0 {
return errors.New("Identity must be enabled when --require-identity-on-inbound-ports is specified")
}
if options.proxyVersion != "" && !alphaNumDashDot.MatchString(options.proxyVersion) {
return fmt.Errorf("%s is not a valid version", options.proxyVersion)
}
if options.initImageVersion != "" && !alphaNumDashDot.MatchString(options.initImageVersion) {
return fmt.Errorf("%s is not a valid version", options.initImageVersion)
}
if options.dockerRegistry != "" && !alphaNumDashDotSlashColon.MatchString(options.dockerRegistry) {
return fmt.Errorf("%s is not a valid Docker registry. The url can contain only letters, numbers, dash, dot, slash and colon", options.dockerRegistry)
}
if options.imagePullPolicy != "" && options.imagePullPolicy != "Always" && options.imagePullPolicy != "IfNotPresent" && options.imagePullPolicy != "Never" {
return fmt.Errorf("--image-pull-policy must be one of: Always, IfNotPresent, Never")
}
if options.proxyCPURequest != "" {
if _, err := k8sResource.ParseQuantity(options.proxyCPURequest); err != nil {
return fmt.Errorf("Invalid cpu request '%s' for --proxy-cpu-request flag", options.proxyCPURequest)
}
}
if options.proxyMemoryRequest != "" {
if _, err := k8sResource.ParseQuantity(options.proxyMemoryRequest); err != nil {
return fmt.Errorf("Invalid memory request '%s' for --proxy-memory-request flag", options.proxyMemoryRequest)
}
}
if options.proxyCPULimit != "" {
cpuLimit, err := k8sResource.ParseQuantity(options.proxyCPULimit)
if err != nil {
return fmt.Errorf("Invalid cpu limit '%s' for --proxy-cpu-limit flag", options.proxyCPULimit)
}
if options.proxyCPURequest != "" {
// Not checking for error because option proxyCPURequest was already validated
if cpuRequest, _ := k8sResource.ParseQuantity(options.proxyCPURequest); cpuRequest.MilliValue() > cpuLimit.MilliValue() {
return fmt.Errorf("The cpu limit '%s' cannot be lower than the cpu request '%s'", options.proxyCPULimit, options.proxyCPURequest)
}
}
}
if options.proxyMemoryLimit != "" {
memoryLimit, err := k8sResource.ParseQuantity(options.proxyMemoryLimit)
if err != nil {
return fmt.Errorf("Invalid memory limit '%s' for --proxy-memory-limit flag", options.proxyMemoryLimit)
}
if options.proxyMemoryRequest != "" {
// Not checking for error because option proxyMemoryRequest was already validated
if memoryRequest, _ := k8sResource.ParseQuantity(options.proxyMemoryRequest); memoryRequest.Value() > memoryLimit.Value() {
return fmt.Errorf("The memory limit '%s' cannot be lower than the memory request '%s'", options.proxyMemoryLimit, options.proxyMemoryRequest)
}
}
}
if options.proxyLogLevel != "" && !validProxyLogLevel.MatchString(options.proxyLogLevel) {
return fmt.Errorf("\"%s\" is not a valid proxy log level - for allowed syntax check https://docs.rs/env_logger/0.6.0/env_logger/#enabling-logging",
options.proxyLogLevel)
}
if err := validateRangeSlice(options.ignoreInboundPorts); err != nil {
return err
}
if err := validateRangeSlice(options.ignoreOutboundPorts); err != nil {
return err
}
return nil
}
// registryOverride replaces the registry of the provided image if the image is
// using the default registry and the provided registry is not the default.
func registryOverride(image, registry string) string {
return strings.Replace(image, defaultDockerRegistry, registry, 1)
}
func (options *proxyConfigOptions) flagSet(e pflag.ErrorHandling) *pflag.FlagSet {
flags := pflag.NewFlagSet("proxy", e)
flags.StringVarP(&options.proxyVersion, "proxy-version", "v", options.proxyVersion, "Tag to be used for the Linkerd proxy images")
flags.StringVar(&options.proxyImage, "proxy-image", options.proxyImage, "Linkerd proxy container image name")
flags.StringVar(&options.initImage, "init-image", options.initImage, "Linkerd init container image name")
flags.StringVar(&options.initImageVersion, "init-image-version", options.initImageVersion, "Linkerd init container image version")
flags.StringVar(&options.dockerRegistry, "registry", options.dockerRegistry, "Docker registry to pull images from")
flags.StringVar(&options.imagePullPolicy, "image-pull-policy", options.imagePullPolicy, "Docker image pull policy")
flags.UintVar(&options.proxyInboundPort, "inbound-port", options.proxyInboundPort, "Proxy port to use for inbound traffic")
flags.UintVar(&options.proxyOutboundPort, "outbound-port", options.proxyOutboundPort, "Proxy port to use for outbound traffic")
flags.StringSliceVar(&options.ignoreInboundPorts, "skip-inbound-ports", options.ignoreInboundPorts, "Ports and/or port ranges (inclusive) that should skip the proxy and send directly to the application")
flags.StringSliceVar(&options.ignoreOutboundPorts, "skip-outbound-ports", options.ignoreOutboundPorts, "Outbound ports and/or port ranges (inclusive) that should skip the proxy")
flags.Int64Var(&options.proxyUID, "proxy-uid", options.proxyUID, "Run the proxy under this user ID")
flags.StringVar(&options.proxyLogLevel, "proxy-log-level", options.proxyLogLevel, "Log level for the proxy")
flags.UintVar(&options.proxyControlPort, "control-port", options.proxyControlPort, "Proxy port to use for control")
flags.UintVar(&options.proxyAdminPort, "admin-port", options.proxyAdminPort, "Proxy port to serve metrics on")
flags.StringVar(&options.proxyCPURequest, "proxy-cpu-request", options.proxyCPURequest, "Amount of CPU units that the proxy sidecar requests")
flags.StringVar(&options.proxyMemoryRequest, "proxy-memory-request", options.proxyMemoryRequest, "Amount of Memory that the proxy sidecar requests")
flags.StringVar(&options.proxyCPULimit, "proxy-cpu-limit", options.proxyCPULimit, "Maximum amount of CPU units that the proxy sidecar can use")
flags.StringVar(&options.proxyMemoryLimit, "proxy-memory-limit", options.proxyMemoryLimit, "Maximum amount of Memory that the proxy sidecar can use")
flags.BoolVar(&options.enableExternalProfiles, "enable-external-profiles", options.enableExternalProfiles, "Enable service profiles for non-Kubernetes services")
// Deprecated flags
flags.StringVar(&options.proxyMemoryRequest, "proxy-memory", options.proxyMemoryRequest, "Amount of Memory that the proxy sidecar requests")
flags.StringVar(&options.proxyCPURequest, "proxy-cpu", options.proxyCPURequest, "Amount of CPU units that the proxy sidecar requests")
flags.MarkDeprecated("proxy-memory", "use --proxy-memory-request instead")
flags.MarkDeprecated("proxy-cpu", "use --proxy-cpu-request instead")
return flags
}
| [
"\"LINKERD_NAMESPACE\""
] | [] | [
"LINKERD_NAMESPACE"
] | [] | ["LINKERD_NAMESPACE"] | go | 1 | 0 | |
pkg/session/session_test.go | package session
import (
"fmt"
"os"
"testing"
"time"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/trussworks/sesh/pkg/dbstore"
"github.com/trussworks/sesh/pkg/domain"
"github.com/trussworks/sesh/pkg/mock"
)
func dbURLFromEnv() string {
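	// build the Postgres connection string from the DATABASE_* environment variables (no password in the URL)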
host := os.Getenv("DATABASE_HOST")
port := os.Getenv("DATABASE_PORT")
name := os.Getenv("DATABASE_NAME")
user := os.Getenv("DATABASE_USER")
// password := os.Getenv("DATABASE_PASSWORD")
sslmode := os.Getenv("DATABASE_SSL_MODE")
connStr := fmt.Sprintf("postgres://%s@%s:%s/%s?sslmode=%s", user, host, port, name, sslmode)
return connStr
}
func getTestStore(t *testing.T) domain.SessionStorageService {
t.Helper()
connStr := dbURLFromEnv()
connection, err := sqlx.Open("postgres", connStr)
if err != nil {
t.Fatal(err)
return nil
}
return dbstore.NewDBStore(connection)
}
func TestAuthExists(t *testing.T) {
timeout := 5 * time.Second
store := getTestStore(t)
defer store.Close()
sessionLog := domain.FmtLogger(true)
session := NewSessionService(timeout, store, sessionLog)
session.UserDidAuthenticate("foo")
}
func TestLogSessionCreatedDestroyed(t *testing.T) {
timeout := 5 * time.Second
store := getTestStore(t)
defer store.Close()
sessionLog := mock.NewLogRecorder(domain.FmtLogger(true))
session := NewSessionService(timeout, store, &sessionLog)
accountID := uuid.New().String()
sessionKey, authErr := session.UserDidAuthenticate(accountID)
if authErr != nil {
t.Fatal(authErr)
}
createMsg, logErr := sessionLog.GetOnlyMatchingMessage(domain.SessionCreated)
if logErr != nil {
t.Fatal(logErr)
}
if createMsg.Level != "INFO" {
t.Fatal("Wrong Log Level", createMsg.Level)
}
sessionHash, ok := createMsg.Fields["session_hash"]
if !ok {
t.Fatal("Didn't log the hashed session key")
}
if sessionHash == sessionKey {
t.Fatal("We logged the actual session key!")
}
delErr := session.UserDidLogout(sessionKey)
if delErr != nil {
t.Fatal(delErr)
}
delMsg, delLogErr := sessionLog.GetOnlyMatchingMessage(domain.SessionDestroyed)
if delLogErr != nil {
t.Fatal(delLogErr)
}
if delMsg.Level != "INFO" {
t.Fatal("Wrong Log Level", delMsg.Level)
}
delSessionHash, ok := delMsg.Fields["session_hash"]
if !ok {
t.Fatal("Didn't log the hashed session key")
}
if delSessionHash == sessionKey {
t.Fatal("We logged the actual session key!")
}
_, getErr := session.GetSessionIfValid(sessionKey)
if getErr != domain.ErrValidSessionNotFound {
t.Fatal(getErr)
}
nonExistantMsg, logNonExistantErr := sessionLog.GetOnlyMatchingMessage(domain.SessionDoesNotExist)
if logNonExistantErr != nil {
t.Fatal(logNonExistantErr)
}
nonExistantSessionHash, ok := nonExistantMsg.Fields["session_hash"]
if !ok {
t.Fatal("Didn't log the hashed session key")
}
if nonExistantSessionHash == sessionKey {
t.Fatal("We logged the actual session key!")
}
}
func TestLogSessionExpired(t *testing.T) {
timeout := -5 * time.Second
store := getTestStore(t)
defer store.Close()
sessionLog := mock.NewLogRecorder(domain.FmtLogger(true))
session := NewSessionService(timeout, store, &sessionLog)
accountID := uuid.New().String()
sessionKey, authErr := session.UserDidAuthenticate(accountID)
if authErr != nil {
t.Fatal(authErr)
}
logCreateMsg, logCreateErr := sessionLog.GetOnlyMatchingMessage(domain.SessionCreated)
if logCreateErr != nil {
t.Fatal(logCreateErr)
}
if logCreateMsg.Level != "INFO" {
t.Fatal("Wrong Log Level", logCreateMsg.Level)
}
_, getErr := session.GetSessionIfValid(sessionKey)
if getErr != domain.ErrSessionExpired {
t.Fatal("didn't get the right error back getting the expired session:", getErr)
}
expiredMsg, logExpiredErr := sessionLog.GetOnlyMatchingMessage(domain.SessionExpired)
if logExpiredErr != nil {
t.Fatal(logExpiredErr)
}
expiredSessionHash, ok := expiredMsg.Fields["session_hash"]
if !ok {
t.Fatal("Didn't log the hashed session key")
}
if expiredSessionHash == sessionKey {
t.Fatal("We logged the actual session key!")
}
// make sure you can re-auth after ending a session
_, newAuthErr := session.UserDidAuthenticate(accountID)
if newAuthErr != nil {
t.Fatal(newAuthErr)
}
}
// TestLogConcurrentSession tests that if you create a session, then create a new session over it, we log something.
func TestLogConcurrentSession(t *testing.T) {
timeout := 5 * time.Second
store := getTestStore(t)
defer store.Close()
sessionLog := mock.NewLogRecorder(domain.FmtLogger(true))
session := NewSessionService(timeout, store, &sessionLog)
accountID := uuid.New().String()
_, authErr := session.UserDidAuthenticate(accountID)
if authErr != nil {
t.Fatal(authErr)
}
_, logCreateErr := sessionLog.GetOnlyMatchingMessage(domain.SessionCreated)
if logCreateErr != nil {
t.Fatal(logCreateErr)
}
// Now login again:
_, authAgainErr := session.UserDidAuthenticate(accountID)
if authAgainErr != nil {
t.Fatal(authAgainErr)
}
createMessages := sessionLog.MatchingMessages(domain.SessionCreated)
if len(createMessages) != 2 {
t.Fatal("Should have 2 create messages now")
}
_, logConcurrentErr := sessionLog.GetOnlyMatchingMessage(domain.SessionConcurrentLogin)
if logConcurrentErr != nil {
t.Fatal(logConcurrentErr)
}
}
| [
"\"DATABASE_HOST\"",
"\"DATABASE_PORT\"",
"\"DATABASE_NAME\"",
"\"DATABASE_USER\"",
"\"DATABASE_PASSWORD\"",
"\"DATABASE_SSL_MODE\""
] | [] | [
"DATABASE_SSL_MODE",
"DATABASE_PASSWORD",
"DATABASE_NAME",
"DATABASE_HOST",
"DATABASE_USER",
"DATABASE_PORT"
] | [] | ["DATABASE_SSL_MODE", "DATABASE_PASSWORD", "DATABASE_NAME", "DATABASE_HOST", "DATABASE_USER", "DATABASE_PORT"] | go | 6 | 0 | |
modules/selfserve/files/selfserve/lib/selfserve/__init__.py | #!/usr/bin/python
#
# UI for selfserve ss2.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
import cgi
import cgitb
import logging
import logging.handlers
import os
import sys
import time
import re
import StringIO
import ezt
from ss2config import *
import selfserve.email
import selfserve.exceptions
import selfserve.ldap
import selfserve.tokens
class AppendHandler(logging.Handler):
def __init__(self, tdata, level=logging.DEBUG):
logging.Handler.__init__(self, level)
self._tdata = tdata
def emit(self, record):
self._tdata['_debug'] = self._tdata.pop('_debug', "") + self.format(record) + "\n"
def debug_dump():
for key, val in os.environ.iteritems():
print "%r: %r" % (key, val)
print `sys.argv`
def render_exception(e, tdata, lookup):
template = lookup.get_template('results.html')
msgs = {
selfserve.exceptions.BadUserOrPassword : "Wrong username '%s' or password" % str(e),
selfserve.exceptions.CorruptToken : 'Internal error: corrupt token %s' % str(e),
selfserve.exceptions.NoSuchToken : 'No such token %s' % str(e),
selfserve.exceptions.NonMatchingPasswords : "Passwords don't match, try again",
selfserve.exceptions.EncryptionError : "Encryption failed",
selfserve.exceptions.WeakPassword : "Password does not meet complexity policy",
selfserve.exceptions.InvalidInput : "Invalid input: %s" % str(e),
}
codes = {
selfserve.exceptions.BadUserOrPassword : 401,
selfserve.exceptions.CorruptToken : 500,
selfserve.exceptions.NoSuchToken : 404,
selfserve.exceptions.NonMatchingPasswords : 409,
selfserve.exceptions.EncryptionError : 500,
selfserve.exceptions.WeakPassword : 403,
selfserve.exceptions.InvalidInput : 405,
}
tdata['status'] = get_by_subclass(codes, e, 500)
tdata['message'] = get_by_subclass(msgs, e, "Unspecified exception; logged.")
if tdata['message'].endswith('; logged') or tdata['status'] >= 500:
logger.error("Unspecified exception: %s", `e`)
print template.render(**tdata)
def render_index(tdata, lookup, form, pathinfo):
template = lookup.get_template('index.html')
tdata['availid'] = form.getvalue('availid', '') # /details/$availid redirector
tdata['changepw_is_checked'] = (form.getvalue('changepw') and 'checked' or '')
print template.render(**tdata)
def render_reset(tdata, lookup, form, pathinfo):
"The 'Request a password reset email' screen."
if 'submit_sendmail' in form:
availid = form.getvalue('availid')
if availid is None:
template = lookup.get_template('results.html')
tdata['message'] = "No username given"
tdata['status'] = 500
else:
template = lookup.get_template('results.html')
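            # remote24 keeps only the first three octets (the /24 prefix) of the client address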
remote24 = os.environ['REMOTE_ADDR']
m = re.search("(?:\d+\.){3}", remote24)
remote24 = m.group(0);
msgid = selfserve.email.send_email(availid, lookup.PATH, remote24,
tdata['base_url'])
tdata['message'] = "Email sent, Message-ID=<%s>." % msgid
else:
template = lookup.get_template('sendemail.html')
print template.render(**tdata)
def get_by_subclass(dict_, obj, default_value):
for k, v in dict_.iteritems():
if isinstance(obj, k):
return v
else:
return default_value
def render_token(tdata, lookup, form, pathinfo):
"The 'Here is the password reset token I got by email' screen."
token = pathinfo.split('/', 3)[-1]
# Validate the token
# ### layering violation?
remote24 = os.environ['REMOTE_ADDR']
m = re.search("(?:\d+\.){3}", remote24)
remote24 = m.group(0);
hextoken, availid, expiry = selfserve.tokens.has_token(PW_RESET_MAGIC, token, remote24)
tdata['availid'] = availid
tdata['hextoken'] = hextoken
if os.environ.get('REQUEST_METHOD') == 'POST':
assert form['hextoken'].value == hextoken
# SECURITY: The ONLY criterion for to accept this new pw is that
# the POST was made to the correct token URI.
logger.info('finishing password reset for valid token=%s', selfserve.tokens.censor_token(hextoken))
remote24 = os.environ['REMOTE_ADDR']
m = re.search("(?:\d+\.){3}", remote24)
remote24 = m.group(0);
selfserve.ldap.do_pw_reset(availid, form.getvalue('new_pw1'), form.getvalue('new_pw2'), hextoken, remote24)
template = lookup.get_template('results.html')
tdata['message'] = 'Password change successful'
print template.render(**tdata)
else:
logger.info('starting password reset for valid token=%s', selfserve.tokens.censor_token(hextoken))
template = lookup.get_template('resetpassword.html')
print template.render(**tdata)
def render_login_redirector(tdata, lookup, form, pathinfo):
template = lookup.get_template('results.html')
availid = form.getvalue('availid')
password = form.getvalue('password')
selfserve.ldap.bind_as_user(availid, password)
remote24 = os.environ['REMOTE_ADDR']
m = re.search("(?:\d+\.){3}", remote24)
remote24 = m.group(0);
sesskey = selfserve.tokens.make_token(SESSION_MAGIC, availid, seconds=60, cookie=remote24)[0] # one minute
tdata['session'] = sesskey
if 'changepw' in form:
tdata['location'] = '%s/details/%s/password?session=%s' % (tdata['base_url'], availid, sesskey)
else:
tdata['location'] = '%s/details/%s?session=%s' % (tdata['base_url'], availid, sesskey)
tdata['message'] = "Login successful"
print template.render(**tdata)
def make_attrs_dict(attributes):
retval = {}
for k, v in attributes:
retval[k] = v
return retval
class Attribute(object):
def __init__(self, key, name, editable, multivalue, values):
self.key = key
self.name = name
self.editable = editable
self.multivalue = multivalue
self.values = values
def render_edit_details(tdata, lookup, form, pathinfo):
"The 'edit my details or password' screens."
pathinfo_parts = pathinfo.split('/')
assert len(pathinfo_parts) == 3 or len(pathinfo_parts) == 4 and pathinfo_parts[-1] in [ 'password' , '' ]
# let's hope we don't have [email protected]
editing_the_password = (pathinfo_parts[-1] == 'password')
# validation
if os.environ.get('REQUEST_METHOD') == 'POST':
# submitting a filled form
availid = form['availid'].value
tdata['availid'] = availid
else:
if 'session' in form:
# came here via the login screen and the 1-minute redirector
remote24 = os.environ['REMOTE_ADDR']
m = re.search("(?:\d+\.){3}", remote24)
remote24 = m.group(0);
hextoken, availid, expiry = selfserve.tokens.has_token(SESSION_MAGIC, form.getvalue('session'), remote24)
tdata['availid'] = availid
# for the "logout" function
tdata['session'] = hextoken
else:
# redirect to login screen
template = lookup.get_template('results.html')
tdata['location'] = '%s?availid=%s&changepw=%s' % (
tdata['base_url'], pathinfo_parts[2], ['', '1'][editing_the_password])
tdata['message'] = "Redirecting..."
if editing_the_password:
# TODO
pass
print template.render(**tdata)
return
if editing_the_password:
if os.environ.get('REQUEST_METHOD') == 'POST':
template = lookup.get_template('results.html')
selfserve.ldap.do_pw_change(availid, form.getvalue('old_pw'),
form.getvalue('new_pw1'), form.getvalue('new_pw2'))
tdata['message'] = 'Password change successful'
print template.render(**tdata)
else:
template = lookup.get_template('changepassword.html')
print template.render(**tdata)
else:
attributesdict = make_attrs_dict(ATTRIBUTES)
if os.environ.get('REQUEST_METHOD') == 'POST':
template = lookup.get_template('results.html')
edits = {}
for k in form:
if k.endswith('_attr'):
attr = k[:-5]
if attributesdict[attr][1]:
edits[attr] = form.getlist(k)
for attr in attributesdict:
if attr not in edits and attributesdict[attr][1]:
edits[attr] = None
selfserve.ldap.do_details_change(availid, form.getvalue('old_pw'), edits)
# TODO: maybe kill the session token here?
tdata['message'] = "Details change successful"
print template.render(**tdata)
else:
template = lookup.get_template('changedetails.html')
values = selfserve.ldap.fetch_attributes(availid,
attributesdict.keys())
attrs = []
for key, (name, editable, multivalue) in ATTRIBUTES:
if editable:
editword = ''
else:
editword = 'readonly'
if key not in values:
editword = 'disabled'
attrs.append(Attribute(key, name, editword,
ezt.boolean(multivalue),
values.get(key, ['<not present>'])))
tdata['attributes'] = attrs
tdata['dn'] = USER_DN_T % availid
print template.render(**tdata)
def render_logout(tdata, lookup, form, pathinfo):
template = lookup.get_template('results.html')
sesskey = pathinfo.split('/')[-1]
try:
# don't check remote24/cookie
selfserve.tokens.kill_token(sesskey)
tdata['status'] = 200
tdata['message'] = "Bye, token '%s' removed" % sesskey
except selfserve.exceptions.NoSuchToken:
tdata['status'] = 200
tdata['message'] = "Bye"
except selfserve.exceptions.SS2Exception, ss2e:
raise
print template.render(**tdata)
def render_unknown_uri(tdata, lookup, form, pathinfo):
template = lookup.get_template('results.html')
tdata['status'] = 500
tdata['message'] = "Unknown URI '%s'" % pathinfo
print template.render(**tdata)
### this will go away, but it is handy for the conversion from Mako
class CompatTemplate(object):
def __init__(self, template):
self.template = template
def render(self, **kw):
buffer = StringIO.StringIO()
self.template.generate(buffer, kw)
return buffer.getvalue()
class CompatLookup(object):
PATH = '../templates'
def get_template(self, template_name):
template = ezt.Template(os.path.join(self.PATH, template_name),
base_format=ezt.FORMAT_HTML)
return CompatTemplate(template)
def start(tdata):
pathinfo = os.environ.get("PATH_INFO")
logger.debug("pathinfo = %r", pathinfo)
lookup = CompatLookup()
form = cgi.FieldStorage()
# some default values
tdata['location'] = None
tdata['status'] = None
tdata['toolbar'] = None
tdata['menu'] = None
tdata['messagebox'] = None
tdata['message'] = None
tdata['availid'] = None
tdata['_debug'] = ''
# to force a newline, in the presence of whitespace compression
tdata['newline'] = '\n'
try:
if DEBUG_EVERYTHING:
# this may include passwords!
logger.debug(`form`)
if pathinfo in [None, '/']:
render_index(tdata, lookup, form, pathinfo)
elif pathinfo == '/reset/enter':
render_reset(tdata, lookup, form, pathinfo)
elif pathinfo.startswith('/reset/token/'):
render_token(tdata, lookup, form, pathinfo)
elif pathinfo == '/details/login':
render_login_redirector(tdata, lookup, form, pathinfo)
elif pathinfo.startswith('/details/logout/'):
render_logout(tdata, lookup, form, pathinfo)
elif pathinfo.startswith('/details/'):
render_edit_details(tdata, lookup, form, pathinfo)
else:
render_unknown_uri(tdata, lookup, form, pathinfo)
except selfserve.exceptions.SS2Exception, ss2e:
return render_exception(ss2e, tdata, lookup)
except Exception, e:
return render_exception(e, tdata, lookup)
except BaseException, e:
return render_exception(e, tdata, lookup)
finally:
pass
class CustomSubjectHandler(logging.handlers.SMTPHandler):
def getSubject(self, logrecord):
# logging.handlers.SMTPHandler.getSubject(self, logrecord)
return logrecord.getMessage().split("\n")[0]
def main():
#print "Content-type: text/plain\n\n"; debug_dump()
if DEBUG_MODE:
cgitb.enable()
# TODO config file
# TODO don't spam stderr (presumably due to the root logger?)
logging.basicConfig(
level=logging.__dict__[LOG_LEVEL],
#self.setFormatter(logging.Formatter(
format=((
"[%(asctime)s]\t[%(processName)s:%(process)d]\t[%(levelname)s]\t[%(filename)s:%(lineno)d %(funcName)s()]\t%(message)s")))
global logger
logger = logging.getLogger("%s.app" % LOGGER_NAME)
#logger.setLevel(logging.__dict__[LOG_LEVEL])
SCRIPT_NAME = os.environ['SCRIPT_NAME']
tdata = {
'script_name' : SCRIPT_NAME,
# ensure SCRIPT_DIRNAME has a trailing slash
'script_dirname' : (os.path.dirname(SCRIPT_NAME)+'/').replace('//','/'),
'base_url' : '%s://%s%s' % (HTTP_PROTOCOL, os.environ['HTTP_HOST'], os.environ['SCRIPT_NAME']),
}
# add a root logger
if DEBUG_MODE:
ah = AppendHandler(tdata)
logging.getLogger().addHandler(ah)
basename = os.path.basename(os.getenv('SCRIPT_FILENAME'))
rootMailHandler = CustomSubjectHandler(
SMTP_HOST, '"Selfserve (%s)" <%s>' % (basename, FROM_ADDRESS), NOTIFY_TO,
"Selfserve <%s>" % basename # unused by the subclass
);
rootMailHandler.setLevel(logging.__dict__[LOG_LEVEL_MAIL])
logging.getLogger().addHandler(rootMailHandler)
start(tdata)
#raise Exception("Hello world")
| [] | [] | [
"PATH_INFO",
"HTTP_HOST",
"REQUEST_METHOD",
"SCRIPT_FILENAME",
"SCRIPT_NAME",
"REMOTE_ADDR"
] | [] | ["PATH_INFO", "HTTP_HOST", "REQUEST_METHOD", "SCRIPT_FILENAME", "SCRIPT_NAME", "REMOTE_ADDR"] | python | 6 | 0 | |
lor/test.py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Testing support
This module contains helper functions that downstream workspaces might want to use for testing purposes.
"""
import os
import tempfile
from lor import workspace
from lor.generators.workspace import workspace_generator
class TemporaryEnv(object):
"""Enable the application's environment (env vars, pwd) to be temporarily mutated, resetting it afterwards.
"""
def __enter__(self):
self.env = os.environ.copy()
self.cwd = os.getcwd()
return os.environ
def __exit__(self, exc_type, exc_val, exc_tb):
os.environ.clear()
os.environ.update(self.env)
os.chdir(self.cwd)
if exc_val is not None:
raise exc_val
else:
return True
class ChangeWorkspace(object):
"""Set the global workspace temporarily.
"""
def __init__(self, ws_path):
self.ws_path = ws_path
def __enter__(self):
self.existing = workspace.get_path()
workspace._set_path(self.ws_path)
return self.ws_path
def __exit__(self, exc_type, exc_val, exc_tb):
workspace._set_path(self.existing)
if exc_val is not None:
raise exc_val
else:
return True
class TemporaryWorkspace(object):
"""Create a temporary workspace
Creates a workspace and sets it as the global workspace temporarily.
"""
def __enter__(self):
self.existing = workspace.get_path()
ws_path = os.path.join(tempfile.mkdtemp(), "ws")
ws = workspace_generator.create(ws_path)
workspace._set_path(ws)
return ws
def __exit__(self, exc_type, exc_val, exc_tb):
workspace._set_path(self.existing)
if exc_val is not None:
raise exc_val
else:
return True
| [] | [] | [] | [] | [] | python | 0 | 0 | |
mesos/mesos-master/src/main/java/com/bennavetta/aeneas/mesos/master/MesosMaster.java | /**
* Copyright 2015 Benjamin Navetta
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.bennavetta.aeneas.mesos.master;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.zeroturnaround.exec.ProcessExecutor;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
/**
* Launch a Mesos master instance
* @see <a href="http://mesos.apache.org/documentation/latest/configuration/">Mesos Configuration</a>
*/
public final class MesosMaster
{
private static final Logger LOG = LoggerFactory.getLogger(MesosMaster.class);
private final Map<String, String> configuration = Maps.newHashMap();
private final Path masterExecutable;
private Process process;
/**
* Create a new Mesos master handler that will run the given {@code mesos-master} executable.
* @param masterExecutable the path to {@code mesos-master}
*/
public MesosMaster(Path masterExecutable)
{
this.masterExecutable = Preconditions.checkNotNull(masterExecutable);
}
/**
* The size of the quorum of replicas when using 'replicated_log' based registry. It is imperative to set this
* value to be a majority of masters i.e., quorum > (number of masters)/2.
*
* @param size the quorum size
*/
public void setQuorumSize(int size)
{
setOption("quorum", String.valueOf(size));
}
/**
* Where to store the persistent information stored in the Registry.
* @param workDir a directory path
*/
public void setWorkDir(String workDir)
{
setOption("work_dir", workDir);
}
/**
* IP address to listen on
* @param ip an IP address
*/
public void setIp(String ip)
{
setOption("ip", ip);
}
/**
* Port to listen on (master default: 5050 and slave default: 5051)
* @param port a port number
*/
public void setPort(int port)
{
setOption("port", String.valueOf(port));
}
/**
* ZooKeeper URL (used for leader election amongst masters) May be one of:
* <ul>
* <li>zk://host1:port1,host2:port2,.../path</li>
* <li>zk://username:password@host1:port1,host2:port2,.../path</li>
* <li>file://path/to/file (where file contains one of the above)</li>
* </ul>
* @param connection a ZooKeeper connection string
*/
public void setZk(String connection)
{
setOption("zk", connection);
}
public void setOption(String name, String value)
{
configuration.put(name, value);
}
public void configureFromEnvironment()
{
final ImmutableSet<String> ignoredKeys = ImmutableSet.of("version", "download_sha1", "download_url");
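		// every MESOS_<OPTION> environment variable becomes a master option; the keys above come from the Dockerfile and are skipped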
System.getenv().forEach((key, value) -> {
if(key.startsWith("MESOS_"))
{
String argName = key.substring(6).toLowerCase();
if(!ignoredKeys.contains(argName))
{
setOption(argName, value);
}
}
});
}
public void launch() throws IOException
{
Preconditions.checkState(process == null, "Mesos master already running");
ImmutableMap.Builder<String, String> environment = ImmutableMap.<String, String>builder();
configuration.forEach((key, value) -> environment.put("MESOS_" + key.toUpperCase(), value));
environment.put("MESOS_VERSION", "false"); // Mesos interprets the Dockerfile environment variable as the '--version' flag
LOG.info("Starting Mesos master '{}' with configuration {}", masterExecutable, environment.build());
process = new ProcessExecutor()
.command(masterExecutable.toAbsolutePath().toString())
.redirectError(System.err)
.redirectOutput(System.out)
.environment(environment.build())
.destroyOnExit()
.start().getProcess();
}
public void kill()
{
Preconditions.checkState(process != null, "Mesos master not running");
process.destroy();
}
public int waitFor() throws InterruptedException
{
Preconditions.checkState(process != null, "Mesos master not running");
return process.waitFor();
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
go/test/endtoend/mysqlctl/mysqlctl_test.go | /*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mysqlctl
import (
"flag"
"fmt"
"os"
"os/exec"
"testing"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/test/endtoend/cluster"
)
var (
clusterInstance *cluster.LocalProcessCluster
masterTablet cluster.Vttablet
replicaTablet cluster.Vttablet
hostname = "localhost"
keyspaceName = "test_keyspace"
shardName = "0"
cell = "zone1"
)
func TestMain(m *testing.M) {
flag.Parse()
exitCode := func() int {
clusterInstance = cluster.NewCluster(cell, hostname)
defer clusterInstance.Teardown()
// Start topo server
err := clusterInstance.StartTopo()
if err != nil {
return 1
}
if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspaceName); err != nil {
return 1
}
initCluster([]string{"0"}, 2)
// Collect tablet paths and ports
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
for _, tablet := range tablets {
if tablet.Type == "master" {
masterTablet = *tablet
} else if tablet.Type != "rdonly" {
replicaTablet = *tablet
}
}
return m.Run()
}()
os.Exit(exitCode)
}
func initCluster(shardNames []string, totalTabletsRequired int) {
keyspace := cluster.Keyspace{
Name: keyspaceName,
}
for _, shardName := range shardNames {
shard := &cluster.Shard{
Name: shardName,
}
var mysqlCtlProcessList []*exec.Cmd
for i := 0; i < totalTabletsRequired; i++ {
// instantiate vttablet object with reserved ports
tabletUID := clusterInstance.GetAndReserveTabletUID()
tablet := &cluster.Vttablet{
TabletUID: tabletUID,
HTTPPort: clusterInstance.GetAndReservePort(),
GrpcPort: clusterInstance.GetAndReservePort(),
MySQLPort: clusterInstance.GetAndReservePort(),
Alias: fmt.Sprintf("%s-%010d", clusterInstance.Cell, tabletUID),
}
if i == 0 { // Make the first one as master
tablet.Type = "master"
}
// Start Mysqlctl process
tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory)
proc, err := tablet.MysqlctlProcess.StartProcess()
if err != nil {
return
}
mysqlCtlProcessList = append(mysqlCtlProcessList, proc)
// start vttablet process
tablet.VttabletProcess = cluster.VttabletProcessInstance(tablet.HTTPPort,
tablet.GrpcPort,
tablet.TabletUID,
clusterInstance.Cell,
shardName,
keyspaceName,
clusterInstance.VtctldProcess.Port,
tablet.Type,
clusterInstance.TopoProcess.Port,
clusterInstance.Hostname,
clusterInstance.TmpDirectory,
clusterInstance.VtTabletExtraArgs,
clusterInstance.EnableSemiSync)
tablet.Alias = tablet.VttabletProcess.TabletPath
shard.Vttablets = append(shard.Vttablets, tablet)
}
for _, proc := range mysqlCtlProcessList {
if err := proc.Wait(); err != nil {
return
}
}
for _, tablet := range shard.Vttablets {
if _, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("create database vt_%s", keyspace.Name), keyspace.Name, false); err != nil {
log.Error(err.Error())
return
}
}
keyspace.Shards = append(keyspace.Shards, *shard)
}
clusterInstance.Keyspaces = append(clusterInstance.Keyspaces, keyspace)
}
func TestRestart(t *testing.T) {
err := masterTablet.MysqlctlProcess.Stop()
require.Nil(t, err)
masterTablet.MysqlctlProcess.CleanupFiles(masterTablet.TabletUID)
err = masterTablet.MysqlctlProcess.Start()
require.Nil(t, err)
}
func TestAutoDetect(t *testing.T) {
// Start up tablets with an empty MYSQL_FLAVOR, which means auto-detect
sqlFlavor := os.Getenv("MYSQL_FLAVOR")
os.Setenv("MYSQL_FLAVOR", "")
err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
require.Nil(t, err, "error should be nil")
err = clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].VttabletProcess.Setup()
require.Nil(t, err, "error should be nil")
// Reparent tablets, which requires flavor detection
err = clusterInstance.VtctlclientProcess.InitShardMaster(keyspaceName, shardName, cell, masterTablet.TabletUID)
require.Nil(t, err, "error should be nil")
//Reset flavor
os.Setenv("MYSQL_FLAVOR", sqlFlavor)
}
| [
"\"MYSQL_FLAVOR\""
] | [] | [
"MYSQL_FLAVOR"
] | [] | ["MYSQL_FLAVOR"] | go | 1 | 0 | |
telebot_test.go | package telebot
import (
"fmt"
"os"
"testing"
)
func TestBot(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
token := os.Getenv("TELEBOT_SECRET")
if token == "" {
fmt.Println("ERROR: " +
"In order to test telebot functionality, you need to set up " +
"TELEBOT_SECRET environmental variable, which represents an API " +
"key to a Telegram bot.\n")
t.Fatal("Could't find TELEBOT_SECRET, aborting.")
}
_, err := NewBot(Settings{Token: token})
if err != nil {
t.Fatal("couldn't create bot:", err)
}
}
func TestRecipient(t *testing.T) {
token := os.Getenv("TELEBOT_SECRET")
if token == "" {
fmt.Println("ERROR: " +
"In order to test telebot functionality, you need to set up " +
"TELEBOT_SECRET environmental variable, which represents an API " +
"key to a Telegram bot.\n")
t.Fatal("Could't find TELEBOT_SECRET, aborting.")
}
bot, err := NewBot(Settings{Token: token})
if err != nil {
t.Fatal("couldn't create bot:", err)
}
bot.Send(&User{}, "")
bot.Send(&Chat{}, "")
}
func TestFile(t *testing.T) {
file := FromDisk("telebot.go")
if file.InCloud() {
t.Fatal("Newly created file can't exist on Telegram servers!")
}
file.FileID = "magic"
if file.FileLocal != "telebot.go" {
t.Fatal("File doesn't preserve its original filename.")
}
}
func TestCustomURL(t *testing.T) {
token := os.Getenv("TELEBOT_SECRET")
if token == "" {
fmt.Println("ERROR: " +
"In order to test telebot functionality, you need to set up " +
"TELEBOT_SECRET environmental variable, which represents an API " +
"key to a Telegram bot.\n")
t.Fatal("Could't find TELEBOT_SECRET, aborting.")
}
customURL := "http://api.telegram.org"
bot, err := NewBot(Settings{Token: token, URL: customURL})
if err != nil {
t.Fatal("couldn't create bot:", err)
}
if bot.URL != customURL {
t.Fatal("custom api url is not set")
}
}
| [
"\"TELEBOT_SECRET\"",
"\"TELEBOT_SECRET\"",
"\"TELEBOT_SECRET\""
] | [] | [
"TELEBOT_SECRET"
] | [] | ["TELEBOT_SECRET"] | go | 1 | 0 | |
main/backspaceCompare_test.go | package main
import "testing"
/**
goos: darwin
goarch: amd64
BenchmarkBackspaceCompare1-4 100000000 12.6 ns/op 0 B/op 0 allocs/op
BenchmarkBackspaceCompareMy1-4 500000 2200 ns/op 928 B/op 51 allocs/op
PASS
ok command-line-arguments 2.406s
*/
func BenchmarkBackspaceCompare1(b *testing.B) {
for i := 0; i < b.N; i++ {
backspaceCompare1("sdgfasgfgqugofgeyegqufg#######asdfasf", "dyaadsuigfyuegquygasdhgfaosgd#####sdgf")
}
}
func BenchmarkBackspaceCompareMy1(b *testing.B) {
for i := 0; i < b.N; i++ {
backspaceCompareMy1("sdgfasgfgqugofgeyegqufg#######asdfasf", "dyaadsuigfyuegquygasdhgfaosgd#####sdgf")
}
}
func backspaceCompare1(S string, T string) bool {
slen, tlen := len(S), len(T)
if slen == 0 && tlen == 0 {
return true
}
scur, tcur := slen-1, tlen-1
sbs, tbs := 0, 0
for {
for scur >= 0 && (S[scur] == '#' || sbs > 0) {
if S[scur] == '#' {
sbs++
} else {
sbs--
}
scur--
}
for tcur >= 0 && (T[tcur] == '#' || tbs > 0) {
if T[tcur] == '#' {
tbs++
} else {
tbs--
}
tcur--
}
if (scur == 0 && tcur == 0) || (scur < 0 && tcur < 0) {
return true
} else if (scur >= 0 && tcur < 0) || (scur < 0 && tcur >= 0) || S[scur] != T[tcur] {
return false
}
scur--
tcur--
}
}
func backspaceCompareMy1(S string, T string) bool {
s := ""
istep := 0
for i := len(S) - 1; i >= 0; i-- {
if S[i] == '#' {
istep++
continue
}
if istep > 0 {
istep--
continue
}
s = string(S[i]) + s
}
t := ""
lstep := 0
for l := len(T) - 1; l >= 0; l-- {
if T[l] == '#' {
lstep++
continue
}
if lstep > 0 {
lstep--
continue
}
t = string(T[l]) + t
}
return s == t
}
| [] | [] | [] | [] | [] | go | null | null | null |
server/config_test.go | package server
import (
"os"
"testing"
)
func compareString(t *testing.T, name string, str, exp string, i int) {
if str != exp {
t.Fatalf("expected %s to be:\n%s\nbut got:\n%s\nin test #%d", name, exp, str, i+1)
}
}
func compareStringPtr(t *testing.T, name string, str, exp *string, i int) {
if str == exp {
return
}
if str == nil && exp != nil {
t.Fatalf("expected %s to be:\n%s\nbut got:\nnil\nin test %d", name, *exp, i+1)
} else if str != nil && exp == nil {
t.Fatalf("expected %s to be:\nnil\nbut got:\n%s\nin test %d", name, *str, i+1)
} else if *str != *exp {
t.Fatalf("expected %s to be:\n%s\nbut got:\n%s\nin test %d", name, *exp, *str, i+1)
}
}
// Test config prepare method
func TestConfigPrepare(t *testing.T) {
defaultAddr := "0.0.0.0"
emptyAddr := ""
localAddr := "127.0.0.1"
ipv6Addr := "::1"
invalidAddr := "127.0.0"
invalidHeaderAuth := "test"
allowOriginAll := "*"
allowOriginSingle := "http://resgate.io"
allowOriginMultiple := "http://localhost;http://resgate.io"
allowOriginInvalidEmpty := ""
allowOriginInvalidEmptyOrigin := ";http://localhost"
allowOriginInvalidMultipleAll := "http://localhost;*"
	allowOriginInvalidMultipleSame := "http://localhost;http://localhost"
allowOriginInvalidOrigin := "http://this.is/invalid"
method := "foo"
invalidMethod := "foo.bar"
defaultCfg := Config{}
defaultCfg.SetDefault()
tbl := []struct {
Initial Config
Expected Config
PrepareError bool
}{
// Valid config
{defaultCfg, Config{Addr: &defaultAddr, Port: 8080, WSPath: "/", APIPath: "/api/", APIEncoding: "json", scheme: "http", netAddr: "0.0.0.0:8080", allowOrigin: []string{"*"}, allowMethods: "GET, HEAD, OPTIONS, POST"}, false},
{Config{WSPath: "/"}, Config{Addr: nil, Port: 80, WSPath: "/", APIPath: "/", scheme: "http", netAddr: "0.0.0.0:80", allowOrigin: []string{"*"}, allowMethods: "GET, HEAD, OPTIONS, POST"}, false},
{Config{Addr: &emptyAddr, WSPath: "/"}, Config{Addr: &emptyAddr, Port: 80, WSPath: "/", APIPath: "/", scheme: "http", netAddr: ":80", allowOrigin: []string{"*"}, allowMethods: "GET, HEAD, OPTIONS, POST"}, false},
{Config{Addr: &localAddr, WSPath: "/"}, Config{Addr: &localAddr, Port: 80, WSPath: "/", APIPath: "/", scheme: "http", netAddr: "127.0.0.1:80", allowOrigin: []string{"*"}, allowMethods: "GET, HEAD, OPTIONS, POST"}, false},
{Config{Addr: &ipv6Addr, WSPath: "/"}, Config{Addr: &ipv6Addr, Port: 80, WSPath: "/", APIPath: "/", scheme: "http", netAddr: "[::1]:80", allowOrigin: []string{"*"}, allowMethods: "GET, HEAD, OPTIONS, POST"}, false},
// Allow origin
{Config{AllowOrigin: &allowOriginAll, WSPath: "/"}, Config{Addr: nil, Port: 80, WSPath: "/", APIPath: "/", scheme: "http", netAddr: "0.0.0.0:80", allowOrigin: []string{"*"}, allowMethods: "GET, HEAD, OPTIONS, POST"}, false},
{Config{AllowOrigin: &allowOriginSingle, WSPath: "/"}, Config{Addr: nil, Port: 80, WSPath: "/", APIPath: "/", scheme: "http", netAddr: "0.0.0.0:80", allowOrigin: []string{"http://resgate.io"}, allowMethods: "GET, HEAD, OPTIONS, POST"}, false},
{Config{AllowOrigin: &allowOriginMultiple, WSPath: "/"}, Config{Addr: nil, Port: 80, WSPath: "/", APIPath: "/", scheme: "http", netAddr: "0.0.0.0:80", allowOrigin: []string{"http://localhost", "http://resgate.io"}, allowMethods: "GET, HEAD, OPTIONS, POST"}, false},
// HTTP method mapping
{Config{WSPath: "/", PUTMethod: &method}, Config{Addr: nil, Port: 80, WSPath: "/", APIPath: "/", PUTMethod: &method, scheme: "http", netAddr: "0.0.0.0:80", allowOrigin: []string{"*"}, allowMethods: "GET, HEAD, OPTIONS, POST, PUT"}, false},
{Config{WSPath: "/", DELETEMethod: &method}, Config{Addr: nil, Port: 80, WSPath: "/", APIPath: "/", DELETEMethod: &method, scheme: "http", netAddr: "0.0.0.0:80", allowOrigin: []string{"*"}, allowMethods: "GET, HEAD, OPTIONS, POST, DELETE"}, false},
{Config{WSPath: "/", PATCHMethod: &method}, Config{Addr: nil, Port: 80, WSPath: "/", APIPath: "/", PATCHMethod: &method, scheme: "http", netAddr: "0.0.0.0:80", allowOrigin: []string{"*"}, allowMethods: "GET, HEAD, OPTIONS, POST, PATCH"}, false},
{Config{WSPath: "/", PUTMethod: &method, DELETEMethod: &method, PATCHMethod: &method}, Config{Addr: nil, Port: 80, WSPath: "/", APIPath: "/", PUTMethod: &method, DELETEMethod: &method, PATCHMethod: &method, scheme: "http", netAddr: "0.0.0.0:80", allowOrigin: []string{"*"}, allowMethods: "GET, HEAD, OPTIONS, POST, PUT, DELETE, PATCH"}, false},
// Invalid config
{Config{Addr: &invalidAddr, WSPath: "/"}, Config{}, true},
{Config{HeaderAuth: &invalidHeaderAuth, WSPath: "/"}, Config{}, true},
{Config{AllowOrigin: &allowOriginInvalidEmpty, WSPath: "/"}, Config{}, true},
{Config{AllowOrigin: &allowOriginInvalidEmptyOrigin, WSPath: "/"}, Config{}, true},
{Config{AllowOrigin: &allowOriginInvalidMultipleAll, WSPath: "/"}, Config{}, true},
{Config{AllowOrigin: &allowOriginInvalidMultipleSame, WSPath: "/"}, Config{}, true},
{Config{AllowOrigin: &allowOriginInvalidOrigin, WSPath: "/"}, Config{}, true},
{Config{PUTMethod: &invalidMethod, WSPath: "/"}, Config{}, true},
{Config{DELETEMethod: &invalidMethod, WSPath: "/"}, Config{}, true},
{Config{PATCHMethod: &invalidMethod, WSPath: "/"}, Config{}, true},
}
for i, r := range tbl {
cfg := r.Initial
err := cfg.prepare()
if err != nil {
if !r.PrepareError {
t.Fatalf("expected no error, but got:\n%s\nin test #%d", err, i+1)
}
continue
} else if r.PrepareError {
t.Fatalf("expected an error, but got none, in test #%d", i+1)
}
compareString(t, "WSPath", cfg.WSPath, r.Expected.WSPath, i)
compareString(t, "APIPath", cfg.APIPath, r.Expected.APIPath, i)
compareString(t, "APIEncoding", cfg.APIEncoding, r.Expected.APIEncoding, i)
compareStringPtr(t, "Addr", cfg.Addr, r.Expected.Addr, i)
compareStringPtr(t, "PUTMethod", cfg.PUTMethod, r.Expected.PUTMethod, i)
compareStringPtr(t, "DELETEMethod", cfg.DELETEMethod, r.Expected.DELETEMethod, i)
compareStringPtr(t, "PATCHMethod", cfg.PATCHMethod, r.Expected.PATCHMethod, i)
if cfg.Port != r.Expected.Port {
t.Fatalf("expected Port to be:\n%d\nbut got:\n%d\nin test %d", r.Expected.Port, cfg.Port, i+1)
}
compareString(t, "scheme", cfg.scheme, r.Expected.scheme, i)
compareString(t, "netAddr", cfg.netAddr, r.Expected.netAddr, i)
compareString(t, "headerAuthAction", cfg.headerAuthAction, r.Expected.headerAuthAction, i)
compareString(t, "headerAuthRID", cfg.headerAuthRID, r.Expected.headerAuthRID, i)
compareString(t, "allowMethods", cfg.allowMethods, r.Expected.allowMethods, i)
if len(cfg.allowOrigin) != len(r.Expected.allowOrigin) {
t.Fatalf("expected allowOrigin to be:\n%+v\nbut got:\n%+v\nin test %d", r.Expected.allowOrigin, cfg.allowOrigin, i+1)
}
for i, origin := range cfg.allowOrigin {
if origin != r.Expected.allowOrigin[i] {
t.Fatalf("expected allowOrigin to be:\n%+v\nbut got:\n%+v\nin test %d", r.Expected.allowOrigin, cfg.allowOrigin, i+1)
}
}
compareStringPtr(t, "HeaderAuth", cfg.HeaderAuth, r.Expected.HeaderAuth, i)
}
}
// Test NewService configuration error
func TestNewServiceConfigError(t *testing.T) {
tbl := []struct {
Initial Config
ServiceError bool
}{
{Config{}, false},
{Config{APIEncoding: "json"}, false},
{Config{APIEncoding: "JSON"}, false},
{Config{APIEncoding: "jsonFlat"}, false},
{Config{APIEncoding: "jsonflat"}, false},
{Config{APIEncoding: "test"}, true},
}
for i, r := range tbl {
cfg := r.Initial
cfg.SetDefault()
_, err := NewService(nil, cfg)
if err != nil && !r.ServiceError {
t.Fatalf("expected no error, but got:\n%s\nin test #%d", err, i+1)
} else if err == nil && r.ServiceError {
t.Fatalf("expected an error, but got none, in test #%d", i+1)
}
}
}
// Test that the travis version tag (if existing) matches that
// of the Version constant.
func TestVersionMatchesTag(t *testing.T) {
tag := os.Getenv("TRAVIS_TAG")
if tag == "" {
t.SkipNow()
}
if tag[0] != 'v' {
t.Fatalf("Expected tag to start with `v`, got %+v", tag)
}
if Version != tag[1:] {
t.Fatalf("Expected version %+v, got %+v", Version, tag[1:])
}
}
func TestMatchesOrigins(t *testing.T) {
tbl := []struct {
AllowedOrigins []string
Origin string
Expected bool
}{
{[]string{"http://localhost"}, "http://localhost", true},
{[]string{"https://resgate.io"}, "https://resgate.io", true},
{[]string{"https://resgate.io"}, "https://Resgate.IO", true},
{[]string{"http://localhost", "https://resgate.io"}, "http://localhost", true},
{[]string{"http://localhost", "https://resgate.io"}, "https://resgate.io", true},
{[]string{"http://localhost", "https://resgate.io"}, "https://Resgate.IO", true},
{[]string{"http://localhost", "https://resgate.io", "http://resgate.io"}, "http://Localhost", true},
{[]string{"http://localhost", "https://resgate.io", "http://resgate.io"}, "https://Resgate.io", true},
{[]string{"http://localhost", "https://resgate.io", "http://resgate.io"}, "http://resgate.IO", true},
{[]string{"https://resgate.io"}, "http://resgate.io", false},
{[]string{"http://localhost", "https://resgate.io"}, "http://resgate.io", false},
{[]string{"http://localhost", "https://resgate.io", "http://resgate.io"}, "http://localhost/", false},
}
for i, r := range tbl {
if matchesOrigins(r.AllowedOrigins, r.Origin) != r.Expected {
t.Fatalf("expected matchesOrigins to return %#v\n\tmatchesOrigins(%#v, %#v)\n\tin test #%d", r.Expected, r.AllowedOrigins, r.Origin, i+1)
}
}
}
| [
"\"TRAVIS_TAG\""
] | [] | [
"TRAVIS_TAG"
] | [] | ["TRAVIS_TAG"] | go | 1 | 0 | |
Megatron-LM-v1.1.5-ZeRO3/megatron/initialize.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron initialization."""
import random
import os
import numpy as np
import torch
from megatron import get_adlr_autoresume
from megatron import get_args
from megatron import get_tensorboard_writer
from megatron import mpu
from megatron.global_vars import set_global_variables
from megatron.mpu import set_model_parallel_rank, set_model_parallel_world_size
import deepspeed
def initialize_megatron(extra_args_provider=None, args_defaults={},
ignore_unknown_args=False, allow_no_cuda=False):
"""Set global variables, initialize distributed, and
set autoresume and random seeds.
`allow_no_cuda` should not be set unless using megatron for cpu only
data processing. In general this arg should not be set unless you know
what you are doing.
Returns a function to finalize distributed env initialization
(optionally, only when args.lazy_mpu_init == True)
"""
if not allow_no_cuda:
# Make sure cuda is available.
assert torch.cuda.is_available(), 'Megatron requires CUDA.'
# Parse args, build tokenizer, and set adlr-autoresume,
# tensorboard-writer, and timers.
set_global_variables(extra_args_provider=extra_args_provider,
args_defaults=args_defaults,
ignore_unknown_args=ignore_unknown_args)
# torch.distributed initialization
def finish_mpu_init():
args = get_args()
# Pytorch distributed.
_initialize_distributed()
# Random seeds for reproducibility.
if args.rank == 0:
print('> setting random seeds to {} ...'.format(args.seed))
_set_random_seed(args.seed)
args = get_args()
if args.lazy_mpu_init:
args.use_cpu_initialization=True
# delayed initialization of DDP-related stuff
# We only set basic DDP globals
set_model_parallel_world_size(args.model_parallel_size)
# and return function for external DDP manager to call when it has DDP initialized
set_model_parallel_rank(args.rank)
return finish_mpu_init
else:
# Megatron's MPU is the master. Complete initialization right away.
finish_mpu_init()
# Initialize memory buffers.
_initialize_mem_buffs()
# Autoresume.
_init_autoresume()
# Write arguments to tensorboard.
_write_args_to_tensorboard()
# No continuation function
return None
def setup_deepspeed_random_and_activation_checkpointing(args):
'''Optional DeepSpeed Activation Checkpointing features.
Gives access to partition activations, contiguous memory optimizations
and cpu checkpointing.
Activation checkpoint requires keep track of the random states
and setting the random seed for each MP process. Megatron uses
mpu.get_cuda_rng_tracker and mpu.model_parallel_cuda_manual_seed
for keeping track of the random states and setting the random seeds.
Since they are used in places outside of activation checkpointing,
we overwrite them to maintain consistency.
This must be called before all the calls to mpu.model_parallel_cuda_manual_seed
'''
num_layers = args.num_layers // args.checkpoint_num_layers
num_layers = num_layers if args.num_layers % args.checkpoint_num_layers == 0 else num_layers + 1
if args.split_transformers:
num_layers *= 2
deepspeed.checkpointing.configure(
mpu,
partition_activations=args.partition_activations,
contiguous_checkpointing=args.contigious_checkpointing,
num_checkpoints=num_layers,
checkpoint_in_cpu=args.checkpoint_in_cpu,
synchronize=args.synchronize_each_layer,
profile=args.profile_backward)
mpu.checkpoint = deepspeed.checkpointing.checkpoint
mpu.get_cuda_rng_tracker = deepspeed.checkpointing.get_cuda_rng_tracker
mpu.model_parallel_cuda_manual_seed = deepspeed.checkpointing.model_parallel_cuda_manual_seed
def _initialize_distributed():
"""Initialize torch.distributed and mpu."""
args = get_args()
device_count = torch.cuda.device_count()
if torch.distributed.is_initialized():
if args.rank == 0:
print('torch distributed is already initialized, '
'skipping initialization ...', flush=True)
args.rank = torch.distributed.get_rank()
args.world_size = torch.distributed.get_world_size()
else:
if args.rank == 0:
print('> initializing torch distributed ...', flush=True)
# Manually set the device ids.
if device_count > 0:
device = args.rank % device_count
if args.local_rank is not None:
assert args.local_rank == device, \
'expected local-rank to be the same as rank % device-count.'
else:
args.local_rank = device
torch.cuda.set_device(device)
# Call the init process
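        # MASTER_ADDR and MASTER_PORT come from the launcher environment, defaulting to localhost:6000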
init_method = 'tcp://'
master_ip = os.getenv('MASTER_ADDR', 'localhost')
master_port = os.getenv('MASTER_PORT', '6000')
init_method += master_ip + ':' + master_port
torch.distributed.init_process_group(
backend=args.distributed_backend,
world_size=args.world_size, rank=args.rank,
init_method=init_method)
# Set the model-parallel / data-parallel communicators.
if device_count > 0:
if mpu.model_parallel_is_initialized():
print('model parallel is already initialized')
else:
mpu.initialize_model_parallel(args.model_parallel_size)
# Optional DeepSpeed Activation Checkpointing Features
#
if args.deepspeed and args.deepspeed_activation_checkpointing:
setup_deepspeed_random_and_activation_checkpointing(args)
def _init_autoresume():
"""Set autoresume start time."""
autoresume = get_adlr_autoresume()
if autoresume:
torch.distributed.barrier()
autoresume.init()
torch.distributed.barrier()
def _set_random_seed(seed):
"""Set random seed for reproducability."""
if seed is not None and seed > 0:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 0:
mpu.model_parallel_cuda_manual_seed(seed)
else:
raise ValueError('Seed ({}) should be a positive integer.'.format(seed))
def _write_args_to_tensorboard():
"""Write arguments to tensorboard."""
args = get_args()
writer = get_tensorboard_writer()
if writer:
for arg in vars(args):
writer.add_text(arg, str(getattr(args, arg)))
def _initialize_mem_buffs():
"""Initialize manually allocated static memory."""
args = get_args()
# Initialize memory for checkpointed activations.
if args.distribute_checkpointed_activations:
mpu.init_checkpointed_activations_memory_buffer()
| [] | [] | [
"MASTER_ADDR",
"MASTER_PORT"
] | [] | ["MASTER_ADDR", "MASTER_PORT"] | python | 2 | 0 | |
Tools/tdd/src/testAllPkgs.py | ###############################################################################
# Copyright (c) 2021, Milan Neubert ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import threading
import TDDConfig
from TDDConfig import CEnvCfg
import tdd_support
import readchar
import os
import time
from tabulate import tabulate
import CodeStatistics
import colorama
from colorama import Fore, Style
from pathlib import Path
import subprocess
import KeyPressThread
import keyboard
# import sys
import cmakeSupport as CS
env_bckp = os.environ.copy()
def assertWithText(condition, text):
'''
Assertion wrapping function
'''
assert condition, text
def removeDirectory(path: Path):
'''
Function for removing directory
'''
if path.is_dir():
tdd_support.del_folder(path)
assertWithText(not path.is_dir(), "Something went wrong. Path(%s) was not deleted!" % (str(path)))
def backUpEnvVariable():
    # Store the snapshot in the module-level variable, not in a local one.
    global env_bckp
    env_bckp = os.environ.copy()
def setEnvVariable(co_env: CEnvCfg):
os.environ['PATH'] = co_env.str_cmake + ";" + os.environ['PATH']
os.environ['PATH'] = co_env.str_mingw + ";" + os.environ['PATH']
os.environ['PATH'] = co_env.str_cppcheck + ";" + os.environ['PATH']
os.environ['PATH'] = co_env.str_clang + ";" + os.environ['PATH']
os.environ['PATH'] = co_env.str_cppumockgen + ";" + os.environ['PATH']
def resetEnvVariable():
    # Restore the saved environment in place so child processes also see it.
    os.environ.clear()
    os.environ.update(env_bckp)
def testOnePkg(pckgDir, mCfg):
print(pckgDir)
kpt = KeyPressThread.KeyboardThread()
backUpEnvVariable()
setEnvVariable(mCfg.co_env)
str_testPackagesPath = mCfg.co_pkg.str_testpath
str_testPackageSuffix = mCfg.co_pkg.str_testfldr_suffix
len_tpkgS = len(str_testPackageSuffix)
path_dir = Path(str_testPackagesPath).glob("*_Tpkg")
dir_lst = [folder for folder in path_dir if folder.is_dir()]
lst_testDir = []
for dir in dir_lst:
strdir = str(dir.stem)
if (strdir[-len_tpkgS:] == str_testPackageSuffix):
lst_testDir.append(strdir)
task_lst = [CTestPkg(pckg, mCfg, kpt) for pckg in lst_testDir]
pTask = None
for task in task_lst:
if task.name == pckgDir:
pTask = task
break
pTask.b_silent = False
pTask.run()
    # revert environment variables
resetEnvVariable()
print("Press any key to quit.\n")
# keyboard.read_key()
readchar.readkey()
def debug(lstPackage, mainCfg):
kpt = KeyPressThread.KeyboardThreadDbg()
task_lst = [CTestPkg(pckg, mainCfg, kpt) for pckg in lstPackage]
id = 1
task_lst[id].b_silent = False
task_lst[id].run()
def tests_minimized(lstPackage, mainCfg):
colorama.init()
backUpEnvVariable()
setEnvVariable(mainCfg.co_env)
kpt = KeyPressThread.KeyboardThread()
task_lst = [CTestPkgThread(pckg, mainCfg, kpt)
for pckg in lstPackage]
numTasks = len(task_lst)
resultTabRef = []
tdd_support.clear()
while(True):
numOfTerminated = 0
numOfIdle = 0
resultTab = []
for task in task_lst:
result = [
task.name,
task.str_step,
task.str_status,
task.str_testStatus,
task.str_uncoverage,
task.str_analysis,
task.str_complexity]
resultTab.append(result)
if "Terminated" in task.str_status:
numOfTerminated += 1
if "Idle" in task.str_status:
numOfIdle += 1
if numOfTerminated == numTasks:
break
bRewrite = False
if not resultTabRef:
bRewrite = True
else:
bIsSame = True
for idx in range(len(resultTab)):
for idy in range(len(resultTab[0])):
bIsSame *= (resultTab[idx][idy] == resultTabRef[idx][idy])
if not bIsSame:
bRewrite = True
if bRewrite:
resultTabRef = resultTab
errPkgList = []
#tdd_support.clear()
#print(result[1])
statusStr = ""
resultStr = ""
# for id in range(3):
# resultStr += chr(0x00BC + id)
# resultStr += "|"
colorChar = Style.RESET_ALL
for result in resultTab:
# statusStr += result[1] + " "
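                # Map the current step onto a progress glyph: 0xBC-0xBE are the
                # 1/4, 1/2 and 3/4 fraction characters, 0x2588 is a full block.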
charAsNum = 0x20
if result[1] in ["CMake", "Makefile"]:
# charAsNum = 177
charAsNum = 0xBC
if result[1] == "Test":
charAsNum = 0xBD
# charAsNum = 66
if result[1] in ["Coverage", "StaticCheck", "Analysis"]:
charAsNum = 0xBE
# charAsNum = 67
if result[1] == "Finished":
charAsNum = 0x2588
statusStr += result[3] + " "
# StatusList = ["Empty" "Fail" "Pass"]
if "Empty" in result[3]:
colorChar = Fore.YELLOW
if "Fail" in result[3]:
colorChar = Fore.RED
errPkgList.append(result[0])
if "Pass" in result[3]:
colorChar = Fore.GREEN
resultStr += colorChar + chr(charAsNum) + Style.RESET_ALL
tdd_support.clear()
print("[%s]" % (resultStr))
if errPkgList:
print("Corrupted or incompilable tests:")
for errPkg in errPkgList:
print(Fore.RED + errPkg + Style.RESET_ALL)
# sys.stdout.write("\r")
# sys.stdout.flush()
# sys.stdout.write("\r\r[%s]" % (resultStr))
# sys.stdout.flush()
# sys.stdout.write("\r[%s]%s" % (resultStr, 5*" "))
# print("[%s], [%s]" % (resultStr, statusStr))
# sys.stdout.write("\r[%s]" % (resultStr))
# sys.stdout.write("[%s]\n" % (resultStr))
#sys.stdout.write("[%s] %s\n" % (resultStr, statusStr))
# print(tabulate(resultTab, headerTab, tablefmt="pretty"))
time.sleep(0.5)
    # revert environment variables
resetEnvVariable()
print("Press any key to quit.\n")
readchar.readkey()
def tests(lstPackage, mainCfg):
colorama.init()
# print("Test all pkgs")
# print(lstPackage)
# debug(lstPackage, mainCfg)
backUpEnvVariable()
setEnvVariable(mainCfg.co_env)
kpt = KeyPressThread.KeyboardThread()
task_lst = [CTestPkgThread(pckg, mainCfg, kpt) for pckg in lstPackage]
numTasks = len(task_lst)
headerTab = [
"Name",
"Step",
"Status",
"Test Status",
"Noncovered line",
"Static err",
"Complexity err"]
resultTabRef = []
while(True):
numOfTerminated = 0
resultTab = []
for task in task_lst:
result = [
task.name,
task.str_step,
task.str_status,
task.str_testStatus,
task.str_uncoverage,
task.str_analysis,
task.str_complexity]
resultTab.append(result)
if "Terminated" in task.str_status:
numOfTerminated += 1
if numOfTerminated == numTasks:
break
bRewrite = False
if not resultTabRef:
bRewrite = True
else:
bIsSame = True
for idx in range(len(resultTab)):
for idy in range(len(resultTab[0])):
bIsSame *= (resultTab[idx][idy] == resultTabRef[idx][idy])
if not bIsSame:
bRewrite = True
if bRewrite:
resultTabRef = resultTab
tdd_support.clear()
print(tabulate(resultTab, headerTab, tablefmt="pretty"))
# print(tabulate(resultTab))
time.sleep(1)
resultTab = []
for task in task_lst:
result = [
task.name,
task.str_step,
task.str_status,
task.str_testStatus,
task.str_uncoverage,
task.str_analysis,
task.str_complexity]
resultTab.append(result)
tdd_support.clear()
print(tabulate(resultTab, headerTab, tablefmt="pretty"))
    # revert environment variables
resetEnvVariable()
print("Press any key to quit.\n")
readchar.readkey()
# keyboard.read_key()
class CTestPkg():
str_step: str
str_status: str
str_testBinName: str
str_uncoverage: str
str_testStatus: str
str_analysis: str
str_complexity: str
path_buildFldr: Path
path_TpkgRoot: Path
str_srcFldr: str
str_TpkgRoot: str
tCfg: TDDConfig.CTestConfig
b_silent: bool
str_cmakeName: str
b_infiniteRun: bool
LS_chckLFile: [str]
LS_srcL: [str]
LS_dstL: [str]
dic_chckFiles: {}
IncMockLst: []
def __init__(self, name, mainCfg, kpt):
# super(CTestPkg, self).__init__(name=name)
self.mCfg = mainCfg
self.name = name
self.str_testBinName = CS.getTestBinaryName()
self.str_step = "Created"
self.str_status = "Ready"
self.str_uncoverage = "Empty"
self.str_testStatus = "Empty"
self.str_analysis = "Empty"
self.str_complexity = "Empty"
self.path_buildFldr = Path("")
self.path_TpkgRoot = Path(self.mCfg.co_pkg.str_testpath) / self.name
self.str_TpkgRoot = str(self.path_TpkgRoot)
self.str_srcFldr = ""
self.tCfg = TDDConfig.CTestConfig()
self.b_silent = False
self.str_testType = "single"
self.str_cmakeName = ""
self.b_infiniteRun = True
self.LS_chckLFile = []
self.LS_srcL = []
self.LS_dstL = []
self.dic_chckFiles = {}
self.thread_keyPress = kpt
self.IncMockLst = []
self.DicMock = {}
        self.DicCppMock = {}
self.dic_chckMockFiles = {}
self.dic_chckCppMockFiles = {}
# self.start()
def __getBuildFolderName__(self):
return self.tCfg.co_testToolchain.str_compiler + self.mCfg.co_pkg.str_buildsuffix + "_" + self.str_testType
def __getBuildFolderPath__(self):
return self.path_TpkgRoot / self.__getBuildFolderName__()
def __readInit__(self):
self.__writeStep__("Read inifile")
iniFile = self.path_TpkgRoot / self.mCfg.co_pkg.str_testcfgfilename
self.tCfg.readCfgFile(str(iniFile))
pass
def __createCmake__(self):
self.__writeStep__("Gen CMake")
# self.str_cmakeName = tdd_support.getCompilerNameInTestConfig(self.tCfg)
self.str_cmakeName = self.tCfg.co_testToolchain.str_compiler
self.str_cmakeName += "_" + self.str_testType
self.str_cmakeName += ".cmake"
path_cmakelist = Path(self.mCfg.co_pkg.str_testpath) / \
self.name / self.str_cmakeName
str_cmakelist = str(path_cmakelist)
if path_cmakelist.is_file():
try:
path_cmakelist.unlink()
except BaseException:
print('Error: removing CMakeLists.txt failed.')
self.__writeStep__("Creating CMakeLists")
cmakeGen = CS.CCMakeGenerator(str_cmakelist, self.str_testType,self.mCfg, self.tCfg )
cmakeGen.generate()
pass
def __fileCopying__(self):
self.__writeStep__("Copy files")
# copy all files and create lists
self.LS_srcL, self.LS_dstL, self.LS_chckLFile, self.DicMock, self.DicCppMock, self.IncMockLst = tdd_support.processAllFilesAndReturnListOfThem(
self.name, self.mCfg, self.tCfg, self.str_testType)
# create dictionary key is chckLFile, value status_time
self.dic_chckFiles = {
chckF: os.stat(chckF).st_mtime for chckF in self.LS_chckLFile}
self.dic_chckMockFiles = {chckF: os.stat(chckF).st_mtime for chckF in self.DicMock}
self.dic_chckCppMockFiles = {chckF: os.stat(chckF).st_mtime for chckF in self.DicCppMock}
# self.str_srcFldr
self.str_srcFldr = tdd_support.getSrcTestTempFolderName(
self.tCfg, self.mCfg, self.str_testType)
self.path_buildFldr = self.__getBuildFolderPath__()
removeDirectory(self.path_buildFldr)
self.path_buildFldr.mkdir()
pass
def __fileCopyingUpdatedOnly__(self):
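        # Re-copy only the files whose modification time changed since the last
        # snapshot, then refresh the stored timestamps.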
locdic_chckFiles = {
chckF: os.stat(chckF).st_mtime for chckF in self.LS_chckLFile}
for id_file, str_file in enumerate(self.LS_chckLFile):
if locdic_chckFiles.get(str_file) != self.dic_chckFiles.get(str_file):
# check list could be bigger than source and dst list.
if id_file < len(self.LS_dstL):
Path(self.LS_dstL[id_file]).write_text(
Path(self.LS_srcL[id_file]).read_text())
self.dic_chckFiles = locdic_chckFiles
def __cmake__(self):
self.__writeStep__("CMake")
op_cmakeLst = []
op_cmakeLst.append("cmake")
# root folder (position of cmakefile)
op_cmakeLst.append("-S")
op_cmakeLst.append(self.str_TpkgRoot)
op_cmakeLst.append("-B")
op_cmakeLst.append(str(self.path_buildFldr))
op_cmakeLst.append("-G")
op_cmakeLst.append(CS.getGeneratorName(self.tCfg))
op_cmakeLst.append("-DTDD_FRAMEWORK_ROOT_DIR=%s" % str(Path.cwd()))
op_cmakeLst.append("-DCMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON")
if True == self.tCfg.co_debugconfig.isDebugConfigOn:
op_cmakeLst.append('-DCMAKE_BUILD_TYPE=Debug')
op_cmakeLst.append("-DCMAKELISTS_NAME=" + self.str_cmakeName)
if self.b_silent:
op_cmakeLst.append(">")
op_cmakeLst.append(str(self.path_buildFldr / "cmake.out"))
op_cmakeLst.append("2>")
op_cmakeLst.append(str(self.path_buildFldr / "cmake.err"))
# print(op_cmakeLst)
subprocess.call(op_cmakeLst, shell=True)
def __make__(self):
self.__writeStep__("Makefile")
testAppPath = self.path_buildFldr / self.str_testBinName
if testAppPath.is_file():
try:
testAppPath.unlink()
except BaseException:
with open("debug.log", "a") as log:
log.write("BaseException when trying to delete this file: "
                        + str(testAppPath) + "\n")
pass
op_makeLst = []
op_makeLst.append(CS.getMaketoolName(self.tCfg))
op_makeLst.append('-C')
op_makeLst.append(str(self.path_buildFldr))
if self.b_silent:
op_makeLst.append('>')
op_makeLst.append(str(self.path_buildFldr / "makefile.out"))
op_makeLst.append('2>')
op_makeLst.append(str(self.path_buildFldr / "makefile.err"))
subprocess.call(op_makeLst, shell=True)
if not (self.path_buildFldr / self.str_testBinName).is_file():
self.__writeStatus__("Terminated")
self.str_testStatus = "Fail"
return(False)
return(True)
def __runTestBin__(self):
bRetVal = True
testAppPath = str(self.path_buildFldr / self.str_testBinName)
outF = str(self.path_buildFldr / "testbin.out")
errF = str(self.path_buildFldr / "testbin.err")
self.__writeStep__("Test")
op_testRunLst = []
op_testRunLst.append(testAppPath)
if self.b_silent:
op_testRunLst.append(">")
op_testRunLst.append(outF)
op_testRunLst.append("2>")
op_testRunLst.append(errF)
else:
op_testRunLst.append("-v")
op_testRunLst.append("-c")
print(10*'-' + '< ' + self.name + ' >' + 10*'-' + '\n')
intRetVal = subprocess.call(op_testRunLst, shell=True)
if self.b_silent:
testResult = 0
if intRetVal <= 1 :
testResult = tdd_support.interpretCPPUTESToutput(outF)
if testResult == True:
self.str_testStatus = Fore.GREEN + "Pass" + Style.RESET_ALL
else:
self.str_testStatus = Fore.RED + "Fail" + Style.RESET_ALL
bRetVal = False
else:
if not ( intRetVal in range(0,100) ):
print(Fore.RED + '\nSomething is rotten in (Denmark) that code.')
print('Test application terminate with this error: %i' % intRetVal)
print(Style.RESET_ALL)
return(bRetVal)
def __coverage__(self, sutList: [str], silent=True):
# if tdd_support.isCoverageEnabled(self.tCfg):
if self.tCfg.co_coverage.isTurnedOn:
self.__writeStep__("Coverage")
cover_out = str(self.path_buildFldr / "coverage.out")
cover_err = str(self.path_buildFldr / "coverage.err")
covCmdLst = []
# gcovr --object-directory CMakeFiles\TestApp.dir\mingw_tmp_single -r ..\mingw_tmp_single\ -f "\.\./mingw_tmp_single/Calculator.cpp" -b --txt cov_vypis.txt --html cov\cov_html.html --html-details cov\coverage_details.html
covCmdLst.append("gcov")
covCmdLst.append("--object-directory")
covCmdLst.append(
str(Path("CMakeFiles") / "TestApp.dir" / self.str_srcFldr))
for sutCovListItem in sutList:
covCmdLst.append(str(Path(self.str_TpkgRoot, sutCovListItem)))
covCmdLst.append(">")
covCmdLst.append("coverage.out")
covCmdLst.append("2>")
covCmdLst.append("coverage.err")
# print(covCmdLst)
subprocess.call(covCmdLst, shell=True, cwd=self.path_buildFldr)
lst_file, dict_covFile = tdd_support.interpretGCOV2lists(
cover_out, self.path_buildFldr)
if not lst_file:
self.str_uncoverage = Fore.RED + "Fail" + Style.RESET_ALL
if not self.b_silent:
print(Fore.RED + "Coverage evaluation failed!" + Style.RESET_ALL)
return
# self.str_uncoverage = ""
listLines = []
# sutFileNames = [sutFileName.split(
# "\\")[-1] for sutFileName in sutList]
sutFileNames = [str(Path(sutFileName).name)
for sutFileName in sutList]
isCoverageProblem = False
for key in dict_covFile:
# if not key.contain(".hpp."):
# if dict_covFile[key] :
# print( key,"Uncovered lines: ", dict_covFile[key])
if any(sfname in key for sfname in sutFileNames):
cntUncov = len(set(dict_covFile[key]))
if cntUncov:
str_uncov = Fore.RED
isCoverageProblem = True
else:
str_uncov = Fore.GREEN
str_uncov += str(cntUncov) + Style.RESET_ALL
listLines.append(str_uncov)
# else :
# listLines.append("0")
self.str_uncoverage = ','.join(listLines)
if not self.b_silent:
print("Non covered lines: " + self.str_uncoverage)
if isCoverageProblem:
pass
# This is probably obsolete, but with coverage can be trouble with different behavior for c and c++
# print(
# "Some lines could be duplicite"
# + " because c++ create multiple implementation of functions")
for file in dict_covFile:
lineLst = list(set(dict_covFile[file]))
lineLst.sort()
# res = []
# [res.append(x) for x in lineLst if x not in res]
# lenOfLineLst = len(res)
lenOfLineLst = len(lineLst)
if lenOfLineLst != 0:
# strLineLst = ", ".join(res)
strLineLst = ", ".join(lineLst)
print(Fore.LIGHTRED_EX + '.'.join(file.split(".")[
:-1]) + " [ " + strLineLst + "]" + Style.RESET_ALL)
else:
self.str_uncoverage = Fore.YELLOW + "OFF" + Style.RESET_ALL
def __staticCheck__(self, sutList: [str]):
if self.tCfg.co_staticAnalysis.isTurnedOn:
self.__writeStep__("StaticCheck")
op_lst = []
op_lst.append("cppcheck")
op_lst.append("--enable=all")
op_lst.append("--inconclusive")
op_lst.append("--library=posix")
if not self.tCfg.co_staticAnalysis.isLanguageDefinedBySuffix:
op_lst.append("--language="
+ self.tCfg.co_staticAnalysis.str_ForcedLang)
op_lst.append("--std=" + self.tCfg.co_staticAnalysis.str_c_version)
op_lst.append(
"--std=" + self.tCfg.co_staticAnalysis.str_cpp_version)
for supp in self.tCfg.co_staticAnalysis.suppressionLst:
op_lst.append("--suppress=" + supp)
# TODO add switch for turning all to C++ file. Configurable from test.ini
# add c++ version standard
# op_statCheck += "--std=c++11 "
check_out = str(self.path_buildFldr / "cppcheck.out")
check_err = str(self.path_buildFldr / "cppcheck.err")
for sutListItem in sutList:
op_lst.append(str(self.path_buildFldr / sutListItem))
op_lst.append("-I")
op_lst.append(str(self.path_buildFldr / ".." / self.str_srcFldr))
op_lst.append(">")
op_lst.append(check_out)
op_lst.append("2>")
op_lst.append(check_err)
# print(op_lst)
subprocess.call(op_lst, shell=True)
numOfError = tdd_support.interpretCPPCHECKerrors(check_err)
if numOfError != 0:
self.str_analysis = Fore.RED
else:
self.str_analysis = Fore.GREEN
self.str_analysis += str(numOfError) + Style.RESET_ALL
if not self.b_silent:
print("Number of static check errors: ", self.str_analysis)
if numOfError:
numL = 40
print(numL*'-' + '\n')
with open(check_err, 'r') as fin:
print(fin.read())
print(numL*'-')
else:
self.str_analysis = Fore.YELLOW + "OFF" + Style.RESET_ALL
def __codeAnalysis__(self, sutList: [str]):
if self.tCfg.co_codeStatistics.isTurnedOn:
self.__writeStep__("Analysis")
lizardCsv = str(self.path_buildFldr / "lizard.csv")
lizard_out = str(self.path_buildFldr / "lizard.out")
lizard_err = str(self.path_buildFldr / "lizard.err")
# choose used parameters
if self.tCfg.co_codeStatistics.isUsedTestSpecificOnly == True:
int_McCabeCompl = self.tCfg.co_codeStatistics.int_mccabeComplex
int_FncLen = self.tCfg.co_codeStatistics.int_fncLength
int_ParCnt = self.tCfg.co_codeStatistics.int_paramCnt
else:
if self.tCfg.co_codeStatistics.isUsedStricter == True:
int_McCabeCompl = min(self.tCfg.co_codeStatistics.int_mccabeComplex, self.mCfg.co_stat.int_mccabeComplex)
int_FncLen = min(self.tCfg.co_codeStatistics.int_fncLength, self.mCfg.co_stat.int_fncLength)
int_ParCnt = min(self.tCfg.co_codeStatistics.int_paramCnt, self.mCfg.co_stat.int_paramCnt)
else:
int_McCabeCompl = self.mCfg.co_stat.int_mccabeComplex
int_FncLen = self.mCfg.co_stat.int_fncLength
int_ParCnt = self.mCfg.co_stat.int_paramCnt
op_lst = []
op_lst.append('lizard')
for sutListItem in sutList:
op_lst.append(str(self.path_buildFldr / sutListItem))
op_lst.append("-C")
op_lst.append(str(int_McCabeCompl))
op_lst.append("-L")
op_lst.append(str(int_FncLen))
op_lst.append("-a")
op_lst.append(str(int_ParCnt))
op_lst.append("-o")
op_lst.append(lizardCsv)
op_lst.append(">")
op_lst.append(lizard_out)
op_lst.append("2>")
op_lst.append(lizard_err)
subprocess.call(op_lst, shell=True)
errTab = CodeStatistics.interpretLIZARDoutfile(
lizardCsv, int_McCabeCompl, int_ParCnt, int_FncLen)
cntError = len(errTab)
if cntError:
self.str_complexity = Fore.RED
else:
self.str_complexity = Fore.GREEN
self.str_complexity += str(cntError) + Style.RESET_ALL
if not self.b_silent:
print("Number of error functions: ", self.str_complexity)
if cntError:
CodeStatistics.printLIZARDerrArrayShortAndColor(errTab,
int_McCabeCompl, int_ParCnt, int_FncLen)
else:
self.str_complexity = Fore.YELLOW + "OFF" + Style.RESET_ALL
def __codeEvaluation__(self):
sutList = tdd_support.createSutList(
self.tCfg, self.mCfg, self.str_testType)
self.__coverage__(sutList)
self.__staticCheck__(sutList)
self.__codeAnalysis__(sutList)
def __writeStatus__(self, status: str):
lStatus = status
colDict = {"Start": Fore.YELLOW, "Run": Fore.GREEN, "Idle": Fore.MAGENTA,
"Error": Fore.RED, "Failure": Fore.RED, "Terminated": Fore.LIGHTYELLOW_EX}
if lStatus in colDict:
lStatus = colDict.get(status) + lStatus + Style.RESET_ALL
else:
lStatus = Fore.CYAN + lStatus + Style.RESET_ALL
self.str_status = lStatus
def __writeStep__(self, step: str):
self.str_step = step
if not self.b_silent:
print("\n" + Fore.YELLOW + self.str_step + Style.RESET_ALL)
pass
def __checkExternalTerminationCondition__(self):
if self.thread_keyPress.isAnyKeyPressed():
return(False)
else:
return(True)
def __checkIniFileChanged__(self):
locdic_chckFiles = {
chckF: os.stat(chckF).st_mtime for chckF in self.LS_chckLFile}
if locdic_chckFiles.get(self.LS_chckLFile[-1]) == self.dic_chckFiles.get(self.LS_chckLFile[-1]):
return(False)
else:
return(True)
def __cleanStatusVariables__(self):
self.str_uncoverage = "Empty"
self.str_testStatus = "Empty"
self.str_analysis = "Empty"
self.str_complexity = "Empty"
def __cleanTmpSource__(self):
path_srcFldr = Path(self.mCfg.co_pkg.str_testpath) / self.name / tdd_support.getSrcTestTempFolderName(
self.tCfg, self.mCfg, self.str_testType)
removeDirectory(path_srcFldr)
def __checkSrcFileChanged__(self):
locdic_chckFiles = {
chckF: os.stat(chckF).st_mtime for chckF in self.LS_chckLFile}
for str_file in self.LS_chckLFile:
if locdic_chckFiles.get(str_file) != self.dic_chckFiles.get(str_file):
return(True)
return(False)
def __cleanScreenBeforeRerun__(self):
if not self.b_silent:
tdd_support.clear()
def __automocks__(self):
pass
def __updateAutomocks__(self):
# self.dic_chckMockFiles (check and process)
self.dic_chckMockFiles = self.checkAndUpdateCheckMockDictionary(
self.dic_chckMockFiles,
self.DicMock,
self.IncMockLst,
False)
# self.dic_chckCppMockFiles (check and process)
self.dic_chckCppMockFiles = self.checkAndUpdateCheckMockDictionary(
self.dic_chckCppMockFiles,
self.DicCppMock,
self.IncMockLst,
True)
def checkAndUpdateCheckMockDictionary(self, dic_chck, dic_mock, lst_mockInc, fCpp):
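        # Compare the stored modification times with the current ones and
        # regenerate mocks for any header that changed; returns the new snapshot.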
hMockLst = list(dic_chck.keys())
hMockLst.sort()
locdic_chckFiles = {
chckF: os.stat(chckF).st_mtime for chckF in hMockLst}
for key in dic_chck:
if locdic_chckFiles.get(key) != dic_chck.get(key):
                # NOTE: copying the header file is not necessary here; it already happened in the loop above
#create new mock
print({key:dic_mock.get(key)})
tdd_support.createAutomocks({key:dic_mock.get(key)},
lst_mockInc,
forcedCpp=fCpp)
return locdic_chckFiles
def __runTest__(self):
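        # One configure/build/test/evaluate cycle, then watch the sources until
        # a key press, an ini change or a source change ends the wait loop.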
b_buildStatus = False
self.__writeStep__("Start")
self.__writeStatus__("Run")
self.__readInit__()
self.__createCmake__()
self.__cleanTmpSource__()
self.__fileCopying__()
self.__automocks__()
self.__cmake__()
while True:
b_buildStatus = self.__make__()
if b_buildStatus is True:
b_buildStatus = self.__runTestBin__()
if b_buildStatus is True:
self.__codeEvaluation__()
self.__writeStatus__("Idle")
else:
self.__writeStatus__("Error")
self.__writeStep__("Finished")
while True:
time.sleep(1)
self.b_infiniteRun = self.__checkExternalTerminationCondition__()
if not self.b_infiniteRun:
return
if self.__checkIniFileChanged__():
self.__cleanStatusVariables__()
return
if self.__checkSrcFileChanged__():
self.__cleanStatusVariables__()
self.__fileCopyingUpdatedOnly__()
self.__updateAutomocks__()
self.__cleanScreenBeforeRerun__()
break
def run(self):
while self.b_infiniteRun:
self.__runTest__()
self.__writeStep__("Finished")
self.__writeStatus__("Terminated")
# print("\n",self.name, " Finished!")
class CTestPkgThread(CTestPkg, threading.Thread):
def __init__(self, name, mainCfg, kpt):
threading.Thread.__init__(self, name=name)
CTestPkg.__init__(self, name=name, mainCfg=mainCfg, kpt=kpt)
self.b_silent = True
self.str_testType = "summary"
self.start()
class CTestPkgThreadMinimal(CTestPkg, threading.Thread):
def __init__(self, name, mainCfg, kpt):
threading.Thread.__init__(self, name=name)
CTestPkg.__init__(self, name=name, mainCfg=mainCfg, kpt=kpt)
self.b_silent = True
self.str_testType = "minimal"
self.start()
if __name__ == "__main__":
mainConfig = TDDConfig.CMainConfig("project.ini")
os.environ['PATH'] += ";" + mainConfig.co_env.str_cmake
os.environ['PATH'] += ";" + mainConfig.co_env.str_mingw
os.environ['PATH'] += ";" + mainConfig.co_env.str_cppcheck
scrP = "C:\\Users\\z003ukaz\\AppData\\Local"
scrP += "\\Programs\\Python\\Python39\\Scripts"
os.environ['PATH'] += ";" + scrP
str_testPackagesPath = mainConfig.co_pkg.str_testpath
str_testPackageSuffix = mainConfig.co_pkg.str_testfldr_suffix
len_tpkgS = len(str_testPackageSuffix)
path_dir = Path(str_testPackagesPath).glob("*_Tpkg")
dir_lst = [folder for folder in path_dir if folder.is_dir()]
lst_testDir = []
for dir in dir_lst:
strdir = str(dir.stem)
if (strdir[-len_tpkgS:] == str_testPackageSuffix):
lst_testDir.append(strdir)
    debug(lst_testDir, mainConfig)
| [] | [] | [
"PATH"
] | [] | ["PATH"] | python | 1 | 0 | |
parse.go | // parse.go
//
// Copyright © 2018 by Ollivier Robert <[email protected]>
package proxy // import "github.com/keltia/proxy"
import (
"bufio"
"crypto/tls"
"encoding/base64"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"strings"
"golang.org/x/net/http/httpproxy"
)
const (
proxyTag = "proxy"
// MyVersion is our API Version
MyVersion = "0.9.5"
// MyName is the library name
MyName = "proxy"
)
var (
ctx Context
)
type Context struct {
proxyauth string
level int
Log *log.Logger
}
func init() {
// Default is stderr
ctx = Context{Log: log.New(os.Stderr, "", log.LstdFlags)}
}
// ErrNoAuth is just to say we do not use auth for proxy
var ErrNoAuth = fmt.Errorf("no proxy auth")
func SetupProxyAuth() (proxyauth string, err error) {
// Try to load $HOME/.netrc or file pointed at by $NETRC
user, password := loadNetrc()
if user != "" {
verbose("Proxy user %s found.", user)
}
err = ErrNoAuth
// Do we have a proxy user/password?
if user != "" && password != "" {
auth := fmt.Sprintf("%s:%s", user, password)
proxyauth = "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
err = nil
// Store copy in context
ctx.proxyauth = proxyauth
}
return
}
// SetLevel change the log level (currently 0, 1 or 2)
func SetLevel(level int) {
ctx.level = level
debug("logging level set to %d", level)
}
// SetLog allow to change the default logger
func SetLog(l *log.Logger) {
ctx.Log = l
debug("logging logger set to %v", l)
}
// GetAuth returns the proxyauth encoded string
func GetAuth() string {
debug("returns cached credentials")
return ctx.proxyauth
}
// SetupTransport is the way to have a custom http client
func SetupTransport(str string) (*http.Request, *http.Transport) {
/*
Proxy code taken from https://github.com/LeoCBS/poc-proxy-https/blob/master/main.go
*/
myurl, err := url.Parse(str)
if err != nil {
log.Printf("error parsing %s: %v", str, err)
return nil, nil
}
req, err := http.NewRequest("GET", str, nil)
if err != nil {
debug("error: req is nil: %v", err)
return nil, nil
}
req.Header.Set("Host", myurl.Host)
req.Header.Add("User-Agent", fmt.Sprintf("%s/%s", MyName, MyVersion))
// Get proxy URL
proxyURL := getProxy(req)
if ctx.proxyauth != "" {
req.Header.Add("Proxy-Authorization", ctx.proxyauth)
}
transport := &http.Transport{
Proxy: http.ProxyURL(proxyURL),
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
ProxyConnectHeader: req.Header,
}
debug("transport=%#v", transport)
return req, transport
}
// Version should be obvious
func Version() string {
return MyVersion
}
// Private functions
func getProxy(req *http.Request) (uri *url.URL) {
config := httpproxy.FromEnvironment()
f := config.ProxyFunc()
uri, _ = f(req.URL)
return
}
// loadNetrc supports a subset of the original ftp(1) .netrc file.
/*
We support:
machine
default
login
password
Format:
machine <host> login <user> password <pass>
*/
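// Example entry (hypothetical values): machine proxy login alice password s3cret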
func loadNetrc() (user, password string) {
var dnetrc string
// is $NETRC defined?
dnetVar := os.Getenv("NETRC")
// Allow override
if dnetVar == "" {
dnetrc = netrcFile
} else {
dnetrc = dnetVar
}
if dnetrc == "ignore" {
return "", ""
}
verbose("NETRC=%s", dnetrc)
// First check for permissions
fh, err := os.Open(dnetrc)
if err != nil {
verbose("warning: can not find/read %s: %v", dnetrc, err)
return "", ""
}
defer fh.Close()
// Now check permissions
st, err := fh.Stat()
if err != nil {
verbose("unable to stat: %v", err)
return "", ""
}
if (st.Mode() & 077) != 0 {
verbose("invalid permissions, must be 0400/0600")
return "", ""
}
verbose("now parsing")
user, password = parseNetrc(fh)
return
}
/*
Format:
machine proxy|default login <user> password <pass>
*/
func parseNetrc(r io.Reader) (user, password string) {
verbose("found netrc")
s := bufio.NewScanner(r)
for s.Scan() {
line := s.Text()
if line == "" {
break
}
flds := strings.Split(line, " ")
debug("%s: %d fields", line, len(flds))
if flds[0] != "machine" {
verbose("machine is not the first word")
continue
}
// Check what we need
if len(flds) != 6 {
verbose("bad format")
continue
}
if flds[1] == proxyTag || flds[1] == "default" {
if flds[2] == "login" && flds[4] == "password" {
user = flds[3]
password = flds[5]
verbose("got %s/default entry for user %s", proxyTag, user)
}
break
}
}
if err := s.Err(); err != nil {
verbose("error reading netrc: %v", err)
return "", ""
}
debug("nothing found for %s", proxyTag)
if user == "" {
verbose("no user/password for %s/default in netrc", proxyTag)
}
return
}
| [
"\"NETRC\""
] | [] | [
"NETRC"
] | [] | ["NETRC"] | go | 1 | 0 | |
python/paddle/fluid/tests/unittests/test_fleet_lamb_meta_optimizer.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
from paddle import fluid
import os
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
class TestFleetLambMetaOptimizer(unittest.TestCase):
def setUp(self):
os.environ["PADDLE_TRAINER_ID"] = "1"
os.environ[
"PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002"
def net(self, main_prog, startup_prog):
with fluid.program_guard(main_prog, startup_prog):
with fluid.unique_name.guard():
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32')
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64')
fc_1 = paddle.fluid.layers.fc(input=input_x,
size=64,
act='tanh')
fc_2 = paddle.fluid.layers.fc(input=fc_1, size=256, act='tanh')
prediction = paddle.fluid.layers.fc(input=[fc_2],
size=2,
act='softmax')
cost = paddle.fluid.layers.cross_entropy(
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.lamb = True
strategy.lamb_configs = {
'lamb_weight_decay': 0.01,
'exclude_from_weight_decay': [],
}
return avg_cost, strategy
def test_lamb_optimizer(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
startup_prog = fluid.Program()
train_prog = fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
optimizer = paddle.fluid.optimizer.Adam(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)
ops = [op.type for op in avg_cost.block.ops]
self.assertIn('lamb', ops)
def test_lamb_not_apply_with_momentum(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
startup_prog = fluid.Program()
train_prog = fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
optimizer = paddle.fluid.optimizer.Momentum(
learning_rate=0.1, momentum=0.9)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)
ops = [op.type for op in avg_cost.block.ops]
self.assertNotIn('lamb', ops)
def test_lamb_exclude_fn(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
startup_prog = fluid.Program()
train_prog = fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
optimizer = paddle.fluid.optimizer.Adam(learning_rate=0.01)
strategy.lamb_configs = {
'lamb_weight_decay': 0.01,
'exclude_from_weight_decay': ['.b_0'],
}
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)
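        # Parameters matched by exclude_from_weight_decay (the '.b_0' biases)
        # should end up with a zero weight_decay attribute on their lamb op.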
ops_without_wd = [
op for op in avg_cost.block.ops
if op.type == 'lamb' and op.attr('op_role_var')[0].endswith('.b_0')
]
for op in ops_without_wd:
self.assertEqual(op.attr('weight_decay'), 0)
def test_lamb_apply_with_amp(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32')
input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64')
fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh')
fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh')
prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax')
cost = paddle.fluid.layers.cross_entropy(
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.amp = True
strategy.amp_configs = {
"init_loss_scaling": 32768,
"decr_every_n_nan_or_inf": 2,
"incr_every_n_steps": 1000,
"incr_ratio": 2.0,
"use_dynamic_loss_scaling": True,
"decr_ratio": 0.5,
"custom_white_list": ['softmax'],
"custom_black_list": ['tanh'],
}
strategy.lamb = True
strategy.lamb_configs = {
'lamb_weight_decay': 0.01,
'exclude_from_weight_decay': [],
}
optimizer = paddle.fluid.optimizer.Adam(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)
ops = [op.type for op in avg_cost.block.ops]
self.assertIn('lamb', ops)
self.assertIn('cast', ops)
self.assertIn('isfinite', ops)
if __name__ == "__main__":
unittest.main()
| [] | [] | [
"PADDLE_TRAINER_ID",
"PADDLE_TRAINER_ENDPOINTS"
] | [] | ["PADDLE_TRAINER_ID", "PADDLE_TRAINER_ENDPOINTS"] | python | 2 | 0 | |
main.go | /*
Copyright 2019 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"os"
"cloud.google.com/go/compute/metadata"
"github.com/GoogleCloudPlatform/gke-autoneg-controller/controllers"
"google.golang.org/api/compute/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = corev1.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.Parse()
ctrl.SetLogger(zap.Logger(true))
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s, err := compute.NewService(ctx)
if err != nil {
setupLog.Error(err, "can't request Google compute service")
os.Exit(1)
}
project := getProject()
if project == "" {
setupLog.Error(err, "can't determine project ID")
os.Exit(1)
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
LeaderElection: enableLeaderElection,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
if err = (&controllers.ServiceReconciler{
Client: mgr.GetClient(),
BackendController: controllers.NewBackendController(project, s),
Recorder: mgr.GetEventRecorderFor("autoneg-controller"),
Log: ctrl.Log.WithName("controllers").WithName("Service"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Service")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
func getProject() string {
// probe metadata service for project, or fall back to PROJECT_ID in environment
p, err := metadata.ProjectID()
if err == nil {
return p
}
return os.Getenv("PROJECT_ID")
}
| [
"\"PROJECT_ID\""
] | [] | [
"PROJECT_ID"
] | [] | ["PROJECT_ID"] | go | 1 | 0 | |
code/SSGP_classifier.py | import os
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import tensorflow as tf
import common_funcs
from common_funcs import FLOAT_TYPE
import data_loader
from sklearn.cluster import KMeans
import time
import joblib as jb
# Streaming Sparse Gaussian Tensor Decomposition
# By ZMP
import sys
#run as
print("usage : python *.py rank=3 batch_size=256 dataset=dblp")
print('start')
print( sys.argv)
#parse args
py_name = sys.argv[0]
args = sys.argv[1:]
args_dict = {}
for arg_pair in args:
arg, val_str = arg_pair.split( '=')
args_dict[ arg] = val_str.strip()
arg_rank = int( args_dict['rank'])
arg_data_name = args_dict['dataset']
arg_batch_size = int( args_dict['batch_size'])
class SSGP_CLF:
def __init__(self, init_config):
#Model configuration parameters
self.num_pseudo_points = init_config['num_pseudo_points']
self.rank = init_config['rank']
self.init_method = init_config['init_method']
self.elem_sizes = init_config['elem_sizes'] # list, number of elements( users, items, ...)
self.learning_rate = init_config['learning_rate']
self.N_data_points = init_config['N_data_points']
if 'saved_model' in init_config:
saved_model = init_config['saved_model']
self.init_mu_U = saved_model['mu_U']
self.init_std_vec_U = saved_model['std_vec_U']
self.fix_U = True
else:
self.fix_U = False
self.num_mods = len( self.elem_sizes)
self.num_factors = np.sum( self.elem_sizes)
self.rank_psd_input = self.num_mods * self.rank # Will be different if use neural kernel
self.tf_initializer = common_funcs.get_initializer(self.init_method, args = None)
#Parameters
self.PARAS_SCOPE_NAME = 'PARAS'
with tf.variable_scope( self.PARAS_SCOPE_NAME):
if self.fix_U:
self.tf_mu_U = [ tf.constant( self.init_mu_U[i], dtype = FLOAT_TYPE) for i in range( self.num_mods)]
self.tf_std_vec_U = [ tf.constant( self.init_std_vec_U[i], dtype=FLOAT_TYPE) for i in range( self.num_mods)]
else:
#Embeddings initialized by default initlizer
self.tf_mu_U = [tf.Variable(np.random.randn( num_elem, self.rank) * 1.0, dtype=FLOAT_TYPE) for num_elem in self.elem_sizes]
self.tf_std_vec_U = [ tf.Variable( np.ones( shape = [ num_elem, self.rank]) * 0.1, dtype=FLOAT_TYPE) for num_elem in self.elem_sizes] #var = diag( std * std )
self.B_init_holder = tf.placeholder( dtype=FLOAT_TYPE, shape=[ self.num_pseudo_points, self.rank_psd_input])
self.tf_B = tf.Variable( initial_value=self.B_init_holder)
self.tf_post_mu_b = tf.Variable(tf.random.normal( shape = [self.num_pseudo_points, 1], dtype=FLOAT_TYPE), dtype=FLOAT_TYPE)
self.tf_post_Ltrig_b = tf.linalg.band_part(tf.Variable(np.eye( self.num_pseudo_points), dtype=FLOAT_TYPE), -1, 0)
#Kernel parameters. ARD
self.tf_log_lengthscale = tf.Variable(np.zeros(shape = [self.rank_psd_input, 1]), dtype=FLOAT_TYPE)
self.tf_log_amp = tf.Variable(0.0, dtype=FLOAT_TYPE)
#Place holders
self.batch_inds = tf.placeholder(dtype=tf.int32, shape=[None, self.num_mods])
self.batch_rates = tf.placeholder(dtype=FLOAT_TYPE, shape=[None, ])
self.batch_uniq_fac_inds = [tf.placeholder( dtype=tf.int32,shape= [None,] ) for _ in range( self.num_mods)]
#Old values. Be aware, Complicated logic here. Becareful to modify.
self.mu_b_old_ori = tf.Variable( np.zeros( shape=[self.num_pseudo_points,1]), dtype=FLOAT_TYPE)
self.mu_b_old = tf.stop_gradient(self.mu_b_old_ori )
self.Ltrig_b_old_ori_init_holder = tf.placeholder( dtype=FLOAT_TYPE, shape=[ self.num_pseudo_points, self.num_pseudo_points])
self.Ltrig_b_old_ori = tf.Variable(self.Ltrig_b_old_ori_init_holder , dtype=FLOAT_TYPE)
self.Ltrig_b_old = tf.stop_gradient( self.Ltrig_b_old_ori)
self.Kmm_old_ori = tf.Variable( np.zeros( shape = [ self.num_pseudo_points, self.num_pseudo_points]), dtype=FLOAT_TYPE)
self.Kmm_old = tf.stop_gradient( self.Kmm_old_ori)
self.mu_U_old_ori = [ tf.Variable( np.zeros( shape = [ num_elem, self.rank]), dtype=FLOAT_TYPE) for num_elem in self.elem_sizes]
self.mu_U_old = [ tf.stop_gradient( self.mu_U_old_ori[k]) for k in range( self.num_mods)]
self.std_vec_U_old_ori = [tf.Variable(np.zeros(shape = [num_elem, self.rank]), dtype=FLOAT_TYPE) for num_elem in self.elem_sizes]
self.std_vec_U_old = [tf.stop_gradient( self.std_vec_U_old_ori[k]) for k in range( self.num_mods)]
self.var_normal_params_old_ori = tf.Variable(np.array([0, 1]), dtype=FLOAT_TYPE)
self.var_normal_params_old = tf.stop_gradient(self.var_normal_params_old_ori)
self.assign_old_values_op = [tf.assign( self.mu_b_old_ori, self.tf_post_mu_b), tf.assign( self.Ltrig_b_old_ori, self.tf_post_Ltrig_b)]
self.assign_old_values_op = self.assign_old_values_op + [ tf.assign( self.mu_U_old_ori[k], self.tf_mu_U[k]) for k in range( self.num_mods)] + \
[tf.assign(self.std_vec_U_old_ori[k], self.tf_std_vec_U[k]) for k in range( self.num_mods)]
self.sub_batch_size = self.N_data_points
#sample posterior embeddings
sampled_embeddings, self.batch_mean, self.batch_std_vec = common_funcs.sample_embeddings( self.tf_mu_U, self.tf_std_vec_U, self.batch_inds, return_batch_info= True)
self.sampled_X = tf.concat( sampled_embeddings, axis=1)
'''
Some neural kernel transform here if using neural kernel
'''
self.Kmm = common_funcs.kernel_cross_tf(self.tf_B, self.tf_B, self.tf_log_amp, self.tf_log_lengthscale)# + MATRIX_JITTER * tf.eye( self.num_pseudo_points)
self.Knm = common_funcs.kernel_cross_tf(self.sampled_X, self.tf_B, self.tf_log_amp, self.tf_log_lengthscale)
self.assign_old_values_op.append( tf.assign( self.Kmm_old_ori, self.Kmm))
post_sample_f, f_std = common_funcs.sample_sparse_f( self.tf_post_mu_b, self.tf_post_Ltrig_b, self.Kmm, self.Knm, self.tf_log_amp, return_std=True) #[batch_size, 1]
self.post_sample_f = tf.reshape(post_sample_f, shape=[-1]) # [ batch_size,]
#MLE sample of f. Used in prediction
self.f_mle = tf.reshape( self.Knm @ tf.linalg.solve( self.Kmm, self.tf_post_mu_b), shape=[-1])
self.f_std = tf.reshape( f_std, shape = [-1])
self.data_fidelity = self.sub_batch_size * tf.reduce_mean(- tf.nn.sigmoid_cross_entropy_with_logits( labels=self.batch_rates, logits=self.post_sample_f))
# KL U
# Note this is biased, because uniformly sampling from rating is not equivalent to uniformly sampling from factors
uniq_mu_U = common_funcs.get_uniq_factors(self.tf_mu_U, self.batch_uniq_fac_inds)
uniq_std_vec_U = common_funcs.get_uniq_factors(self.tf_std_vec_U, self.batch_uniq_fac_inds)
uniq_mu_U_old = common_funcs.get_uniq_factors( self.mu_U_old, self.batch_uniq_fac_inds)
uniq_std_vec_U_old = common_funcs.get_uniq_factors( self.std_vec_U_old, self.batch_uniq_fac_inds)
self.batch_KL_U = common_funcs.KL_Gaussian_std_vec_tf(tf.concat(uniq_mu_U, axis=0),
tf.concat(uniq_std_vec_U, axis=0),
tf.concat(uniq_mu_U_old, axis=0),
tf.concat(uniq_std_vec_U_old, axis=0), self.rank)
self.KL_U = self.batch_KL_U
# KL( q(b)|| p(b))
self.KL_q_pb_new = common_funcs.KL_pseudo_output(self.Kmm, self.tf_post_Ltrig_b, self.tf_post_mu_b,
self.num_pseudo_points)
# KL( q(b) || q(b)_old)
self.KL_q_qb_old = common_funcs.KL_Gaussian_Ltrig_tf( self.tf_post_mu_b, self.tf_post_Ltrig_b, self.mu_b_old, self.Ltrig_b_old, self.num_pseudo_points)
# KL ( q(b) || p(b)_old)
self.KL_q_pb_old = common_funcs.KL_pseudo_output( self.Kmm_old, self.tf_post_Ltrig_b, self.tf_post_mu_b,self.num_pseudo_points)
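        # Streaming update: penalise divergence from the previous posterior and
        # correct for the change of pseudo-point prior between batches.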
self.KL_b = self.KL_q_qb_old + self.KL_q_pb_new - self.KL_q_pb_old
# Loss functions
self.ELBO = self.data_fidelity - self.KL_b - self.KL_U
#Session settings
self.min_opt = tf.train.AdamOptimizer(self.learning_rate)
self.min_step = self.min_opt.minimize(- self.ELBO)
self.train_hist = []
# GPU settings
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
#Pre-initialize pseudo input
self.sess.run( tf.global_variables_initializer(), feed_dict={ self.B_init_holder : np.random.randn(self.num_pseudo_points, self.rank_psd_input),
self.Ltrig_b_old_ori_init_holder : np.random.randn( self.num_pseudo_points, self.num_pseudo_points)} )
self.is_B_initialized = False
def _get_init_pseudo_input(self, inds):
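        # Pick the pseudo (inducing) inputs by running K-means on a subsample of
        # the embedded training points and using the cluster centres.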
max_num_input_points = self.num_pseudo_points * 100
if len( inds) > max_num_input_points:
arg_random = np.random.permutation( len( inds))
inds = inds[ arg_random[ : max_num_input_points]]
X = self.sess.run( self.sampled_X, feed_dict={ self.batch_inds : inds})
kmeans = KMeans( n_clusters = self.num_pseudo_points, n_jobs=-1)
_ = kmeans.fit(X)
return kmeans.cluster_centers_
def _fit(self, inds, rates, batch_size, num_iters_per_batch, print_every_by_iters):
num_batches = int( len( inds / batch_size))
self.batch_X_y_gnrt = common_funcs.DataGenerator(inds, rates, shuffle=True)
for n_batch in range( 1, num_batches + 1):
batch_inds, batch_rates = self.batch_X_y_gnrt.draw_next(batch_size)
self.fit_batch( batch_inds, batch_rates, num_iters_per_batch, print_every = print_every_by_iters)
def fit_batch(self, inds, rates, steps, print_every = 100, clean_hist = True, verbose = True ):
start_time = time.time()
if not self.is_B_initialized:
# Initialized model using
print('Re-initializing B using Kmeans')
cluster_centers = self._get_init_pseudo_input( inds)
self.sess.run( self.tf_B.initializer, feed_dict = { self.B_init_holder : cluster_centers})
self.is_B_initialized = True
# update old posteriors and hyper-parameters
_ = self.sess.run(self.assign_old_values_op)
init_Kmm = self.sess.run(self.Kmm)
L = np.linalg.cholesky(init_Kmm)
            self.sess.run(self.Ltrig_b_old_ori.initializer, feed_dict={self.Ltrig_b_old_ori_init_holder: L})  # run the initializer op so L is actually assigned
print("Re-initializing Done")
if clean_hist:
self.train_hist = []
# Get unique inds
uniq_inds = [np.unique(inds[:, k]) for k in range(self.num_mods)]
for step in range( 1, steps + 1):
# Get unique inds
train_feed = {self.batch_inds: inds, self.batch_rates: rates}
for k in range( self.num_mods):
train_feed[ self.batch_uniq_fac_inds[k]] = uniq_inds[k]
ELBO, sampled_f, data_fidelity,KL_U, KL_b, batch_U_mean, batch_U_std_vec, _ = self.sess.run( [
self.ELBO, self.post_sample_f, self.data_fidelity,self.KL_U,self.KL_b, self.batch_mean, self.batch_std_vec, self.min_step], feed_dict= train_feed, options= self.run_options)
self.train_hist.append( ELBO)
if step % print_every == 0 and verbose:
print('true_rates: ', rates[:5])
print('sampled logits: ', sampled_f[:5])
print('sampled rates: ', (sampled_f[:5] >= 0).astype(np.float32))
train_auc = common_funcs.metrics_auc( rates, sampled_f )
print( '\nstep = %d, ELBO = %g, data_fidelity = %g, -KL_U = %g, -KL_b = %g, train_auc = %g' % ( step, ELBO,data_fidelity, -KL_U, -KL_b,train_auc))
# update old posteriors and hyper-parameters
_ = self.sess.run(self.assign_old_values_op)
end_time = time.time()
if verbose:
print( 'secs_per_entry = %e' % (( end_time - start_time)/ len( inds)))
return self
def predict_log_llk(self, inds, y, batch_size):
N = len( inds)
test_llk = []
start_idx = 0
end_idx = start_idx + batch_size
while( start_idx < N):
end_idx = min( end_idx, N)
batch_inds = inds[ start_idx : end_idx]
batch_y = y[ start_idx : end_idx]
test_feed = { self.batch_inds : batch_inds}
batch_f, batch_f_std = self.sess.run( [ self.f_mle, self.f_std], feed_dict=test_feed)
f_var = batch_f_std ** 2
            # MacKay's probit approximation: E[sigmoid(f)] ~= sigmoid(kappa * mu)
            # with kappa = 1 / sqrt(1 + pi * var / 8).
            kappa = 1.0 / np.sqrt(1 + np.pi * f_var / 8)
            p = common_funcs.sigmoid(kappa * batch_f)
llk = batch_y * np.log( p) + ( 1 - batch_y) * np.log( 1 - p)
test_llk.append(llk)
start_idx += batch_size
end_idx = start_idx + batch_size
test_llk = np.concatenate( test_llk)
assert len( test_llk) == N, "Dims not match"
return test_llk
def _batch_wise_predict(self, inds, batch_size, return_logits):
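        # Predict in mini-batches to bound memory; a logit >= 0 maps to class 1.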
y_pred_logits = []
N = len(inds)
start_idx = 0
end_idx = start_idx + batch_size
while (start_idx < N):
end_idx = min(end_idx, N)
batch_inds = inds[start_idx:end_idx]
test_feed = {self.batch_inds: batch_inds}
batch_y = self.sess.run(self.f_mle, feed_dict=test_feed)
y_pred_logits.append(batch_y)
start_idx += batch_size
end_idx = start_idx + batch_size
y_pred_logits = np.concatenate(y_pred_logits)
y_pred = ( y_pred_logits >= 0).astype( np.float32)
assert len(y_pred_logits) == N, "prediction length not match"
if return_logits:
return y_pred, y_pred_logits
else:
return y_pred
def predict(self, inds, batch_size=None, return_logits = False):
if batch_size is not None:
return self._batch_wise_predict(inds, batch_size, return_logits)
else:
test_feed = {self.batch_inds: inds}
y_pred_logits = self.sess.run(self.f_mle, feed_dict=test_feed)
y_pred = (y_pred_logits >= 0).astype( np.float32)
if return_logits:
return y_pred, y_pred_logits
else:
return y_pred
def main():
assert arg_data_name in ['dblp','anime'], 'Wrong data name %s' % (arg_data_name)
if arg_data_name == 'dblp':
data = data_loader.load_dblp()
elif arg_data_name == 'anime':
data = data_loader.load_anime_binary( )
else:
raise NameError('wrong data set: %s' % arg_data_name)
train_inds = data['train_X']
train_rates = data['train_y']
test_inds = data['test_X']
test_rates = data['test_y']
data_name = data['name']
elem_sizes = data['elem_sizes']
N_train = len( train_rates)
N_test = len(test_rates)
print('elem size:', elem_sizes)
print('pseudo N train = %d, true N train = %d' % (N_train, len(train_rates)))
print("N train = %d, N test = %d" % (N_train, N_test))
print('mods = ', elem_sizes)
# np.random.seed(47)
# tf.random.set_random_seed( 47)
#parameters settings--------------
batch_size = arg_batch_size
num_iters_per_batch = 100
# init U
init_config = {
'elem_sizes': elem_sizes,
'learning_rate': 1e-3,
'init_method': 'he_normal',
'rank': arg_rank,
'num_pseudo_points': 128,
'batch_size': batch_size,
'num_iters_per_batch': num_iters_per_batch,
'N_data_points': N_train,
'init_batch_size' : 2048
}
#end parameters settings----------
if 'USER' in os.environ:
user_name = os.environ['USER']
else:
user_name = os.environ['USERNAME']
log_file = common_funcs.init_log_file('ssgp_classifier_by_%s.txt' % user_name, data_name, init_config)
init_config['log_file'] = log_file
model = SSGP_CLF(init_config)
num_batches = int(len(train_inds) / batch_size)
print("num train = %d, num test = %d, batch_size = %d, num batches = %d" % (
len(train_inds), len(test_inds), batch_size, num_batches))
batch_X_y_gnrt = common_funcs.DataGenerator(train_inds, train_rates, shuffle=True)
batch_inds, batch_rates = batch_X_y_gnrt.draw_next(init_config['init_batch_size'])
model.fit_batch(batch_inds, batch_rates, num_iters_per_batch, print_every=20, verbose=True)
for n_batch in range(1, num_batches + 1):
batch_inds, batch_rates = batch_X_y_gnrt.draw_next(batch_size)
verbose = n_batch % int(num_batches / 20) == 0
model.fit_batch(batch_inds, batch_rates, steps=num_iters_per_batch, verbose=verbose,print_every=50)
if verbose:
y_pred, logtis_pred = model.predict(test_inds, return_logits=True, batch_size = 1024)
acc = common_funcs.metrics_accuracy(test_rates, y_pred)
auc = common_funcs.metrics_auc(test_rates, logtis_pred)
test_llk = model.predict_log_llk( test_inds, test_rates, batch_size=1024)
ave_test_llk = np.average( test_llk)
print("\nbatch = %d, progress = %4.3g, test_acc = %g, test_auc = %g, ave_llk = %g\n" % ( n_batch, n_batch / num_batches * 100, acc, auc, ave_test_llk))
log_file.write("batch = %d, progress = %4.3g, test_acc = %g, test_auc = %g, ave_llk = %g\n" % ( n_batch, n_batch / num_batches * 100, acc, auc, ave_test_llk))
log_file.flush()
os.fsync(log_file.fileno())
y_pred, logtis_pred = model.predict(test_inds, return_logits=True, batch_size = 1024)
acc = common_funcs.metrics_accuracy(test_rates, y_pred)
auc = common_funcs.metrics_auc(test_rates, logtis_pred)
test_llk = model.predict_log_llk(test_inds, test_rates, batch_size=1024)
ave_test_llk = np.average(test_llk)
print("\nbatch = %d, progress = %4.3g, test_acc = %g, test_auc = %g, ave_llk = %g\n" % ( n_batch, n_batch / num_batches * 100, acc, auc, ave_test_llk))
log_file.write("batch = %d, progress = %4.3g, test_acc = %g, test_auc = %g, ave_llk = %g\n" % ( n_batch, n_batch / num_batches * 100, acc, auc, ave_test_llk))
log_file.close()
if __name__ == '__main__':
main()
| [] | [] | [
"CUDA_VISIBLE_DEVICES",
"USERNAME",
"USER"
] | [] | ["CUDA_VISIBLE_DEVICES", "USERNAME", "USER"] | python | 3 | 0 | |
start.py | from telethon import TelegramClient, events, Button
import requests
import sqlite3
from headers import headers
import urls
import os
#from flask import request
client = TelegramClient('anfghohn', int(os.environ.get("APP_ID" )), os.environ.get("API_HASH")).start(bot_token= os.environ.get("TG_BOT_TOKEN"))
@client.on(events.NewMessage(pattern='/start'))
async def handler(event):
chat = await event.get_chat()
await client.send_message(chat,"""嗨,这是Player Video Uploader Bot.您可以使用我制作任何Zee5或MX Player视频的Streamable Link.""")
@client.on(events.NewMessage(pattern='/help'))
async def handler(event):
chat = await event.get_chat()
await client.send_message(chat,"""嗨,这是** Zee5 **和** MX Player **视频下载器和上传器Bot.您可以使用我制作任何Zee5或MX Player视频的Streamable Link.只需从Zee5或MX Player复制视频链接并将其发送给我,我将对其进行加载并将其发送给您.""")
@client.on(events.NewMessage(pattern='(?i)https://www.zee5.com'))
async def handler(event):
link =event.text.split('/')[-1]
chat = await event.get_chat()
w =link
markup = client.build_reply_markup(Button.url("https://www.zee5.com/tvshows/details/sembaruthi/0-6-675/sembaruthi-november-18-2020/0-1-manual_7adlhget67b0"+link))
req1 = requests.get(urls.token_url1, headers=headers).json()
req2 = requests.get(urls.platform_token).json()["token"]
headers["X-Access-Token"] = req2
req3 = requests.get(urls.token_url2, headers=headers).json()
r1 = requests.get(urls.search_api_endpoint + w,headers=headers, params={"translation":"en", "country":"IN"}).json()
g1 = (r1["hls"][0].replace("drm", "hls") + req1["video_token"])
# await client.send_file(chat,r1["image_url"],caption = r1["title"])
markup = client.build_reply_markup(Button.url("Transloaded Link",urls.stream_baseurl+g1))
await client.send_message(chat, "Zee5 Link Transloaded! \n\n"+"**Video Title:** "+r1["title"]+" \n**Video Description:** "+r1["description"],file=r1["image_url"], buttons=markup)
#rgx = w
# await client.send_message(chat, g1)
#await client.send_message(chat,"445")
@client.on(events.NewMessage(pattern='(?i)https://www.mxplayer.in'))
async def handler(event):
link =event.text.split('/')[-1]
video_d = "https://llvod.mxplay.com/"
A =requests.get("https://api.mxplay.com/v1/web/detail/video?type=movie&id="+link+"&platform=com.mxplay.desktop&device-density=2&userid=30bb09af-733a-413b-b8b7-b10348ec2b3d&platform=com.mxplay.mobile&content-languages=hi,en,ta").json()
#A =requests.get("https://api.mxplay.com/v1/web/detail/video?type=movie&id="+link+"&platform=com.mxplay.desktop&device-density=2&userid=30bb09af-733a-413b-b8b7-b10348ec2b3d&platform=com.mxplay.mobile&content-languages=hi,en,ta").json()
chat = await event.get_chat()
markup = client.build_reply_markup(Button.url("Transloaded Link",video_d+A["stream"]['hls']['high']))
await client.send_message(chat,"Title: "+A["title"],buttons=markup)
print(A)
print(link)
@client.on(events.NewMessage(pattern='(?i)https://www.hotstar.com/in/'))
async def handler(event):
    import youtube_dl
    link = event.text
    print(link)
    chat = await event.get_chat()
    ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'})
    with ydl:
        result = ydl.extract_info(
            link,
            download=True  # download the video and return its metadata
        )
    await client.send_message(chat, result)
@client.on(events.NewMessage(pattern='(?i)/ls'))
async def handler(event):
link =event.text.split(" ")[1]
e = os.listdir(link)
chat = await event.get_chat()
c = "|"
#str1.join(s)
#print(c)
await client.send_message(chat,c.join(e))
@client.on(events.NewMessage(pattern='(?i)sm'))
async def handler(event):
link =event.text.split(" ")[1]
print(link)
chat = await event.get_chat()
await client.send_file(chat, '/Download'+link,force_document=True)
client.start()
client.run_until_disconnected()
| [] | [] | [
"API_HASH",
"TG_BOT_TOKEN",
"APP_ID\""
] | [] | ["API_HASH", "TG_BOT_TOKEN", "APP_ID\""] | python | 3 | 0 | |
plugins/inputs/tail/tail_test.go | package tail
import (
"io/ioutil"
"os"
"runtime"
"testing"
"github.com/lavaorg/telex/plugins/parsers"
"github.com/lavaorg/telex/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestTailFromBeginning(t *testing.T) {
if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" {
t.Skip("Skipping CI testing due to race conditions")
}
tmpfile, err := ioutil.TempFile("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
_, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n")
require.NoError(t, err)
tt := NewTail()
tt.FromBeginning = true
tt.Files = []string{tmpfile.Name()}
tt.SetParserFunc(parsers.NewInfluxParser)
defer tt.Stop()
defer tmpfile.Close()
acc := testutil.Accumulator{}
require.NoError(t, tt.Start(&acc))
require.NoError(t, acc.GatherError(tt.Gather))
acc.Wait(1)
acc.AssertContainsTaggedFields(t, "cpu",
map[string]interface{}{
"usage_idle": float64(100),
},
map[string]string{
"mytag": "foo",
"path": tmpfile.Name(),
})
}
func TestTailFromEnd(t *testing.T) {
if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" {
t.Skip("Skipping CI testing due to race conditions")
}
tmpfile, err := ioutil.TempFile("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
_, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n")
require.NoError(t, err)
tt := NewTail()
tt.Files = []string{tmpfile.Name()}
tt.SetParserFunc(parsers.NewInfluxParser)
defer tt.Stop()
defer tmpfile.Close()
acc := testutil.Accumulator{}
require.NoError(t, tt.Start(&acc))
for _, tailer := range tt.tailers {
for n, err := tailer.Tell(); err == nil && n == 0; n, err = tailer.Tell() {
// wait for tailer to jump to end
runtime.Gosched()
}
}
_, err = tmpfile.WriteString("cpu,othertag=foo usage_idle=100\n")
require.NoError(t, err)
require.NoError(t, acc.GatherError(tt.Gather))
acc.Wait(1)
acc.AssertContainsTaggedFields(t, "cpu",
map[string]interface{}{
"usage_idle": float64(100),
},
map[string]string{
"othertag": "foo",
"path": tmpfile.Name(),
})
assert.Len(t, acc.Metrics, 1)
}
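// TestTailBadLine checks that a malformed line is surfaced as an accumulator error.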
func TestTailBadLine(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
tt := NewTail()
tt.FromBeginning = true
tt.Files = []string{tmpfile.Name()}
tt.SetParserFunc(parsers.NewInfluxParser)
defer tt.Stop()
defer tmpfile.Close()
acc := testutil.Accumulator{}
require.NoError(t, tt.Start(&acc))
require.NoError(t, acc.GatherError(tt.Gather))
_, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n")
require.NoError(t, err)
acc.WaitError(1)
assert.Contains(t, acc.Errors[0].Error(), "E! Malformed log line")
}
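// TestTailDosLineendings checks that lines terminated with \r\n are parsed correctly.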
func TestTailDosLineendings(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
_, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n")
require.NoError(t, err)
tt := NewTail()
tt.FromBeginning = true
tt.Files = []string{tmpfile.Name()}
tt.SetParserFunc(parsers.NewInfluxParser)
defer tt.Stop()
defer tmpfile.Close()
acc := testutil.Accumulator{}
require.NoError(t, tt.Start(&acc))
require.NoError(t, acc.GatherError(tt.Gather))
acc.Wait(2)
acc.AssertContainsFields(t, "cpu",
map[string]interface{}{
"usage_idle": float64(100),
})
acc.AssertContainsFields(t, "cpu2",
map[string]interface{}{
"usage_idle": float64(200),
})
}
| [
"\"CIRCLE_PROJECT_REPONAME\"",
"\"CIRCLE_PROJECT_REPONAME\""
] | [] | [
"CIRCLE_PROJECT_REPONAME"
] | [] | ["CIRCLE_PROJECT_REPONAME"] | go | 1 | 0 | |
ddl/ddl_test.go
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"context"
"os"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
. "github.com/pingcap/check"
"github.com/pingcap/log"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testleak"
"go.uber.org/zap"
)
type DDLForTest interface {
// SetHook sets the hook.
SetHook(h Callback)
// SetInterceptoror sets the interceptor.
SetInterceptoror(h Interceptor)
}
// SetHook implements DDL.SetHook interface.
func (d *ddl) SetHook(h Callback) {
d.mu.Lock()
defer d.mu.Unlock()
d.mu.hook = h
}
// SetInterceptoror implements DDL.SetInterceptoror interface.
func (d *ddl) SetInterceptoror(i Interceptor) {
d.mu.Lock()
defer d.mu.Unlock()
d.mu.interceptor = i
}
// generalWorker returns the general worker.
func (d *ddl) generalWorker() *worker {
return d.workers[generalWorker]
}
// restartWorkers is like d.start, but it won't initialize the "workers" or create a new worker.
// It only starts the original workers.
func (d *ddl) restartWorkers(ctx context.Context) {
d.quitCh = make(chan struct{})
if !RunWorker {
return
}
err := d.ownerManager.CampaignOwner(ctx)
terror.Log(err)
for _, worker := range d.workers {
worker.wg.Add(1)
worker.quitCh = make(chan struct{})
w := worker
go util.WithRecovery(func() { w.start(d.ddlCtx) },
func(r interface{}) {
if r != nil {
log.Error("[ddl] restart DDL worker meet panic", zap.String("worker", w.String()), zap.String("ID", d.uuid))
}
})
asyncNotify(worker.ddlJobCh)
}
}
func TestT(t *testing.T) {
CustomVerboseFlag = true
*CustomParallelSuiteFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(logutil.NewLogConfig(logLevel, "", "", logutil.EmptyFileLogConfig, false))
autoid.SetStep(5000)
ReorgWaitTimeout = 30 * time.Millisecond
testleak.BeforeTest()
TestingT(t)
testleak.AfterTestT(t)()
}
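// testCreateStore creates a mock TiKV storage for the tests below.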
func testCreateStore(c *C, name string) kv.Storage {
store, err := mockstore.NewMockTikvStore()
c.Assert(err, IsNil)
return store
}
func testNewContext(d *ddl) sessionctx.Context {
ctx := mock.NewContext()
ctx.Store = d.store
return ctx
}
func testNewDDL(ctx context.Context, etcdCli *clientv3.Client, store kv.Storage,
infoHandle *infoschema.Handle, hook Callback, lease time.Duration) *ddl {
return newDDL(ctx, etcdCli, store, infoHandle, hook, lease, nil)
}
func getSchemaVer(c *C, ctx sessionctx.Context) int64 {
err := ctx.NewTxn(context.Background())
c.Assert(err, IsNil)
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
m := meta.NewMeta(txn)
ver, err := m.GetSchemaVersion()
c.Assert(err, IsNil)
return ver
}
type historyJobArgs struct {
ver int64
db *model.DBInfo
tbl *model.TableInfo
tblIDs map[int64]struct{}
}
func checkEqualTable(c *C, t1, t2 *model.TableInfo) {
c.Assert(t1.ID, Equals, t2.ID)
c.Assert(t1.Name, Equals, t2.Name)
c.Assert(t1.Charset, Equals, t2.Charset)
c.Assert(t1.Collate, Equals, t2.Collate)
c.Assert(t1.PKIsHandle, DeepEquals, t2.PKIsHandle)
c.Assert(t1.Comment, DeepEquals, t2.Comment)
c.Assert(t1.AutoIncID, DeepEquals, t2.AutoIncID)
}
func checkHistoryJob(c *C, job *model.Job) {
c.Assert(job.State, Equals, model.JobStateSynced)
}
func checkHistoryJobArgs(c *C, ctx sessionctx.Context, id int64, args *historyJobArgs) {
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
t := meta.NewMeta(txn)
historyJob, err := t.GetHistoryDDLJob(id)
c.Assert(err, IsNil)
c.Assert(historyJob.BinlogInfo.FinishedTS, Greater, uint64(0))
if args.tbl != nil {
c.Assert(historyJob.BinlogInfo.SchemaVersion, Equals, args.ver)
checkEqualTable(c, historyJob.BinlogInfo.TableInfo, args.tbl)
return
}
// for handling schema job
c.Assert(historyJob.BinlogInfo.SchemaVersion, Equals, args.ver)
c.Assert(historyJob.BinlogInfo.DBInfo, DeepEquals, args.db)
// only for creating schema job
if args.db != nil && len(args.tblIDs) == 0 {
return
}
}
func buildCreateIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job {
return &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionAddIndex,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{unique, model.NewCIStr(indexName),
[]*ast.IndexColName{{
Column: &ast.ColumnName{Name: model.NewCIStr(colName)},
Length: types.UnspecifiedLength}}},
}
}
func testCreateIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job {
job := buildCreateIdxJob(dbInfo, tblInfo, unique, indexName, colName)
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
func testAddColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, args []interface{}) *model.Job {
job := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionAddColumn,
Args: args,
BinlogInfo: &model.HistoryInfo{},
}
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
func buildDropIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job {
return &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionDropIndex,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{model.NewCIStr(indexName)},
}
}
func testDropIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job {
job := buildDropIdxJob(dbInfo, tblInfo, indexName)
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
func buildRebaseAutoIDJobJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, newBaseID int64) *model.Job {
return &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionRebaseAutoID,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{newBaseID},
}
}
| [
"\"log_level\""
] | [] | [
"log_level"
] | [] | ["log_level"] | go | 1 | 0 | |
testutils/source.go
package testutils
import "github.com/securego/gosec/v2"
// CodeSample encapsulates a snippet of source code that compiles, and how many errors should be detected
type CodeSample struct {
Code []string
Errors int
Config gosec.Config
}
var (
// SampleCodeG101 code snippets for hardcoded credentials
SampleCodeG101 = []CodeSample{
{[]string{`
package main
import "fmt"
func main() {
username := "admin"
password := "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
fmt.Println("Doing something with: ", username, password)
}`}, 1, gosec.NewConfig()},
{[]string{`
// Entropy check should not report this error by default
package main
import "fmt"
func main() {
username := "admin"
password := "secret"
fmt.Println("Doing something with: ", username, password)
}`}, 0, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
var password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
func main() {
username := "admin"
fmt.Println("Doing something with: ", username, password)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
const password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
func main() {
username := "admin"
fmt.Println("Doing something with: ", username, password)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
const (
username = "user"
password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
)
func main() {
fmt.Println("Doing something with: ", username, password)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
var password string
func init() {
password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
const (
ATNStateSomethingElse = 1
ATNStateTokenStart = 42
)
func main() {
println(ATNStateTokenStart)
}`}, 0, gosec.NewConfig()},
{[]string{`
package main
const (
ATNStateTokenStart = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
)
func main() {
println(ATNStateTokenStart)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
func main() {
var password string
if password == "f62e5bcda4fae4f82370da0c6f20697b8f8447ef" {
fmt.Println("password equality")
}
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
func main() {
var password string
if password != "f62e5bcda4fae4f82370da0c6f20697b8f8447ef" {
fmt.Println("password equality")
}
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
func main() {
var p string
if p != "f62e5bcda4fae4f82370da0c6f20697b8f8447ef" {
fmt.Println("password equality")
}
}`}, 0, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
const (
pw = "KjasdlkjapoIKLlka98098sdf012U/rL2sLdBqOHQUlt5Z6kCgKGDyCFA=="
)
func main() {
fmt.Println(pw)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
var (
pw string
)
func main() {
pw = "KjasdlkjapoIKLlka98098sdf012U/rL2sLdBqOHQUlt5Z6kCgKGDyCFA=="
fmt.Println(pw)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
const (
cred = "KjasdlkjapoIKLlka98098sdf012U/rL2sLdBqOHQUlt5Z6kCgKGDyCFA=="
)
func main() {
fmt.Println(cred)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
var (
cred string
)
func main() {
cred = "KjasdlkjapoIKLlka98098sdf012U/rL2sLdBqOHQUlt5Z6kCgKGDyCFA=="
fmt.Println(cred)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
const (
apiKey = "KjasdlkjapoIKLlka98098sdf012U"
)
func main() {
fmt.Println(apiKey)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
var (
apiKey string
)
func main() {
apiKey = "KjasdlkjapoIKLlka98098sdf012U"
fmt.Println(apiKey)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
const (
bearer = "Bearer: 2lkjdfoiuwer092834kjdwf09"
)
func main() {
fmt.Println(bearer)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import "fmt"
var (
bearer string
)
func main() {
bearer = "Bearer: 2lkjdfoiuwer092834kjdwf09"
fmt.Println(bearer)
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG102 code snippets for network binding
SampleCodeG102 = []CodeSample{
// Bind to all networks explicitly
{[]string{`
package main
import (
"log"
"net"
)
func main() {
l, err := net.Listen("tcp", "0.0.0.0:2000")
if err != nil {
log.Fatal(err)
}
defer l.Close()
}`}, 1, gosec.NewConfig()},
// Bind to all networks implicitly (default if host omitted)
{[]string{`
package main
import (
"log"
"net"
)
func main() {
l, err := net.Listen("tcp", ":2000")
if err != nil {
log.Fatal(err)
}
defer l.Close()
}`}, 1, gosec.NewConfig()},
// Bind to all networks indirectly through a parsing function
{[]string{`
package main
import (
"log"
"net"
)
func parseListenAddr(listenAddr string) (network string, addr string) {
return "", ""
}
func main() {
addr := ":2000"
l, err := net.Listen(parseListenAddr(addr))
if err != nil {
log.Fatal(err)
}
defer l.Close()
}`}, 1, gosec.NewConfig()},
// Bind to all networks indirectly through a parsing function
{[]string{`
package main
import (
"log"
"net"
)
const addr = ":2000"
func parseListenAddr(listenAddr string) (network string, addr string) {
return "", ""
}
func main() {
l, err := net.Listen(parseListenAddr(addr))
if err != nil {
log.Fatal(err)
}
defer l.Close()
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import (
"log"
"net"
)
const addr = "0.0.0.0:2000"
func main() {
l, err := net.Listen("tcp", addr)
if err != nil {
log.Fatal(err)
}
defer l.Close()
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG103 find instances of unsafe blocks for auditing purposes
SampleCodeG103 = []CodeSample{
{[]string{`
package main
import (
"fmt"
"unsafe"
)
type Fake struct{}
func (Fake) Good() {}
func main() {
unsafeM := Fake{}
unsafeM.Good()
intArray := [...]int{1, 2}
fmt.Printf("\nintArray: %v\n", intArray)
intPtr := &intArray[0]
fmt.Printf("\nintPtr=%p, *intPtr=%d.\n", intPtr, *intPtr)
addressHolder := uintptr(unsafe.Pointer(intPtr)) + unsafe.Sizeof(intArray[0])
intPtr = (*int)(unsafe.Pointer(addressHolder))
fmt.Printf("\nintPtr=%p, *intPtr=%d.\n\n", intPtr, *intPtr)
}`}, 3, gosec.NewConfig()},
}
// SampleCodeG104 finds errors that aren't being handled
SampleCodeG104 = []CodeSample{
{[]string{`
package main
import "fmt"
func test() (int,error) {
return 0, nil
}
func main() {
v, _ := test()
fmt.Println(v)
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"io/ioutil"
"os"
"fmt"
)
func a() error {
return fmt.Errorf("This is an error")
}
func b() {
fmt.Println("b")
ioutil.WriteFile("foo.txt", []byte("bar"), os.ModeExclusive)
}
func c() string {
return fmt.Sprintf("This isn't anything")
}
func main() {
_ = a()
a()
b()
c()
}`}, 2, gosec.NewConfig()}, {[]string{`
package main
import "fmt"
func test() error {
return nil
}
func main() {
e := test()
fmt.Println(e)
}`}, 0, gosec.NewConfig()}, {[]string{`
// +build go1.10
package main
import "strings"
func main() {
var buf strings.Builder
_, err := buf.WriteString("test string")
if err != nil {
panic(err)
}
}`, `
package main
func dummy(){}
`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"bytes"
)
type a struct {
buf *bytes.Buffer
}
func main() {
a := &a{
buf: new(bytes.Buffer),
}
a.buf.Write([]byte{0})
}
`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"io/ioutil"
"os"
"fmt"
)
func a() {
fmt.Println("a")
ioutil.WriteFile("foo.txt", []byte("bar"), os.ModeExclusive)
}
func main() {
a()
}`}, 0, gosec.Config{"G104": map[string]interface{}{"ioutil": []interface{}{"WriteFile"}}}}, {[]string{`
package main
import (
"bytes"
"fmt"
"io"
"os"
"strings"
)
func createBuffer() *bytes.Buffer {
return new(bytes.Buffer)
}
func main() {
new(bytes.Buffer).WriteString("*bytes.Buffer")
fmt.Fprintln(os.Stderr, "fmt")
new(strings.Builder).WriteString("*strings.Builder")
_, pw := io.Pipe()
pw.CloseWithError(io.EOF)
createBuffer().WriteString("*bytes.Buffer")
b := createBuffer()
b.WriteString("*bytes.Buffer")
}`}, 0, gosec.NewConfig()},
	} // it shouldn't return any errors because all method calls are whitelisted by default
// SampleCodeG104Audit finds errors that aren't being handled in audit mode
SampleCodeG104Audit = []CodeSample{
{[]string{`
package main
import "fmt"
func test() (int,error) {
return 0, nil
}
func main() {
v, _ := test()
fmt.Println(v)
}`}, 1, gosec.Config{gosec.Globals: map[gosec.GlobalOption]string{gosec.Audit: "enabled"}}}, {[]string{`
package main
import (
"io/ioutil"
"os"
"fmt"
)
func a() error {
return fmt.Errorf("This is an error")
}
func b() {
fmt.Println("b")
ioutil.WriteFile("foo.txt", []byte("bar"), os.ModeExclusive)
}
func c() string {
return fmt.Sprintf("This isn't anything")
}
func main() {
_ = a()
a()
b()
c()
}`}, 3, gosec.Config{gosec.Globals: map[gosec.GlobalOption]string{gosec.Audit: "enabled"}}}, {[]string{`
package main
import "fmt"
func test() error {
return nil
}
func main() {
e := test()
fmt.Println(e)
}`}, 0, gosec.Config{gosec.Globals: map[gosec.GlobalOption]string{gosec.Audit: "enabled"}}}, {[]string{`
// +build go1.10
package main
import "strings"
func main() {
var buf strings.Builder
_, err := buf.WriteString("test string")
if err != nil {
panic(err)
}
}`, `
package main
func dummy(){}
`}, 0, gosec.Config{gosec.Globals: map[gosec.GlobalOption]string{gosec.Audit: "enabled"}}},
}
// SampleCodeG106 - ssh InsecureIgnoreHostKey
SampleCodeG106 = []CodeSample{{[]string{`
package main
import (
"golang.org/x/crypto/ssh"
)
func main() {
_ = ssh.InsecureIgnoreHostKey()
}`}, 1, gosec.NewConfig()}}
// SampleCodeG107 - SSRF via http requests with variable url
SampleCodeG107 = []CodeSample{{[]string{`
// Input from stdin is considered insecure
package main
import (
"net/http"
"io/ioutil"
"fmt"
"os"
"bufio"
)
func main() {
in := bufio.NewReader(os.Stdin)
url, err := in.ReadString('\n')
if err != nil {
panic(err)
}
resp, err := http.Get(url)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
fmt.Printf("%s", body)
}`}, 1, gosec.NewConfig()}, {[]string{`
// A variable defined at package level can be changed at any time
// regardless of the initial value
package main
import (
"fmt"
"io/ioutil"
"net/http"
)
var url string = "https://www.google.com"
func main() {
resp, err := http.Get(url)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
fmt.Printf("%s", body)
}`}, 1, gosec.NewConfig()}, {[]string{`
// Environment variables are not considered a secure source
package main
import (
"net/http"
"io/ioutil"
"fmt"
"os"
)
func main() {
url := os.Getenv("tainted_url")
resp, err := http.Get(url)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
fmt.Printf("%s", body)
}`}, 1, gosec.NewConfig()}, {[]string{`
// Constant variables or hard-coded strings are secure
package main
import (
"fmt"
"net/http"
)
const url = "http://127.0.0.1"
func main() {
resp, err := http.Get(url)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}`}, 0, gosec.NewConfig()}, {[]string{`
// A variable at function scope which is initialized to
// a constant string is secure (e.g. cannot be changed concurrently)
package main
import (
"fmt"
"net/http"
)
func main() {
var url string = "http://127.0.0.1"
resp, err := http.Get(url)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}`}, 0, gosec.NewConfig()}, {[]string{`
// A variable at function scope which is initialized to
// a constant string is secure (e.g. cannot be changed concurrently)
package main
import (
"fmt"
"net/http"
)
func main() {
url := "http://127.0.0.1"
resp, err := http.Get(url)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}`}, 0, gosec.NewConfig()}, {[]string{`
// A variable at function scope which is initialized to
// a constant string is secure (e.g. cannot be changed concurrently)
package main
import (
"fmt"
"net/http"
)
func main() {
url1 := "test"
var url2 string = "http://127.0.0.1"
url2 = url1
resp, err := http.Get(url2)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}`}, 0, gosec.NewConfig()}, {[]string{`
// An exported variable declared at package scope is not secure
// because it can be changed at any time
package main
import (
"fmt"
"net/http"
)
var Url string
func main() {
resp, err := http.Get(Url)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}`}, 1, gosec.NewConfig()}, {[]string{`
// A URL provided as a function argument is not secure
package main
import (
"fmt"
"net/http"
)
func get(url string) {
resp, err := http.Get(url)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}
func main() {
url := "http://127.0.0.1"
get(url)
}`}, 1, gosec.NewConfig()}}
// SampleCodeG108 - pprof endpoint automatically exposed
SampleCodeG108 = []CodeSample{{[]string{`
package main
import (
"fmt"
"log"
"net/http"
_ "net/http/pprof"
)
func main() {
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello World!")
})
log.Fatal(http.ListenAndServe(":8080", nil))
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"log"
"net/http"
)
func main() {
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello World!")
})
log.Fatal(http.ListenAndServe(":8080", nil))
}`}, 0, gosec.NewConfig()}}
	// SampleCodeG109 - Potential Integer Overflow
SampleCodeG109 = []CodeSample{
{[]string{`
package main
import (
"fmt"
"strconv"
)
func main() {
bigValue, err := strconv.Atoi("2147483648")
if err != nil {
panic(err)
}
value := int32(bigValue)
fmt.Println(value)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"strconv"
)
func main() {
bigValue, err := strconv.Atoi("32768")
if err != nil {
panic(err)
}
if int16(bigValue) < 0 {
fmt.Println(bigValue)
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"strconv"
)
func main() {
bigValue, err := strconv.Atoi("2147483648")
if err != nil {
panic(err)
}
fmt.Println(bigValue)
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"strconv"
)
func main() {
bigValue, err := strconv.Atoi("2147483648")
if err != nil {
panic(err)
}
fmt.Println(bigValue)
test()
}
func test() {
bigValue := 30
value := int32(bigValue)
fmt.Println(value)
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"strconv"
)
func main() {
value := 10
if value == 10 {
value, _ := strconv.Atoi("2147483648")
fmt.Println(value)
}
v := int32(value)
fmt.Println(v)
}`}, 0, gosec.NewConfig()},
}
// SampleCodeG110 - potential DoS vulnerability via decompression bomb
SampleCodeG110 = []CodeSample{
{[]string{`
package main
import (
"bytes"
"compress/zlib"
"io"
"os"
)
func main() {
buff := []byte{120, 156, 202, 72, 205, 201, 201, 215, 81, 40, 207,
47, 202, 73, 225, 2, 4, 0, 0, 255, 255, 33, 231, 4, 147}
b := bytes.NewReader(buff)
r, err := zlib.NewReader(b)
if err != nil {
panic(err)
}
_, err = io.Copy(os.Stdout, r)
if err != nil {
panic(err)
}
r.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"bytes"
"compress/zlib"
"io"
"os"
)
func main() {
buff := []byte{120, 156, 202, 72, 205, 201, 201, 215, 81, 40, 207,
47, 202, 73, 225, 2, 4, 0, 0, 255, 255, 33, 231, 4, 147}
b := bytes.NewReader(buff)
r, err := zlib.NewReader(b)
if err != nil {
panic(err)
}
buf := make([]byte, 8)
_, err = io.CopyBuffer(os.Stdout, r, buf)
if err != nil {
panic(err)
}
r.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"archive/zip"
"io"
"os"
"strconv"
)
func main() {
r, err := zip.OpenReader("tmp.zip")
if err != nil {
panic(err)
}
defer r.Close()
for i, f := range r.File {
out, err := os.OpenFile("output" + strconv.Itoa(i), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
panic(err)
}
rc, err := f.Open()
if err != nil {
panic(err)
}
_, err = io.Copy(out, rc)
out.Close()
rc.Close()
if err != nil {
panic(err)
}
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"io"
"os"
)
func main() {
s, err := os.Open("src")
if err != nil {
panic(err)
}
defer s.Close()
d, err := os.Create("dst")
if err != nil {
panic(err)
}
defer d.Close()
_, err = io.Copy(d, s)
if err != nil {
panic(err)
}
}`}, 0, gosec.NewConfig()},
}
// SampleCodeG201 - SQL injection via format string
SampleCodeG201 = []CodeSample{
{[]string{`
// Format string without proper quoting
package main
import (
"database/sql"
"fmt"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT * FROM foo where name = '%s'", os.Args[1])
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// Format string without proper quoting case insensitive
package main
import (
"database/sql"
"fmt"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("select * from foo where name = '%s'", os.Args[1])
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// Format string without proper quoting with context
package main
import (
"context"
"database/sql"
"fmt"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("select * from foo where name = '%s'", os.Args[1])
rows, err := db.QueryContext(context.Background(), q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// Format string without proper quoting with transaction
package main
import (
"context"
"database/sql"
"fmt"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
tx, err := db.Begin()
if err != nil {
panic(err)
}
defer tx.Rollback()
q := fmt.Sprintf("select * from foo where name = '%s'", os.Args[1])
rows, err := tx.QueryContext(context.Background(), q)
if err != nil {
panic(err)
}
defer rows.Close()
if err := tx.Commit(); err != nil {
panic(err)
}
}`}, 1, gosec.NewConfig()}, {[]string{`
// Format string false positive, safe string spec.
package main
import (
"database/sql"
"fmt"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT * FROM foo where id = %d", os.Args[1])
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 0, gosec.NewConfig()}, {[]string{`
// Format string false positive
package main
import (
"database/sql"
)
const staticQuery = "SELECT * FROM foo WHERE age < 32"
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query(staticQuery)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 0, gosec.NewConfig()}, {[]string{`
// Format string false positive, quoted formatter argument.
package main
import (
"database/sql"
"fmt"
"os"
"github.com/lib/pq"
)
func main(){
db, err := sql.Open("postgres", "localhost")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT * FROM %s where id = 1", pq.QuoteIdentifier(os.Args[1]))
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 0, gosec.NewConfig()}, {[]string{`
// false positive
package main
import (
"database/sql"
"fmt"
)
const Table = "foo"
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT * FROM %s where id = 1", Table)
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
)
func main(){
fmt.Sprintln()
}`}, 0, gosec.NewConfig()}, {[]string{`
// Format string with \n\r
package main
import (
"database/sql"
"fmt"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT * FROM foo where\n name = '%s'", os.Args[1])
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// Format string with \n\r
package main
import (
"database/sql"
"fmt"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT * FROM foo where\nname = '%s'", os.Args[1])
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// SQLI by db.Query(some).Scan(&other)
package main
import (
"database/sql"
"fmt"
"os"
)
func main() {
var name string
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT name FROM users where id = '%s'", os.Args[1])
row := db.QueryRow(q)
err = row.Scan(&name)
if err != nil {
panic(err)
}
defer db.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// SQLI by db.Query(some).Scan(&other)
package main
import (
"database/sql"
"fmt"
"os"
)
func main() {
var name string
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT name FROM users where id = '%s'", os.Args[1])
err = db.QueryRow(q).Scan(&name)
if err != nil {
panic(err)
}
defer db.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// SQLI by db.Prepare(some)
package main
import (
"database/sql"
"fmt"
"log"
"os"
)
const Table = "foo"
func main() {
var album string
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT name FROM users where '%s' = ?", os.Args[1])
stmt, err := db.Prepare(q)
if err != nil {
log.Fatal(err)
}
stmt.QueryRow(fmt.Sprintf("%s", os.Args[2])).Scan(&album)
if err != nil {
if err == sql.ErrNoRows {
log.Fatal(err)
}
}
defer stmt.Close()
}
`}, 1, gosec.NewConfig()}, {[]string{`
// SQLI by db.PrepareContext(some)
package main
import (
"context"
"database/sql"
"fmt"
"log"
"os"
)
const Table = "foo"
func main() {
var album string
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT name FROM users where '%s' = ?", os.Args[1])
stmt, err := db.PrepareContext(context.Background(), q)
if err != nil {
log.Fatal(err)
}
stmt.QueryRow(fmt.Sprintf("%s", os.Args[2])).Scan(&album)
if err != nil {
if err == sql.ErrNoRows {
log.Fatal(err)
}
}
defer stmt.Close()
}
`}, 1, gosec.NewConfig()}, {[]string{`
// false positive
package main
import (
"database/sql"
"fmt"
"log"
"os"
)
const Table = "foo"
func main() {
var album string
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
stmt, err := db.Prepare("SELECT * FROM album WHERE id = ?")
if err != nil {
log.Fatal(err)
}
stmt.QueryRow(fmt.Sprintf("%s", os.Args[1])).Scan(&album)
if err != nil {
if err == sql.ErrNoRows {
log.Fatal(err)
}
}
defer stmt.Close()
}
`}, 0, gosec.NewConfig()},
}
// SampleCodeG202 - SQL query string building via string concatenation
SampleCodeG202 = []CodeSample{
{[]string{`
package main
import (
"database/sql"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query("SELECT * FROM foo WHERE name = " + os.Args[1])
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// case insensitive match
package main
import (
"database/sql"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query("select * from foo where name = " + os.Args[1])
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// context match
package main
import (
"context"
"database/sql"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.QueryContext(context.Background(), "select * from foo where name = " + os.Args[1])
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// DB transaction check
package main
import (
"context"
"database/sql"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
tx, err := db.Begin()
if err != nil {
panic(err)
}
defer tx.Rollback()
rows, err := tx.QueryContext(context.Background(), "select * from foo where name = " + os.Args[1])
if err != nil {
panic(err)
}
defer rows.Close()
if err := tx.Commit(); err != nil {
panic(err)
}
}`}, 1, gosec.NewConfig()}, {[]string{`
// multiple string concatenation
package main
import (
"database/sql"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query("SELECT * FROM foo" + "WHERE name = " + os.Args[1])
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// false positive
package main
import (
"database/sql"
)
var staticQuery = "SELECT * FROM foo WHERE age < "
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query(staticQuery + "32")
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"database/sql"
)
const age = "32"
var staticQuery = "SELECT * FROM foo WHERE age < "
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query(staticQuery + age)
if err != nil {
panic(err)
}
defer rows.Close()
}
`}, 0, gosec.NewConfig()}, {[]string{`
package main
const gender = "M"
`, `
package main
import (
"database/sql"
)
const age = "32"
var staticQuery = "SELECT * FROM foo WHERE age < "
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query("SELECT * FROM foo WHERE gender = " + gender)
if err != nil {
panic(err)
}
defer rows.Close()
}
`}, 0, gosec.NewConfig()}, {[]string{`
// ExecContext match
package main
import (
"context"
"database/sql"
"fmt"
"os"
)
func main() {
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
result, err := db.ExecContext(context.Background(), "select * from foo where name = "+os.Args[1])
if err != nil {
panic(err)
}
fmt.Println(result)
}`}, 1, gosec.NewConfig()}, {[]string{`
// Exec match
package main
import (
"database/sql"
"fmt"
"os"
)
func main() {
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
result, err := db.Exec("select * from foo where name = " + os.Args[1])
if err != nil {
panic(err)
}
fmt.Println(result)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"database/sql"
"fmt"
)
const gender = "M"
const age = "32"
var staticQuery = "SELECT * FROM foo WHERE age < "
func main() {
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
result, err := db.Exec("SELECT * FROM foo WHERE gender = " + gender)
if err != nil {
panic(err)
}
fmt.Println(result)
}
`}, 0, gosec.NewConfig()},
}
// SampleCodeG203 - Template checks
SampleCodeG203 = []CodeSample{
{[]string{`
// We assume that hardcoded template strings are safe as the programmer would
// need to be explicitly shooting themselves in the foot (as below)
package main
import (
"html/template"
"os"
)
const tmpl = ""
func main() {
t := template.Must(template.New("ex").Parse(tmpl))
v := map[string]interface{}{
"Title": "Test <b>World</b>",
"Body": template.HTML("<script>alert(1)</script>"),
}
t.Execute(os.Stdout, v)
}`}, 0, gosec.NewConfig()}, {[]string{
`
// Using a variable to initialize could potentially be dangerous. Under the
// current model this will likely produce some false positives.
package main
import (
"html/template"
"os"
)
const tmpl = ""
func main() {
a := "something from another place"
t := template.Must(template.New("ex").Parse(tmpl))
v := map[string]interface{}{
"Title": "Test <b>World</b>",
"Body": template.HTML(a),
}
t.Execute(os.Stdout, v)
}`,
}, 1, gosec.NewConfig()}, {[]string{
`
package main
import (
"html/template"
"os"
)
const tmpl = ""
func main() {
a := "something from another place"
t := template.Must(template.New("ex").Parse(tmpl))
v := map[string]interface{}{
"Title": "Test <b>World</b>",
"Body": template.JS(a),
}
t.Execute(os.Stdout, v)
}`,
}, 1, gosec.NewConfig()}, {[]string{
`
package main
import (
"html/template"
"os"
)
const tmpl = ""
func main() {
a := "something from another place"
t := template.Must(template.New("ex").Parse(tmpl))
v := map[string]interface{}{
"Title": "Test <b>World</b>",
"Body": template.URL(a),
}
t.Execute(os.Stdout, v)
}`,
}, 1, gosec.NewConfig()},
}
// SampleCodeG204 - Subprocess auditing
SampleCodeG204 = []CodeSample{
{[]string{`
package main
import (
"log"
"os/exec"
"context"
)
func main() {
err := exec.CommandContext(context.Background(), "git", "rev-parse", "--show-toplavel").Run()
if err != nil {
log.Fatal(err)
}
log.Printf("Command finished with error: %v", err)
}`}, 0, gosec.NewConfig()},
{[]string{`
// Calling any function which starts a new process using
// command line arguments as its arguments is considered dangerous
package main
import (
"context"
"log"
"os"
"os/exec"
)
func main() {
err := exec.CommandContext(context.Background(), os.Args[0], "5").Run()
if err != nil {
log.Fatal(err)
}
log.Printf("Command finished with error: %v", err)
}`}, 1, gosec.NewConfig()},
{[]string{`
// Initializing a local variable using an environment
// variable is considered dangerous user input
package main
import (
"log"
"os"
"os/exec"
)
func main() {
run := "sleep" + os.Getenv("SOMETHING")
cmd := exec.Command(run, "5")
err := cmd.Start()
if err != nil {
log.Fatal(err)
}
log.Printf("Waiting for command to finish...")
err = cmd.Wait()
log.Printf("Command finished with error: %v", err)
}`}, 1, gosec.NewConfig()},
{[]string{`
// gosec doesn't have enough context to decide that the
// command argument of the RunCmd function is a hardcoded string
// and that's why it's better to warn the user so they can audit it
package main
import (
"log"
"os/exec"
)
func RunCmd(command string) {
cmd := exec.Command(command, "5")
err := cmd.Start()
if err != nil {
log.Fatal(err)
}
log.Printf("Waiting for command to finish...")
err = cmd.Wait()
}
func main() {
RunCmd("sleep")
}`}, 0, gosec.NewConfig()},
{[]string{`
package main
import (
"log"
"os/exec"
)
func RunCmd(a string, c string) {
cmd := exec.Command(c)
err := cmd.Start()
if err != nil {
log.Fatal(err)
}
log.Printf("Waiting for command to finish...")
err = cmd.Wait()
cmd = exec.Command(a)
err = cmd.Start()
if err != nil {
log.Fatal(err)
}
log.Printf("Waiting for command to finish...")
err = cmd.Wait()
}
func main() {
RunCmd("ll", "ls")
}`}, 0, gosec.NewConfig()},
{[]string{`
// The syscall.Exec function called with hardcoded arguments
// shouldn't be considered a command injection
package main
import (
"fmt"
"syscall"
)
func main() {
err := syscall.Exec("/bin/cat", []string{"/etc/passwd"}, nil)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
}`}, 0, gosec.NewConfig()},
{
[]string{`
package main
import (
"fmt"
"syscall"
)
func RunCmd(command string) {
_, err := syscall.ForkExec(command, []string{}, nil)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
}
func main() {
RunCmd("sleep")
}`}, 1, gosec.NewConfig(),
},
{
[]string{`
package main
import (
"fmt"
"syscall"
)
func RunCmd(command string) {
_, _, err := syscall.StartProcess(command, []string{}, nil)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
}
func main() {
RunCmd("sleep")
}`}, 1, gosec.NewConfig(),
},
{[]string{`
// starting a process with a variable as an argument,
// even if not constant, is not considered dangerous
// because it has a hardcoded value
package main
import (
"log"
"os/exec"
)
func main() {
run := "sleep"
cmd := exec.Command(run, "5")
err := cmd.Start()
if err != nil {
log.Fatal(err)
}
log.Printf("Waiting for command to finish...")
err = cmd.Wait()
log.Printf("Command finished with error: %v", err)
}`}, 0, gosec.NewConfig()},
{[]string{`
// exec.Command from supplemental package sys/execabs
// using variable arguments
package main
import (
"context"
"log"
"os"
exec "golang.org/x/sys/execabs"
)
func main() {
err := exec.CommandContext(context.Background(), os.Args[0], "5").Run()
if err != nil {
log.Fatal(err)
}
log.Printf("Command finished with error: %v", err)
}
`}, 1, gosec.NewConfig()},
}
// SampleCodeG301 - mkdir permission check
SampleCodeG301 = []CodeSample{{[]string{`
package main
import (
"fmt"
"os"
)
func main() {
err := os.Mkdir("/tmp/mydir", 0777)
if err != nil {
fmt.Println("Error when creating a directory!")
return
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"os"
)
func main() {
err := os.MkdirAll("/tmp/mydir", 0777)
if err != nil {
fmt.Println("Error when creating a directory!")
return
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"os"
)
func main() {
err := os.Mkdir("/tmp/mydir", 0600)
if err != nil {
fmt.Println("Error when creating a directory!")
return
}
}`}, 0, gosec.NewConfig()}}
// SampleCodeG302 - file create / chmod permissions check
SampleCodeG302 = []CodeSample{{[]string{`
package main
import (
"fmt"
"os"
)
func main() {
err := os.Chmod("/tmp/somefile", 0777)
if err != nil {
fmt.Println("Error when changing file permissions!")
return
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"os"
)
func main() {
_, err := os.OpenFile("/tmp/thing", os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
fmt.Println("Error opening a file!")
return
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"os"
)
func main() {
err := os.Chmod("/tmp/mydir", 0400)
if err != nil {
fmt.Println("Error")
return
}
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"os"
)
func main() {
_, err := os.OpenFile("/tmp/thing", os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
fmt.Println("Error opening a file!")
return
}
}
`}, 0, gosec.NewConfig()}}
// SampleCodeG303 - bad tempfile permissions & hardcoded shared path
SampleCodeG303 = []CodeSample{{[]string{`
package samples
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
)
func main() {
err := ioutil.WriteFile("/tmp/demo2", []byte("This is some data"), 0644)
if err != nil {
fmt.Println("Error while writing!")
}
f, err := os.Create("/tmp/demo2")
if err != nil {
fmt.Println("Error while writing!")
} else if err = f.Close(); err != nil {
fmt.Println("Error while closing!")
}
err = os.WriteFile("/tmp/demo2", []byte("This is some data"), 0644)
if err != nil {
fmt.Println("Error while writing!")
}
err = os.WriteFile("/usr/tmp/demo2", []byte("This is some data"), 0644)
if err != nil {
fmt.Println("Error while writing!")
}
err = os.WriteFile("/tmp/" + "demo2", []byte("This is some data"), 0644)
if err != nil {
fmt.Println("Error while writing!")
}
err = os.WriteFile(os.TempDir() + "/demo2", []byte("This is some data"), 0644)
if err != nil {
fmt.Println("Error while writing!")
}
err = os.WriteFile(path.Join("/var/tmp", "demo2"), []byte("This is some data"), 0644)
if err != nil {
fmt.Println("Error while writing!")
}
err = os.WriteFile(path.Join(os.TempDir(), "demo2"), []byte("This is some data"), 0644)
if err != nil {
fmt.Println("Error while writing!")
}
err = os.WriteFile(filepath.Join(os.TempDir(), "demo2"), []byte("This is some data"), 0644)
if err != nil {
fmt.Println("Error while writing!")
}
}`}, 9, gosec.NewConfig()}}
// SampleCodeG304 - potential file inclusion vulnerability
SampleCodeG304 = []CodeSample{
{[]string{`
package main
import (
"os"
"io/ioutil"
"log"
)
func main() {
f := os.Getenv("tainted_file")
body, err := ioutil.ReadFile(f)
if err != nil {
log.Printf("Error: %v\n", err)
}
log.Print(body)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"os"
"log"
)
func main() {
f := os.Getenv("tainted_file")
body, err := os.ReadFile(f)
if err != nil {
log.Printf("Error: %v\n", err)
}
log.Print(body)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"log"
"net/http"
"os"
)
func main() {
http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
title := r.URL.Query().Get("title")
f, err := os.Open(title)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
body := make([]byte, 5)
if _, err = f.Read(body); err != nil {
fmt.Printf("Error: %v\n", err)
}
fmt.Fprintf(w, "%s", body)
})
log.Fatal(http.ListenAndServe(":3000", nil))
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"log"
"net/http"
"os"
)
func main() {
http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
title := r.URL.Query().Get("title")
f, err := os.OpenFile(title, os.O_RDWR|os.O_CREATE, 0755)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
body := make([]byte, 5)
if _, err = f.Read(body); err != nil {
fmt.Printf("Error: %v\n", err)
}
fmt.Fprintf(w, "%s", body)
})
log.Fatal(http.ListenAndServe(":3000", nil))
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"log"
"os"
"io/ioutil"
)
func main() {
f2 := os.Getenv("tainted_file2")
body, err := ioutil.ReadFile("/tmp/" + f2)
if err != nil {
log.Printf("Error: %v\n", err)
}
log.Print(body)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"bufio"
"fmt"
"os"
"path/filepath"
)
func main() {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Please enter file to read: ")
file, _ := reader.ReadString('\n')
file = file[:len(file)-1]
f, err := os.Open(filepath.Join("/tmp/service/", file))
if err != nil {
fmt.Printf("Error: %v\n", err)
}
contents := make([]byte, 15)
if _, err = f.Read(contents); err != nil {
fmt.Printf("Error: %v\n", err)
}
fmt.Println(string(contents))
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"log"
"os"
"io/ioutil"
"path/filepath"
)
func main() {
dir := os.Getenv("server_root")
f3 := os.Getenv("tainted_file3")
// edge case where both a binary expression and file Join are used.
body, err := ioutil.ReadFile(filepath.Join("/var/"+dir, f3))
if err != nil {
log.Printf("Error: %v\n", err)
}
log.Print(body)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"os"
"path/filepath"
)
func main() {
repoFile := "path_of_file"
cleanRepoFile := filepath.Clean(repoFile)
_, err := os.OpenFile(cleanRepoFile, os.O_RDONLY, 0600)
if err != nil {
panic(err)
}
}
`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"os"
"path/filepath"
)
func openFile(filePath string) {
_, err := os.OpenFile(filepath.Clean(filePath), os.O_RDONLY, 0600)
if err != nil {
panic(err)
}
}
func main() {
repoFile := "path_of_file"
openFile(repoFile)
}
`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"os"
"path/filepath"
)
func main() {
repoFile := "path_of_file"
relFile, err := filepath.Rel("./", repoFile)
if err != nil {
panic(err)
}
_, err = os.OpenFile(relFile, os.O_RDONLY, 0600)
if err != nil {
panic(err)
}
}
`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"io"
"os"
)
func createFile(file string) *os.File {
f, err := os.Create(file)
if err != nil {
panic(err)
}
return f
}
func main() {
s, err := os.Open("src")
if err != nil {
panic(err)
}
defer s.Close()
d := createFile("dst")
defer d.Close()
_, err = io.Copy(d, s)
if err != nil {
panic(err)
}
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG305 - File path traversal when extracting zip/tar archives
SampleCodeG305 = []CodeSample{{[]string{`
package unzip
import (
"archive/zip"
"io"
"os"
"path/filepath"
)
func unzip(archive, target string) error {
reader, err := zip.OpenReader(archive)
if err != nil {
return err
}
if err := os.MkdirAll(target, 0750); err != nil {
return err
}
for _, file := range reader.File {
path := filepath.Join(target, file.Name)
if file.FileInfo().IsDir() {
os.MkdirAll(path, file.Mode()) //#nosec
continue
}
fileReader, err := file.Open()
if err != nil {
return err
}
defer fileReader.Close()
targetFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
if err != nil {
return err
}
defer targetFile.Close()
if _, err := io.Copy(targetFile, fileReader); err != nil {
return err
}
}
return nil
}`}, 1, gosec.NewConfig()}, {[]string{`
package unzip
import (
"archive/zip"
"io"
"os"
"path/filepath"
)
func unzip(archive, target string) error {
reader, err := zip.OpenReader(archive)
if err != nil {
return err
}
if err := os.MkdirAll(target, 0750); err != nil {
return err
}
for _, file := range reader.File {
archiveFile := file.Name
path := filepath.Join(target, archiveFile)
if file.FileInfo().IsDir() {
os.MkdirAll(path, file.Mode()) //#nosec
continue
}
fileReader, err := file.Open()
if err != nil {
return err
}
defer fileReader.Close()
targetFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
if err != nil {
return err
}
defer targetFile.Close()
if _, err := io.Copy(targetFile, fileReader); err != nil {
return err
}
}
return nil
}`}, 1, gosec.NewConfig()}, {[]string{`
package zip
import (
"archive/zip"
"io"
"os"
"path"
)
func extractFile(f *zip.File, destPath string) error {
filePath := path.Join(destPath, f.Name)
os.MkdirAll(path.Dir(filePath), os.ModePerm)
rc, err := f.Open()
if err != nil {
return err
}
defer rc.Close()
fw, err := os.Create(filePath)
if err != nil {
return err
}
defer fw.Close()
if _, err = io.Copy(fw, rc); err != nil {
return err
}
if f.FileInfo().Mode()&os.ModeSymlink != 0 {
return nil
}
if err = os.Chtimes(filePath, f.ModTime(), f.ModTime()); err != nil {
return err
}
return os.Chmod(filePath, f.FileInfo().Mode())
}`}, 1, gosec.NewConfig()}, {[]string{`
package tz
import (
"archive/tar"
"io"
"os"
"path"
)
func extractFile(f *tar.Header, tr *tar.Reader, destPath string) error {
filePath := path.Join(destPath, f.Name)
os.MkdirAll(path.Dir(filePath), os.ModePerm)
fw, err := os.Create(filePath)
if err != nil {
return err
}
defer fw.Close()
if _, err = io.Copy(fw, tr); err != nil {
return err
}
if f.FileInfo().Mode()&os.ModeSymlink != 0 {
return nil
}
if err = os.Chtimes(filePath, f.FileInfo().ModTime(), f.FileInfo().ModTime()); err != nil {
return err
}
return os.Chmod(filePath, f.FileInfo().Mode())
}`}, 1, gosec.NewConfig()}}
// SampleCodeG306 - Poor permissions for WriteFile
SampleCodeG306 = []CodeSample{
{[]string{`package main
import (
"bufio"
"fmt"
"io/ioutil"
"os"
)
func check(e error) {
if e != nil {
panic(e)
}
}
func main() {
d1 := []byte("hello\ngo\n")
err := ioutil.WriteFile("/tmp/dat1", d1, 0744)
check(err)
allowed := ioutil.WriteFile("/tmp/dat1", d1, 0600)
check(allowed)
f, err := os.Create("/tmp/dat2")
check(err)
defer f.Close()
d2 := []byte{115, 111, 109, 101, 10}
n2, err := f.Write(d2)
defer check(err)
fmt.Printf("wrote %d bytes\n", n2)
n3, err := f.WriteString("writes\n")
fmt.Printf("wrote %d bytes\n", n3)
f.Sync()
w := bufio.NewWriter(f)
n4, err := w.WriteString("buffered\n")
fmt.Printf("wrote %d bytes\n", n4)
w.Flush()
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG307 - Unsafe defer of os.Close
SampleCodeG307 = []CodeSample{
{[]string{`package main
import (
"bufio"
"fmt"
"io/ioutil"
"os"
)
func check(e error) {
if e != nil {
panic(e)
}
}
func main() {
d1 := []byte("hello\ngo\n")
err := ioutil.WriteFile("/tmp/dat1", d1, 0744)
check(err)
allowed := ioutil.WriteFile("/tmp/dat1", d1, 0600)
check(allowed)
f, err := os.Create("/tmp/dat2")
check(err)
defer f.Close()
d2 := []byte{115, 111, 109, 101, 10}
n2, err := f.Write(d2)
defer check(err)
fmt.Printf("wrote %d bytes\n", n2)
n3, err := f.WriteString("writes\n")
fmt.Printf("wrote %d bytes\n", n3)
f.Sync()
w := bufio.NewWriter(f)
n4, err := w.WriteString("buffered\n")
fmt.Printf("wrote %d bytes\n", n4)
w.Flush()
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG401 - Use of weak crypto MD5
SampleCodeG401 = []CodeSample{
{[]string{`
package main
import (
"crypto/md5"
"fmt"
"io"
"log"
"os"
)
func main() {
f, err := os.Open("file.txt")
if err != nil {
log.Fatal(err)
}
defer f.Close()
defer func() {
err := f.Close()
if err != nil {
log.Printf("error closing the file: %s", err)
}
}()
h := md5.New()
if _, err := io.Copy(h, f); err != nil {
log.Fatal(err)
}
fmt.Printf("%x", h.Sum(nil))
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG401b - Use of weak crypto SHA1
SampleCodeG401b = []CodeSample{
{[]string{`
package main
import (
"crypto/sha1"
"fmt"
"io"
"log"
"os"
)
func main() {
f, err := os.Open("file.txt")
if err != nil {
log.Fatal(err)
}
defer f.Close()
h := sha1.New()
if _, err := io.Copy(h, f); err != nil {
log.Fatal(err)
}
fmt.Printf("%x", h.Sum(nil))
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG402 - TLS settings
SampleCodeG402 = []CodeSample{
{[]string{`
// InsecureSkipVerify
package main
import (
"crypto/tls"
"fmt"
"net/http"
)
func main() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
_, err := client.Get("https://golang.org/")
if err != nil {
fmt.Println(err)
}
}`}, 1, gosec.NewConfig()},
{[]string{
`
// Insecure minimum version
package main
import (
"crypto/tls"
"fmt"
"net/http"
)
func main() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{MinVersion: 0},
}
client := &http.Client{Transport: tr}
_, err := client.Get("https://golang.org/")
if err != nil {
fmt.Println(err)
}
}`,
}, 1, gosec.NewConfig()},
{[]string{
`
// Insecure minimum version
package main
import (
"crypto/tls"
"fmt"
)
func CaseNotError() *tls.Config {
var v uint16 = tls.VersionTLS13
return &tls.Config{
MinVersion: v,
}
}
func main() {
a := CaseNotError()
fmt.Printf("Debug: %v\n", a.MinVersion)
}`,
}, 0, gosec.NewConfig()},
{[]string{
`
// Insecure minimum version
package main
import (
"crypto/tls"
"fmt"
)
func CaseNotError() *tls.Config {
return &tls.Config{
MinVersion: tls.VersionTLS13,
}
}
func main() {
a := CaseNotError()
fmt.Printf("Debug: %v\n", a.MinVersion)
}`,
}, 0, gosec.NewConfig()},
{[]string{
`
// Insecure minimum version
package main
import (
"crypto/tls"
"fmt"
)
func CaseError() *tls.Config {
var v = &tls.Config{
MinVersion: 0,
}
return v
}
func main() {
a := CaseError()
fmt.Printf("Debug: %v\n", a.MinVersion)
}`,
}, 1, gosec.NewConfig()},
{[]string{
`
// Insecure minimum version
package main
import (
"crypto/tls"
"fmt"
)
func CaseError() *tls.Config {
var v = &tls.Config{
MinVersion: getVersion(),
}
return v
}
func getVersion() uint16 {
return tls.VersionTLS12
}
func main() {
a := CaseError()
fmt.Printf("Debug: %v\n", a.MinVersion)
}`,
}, 1, gosec.NewConfig()},
{[]string{
`
// Insecure minimum version
package main
import (
"crypto/tls"
"fmt"
"net/http"
)
var theValue uint16 = 0x0304
func main() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{MinVersion: theValue},
}
client := &http.Client{Transport: tr}
_, err := client.Get("https://golang.org/")
if err != nil {
fmt.Println(err)
}
}
`,
}, 0, gosec.NewConfig()},
{[]string{`
// Insecure max version
package main
import (
"crypto/tls"
"fmt"
"net/http"
)
func main() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{MaxVersion: 0},
}
client := &http.Client{Transport: tr}
_, err := client.Get("https://golang.org/")
if err != nil {
fmt.Println(err)
}
}
`}, 1, gosec.NewConfig()},
{
[]string{`
// Insecure ciphersuite selection
package main
import (
"crypto/tls"
"fmt"
"net/http"
)
func main() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{CipherSuites: []uint16{
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
},},
}
client := &http.Client{Transport: tr}
_, err := client.Get("https://golang.org/")
if err != nil {
fmt.Println(err)
}
}`}, 1, gosec.NewConfig(),
},
{[]string{`
// secure max version when min version is specified
package main
import (
"crypto/tls"
"fmt"
"net/http"
)
func main() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{MaxVersion: 0, MinVersion: tls.VersionTLS13},
}
client := &http.Client{Transport: tr}
_, err := client.Get("https://golang.org/")
if err != nil {
fmt.Println(err)
}
}`}, 0, gosec.NewConfig()},
{[]string{`
package p0
import "crypto/tls"
func TlsConfig0() *tls.Config {
var v uint16 = 0
return &tls.Config{MinVersion: v}
}
`, `
package p0
import "crypto/tls"
func TlsConfig1() *tls.Config {
return &tls.Config{MinVersion: 0x0304}
}
`}, 1, gosec.NewConfig()},
}
// SampleCodeG403 - weak key strength
SampleCodeG403 = []CodeSample{
{[]string{`
package main
import (
"crypto/rand"
"crypto/rsa"
"fmt"
)
func main() {
//Generate Private Key
pvk, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
fmt.Println(err)
}
fmt.Println(pvk)
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG404 - weak random number
SampleCodeG404 = []CodeSample{
{[]string{`
package main
import "crypto/rand"
func main() {
good, _ := rand.Read(nil)
println(good)
}`}, 0, gosec.NewConfig()},
{[]string{`
package main
import "math/rand"
func main() {
bad := rand.Int()
println(bad)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import (
"crypto/rand"
mrand "math/rand"
)
func main() {
good, _ := rand.Read(nil)
println(good)
bad := mrand.Int31()
println(bad)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import (
"math/rand"
)
func main() {
gen := rand.New(rand.NewSource(10))
bad := gen.Int()
println(bad)
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import (
"math/rand"
)
func main() {
bad := rand.Intn(10)
println(bad)
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG501 - Blocklisted import MD5
SampleCodeG501 = []CodeSample{
{[]string{`
package main
import (
"crypto/md5"
"fmt"
"os"
)
func main() {
for _, arg := range os.Args {
fmt.Printf("%x - %s\n", md5.Sum([]byte(arg)), arg)
}
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG502 - Blocklisted import DES
SampleCodeG502 = []CodeSample{
{[]string{`
package main
import (
"crypto/cipher"
"crypto/des"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
)
func main() {
block, err := des.NewCipher([]byte("sekritz"))
if err != nil {
panic(err)
}
plaintext := []byte("I CAN HAZ SEKRIT MSG PLZ")
ciphertext := make([]byte, des.BlockSize+len(plaintext))
iv := ciphertext[:des.BlockSize]
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
panic(err)
}
stream := cipher.NewCFBEncrypter(block, iv)
stream.XORKeyStream(ciphertext[des.BlockSize:], plaintext)
fmt.Println("Secret message is: %s", hex.EncodeToString(ciphertext))
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG503 - Blocklisted import RC4
SampleCodeG503 = []CodeSample{{[]string{`
package main
import (
"crypto/rc4"
"encoding/hex"
"fmt"
)
func main() {
cipher, err := rc4.NewCipher([]byte("sekritz"))
if err != nil {
panic(err)
}
plaintext := []byte("I CAN HAZ SEKRIT MSG PLZ")
ciphertext := make([]byte, len(plaintext))
cipher.XORKeyStream(ciphertext, plaintext)
fmt.Println("Secret message is: %s", hex.EncodeToString(ciphertext))
}`}, 1, gosec.NewConfig()}}
// SampleCodeG504 - Blocklisted import CGI
SampleCodeG504 = []CodeSample{{[]string{`
package main
import (
"net/http/cgi"
"net/http"
)
func main() {
cgi.Serve(http.FileServer(http.Dir("/usr/share/doc")))
}`}, 1, gosec.NewConfig()}}
// SampleCodeG505 - Blocklisted import SHA1
SampleCodeG505 = []CodeSample{
{[]string{`
package main
import (
"crypto/sha1"
"fmt"
"os"
)
func main() {
for _, arg := range os.Args {
fmt.Printf("%x - %s\n", sha1.Sum([]byte(arg)), arg)
}
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG601 - Implicit aliasing over range statement
SampleCodeG601 = []CodeSample{
{[]string{
`
package main
import "fmt"
var vector []*string
func appendVector(s *string) {
vector = append(vector, s)
}
func printVector() {
for _, item := range vector {
fmt.Printf("%s", *item)
}
fmt.Println()
}
func foo() (int, **string, *string) {
for _, item := range vector {
return 0, &item, item
}
return 0, nil, nil
}
func main() {
for _, item := range []string{"A", "B", "C"} {
appendVector(&item)
}
printVector()
zero, c_star, c := foo()
fmt.Printf("%d %v %s", zero, c_star, c)
}`,
}, 1, gosec.NewConfig()},
{[]string{`
// see: github.com/securego/gosec/issues/475
package main
import (
"fmt"
)
func main() {
sampleMap := map[string]string{}
sampleString := "A string"
for sampleString, _ = range sampleMap {
fmt.Println(sampleString)
}
}`}, 0, gosec.NewConfig()},
}
// SampleCodeBuildTag - G601 build tags
SampleCodeBuildTag = []CodeSample{{[]string{`
// +build tag
package main
func main() {
fmt.Println("no package imported error")
}`}, 1, gosec.NewConfig()}}
// SampleCodeCgo - Cgo file sample
SampleCodeCgo = []CodeSample{{[]string{`
package main
import (
"fmt"
"unsafe"
)
/*
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
int printData(unsigned char *data) {
return printf("cData: %lu \"%s\"\n", (long unsigned int)strlen(data), data);
}
*/
import "C"
func main() {
// Allocate C data buffer.
width, height := 8, 2
lenData := width * height
// add string terminating null byte
cData := (*C.uchar)(C.calloc(C.size_t(lenData+1), C.sizeof_uchar))
// When no longer in use, free C allocations.
defer C.free(unsafe.Pointer(cData))
// Go slice reference to C data buffer,
// minus string terminating null byte
gData := (*[1 << 30]byte)(unsafe.Pointer(cData))[:lenData:lenData]
// Write and read cData via gData.
for i := range gData {
gData[i] = '.'
}
copy(gData[0:], "Data")
gData[len(gData)-1] = 'X'
fmt.Printf("gData: %d %q\n", len(gData), gData)
C.printData(cData)
}
`}, 0, gosec.NewConfig()}}
)
| [
"\"tainted_url\"",
"\"SOMETHING\"",
"\"tainted_file\"",
"\"tainted_file\"",
"\"tainted_file2\"",
"\"server_root\"",
"\"tainted_file3\""
] | [] | [
"tainted_url",
"tainted_file",
"tainted_file3",
"tainted_file2",
"SOMETHING",
"server_root"
] | [] | ["tainted_url", "tainted_file", "tainted_file3", "tainted_file2", "SOMETHING", "server_root"] | go | 6 | 0 | |
src/beacons.py | import _bleio
import adafruit_ble
import board
import digitalio
import binascii
import math
from adafruit_ble import BLERadio
from adafruit_ble.advertising import *
from _bleio import adapter
from math import floor
# CircuitPython packages used.
# specifically written for the Adafruit nRF52840 Bluefruit Feather
# 0x01 => flags
# 0x02 => incomplete list of 16-bit UUIDs
# 0x03 => complete list of 16-bit UUIDs
# 0x04 => incomplete list of 32-bit UUIDs
# 0x05 => complete list of 32-bit UUIDs
# 0x06 => incomplete list of 128-bit UUIDs
# 0x07 => complete list of 128-bit UUIDs
# 0x08 => shortened local name
# 0x09 => complete local name
# 0x0A => Tx power level of packet (1 byte) (-127 to +127 dBm)
# 0xFF => manufacturer specific payload
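# Worked example of the layout described above (illustrative only): a "flags"
# entry advertising LE General Discoverable mode is encoded as three bytes:
# length 0x02 (one type byte plus one payload byte), type 0x01 (flags), and
# payload 0x06, i.e. the raw bytes b'\x02\x01\x06'. The AD class below builds
# exactly this kind of structure from hex strings.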
class AD:
# make your own Advertising Data packet.
def __init__(self, length, dtype, payload, endian='little'):
"""
        length: The length of the AD packet (one type byte plus the payload), given as a hex string, e.g. '02'.
        dtype: The data type of the payload as a one-byte hex string, as documented in the Bluetooth Generic Access Profile (GAP).
        payload: The actual payload as a hex string. Its decoded length should equal (length - 1) bytes.
        endian: Endianness of the payload. Specify 'big' or 'little'. Default 'little'.
        All of these should be passed as strings. For example, if your dtype is a flag, which would be 0x01, pass dtype='01'.
        Similarly for the others.
"""
self.length = bytes(length, 'utf-8')
self.dtype = bytes(dtype, 'utf-8')
self.payload = bytes(payload, 'utf-8') # convert all these to byte format
self.l = int(self.length, 16) # integer length of (payload + dtype) in bytes
        if endian not in ['big', 'little']:
self.endian = 'little'
else:
self.endian = endian
def parse(self):
p_len = binascii.unhexlify(self.length)
p_dtype = binascii.unhexlify(self.dtype)
p_pload = binascii.unhexlify(self.payload)
return p_len, p_dtype, p_pload
def join(self):
l, dt, pl = self.parse() # in pure hex byte representation
# convert from little to big endian if needed.
if self.endian != 'little':
pl = b''.join([pl[i: i+1] for i in range(len(pl)-1, -1, -1)])
if len(pl) > (self.l - 1):
pl = pl[:self.l - 1]
if len(pl) < (self.l - 1) and self.endian == 'little':
            pl = b'\x00' * (self.l - 1 - len(pl)) + pl
if len(pl) < (self.l - 1) and self.endian == 'big':
            pl = pl + b'\x00' * (self.l - 1 - len(pl))
return (l + dt + pl)
def get_AD(self):
return self.join()
def Flag(flag='0x06'):
"""
Get the AD for the Flag.
Default: 0x06
"""
l = '02'
dt = '01'
f = flag.split('0x')[1]
return AD(l, dt, f, 'little').join()
def ShortName(ShortName):
"""
Get AD for the short name of device.
"""
l = binascii.hexlify((len(ShortName)+1).to_bytes(1, 'little')).decode('utf-8') # converts the length to a hex representation in a byte in String format
pl = ''
for i in to_hex(bytes(ShortName, 'utf-8')).split(' '):
pl += i
return AD(l, '08', pl, 'little').join()
def CompleteName(fullName):
"""
Get AD for the complete name of the device.
"""
l = binascii.hexlify((len(fullName)+1).to_bytes(1, 'little')).decode('utf-8') # converts the length to a hex representation in a byte in String format
pl = ''
for i in to_hex(bytes(fullName, 'utf-8')).split(' '):
pl += i
return AD(l, '09', pl, 'little').join()
def UUID_16(uuid, complete=True):
"""
Get AD packet for in/complete list of 16-bit UUIDs.
`uuid` should be a string representing the hex, like '0x7e2f' => '7e2f'.
stored in big endian format.
"""
l = '03'
dt = '03' if complete else '02'
pl = uuid
return AD(l, dt, pl, 'big').join()
def UUID_128(uuid, complete=True):
"""
Get AD packet for in/complete list of 128-bit UUIDs.
`uuid` should be a string representing the hex, like '07f75536-cf60-4289-bb1c-6f50f8daf622'.
stored in big endian format.
"""
l = '11'
dt = '07' if complete else '06'
pl = ''.join(uuid.split('-'))
return AD(l, dt, pl, 'big').join()
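# Minimal usage sketch (illustrative only; the device name and 16-bit UUID are
# made-up values, and this assumes the helpers above are module-level functions,
# as their signatures suggest). The returned bytes can be concatenated into one
# raw advertising payload for use with the BLE APIs imported at the top of this
# file:
#
# payload = Flag('0x06') + CompleteName('MyBeacon') + UUID_16('181a')
# print(binascii.hexlify(payload))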
| [] | [] | [] | [] | [] | python | null | null | null |
election/example/main.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"fmt"
"net/http"
"os"
"io"
"time"
election "k8s.io/contrib/election/lib"
"github.com/golang/glog"
flag "github.com/spf13/pflag"
"k8s.io/kubernetes/pkg/api"
// "k8s.io/kubernetes/pkg/apimachinery/types"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
kubectl_util "k8s.io/kubernetes/pkg/kubectl/cmd/util"
)
var (
flags = flag.NewFlagSet(
`elector --election=<name>`,
flag.ExitOnError)
name = flags.String("election", "", "The name of the election")
id = flags.String("id", "", "The id of this participant")
namespace = flags.String("election-namespace", api.NamespaceDefault, "The Kubernetes namespace for this election")
ttl = flags.Duration("ttl", 10*time.Second, "The TTL for this election")
inCluster = flags.Bool("use-cluster-credentials", false, "Should this request use cluster credentials?")
addr = flags.String("http", "", "If non-empty, stand up a simple webserver that reports the leader state")
leader = &LeaderData{}
)
type patchStringValue struct {
Op string `json:"op"`
Path string `json:"path"`
Value string `json:"value"`
}
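// For illustration: one "add the leader label" operation built from this struct
// marshals to [{"op":"add","path":"/metadata/labels/leader","value":"yes"}],
// which is the JSON Patch body sent to the Kubernetes API by the label-update
// callback in main() below.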
func makeClient() (*client.Client, error) {
var cfg *restclient.Config
var err error
if *inCluster {
if cfg, err = restclient.InClusterConfig(); err != nil {
return nil, err
}
} else {
clientConfig := kubectl_util.DefaultClientConfig(flags)
if cfg, err = clientConfig.ClientConfig(); err != nil {
return nil, err
}
}
return client.New(cfg)
}
// LeaderData represents information about the current leader
type LeaderData struct {
Name string `json:"name"`
}
func webHandler(res http.ResponseWriter, req *http.Request) {
data, err := json.Marshal(leader)
if err != nil {
res.WriteHeader(http.StatusInternalServerError)
res.Write([]byte(err.Error()))
return
}
res.WriteHeader(http.StatusOK)
res.Write(data)
}
func webHealthHandler(res http.ResponseWriter, _ *http.Request) {
if leader == nil || leader.Name == "" {
res.WriteHeader(http.StatusInternalServerError)
io.WriteString(res, fmt.Sprintf("Invalid leader set: %v", leader))
return
}
res.WriteHeader(http.StatusOK)
io.WriteString(res, fmt.Sprintf("Valid leader set: %v", leader))
}
func webLeaderHandler(res http.ResponseWriter, _ *http.Request) {
if leader == nil || leader.Name == "" {
res.WriteHeader(http.StatusInternalServerError)
io.WriteString(res, fmt.Sprintf("Invalid leader set: %v", leader))
return
}
	if leader.Name == *id {
res.WriteHeader(http.StatusOK)
io.WriteString(res, fmt.Sprintf("Valid leader set: %v", leader))
return
}
res.WriteHeader(http.StatusGone)
}
func validateFlags() {
if len(*id) == 0 {
*id = os.Getenv("HOSTNAME")
if len(*id) == 0 {
glog.Fatal("--id cannot be empty")
}
}
if len(*name) == 0 {
glog.Fatal("--election cannot be empty")
}
}
func main() {
flags.Parse(os.Args)
validateFlags()
kubeClient, err := makeClient()
if err != nil {
glog.Fatalf("error connecting to the client: %v", err)
}
fn := func(str string) {
var payload [] patchStringValue
		if *id == str {
payload = []patchStringValue{{
Op: "add",
Path: "/metadata/labels/leader",
Value: "yes",
}}
} else {
payload = []patchStringValue{{
Op: "remove",
Path: "/metadata/labels/leader",
}}
}
// var updateErr error
payloadBytes, _ := json.Marshal(payload)
_, updateErr := kubeClient.Pods(*namespace).Patch(*id, "application/json-patch+json", payloadBytes)
if updateErr == nil {
fmt.Println(fmt.Sprintf("Pod %s labelled successfully.", *id))
} else {
fmt.Println(updateErr)
}
leader.Name = str
fmt.Printf("%s is the leader\n", leader.Name)
}
e, err := election.NewElection(*name, *id, *namespace, *ttl, fn, kubeClient)
if err != nil {
glog.Fatalf("failed to create election: %v", err)
}
go election.RunElection(e)
if len(*addr) > 0 {
http.HandleFunc("/health", webHealthHandler)
http.HandleFunc("/leader", webLeaderHandler)
http.HandleFunc("/", webHandler)
http.ListenAndServe(*addr, nil)
} else {
select {}
}
}
| [
"\"HOSTNAME\""
] | [] | [
"HOSTNAME"
] | [] | ["HOSTNAME"] | go | 1 | 0 | |
website/settings/defaults.py | # -*- coding: utf-8 -*-
"""
Base settings file, common to all environments.
These settings can be overridden in local.py.
"""
import datetime
import os
import json
import hashlib
import logging
from datetime import timedelta
from collections import OrderedDict
os_env = os.environ
def parent_dir(path):
'''Return the parent of a directory.'''
return os.path.abspath(os.path.join(path, os.pardir))
HERE = os.path.dirname(os.path.abspath(__file__))
BASE_PATH = parent_dir(HERE) # website/ directory
APP_PATH = parent_dir(BASE_PATH)
ADDON_PATH = os.path.join(APP_PATH, 'addons')
STATIC_FOLDER = os.path.join(BASE_PATH, 'static')
STATIC_URL_PATH = '/static'
ASSET_HASH_PATH = os.path.join(APP_PATH, 'webpack-assets.json')
ROOT = os.path.join(BASE_PATH, '..')
BCRYPT_LOG_ROUNDS = 12
LOG_LEVEL = logging.INFO
TEST_ENV = False
with open(os.path.join(APP_PATH, 'package.json'), 'r') as fobj:
VERSION = json.load(fobj)['version']
# Expiration time for verification key
EXPIRATION_TIME_DICT = {
'password': 24 * 60, # 24 hours in minutes for forgot and reset password
'confirm': 24 * 60, # 24 hours in minutes for confirm account and email
'claim': 30 * 24 * 60 # 30 days in minutes for claim contributor-ship
}
CITATION_STYLES_PATH = os.path.join(BASE_PATH, 'static', 'vendor', 'bower_components', 'styles')
# Minimum seconds between forgot password email attempts
SEND_EMAIL_THROTTLE = 30
# Minimum seconds between attempts to change password
CHANGE_PASSWORD_THROTTLE = 30
# Number of incorrect password attempts allowed before throttling.
INCORRECT_PASSWORD_ATTEMPTS_ALLOWED = 3
# Seconds that must elapse before updating a user's date_last_login field
DATE_LAST_LOGIN_THROTTLE = 60
# Seconds that must elapse before change password attempts are reset (currently 1 hour)
TIME_RESET_CHANGE_PASSWORD_ATTEMPTS = 3600
# Hours before pending embargo/retraction/registration automatically becomes active
RETRACTION_PENDING_TIME = datetime.timedelta(days=2)
EMBARGO_PENDING_TIME = datetime.timedelta(days=2)
EMBARGO_TERMINATION_PENDING_TIME = datetime.timedelta(days=2)
REGISTRATION_APPROVAL_TIME = datetime.timedelta(days=2)
# Date range for embargo periods
EMBARGO_END_DATE_MIN = datetime.timedelta(days=2)
EMBARGO_END_DATE_MAX = datetime.timedelta(days=1460) # Four years
# Question titles to be removed for anonymized VOL
ANONYMIZED_TITLES = ['Authors']
LOAD_BALANCER = False
# May set these to True in local.py for development
DEV_MODE = False
DEBUG_MODE = False
SECURE_MODE = not DEBUG_MODE # Set secure cookie
PROTOCOL = 'https://' if SECURE_MODE else 'http://'
DOMAIN = PROTOCOL + 'localhost:5000/'
INTERNAL_DOMAIN = DOMAIN
API_DOMAIN = PROTOCOL + 'localhost:8000/'
PREPRINT_PROVIDER_DOMAINS = {
'enabled': False,
'prefix': PROTOCOL,
'suffix': '/'
}
# External Ember App Local Development
USE_EXTERNAL_EMBER = False
PROXY_EMBER_APPS = False
# http://docs.python-requests.org/en/master/user/advanced/#timeouts
EXTERNAL_EMBER_SERVER_TIMEOUT = 3.05
EXTERNAL_EMBER_APPS = {}
LOG_PATH = os.path.join(APP_PATH, 'logs')
TEMPLATES_PATH = os.path.join(BASE_PATH, 'templates')
# User management & registration
CONFIRM_REGISTRATIONS_BY_EMAIL = True
ALLOW_LOGIN = True
SEARCH_ENGINE = 'elastic' # Can be 'elastic', or None
ELASTIC_URI = '127.0.0.1:9200'
ELASTIC_TIMEOUT = 10
ELASTIC_INDEX = 'website'
ELASTIC_KWARGS = {
# 'use_ssl': False,
# 'verify_certs': True,
# 'ca_certs': None,
# 'client_cert': None,
# 'client_key': None
}
# Sessions
COOKIE_NAME = 'osf'
# TODO: Override OSF_COOKIE_DOMAIN in local.py in production
OSF_COOKIE_DOMAIN = None
# server-side verification timeout
OSF_SESSION_TIMEOUT = 30 * 24 * 60 * 60 # 30 days in seconds
# TODO: Override SECRET_KEY in local.py in production
SECRET_KEY = 'CHANGEME'
SESSION_COOKIE_SECURE = SECURE_MODE
SESSION_COOKIE_HTTPONLY = True
# local path to private key and cert for local development using https, overwrite in local.py
OSF_SERVER_KEY = None
OSF_SERVER_CERT = None
# External services
USE_CDN_FOR_CLIENT_LIBS = True
USE_EMAIL = True
FROM_EMAIL = '[email protected]'
# support email
OSF_SUPPORT_EMAIL = '[email protected]'
# contact email
OSF_CONTACT_EMAIL = '[email protected]'
# prereg email
PREREG_EMAIL = '[email protected]'
# Default settings for fake email address generation
FAKE_EMAIL_NAME = 'freddiemercury'
FAKE_EMAIL_DOMAIN = 'cos.io'
# SMTP Settings
MAIL_SERVER = 'smtp.sendgrid.net'
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = '' # Set this in local.py
# OR, if using Sendgrid's API
# WARNING: If `SENDGRID_WHITELIST_MODE` is True,
# `tasks.send_email` would only email recipients included in `SENDGRID_EMAIL_WHITELIST`
SENDGRID_API_KEY = None
SENDGRID_WHITELIST_MODE = False
SENDGRID_EMAIL_WHITELIST = []
# Mailchimp
MAILCHIMP_API_KEY = None
MAILCHIMP_WEBHOOK_SECRET_KEY = 'CHANGEME' # OSF secret key to ensure webhook is secure
ENABLE_EMAIL_SUBSCRIPTIONS = True
MAILCHIMP_GENERAL_LIST = 'Open Science Framework General'
#Triggered emails
OSF_HELP_LIST = 'Open Science Framework Help'
PREREG_AGE_LIMIT = timedelta(weeks=12)
PREREG_WAIT_TIME = timedelta(weeks=2)
WAIT_BETWEEN_MAILS = timedelta(days=7)
NO_ADDON_WAIT_TIME = timedelta(weeks=8)
NO_LOGIN_WAIT_TIME = timedelta(weeks=4)
WELCOME_OSF4M_WAIT_TIME = timedelta(weeks=2)
NO_LOGIN_OSF4M_WAIT_TIME = timedelta(weeks=6)
NEW_PUBLIC_PROJECT_WAIT_TIME = timedelta(hours=24)
WELCOME_OSF4M_WAIT_TIME_GRACE = timedelta(days=12)
# TODO: Override in local.py
MAILGUN_API_KEY = None
# Use Celery for file rendering
USE_CELERY = True
# TODO: Override in local.py in production
DB_HOST = 'localhost'
DB_PORT = os_env.get('OSF_DB_PORT', 27017)
# TODO: Configuration should not change between deploys - this should be dynamic.
COOKIE_DOMAIN = '.openscienceframework.org' # Beaker
# TODO: Combine Python and JavaScript config
# If you change COMMENT_MAXLENGTH, make sure you create a corresponding migration.
COMMENT_MAXLENGTH = 1000
# Profile image options
PROFILE_IMAGE_LARGE = 70
PROFILE_IMAGE_MEDIUM = 40
# Currently (8/21/2017) only gravatar supported.
PROFILE_IMAGE_PROVIDER = 'gravatar'
# Conference options
CONFERENCE_MIN_COUNT = 5
WIKI_WHITELIST = {
'tags': [
'a', 'abbr', 'acronym', 'b', 'bdo', 'big', 'blockquote', 'br',
'center', 'cite', 'code',
'dd', 'del', 'dfn', 'div', 'dl', 'dt', 'em', 'embed', 'font',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins',
'kbd', 'li', 'object', 'ol', 'param', 'pre', 'p', 'q',
's', 'samp', 'small', 'span', 'strike', 'strong', 'sub', 'sup',
'table', 'tbody', 'td', 'th', 'thead', 'tr', 'tt', 'ul', 'u',
'var', 'wbr',
],
'attributes': [
'align', 'alt', 'border', 'cite', 'class', 'dir',
'height', 'href', 'id', 'src', 'style', 'title', 'type', 'width',
'face', 'size', # font tags
'salign', 'align', 'wmode', 'target',
],
# Styles currently used in Reproducibility Project wiki pages
'styles': [
'top', 'left', 'width', 'height', 'position',
'background', 'font-size', 'text-align', 'z-index',
'list-style',
]
}
# Maps category identifier => Human-readable representation for use in
# titles, menus, etc.
# Use an OrderedDict so that menu items show in the correct order
NODE_CATEGORY_MAP = OrderedDict([
('analysis', 'Analysis'),
('communication', 'Communication'),
('data', 'Data'),
('hypothesis', 'Hypothesis'),
('instrumentation', 'Instrumentation'),
('methods and measures', 'Methods and Measures'),
('procedure', 'Procedure'),
('project', 'Project'),
('software', 'Software'),
('other', 'Other'),
('', 'Uncategorized')
])
# Add-ons
# Load addons from addons.json
with open(os.path.join(ROOT, 'addons.json')) as fp:
addon_settings = json.load(fp)
ADDONS_REQUESTED = addon_settings['addons']
ADDONS_ARCHIVABLE = addon_settings['addons_archivable']
ADDONS_COMMENTABLE = addon_settings['addons_commentable']
ADDONS_BASED_ON_IDS = addon_settings['addons_based_on_ids']
ADDONS_DEFAULT = addon_settings['addons_default']
SYSTEM_ADDED_ADDONS = {
'user': [],
'node': [],
}
KEEN = {
'public': {
'project_id': None,
'master_key': 'changeme',
'write_key': '',
'read_key': '',
},
'private': {
'project_id': '',
'write_key': '',
'read_key': '',
},
}
SENTRY_DSN = None
SENTRY_DSN_JS = None
MISSING_FILE_NAME = 'untitled'
# Most Popular and New and Noteworthy Nodes
POPULAR_LINKS_NODE = None # TODO Override in local.py in production.
POPULAR_LINKS_REGISTRATIONS = None # TODO Override in local.py in production.
NEW_AND_NOTEWORTHY_LINKS_NODE = None # TODO Override in local.py in production.
MAX_POPULAR_PROJECTS = 10
NEW_AND_NOTEWORTHY_CONTRIBUTOR_BLACKLIST = [] # TODO Override in local.py in production.
# FOR EMERGENCIES ONLY: Setting this to True will disable forks, registrations,
# and uploads in order to save disk space.
DISK_SAVING_MODE = False
# Seconds before another notification email can be sent to a contributor when added to a project
CONTRIBUTOR_ADDED_EMAIL_THROTTLE = 24 * 3600
# Google Analytics
GOOGLE_ANALYTICS_ID = None
GOOGLE_SITE_VERIFICATION = None
DEFAULT_HMAC_SECRET = 'changeme'
DEFAULT_HMAC_ALGORITHM = hashlib.sha256
WATERBUTLER_URL = 'http://localhost:7777'
WATERBUTLER_INTERNAL_URL = WATERBUTLER_URL
####################
# Identifiers #
###################
DOI_URL_PREFIX = 'https://doi.org/'
# General Format for DOIs
DOI_FORMAT = '{prefix}/osf.io/{guid}'
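# For example (illustrative values only):
# DOI_FORMAT.format(prefix='10.5072', guid='abcde') -> '10.5072/osf.io/abcde'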
# ezid
EZID_DOI_NAMESPACE = 'doi:10.5072'
EZID_ARK_NAMESPACE = 'ark:99999'
# datacite
DATACITE_USERNAME = None
DATACITE_PASSWORD = None
DATACITE_URL = None
DATACITE_PREFIX = '10.5072' # Datacite's test DOI prefix -- update in production
# Minting DOIs only works on Datacite's production server, so
# disable minting on staging and development environments by default
DATACITE_MINT_DOIS = not DEV_MODE
# crossref
CROSSREF_USERNAME = None
CROSSREF_PASSWORD = None
CROSSREF_URL = None # Location to POST crossref data. In production, change this to the production CrossRef API endpoint
CROSSREF_DEPOSITOR_EMAIL = 'None' # This email will receive confirmation/error messages from CrossRef on submission
ECSARXIV_CROSSREF_USERNAME = None
ECSARXIV_CROSSREF_PASSWORD = None
# Leave as `None` for production, test/staging/local envs must set
SHARE_PREPRINT_PROVIDER_PREPEND = None
SHARE_REGISTRATION_URL = ''
SHARE_URL = None
SHARE_API_TOKEN = None # Required to send project updates to SHARE
CAS_SERVER_URL = 'http://localhost:8080'
MFR_SERVER_URL = 'http://localhost:7778'
###### ARCHIVER ###########
ARCHIVE_PROVIDER = 'osfstorage'
MAX_ARCHIVE_SIZE = 5 * 1024 ** 3 # == math.pow(1024, 3) == 1 GB
ARCHIVE_TIMEOUT_TIMEDELTA = timedelta(1) # 24 hours
ENABLE_ARCHIVER = True
JWT_SECRET = 'changeme'
JWT_ALGORITHM = 'HS256'
##### CELERY #####
# Default RabbitMQ broker
RABBITMQ_USERNAME = os.environ.get('RABBITMQ_USERNAME', 'guest')
RABBITMQ_PASSWORD = os.environ.get('RABBITMQ_PASSWORD', 'guest')
RABBITMQ_HOST = os.environ.get('RABBITMQ_HOST', 'localhost')
RABBITMQ_PORT = os.environ.get('RABBITMQ_PORT', '5672')
RABBITMQ_VHOST = os.environ.get('RABBITMQ_VHOST', '/')
# Seconds, not an actual celery setting
CELERY_RETRY_BACKOFF_BASE = 5
class CeleryConfig:
"""
Celery Configuration
http://docs.celeryproject.org/en/latest/userguide/configuration.html
"""
timezone = 'UTC'
task_default_queue = 'celery'
task_low_queue = 'low'
task_med_queue = 'med'
task_high_queue = 'high'
low_pri_modules = {
'framework.analytics.tasks',
'framework.celery_tasks',
'scripts.osfstorage.usage_audit',
'scripts.stuck_registration_audit',
'scripts.analytics.tasks',
'scripts.populate_new_and_noteworthy_projects',
'scripts.populate_popular_projects_and_registrations',
'scripts.remind_draft_preregistrations',
'website.search.elastic_search',
'scripts.generate_sitemap',
'scripts.generate_prereg_csv',
'scripts.analytics.run_keen_summaries',
'scripts.analytics.run_keen_snapshots',
'scripts.analytics.run_keen_events',
'scripts.clear_sessions',
}
med_pri_modules = {
'framework.email.tasks',
'scripts.send_queued_mails',
'scripts.triggered_mails',
'website.mailchimp_utils',
'website.notifications.tasks',
}
high_pri_modules = {
'scripts.approve_embargo_terminations',
'scripts.approve_registrations',
'scripts.embargo_registrations',
'scripts.premigrate_created_modified',
'scripts.refresh_addon_tokens',
'scripts.retract_registrations',
'website.archiver.tasks',
'scripts.add_missing_identifiers_to_preprints'
}
try:
from kombu import Queue, Exchange
except ImportError:
pass
else:
task_queues = (
Queue(task_low_queue, Exchange(task_low_queue), routing_key=task_low_queue,
consumer_arguments={'x-priority': -1}),
Queue(task_default_queue, Exchange(task_default_queue), routing_key=task_default_queue,
consumer_arguments={'x-priority': 0}),
Queue(task_med_queue, Exchange(task_med_queue), routing_key=task_med_queue,
consumer_arguments={'x-priority': 1}),
Queue(task_high_queue, Exchange(task_high_queue), routing_key=task_high_queue,
consumer_arguments={'x-priority': 10}),
)
task_default_exchange_type = 'direct'
task_routes = ('framework.celery_tasks.routers.CeleryRouter', )
task_ignore_result = True
task_store_errors_even_if_ignored = True
broker_url = os.environ.get('BROKER_URL', 'amqp://{}:{}@{}:{}/{}'.format(RABBITMQ_USERNAME, RABBITMQ_PASSWORD, RABBITMQ_HOST, RABBITMQ_PORT, RABBITMQ_VHOST))
broker_use_ssl = False
# Default RabbitMQ backend
result_backend = 'django-db' # django-celery-results
beat_scheduler = 'django_celery_beat.schedulers:DatabaseScheduler'
# Modules to import when celery launches
imports = (
'framework.celery_tasks',
'framework.email.tasks',
'website.mailchimp_utils',
'website.notifications.tasks',
'website.archiver.tasks',
'website.search.search',
'website.project.tasks',
'scripts.populate_new_and_noteworthy_projects',
'scripts.populate_popular_projects_and_registrations',
'scripts.refresh_addon_tokens',
'scripts.remind_draft_preregistrations',
'scripts.retract_registrations',
'scripts.embargo_registrations',
'scripts.approve_registrations',
'scripts.approve_embargo_terminations',
'scripts.triggered_mails',
'scripts.clear_sessions',
'scripts.send_queued_mails',
'scripts.analytics.run_keen_summaries',
'scripts.analytics.run_keen_snapshots',
'scripts.analytics.run_keen_events',
'scripts.generate_sitemap',
'scripts.premigrate_created_modified',
'scripts.generate_prereg_csv',
'scripts.add_missing_identifiers_to_preprints',
)
# Modules that need metrics and release requirements
# imports += (
# 'scripts.osfstorage.usage_audit',
# 'scripts.stuck_registration_audit',
# 'scripts.analytics.tasks',
# 'scripts.analytics.upload',
# )
# celery.schedule will not be installed when running invoke requirements the first time.
try:
from celery.schedules import crontab
except ImportError:
pass
else:
# Setting up a scheduler, essentially replaces an independent cron job
# Note: these times must be in UTC
beat_schedule = {
'5-minute-emails': {
'task': 'website.notifications.tasks.send_users_email',
'schedule': crontab(minute='*/5'),
'args': ('email_transactional',),
},
'daily-emails': {
'task': 'website.notifications.tasks.send_users_email',
'schedule': crontab(minute=0, hour=5), # Daily at 12 a.m. EST
'args': ('email_digest',),
},
'refresh_addons': {
'task': 'scripts.refresh_addon_tokens',
'schedule': crontab(minute=0, hour=7), # Daily 2:00 a.m
'kwargs': {'dry_run': False, 'addons': {
'box': 60, # https://docs.box.com/docs/oauth-20#section-6-using-the-access-and-refresh-tokens
'googledrive': 14, # https://developers.google.com/identity/protocols/OAuth2#expiration
'mendeley': 14 # http://dev.mendeley.com/reference/topics/authorization_overview.html
}},
},
'retract_registrations': {
'task': 'scripts.retract_registrations',
'schedule': crontab(minute=0, hour=5), # Daily 12 a.m
'kwargs': {'dry_run': False},
},
'embargo_registrations': {
'task': 'scripts.embargo_registrations',
'schedule': crontab(minute=0, hour=5), # Daily 12 a.m
'kwargs': {'dry_run': False},
},
'add_missing_identifiers_to_preprints': {
'task': 'scripts.add_missing_identifiers_to_preprints',
'schedule': crontab(minute=0, hour=5), # Daily 12 a.m
'kwargs': {'dry_run': False},
},
'approve_registrations': {
'task': 'scripts.approve_registrations',
'schedule': crontab(minute=0, hour=5), # Daily 12 a.m
'kwargs': {'dry_run': False},
},
'approve_embargo_terminations': {
'task': 'scripts.approve_embargo_terminations',
'schedule': crontab(minute=0, hour=5), # Daily 12 a.m
'kwargs': {'dry_run': False},
},
'triggered_mails': {
'task': 'scripts.triggered_mails',
'schedule': crontab(minute=0, hour=5), # Daily 12 a.m
'kwargs': {'dry_run': False},
},
'clear_sessions': {
'task': 'scripts.clear_sessions',
'schedule': crontab(minute=0, hour=5), # Daily 12 a.m
'kwargs': {'dry_run': False},
},
'send_queued_mails': {
'task': 'scripts.send_queued_mails',
'schedule': crontab(minute=0, hour=17), # Daily 12 p.m.
'kwargs': {'dry_run': False},
},
'prereg_reminder': {
'task': 'scripts.remind_draft_preregistrations',
'schedule': crontab(minute=0, hour=12), # Daily 12 p.m.
'kwargs': {'dry_run': False},
},
'new-and-noteworthy': {
'task': 'scripts.populate_new_and_noteworthy_projects',
'schedule': crontab(minute=0, hour=7, day_of_week=6), # Saturday 2:00 a.m.
'kwargs': {'dry_run': False}
},
'update_popular_nodes': {
'task': 'scripts.populate_popular_projects_and_registrations',
'schedule': crontab(minute=0, hour=7), # Daily 2:00 a.m.
'kwargs': {'dry_run': False}
},
'run_keen_summaries': {
'task': 'scripts.analytics.run_keen_summaries',
'schedule': crontab(minute=0, hour=6), # Daily 1:00 a.m.
'kwargs': {'yesterday': True}
},
'run_keen_snapshots': {
'task': 'scripts.analytics.run_keen_snapshots',
'schedule': crontab(minute=0, hour=8), # Daily 3:00 a.m.
},
'run_keen_events': {
'task': 'scripts.analytics.run_keen_events',
'schedule': crontab(minute=0, hour=9), # Daily 4:00 a.m.
'kwargs': {'yesterday': True}
},
'generate_sitemap': {
'task': 'scripts.generate_sitemap',
'schedule': crontab(minute=0, hour=5), # Daily 12:00 a.m.
},
'generate_prereg_csv': {
'task': 'scripts.generate_prereg_csv',
'schedule': crontab(minute=0, hour=10, day_of_week=0), # Sunday 5:00 a.m.
},
}
# Tasks that need metrics and release requirements
# beat_schedule.update({
# 'usage_audit': {
# 'task': 'scripts.osfstorage.usage_audit',
# 'schedule': crontab(minute=0, hour=5), # Daily 12 a.m
# 'kwargs': {'send_mail': True},
# },
# 'stuck_registration_audit': {
# 'task': 'scripts.stuck_registration_audit',
# 'schedule': crontab(minute=0, hour=11), # Daily 6 a.m
# 'kwargs': {},
# },
# })
WATERBUTLER_JWE_SALT = 'yusaltydough'
WATERBUTLER_JWE_SECRET = 'CirclesAre4Squares'
WATERBUTLER_JWT_SECRET = 'ILiekTrianglesALot'
WATERBUTLER_JWT_ALGORITHM = 'HS256'
WATERBUTLER_JWT_EXPIRATION = 15
SENSITIVE_DATA_SALT = 'yusaltydough'
SENSITIVE_DATA_SECRET = 'TrainglesAre5Squares'
DRAFT_REGISTRATION_APPROVAL_PERIOD = datetime.timedelta(days=10)
assert (DRAFT_REGISTRATION_APPROVAL_PERIOD > EMBARGO_END_DATE_MIN), 'The draft registration approval period should be more than the minimum embargo end date.'
# TODO: Remove references to this flag
ENABLE_INSTITUTIONS = True
ENABLE_VARNISH = False
ENABLE_ESI = False
VARNISH_SERVERS = [] # This should be set in local.py or cache invalidation won't work
ESI_MEDIA_TYPES = {'application/vnd.api+json', 'application/json'}
# Used for gathering meta information about the current build
GITHUB_API_TOKEN = None
# switch for disabling things that shouldn't happen during
# the modm to django migration
RUNNING_MIGRATION = False
# External Identity Provider
EXTERNAL_IDENTITY_PROFILE = {
'OrcidProfile': 'ORCID',
}
# Source: https://github.com/maxd/fake_email_validator/blob/master/config/fake_domains.list
BLACKLISTED_DOMAINS = [
'0-mail.com',
'0815.ru',
'0815.su',
'0clickemail.com',
'0wnd.net',
'0wnd.org',
'10mail.org',
'10minut.com.pl',
'10minutemail.cf',
'10minutemail.co.uk',
'10minutemail.co.za',
'10minutemail.com',
'10minutemail.de',
'10minutemail.eu',
'10minutemail.ga',
'10minutemail.gq',
'10minutemail.info',
'10minutemail.ml',
'10minutemail.net',
'10minutemail.org',
'10minutemail.ru',
'10minutemail.us',
'10minutesmail.co.uk',
'10minutesmail.com',
'10minutesmail.eu',
'10minutesmail.net',
'10minutesmail.org',
'10minutesmail.ru',
'10minutesmail.us',
'123-m.com',
'15qm-mail.red',
'15qm.com',
'1chuan.com',
'1mail.ml',
'1pad.de',
'1usemail.com',
'1zhuan.com',
'20mail.in',
'20mail.it',
'20minutemail.com',
'2prong.com',
'30minutemail.com',
'30minutesmail.com',
'33mail.com',
'3d-painting.com',
'3mail.ga',
'4mail.cf',
'4mail.ga',
'4warding.com',
'4warding.net',
'4warding.org',
'5mail.cf',
'5mail.ga',
'60minutemail.com',
'675hosting.com',
'675hosting.net',
'675hosting.org',
'6ip.us',
'6mail.cf',
'6mail.ga',
'6mail.ml',
'6paq.com',
'6url.com',
'75hosting.com',
'75hosting.net',
'75hosting.org',
'7mail.ga',
'7mail.ml',
'7mail7.com',
'7tags.com',
'8mail.cf',
'8mail.ga',
'8mail.ml',
'99experts.com',
'9mail.cf',
'9ox.net',
'a-bc.net',
'a45.in',
'abcmail.email',
'abusemail.de',
'abyssmail.com',
'acentri.com',
'advantimo.com',
'afrobacon.com',
'agedmail.com',
'ajaxapp.net',
'alivance.com',
'ama-trade.de',
'amail.com',
'amail4.me',
'amilegit.com',
'amiri.net',
'amiriindustries.com',
'anappthat.com',
'ano-mail.net',
'anobox.ru',
'anonbox.net',
'anonmails.de',
'anonymail.dk',
'anonymbox.com',
'antichef.com',
'antichef.net',
'antireg.ru',
'antispam.de',
'antispammail.de',
'appixie.com',
'armyspy.com',
'artman-conception.com',
'asdasd.ru',
'azmeil.tk',
'baxomale.ht.cx',
'beddly.com',
'beefmilk.com',
'beerolympics.se',
'bestemailaddress.net',
'bigprofessor.so',
'bigstring.com',
'binkmail.com',
'bio-muesli.net',
'bladesmail.net',
'bloatbox.com',
'bobmail.info',
'bodhi.lawlita.com',
'bofthew.com',
'bootybay.de',
'bossmail.de',
'boun.cr',
'bouncr.com',
'boxformail.in',
'boximail.com',
'boxtemp.com.br',
'breakthru.com',
'brefmail.com',
'brennendesreich.de',
'broadbandninja.com',
'bsnow.net',
'bspamfree.org',
'buffemail.com',
'bugmenot.com',
'bumpymail.com',
'bund.us',
'bundes-li.ga',
'burnthespam.info',
'burstmail.info',
'buymoreplays.com',
'buyusedlibrarybooks.org',
'byom.de',
'c2.hu',
'cachedot.net',
'card.zp.ua',
'casualdx.com',
'cbair.com',
'cdnqa.com',
'cek.pm',
'cellurl.com',
'cem.net',
'centermail.com',
'centermail.net',
'chammy.info',
'cheatmail.de',
'chewiemail.com',
'childsavetrust.org',
'chogmail.com',
'choicemail1.com',
'chong-mail.com',
'chong-mail.net',
'chong-mail.org',
'clixser.com',
'clrmail.com',
'cmail.net',
'cmail.org',
'coldemail.info',
'consumerriot.com',
'cool.fr.nf',
'correo.blogos.net',
'cosmorph.com',
'courriel.fr.nf',
'courrieltemporaire.com',
'crapmail.org',
'crazymailing.com',
'cubiclink.com',
'curryworld.de',
'cust.in',
'cuvox.de',
'd3p.dk',
'dacoolest.com',
'daintly.com',
'dandikmail.com',
'dayrep.com',
'dbunker.com',
'dcemail.com',
'deadaddress.com',
'deadfake.cf',
'deadfake.ga',
'deadfake.ml',
'deadfake.tk',
'deadspam.com',
'deagot.com',
'dealja.com',
'delikkt.de',
'despam.it',
'despammed.com',
'devnullmail.com',
'dfgh.net',
'digitalsanctuary.com',
'dingbone.com',
'dingfone.com',
'discard.cf',
'discard.email',
'discard.ga',
'discard.gq',
'discard.ml',
'discard.tk',
'discardmail.com',
'discardmail.de',
'dispomail.eu',
'disposable-email.ml',
'disposable.cf',
'disposable.ga',
'disposable.ml',
'disposableaddress.com',
'disposableemailaddresses.com',
'disposableinbox.com',
'dispose.it',
'disposeamail.com',
'disposemail.com',
'dispostable.com',
'divermail.com',
'dodgeit.com',
'dodgemail.de',
'dodgit.com',
'dodgit.org',
'dodsi.com',
'doiea.com',
'domozmail.com',
'donemail.ru',
'dontmail.net',
'dontreg.com',
'dontsendmespam.de',
'dotmsg.com',
'drdrb.com',
'drdrb.net',
'droplar.com',
'dropmail.me',
'duam.net',
'dudmail.com',
'dump-email.info',
'dumpandjunk.com',
'dumpmail.de',
'dumpyemail.com',
'duskmail.com',
'e-mail.com',
'e-mail.org',
'e4ward.com',
'easytrashmail.com',
'ee1.pl',
'ee2.pl',
'eelmail.com',
'einmalmail.de',
'einrot.com',
'einrot.de',
'eintagsmail.de',
'email-fake.cf',
'email-fake.com',
'email-fake.ga',
'email-fake.gq',
'email-fake.ml',
'email-fake.tk',
'email60.com',
'email64.com',
'emailage.cf',
'emailage.ga',
'emailage.gq',
'emailage.ml',
'emailage.tk',
'emaildienst.de',
'emailgo.de',
'emailias.com',
'emailigo.de',
'emailinfive.com',
'emaillime.com',
'emailmiser.com',
'emailproxsy.com',
'emails.ga',
'emailsensei.com',
'emailspam.cf',
'emailspam.ga',
'emailspam.gq',
'emailspam.ml',
'emailspam.tk',
'emailtemporanea.com',
'emailtemporanea.net',
'emailtemporar.ro',
'emailtemporario.com.br',
'emailthe.net',
'emailtmp.com',
'emailto.de',
'emailwarden.com',
'emailx.at.hm',
'emailxfer.com',
'emailz.cf',
'emailz.ga',
'emailz.gq',
'emailz.ml',
'emeil.in',
'emeil.ir',
'emeraldwebmail.com',
'emil.com',
'emkei.cf',
'emkei.ga',
'emkei.gq',
'emkei.ml',
'emkei.tk',
'emz.net',
'enterto.com',
'ephemail.net',
'ero-tube.org',
'etranquil.com',
'etranquil.net',
'etranquil.org',
'evopo.com',
'example.com',
'explodemail.com',
'express.net.ua',
'eyepaste.com',
'facebook-email.cf',
'facebook-email.ga',
'facebook-email.ml',
'facebookmail.gq',
'facebookmail.ml',
'fake-box.com',
'fake-mail.cf',
'fake-mail.ga',
'fake-mail.ml',
'fakeinbox.cf',
'fakeinbox.com',
'fakeinbox.ga',
'fakeinbox.ml',
'fakeinbox.tk',
'fakeinformation.com',
'fakemail.fr',
'fakemailgenerator.com',
'fakemailz.com',
'fammix.com',
'fansworldwide.de',
'fantasymail.de',
'fastacura.com',
'fastchevy.com',
'fastchrysler.com',
'fastkawasaki.com',
'fastmazda.com',
'fastmitsubishi.com',
'fastnissan.com',
'fastsubaru.com',
'fastsuzuki.com',
'fasttoyota.com',
'fastyamaha.com',
'fatflap.com',
'fdfdsfds.com',
'fightallspam.com',
'fiifke.de',
'filzmail.com',
'fivemail.de',
'fixmail.tk',
'fizmail.com',
'fleckens.hu',
'flurre.com',
'flurred.com',
'flurred.ru',
'flyspam.com',
'footard.com',
'forgetmail.com',
'forward.cat',
'fr33mail.info',
'frapmail.com',
'free-email.cf',
'free-email.ga',
'freemails.cf',
'freemails.ga',
'freemails.ml',
'freundin.ru',
'friendlymail.co.uk',
'front14.org',
'fuckingduh.com',
'fudgerub.com',
'fux0ringduh.com',
'fyii.de',
'garliclife.com',
'gehensiemirnichtaufdensack.de',
'gelitik.in',
'germanmails.biz',
'get-mail.cf',
'get-mail.ga',
'get-mail.ml',
'get-mail.tk',
'get1mail.com',
'get2mail.fr',
'getairmail.cf',
'getairmail.com',
'getairmail.ga',
'getairmail.gq',
'getairmail.ml',
'getairmail.tk',
'getmails.eu',
'getonemail.com',
'getonemail.net',
'gfcom.com',
'ghosttexter.de',
'giantmail.de',
'girlsundertheinfluence.com',
'gishpuppy.com',
'gmial.com',
'goemailgo.com',
'gorillaswithdirtyarmpits.com',
'gotmail.com',
'gotmail.net',
'gotmail.org',
'gowikibooks.com',
'gowikicampus.com',
'gowikicars.com',
'gowikifilms.com',
'gowikigames.com',
'gowikimusic.com',
'gowikinetwork.com',
'gowikitravel.com',
'gowikitv.com',
'grandmamail.com',
'grandmasmail.com',
'great-host.in',
'greensloth.com',
'grr.la',
'gsrv.co.uk',
'guerillamail.biz',
'guerillamail.com',
'guerillamail.de',
'guerillamail.net',
'guerillamail.org',
'guerillamailblock.com',
'guerrillamail.biz',
'guerrillamail.com',
'guerrillamail.de',
'guerrillamail.info',
'guerrillamail.net',
'guerrillamail.org',
'guerrillamailblock.com',
'gustr.com',
'h8s.org',
'hacccc.com',
'haltospam.com',
'haqed.com',
'harakirimail.com',
'hartbot.de',
'hat-geld.de',
'hatespam.org',
'headstrong.de',
'hellodream.mobi',
'herp.in',
'hidemail.de',
'hideme.be',
'hidzz.com',
'hiru-dea.com',
'hmamail.com',
'hochsitze.com',
'hopemail.biz',
'hot-mail.cf',
'hot-mail.ga',
'hot-mail.gq',
'hot-mail.ml',
'hot-mail.tk',
'hotpop.com',
'hulapla.de',
'hushmail.com',
'ieatspam.eu',
'ieatspam.info',
'ieh-mail.de',
'ihateyoualot.info',
'iheartspam.org',
'ikbenspamvrij.nl',
'imails.info',
'imgof.com',
'imgv.de',
'imstations.com',
'inbax.tk',
'inbox.si',
'inboxalias.com',
'inboxclean.com',
'inboxclean.org',
'inboxproxy.com',
'incognitomail.com',
'incognitomail.net',
'incognitomail.org',
'ineec.net',
'infocom.zp.ua',
'inoutmail.de',
'inoutmail.eu',
'inoutmail.info',
'inoutmail.net',
'insorg-mail.info',
'instant-mail.de',
'instantemailaddress.com',
'instantlyemail.com',
'ip6.li',
'ipoo.org',
'irish2me.com',
'iwi.net',
'jetable.com',
'jetable.fr.nf',
'jetable.net',
'jetable.org',
'jnxjn.com',
'jourrapide.com',
'junk1e.com',
'junkmail.com',
'junkmail.ga',
'junkmail.gq',
'jupimail.com',
'kasmail.com',
'kaspop.com',
'keepmymail.com',
'killmail.com',
'killmail.net',
'kimsdisk.com',
'kingsq.ga',
'kiois.com',
'kir.ch.tc',
'klassmaster.com',
'klassmaster.net',
'klzlk.com',
'kook.ml',
'koszmail.pl',
'kulturbetrieb.info',
'kurzepost.de',
'l33r.eu',
'labetteraverouge.at',
'lackmail.net',
'lags.us',
'landmail.co',
'lastmail.co',
'lawlita.com',
'lazyinbox.com',
'legitmail.club',
'letthemeatspam.com',
'lhsdv.com',
'libox.fr',
'lifebyfood.com',
'link2mail.net',
'litedrop.com',
'loadby.us',
'login-email.cf',
'login-email.ga',
'login-email.ml',
'login-email.tk',
'lol.ovpn.to',
'lolfreak.net',
'lookugly.com',
'lopl.co.cc',
'lortemail.dk',
'lovemeleaveme.com',
'lr78.com',
'lroid.com',
'lukop.dk',
'm21.cc',
'm4ilweb.info',
'maboard.com',
'mail-filter.com',
'mail-temporaire.fr',
'mail.by',
'mail.mezimages.net',
'mail.zp.ua',
'mail114.net',
'mail1a.de',
'mail21.cc',
'mail2rss.org',
'mail333.com',
'mail4trash.com',
'mailbidon.com',
'mailbiz.biz',
'mailblocks.com',
'mailblog.biz',
'mailbucket.org',
'mailcat.biz',
'mailcatch.com',
'mailde.de',
'mailde.info',
'maildrop.cc',
'maildrop.cf',
'maildrop.ga',
'maildrop.gq',
'maildrop.ml',
'maildu.de',
'maildx.com',
'maileater.com',
'mailed.ro',
'maileimer.de',
'mailexpire.com',
'mailfa.tk',
'mailforspam.com',
'mailfree.ga',
'mailfree.gq',
'mailfree.ml',
'mailfreeonline.com',
'mailfs.com',
'mailguard.me',
'mailhazard.com',
'mailhazard.us',
'mailhz.me',
'mailimate.com',
'mailin8r.com',
'mailinater.com',
'mailinator.com',
'mailinator.gq',
'mailinator.net',
'mailinator.org',
'mailinator.us',
'mailinator2.com',
'mailinator2.net',
'mailincubator.com',
'mailismagic.com',
'mailjunk.cf',
'mailjunk.ga',
'mailjunk.gq',
'mailjunk.ml',
'mailjunk.tk',
'mailmate.com',
'mailme.gq',
'mailme.ir',
'mailme.lv',
'mailme24.com',
'mailmetrash.com',
'mailmoat.com',
'mailms.com',
'mailnator.com',
'mailnesia.com',
'mailnull.com',
'mailorg.org',
'mailpick.biz',
'mailproxsy.com',
'mailquack.com',
'mailrock.biz',
'mailscrap.com',
'mailshell.com',
'mailsiphon.com',
'mailslapping.com',
'mailslite.com',
'mailspeed.ru',
'mailtemp.info',
'mailtome.de',
'mailtothis.com',
'mailtrash.net',
'mailtv.net',
'mailtv.tv',
'mailzilla.com',
'mailzilla.org',
'mailzilla.orgmbx.cc',
'makemetheking.com',
'mallinator.com',
'manifestgenerator.com',
'manybrain.com',
'mbx.cc',
'mciek.com',
'mega.zik.dj',
'meinspamschutz.de',
'meltmail.com',
'messagebeamer.de',
'mezimages.net',
'mfsa.ru',
'mierdamail.com',
'migmail.pl',
'migumail.com',
'mindless.com',
'ministry-of-silly-walks.de',
'mintemail.com',
'misterpinball.de',
'mjukglass.nu',
'moakt.com',
'mobi.web.id',
'mobileninja.co.uk',
'moburl.com',
'mohmal.com',
'moncourrier.fr.nf',
'monemail.fr.nf',
'monmail.fr.nf',
'monumentmail.com',
'msa.minsmail.com',
'mt2009.com',
'mt2014.com',
'mt2015.com',
'mx0.wwwnew.eu',
'my10minutemail.com',
'myalias.pw',
'mycard.net.ua',
'mycleaninbox.net',
'myemailboxy.com',
'mymail-in.net',
'mymailoasis.com',
'mynetstore.de',
'mypacks.net',
'mypartyclip.de',
'myphantomemail.com',
'mysamp.de',
'myspaceinc.com',
'myspaceinc.net',
'myspaceinc.org',
'myspacepimpedup.com',
'myspamless.com',
'mytemp.email',
'mytempemail.com',
'mytempmail.com',
'mytrashmail.com',
'nabuma.com',
'neomailbox.com',
'nepwk.com',
'nervmich.net',
'nervtmich.net',
'netmails.com',
'netmails.net',
'netzidiot.de',
'neverbox.com',
'nice-4u.com',
'nincsmail.com',
'nincsmail.hu',
'nmail.cf',
'nnh.com',
'no-spam.ws',
'noblepioneer.com',
'nobulk.com',
'noclickemail.com',
'nogmailspam.info',
'nomail.pw',
'nomail.xl.cx',
'nomail2me.com',
'nomorespamemails.com',
'nonspam.eu',
'nonspammer.de',
'noref.in',
'nospam.ze.tc',
'nospam4.us',
'nospamfor.us',
'nospammail.net',
'nospamthanks.info',
'notmailinator.com',
'notsharingmy.info',
'nowhere.org',
'nowmymail.com',
'nurfuerspam.de',
'nwldx.com',
'objectmail.com',
'obobbo.com',
'odaymail.com',
'odnorazovoe.ru',
'one-time.email',
'oneoffemail.com',
'oneoffmail.com',
'onewaymail.com',
'onlatedotcom.info',
'online.ms',
'oopi.org',
'opayq.com',
'opentrash.com',
'ordinaryamerican.net',
'otherinbox.com',
'ourklips.com',
'outlawspam.com',
'ovpn.to',
'owlpic.com',
'pancakemail.com',
'paplease.com',
'pepbot.com',
'pfui.ru',
'pimpedupmyspace.com',
'pjjkp.com',
'plexolan.de',
'poczta.onet.pl',
'politikerclub.de',
'poofy.org',
'pookmail.com',
'pop3.xyz',
'postalmail.biz',
'privacy.net',
'privatdemail.net',
'privy-mail.com',
'privymail.de',
'proxymail.eu',
'prtnx.com',
'prtz.eu',
'pubmail.io',
'punkass.com',
'putthisinyourspamdatabase.com',
'pwrby.com',
'q314.net',
'qisdo.com',
'qisoa.com',
'qoika.com',
'quickinbox.com',
'quickmail.nl',
'rainmail.biz',
'rcpt.at',
're-gister.com',
'reallymymail.com',
'realtyalerts.ca',
'recode.me',
'reconmail.com',
'recursor.net',
'recyclemail.dk',
'regbypass.com',
'regbypass.comsafe-mail.net',
'rejectmail.com',
'reliable-mail.com',
'remail.cf',
'remail.ga',
'renraku.in',
'rhyta.com',
'rklips.com',
'rmqkr.net',
'royal.net',
'rppkn.com',
'rtrtr.com',
's0ny.net',
'safe-mail.net',
'safersignup.de',
'safetymail.info',
'safetypost.de',
'sandelf.de',
'sayawaka-dea.info',
'saynotospams.com',
'scatmail.com',
'schafmail.de',
'schrott-email.de',
'secretemail.de',
'secure-mail.biz',
'secure-mail.cc',
'selfdestructingmail.com',
'selfdestructingmail.org',
'sendspamhere.com',
'senseless-entertainment.com',
'services391.com',
'sharedmailbox.org',
'sharklasers.com',
'shieldedmail.com',
'shieldemail.com',
'shiftmail.com',
'shitmail.me',
'shitmail.org',
'shitware.nl',
'shmeriously.com',
'shortmail.net',
'showslow.de',
'sibmail.com',
'sinnlos-mail.de',
'siteposter.net',
'skeefmail.com',
'slapsfromlastnight.com',
'slaskpost.se',
'slipry.net',
'slopsbox.com',
'slowslow.de',
'slushmail.com',
'smashmail.de',
'smellfear.com',
'smellrear.com',
'smoug.net',
'snakemail.com',
'sneakemail.com',
'sneakmail.de',
'snkmail.com',
'sofimail.com',
'sofort-mail.de',
'softpls.asia',
'sogetthis.com',
'soisz.com',
'solvemail.info',
'soodonims.com',
'spam.la',
'spam.su',
'spam4.me',
'spamail.de',
'spamarrest.com',
'spamavert.com',
'spambob.com',
'spambob.net',
'spambob.org',
'spambog.com',
'spambog.de',
'spambog.net',
'spambog.ru',
'spambooger.com',
'spambox.info',
'spambox.irishspringrealty.com',
'spambox.us',
'spambpg.com',
'spamcannon.com',
'spamcannon.net',
'spamcero.com',
'spamcon.org',
'spamcorptastic.com',
'spamcowboy.com',
'spamcowboy.net',
'spamcowboy.org',
'spamday.com',
'spamex.com',
'spamfighter.cf',
'spamfighter.ga',
'spamfighter.gq',
'spamfighter.ml',
'spamfighter.tk',
'spamfree.eu',
'spamfree24.com',
'spamfree24.de',
'spamfree24.eu',
'spamfree24.info',
'spamfree24.net',
'spamfree24.org',
'spamgoes.in',
'spamgourmet.com',
'spamgourmet.net',
'spamgourmet.org',
'spamherelots.com',
'spamhereplease.com',
'spamhole.com',
'spamify.com',
'spaminator.de',
'spamkill.info',
'spaml.com',
'spaml.de',
'spammotel.com',
'spamobox.com',
'spamoff.de',
'spamsalad.in',
'spamslicer.com',
'spamsphere.com',
'spamspot.com',
'spamstack.net',
'spamthis.co.uk',
'spamthisplease.com',
'spamtrail.com',
'spamtroll.net',
'speed.1s.fr',
'spikio.com',
'spoofmail.de',
'spybox.de',
'squizzy.de',
'ssoia.com',
'startkeys.com',
'stexsy.com',
'stinkefinger.net',
'stop-my-spam.cf',
'stop-my-spam.com',
'stop-my-spam.ga',
'stop-my-spam.ml',
'stop-my-spam.tk',
'streetwisemail.com',
'stuffmail.de',
'super-auswahl.de',
'supergreatmail.com',
'supermailer.jp',
'superrito.com',
'superstachel.de',
'suremail.info',
'sute.jp',
'svk.jp',
'sweetxxx.de',
'tafmail.com',
'tagyourself.com',
'talkinator.com',
'tapchicuoihoi.com',
'teewars.org',
'teleworm.com',
'teleworm.us',
'temp-mail.com',
'temp-mail.net',
'temp-mail.org',
'temp-mail.ru',
'temp15qm.com',
'tempail.com',
'tempalias.com',
'tempe-mail.com',
'tempemail.biz',
'tempemail.co.za',
'tempemail.com',
'tempemail.net',
'tempemail.org',
'tempinbox.co.uk',
'tempinbox.com',
'tempmail.de',
'tempmail.eu',
'tempmail.it',
'tempmail2.com',
'tempmaildemo.com',
'tempmailer.com',
'tempmailer.de',
'tempomail.fr',
'temporarily.de',
'temporarioemail.com.br',
'temporaryemail.net',
'temporaryemail.us',
'temporaryforwarding.com',
'temporaryinbox.com',
'temporarymailaddress.com',
'tempsky.com',
'tempthe.net',
'tempymail.com',
'test.com',
'thanksnospam.info',
'thankyou2010.com',
'thc.st',
'thecloudindex.com',
'thisisnotmyrealemail.com',
'thismail.net',
'thismail.ru',
'throam.com',
'throwam.com',
'throwawayemailaddress.com',
'throwawaymail.com',
'tilien.com',
'tittbit.in',
'tizi.com',
'tmail.ws',
'tmailinator.com',
'tmpeml.info',
'toiea.com',
'tokenmail.de',
'toomail.biz',
'topranklist.de',
'tormail.net',
'tormail.org',
'tradermail.info',
'trash-amil.com',
'trash-mail.at',
'trash-mail.cf',
'trash-mail.com',
'trash-mail.de',
'trash-mail.ga',
'trash-mail.gq',
'trash-mail.ml',
'trash-mail.tk',
'trash-me.com',
'trash2009.com',
'trash2010.com',
'trash2011.com',
'trashdevil.com',
'trashdevil.de',
'trashemail.de',
'trashmail.at',
'trashmail.com',
'trashmail.de',
'trashmail.me',
'trashmail.net',
'trashmail.org',
'trashmail.ws',
'trashmailer.com',
'trashymail.com',
'trashymail.net',
'trayna.com',
'trbvm.com',
'trialmail.de',
'trickmail.net',
'trillianpro.com',
'tryalert.com',
'turual.com',
'twinmail.de',
'twoweirdtricks.com',
'tyldd.com',
'ubismail.net',
'uggsrock.com',
'umail.net',
'unlimit.com',
'unmail.ru',
'upliftnow.com',
'uplipht.com',
'uroid.com',
'us.af',
'valemail.net',
'venompen.com',
'vermutlich.net',
'veryrealemail.com',
'vidchart.com',
'viditag.com',
'viewcastmedia.com',
'viewcastmedia.net',
'viewcastmedia.org',
'viralplays.com',
'vmail.me',
'voidbay.com',
'vomoto.com',
'vpn.st',
'vsimcard.com',
'vubby.com',
'w3internet.co.uk',
'walala.org',
'walkmail.net',
'watchever.biz',
'webemail.me',
'webm4il.info',
'webuser.in',
'wee.my',
'weg-werf-email.de',
'wegwerf-email-addressen.de',
'wegwerf-email.at',
'wegwerf-emails.de',
'wegwerfadresse.de',
'wegwerfemail.com',
'wegwerfemail.de',
'wegwerfmail.de',
'wegwerfmail.info',
'wegwerfmail.net',
'wegwerfmail.org',
'wem.com',
'wetrainbayarea.com',
'wetrainbayarea.org',
'wh4f.org',
'whatiaas.com',
'whatpaas.com',
'whatsaas.com',
'whopy.com',
'whyspam.me',
'wickmail.net',
'wilemail.com',
'willhackforfood.biz',
'willselfdestruct.com',
'winemaven.info',
'wmail.cf',
'writeme.com',
'wronghead.com',
'wuzup.net',
'wuzupmail.net',
'wwwnew.eu',
'wzukltd.com',
'xagloo.com',
'xemaps.com',
'xents.com',
'xmaily.com',
'xoxy.net',
'xww.ro',
'xyzfree.net',
'yapped.net',
'yep.it',
'yogamaven.com',
'yomail.info',
'yopmail.com',
'yopmail.fr',
'yopmail.gq',
'yopmail.net',
'yopmail.org',
'yoru-dea.com',
'you-spam.com',
'youmail.ga',
'yourdomain.com',
'ypmail.webarnak.fr.eu.org',
'yuurok.com',
'yyhmail.com',
'z1p.biz',
'za.com',
'zebins.com',
'zebins.eu',
'zehnminuten.de',
'zehnminutenmail.de',
'zetmail.com',
'zippymail.info',
'zoaxe.com',
'zoemail.com',
'zoemail.net',
'zoemail.org',
'zomg.info',
'zxcv.com',
'zxcvbnm.com',
'zzz.com',
]
# reCAPTCHA API
# NOTE: Using the recaptcha.net domain h/t https://github.com/google/recaptcha/issues/87#issuecomment-368252094
RECAPTCHA_SITE_KEY = None
RECAPTCHA_SECRET_KEY = None
RECAPTCHA_VERIFY_URL = 'https://recaptcha.net/recaptcha/api/siteverify'
# akismet spam check
AKISMET_APIKEY = None
SPAM_CHECK_ENABLED = False
SPAM_CHECK_PUBLIC_ONLY = True
SPAM_ACCOUNT_SUSPENSION_ENABLED = False
SPAM_ACCOUNT_SUSPENSION_THRESHOLD = timedelta(hours=24)
SPAM_FLAGGED_MAKE_NODE_PRIVATE = False
SPAM_FLAGGED_REMOVE_FROM_SEARCH = False
SHARE_API_TOKEN = None
# refresh campaign every 5 minutes
CAMPAIGN_REFRESH_THRESHOLD = 5 * 60 # 5 minutes in seconds
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = None
# sitemap default settings
SITEMAP_TO_S3 = False
SITEMAP_AWS_BUCKET = None
SITEMAP_URL_MAX = 25000
SITEMAP_INDEX_MAX = 50000
SITEMAP_STATIC_URLS = [
OrderedDict([('loc', ''), ('changefreq', 'yearly'), ('priority', '0.5')]),
OrderedDict([('loc', 'preprints'), ('changefreq', 'yearly'), ('priority', '0.5')]),
OrderedDict([('loc', 'prereg'), ('changefreq', 'yearly'), ('priority', '0.5')]),
OrderedDict([('loc', 'meetings'), ('changefreq', 'yearly'), ('priority', '0.5')]),
OrderedDict([('loc', 'registries'), ('changefreq', 'yearly'), ('priority', '0.5')]),
OrderedDict([('loc', 'reviews'), ('changefreq', 'yearly'), ('priority', '0.5')]),
OrderedDict([('loc', 'explore/activity'), ('changefreq', 'weekly'), ('priority', '0.5')]),
OrderedDict([('loc', 'support'), ('changefreq', 'yearly'), ('priority', '0.5')]),
OrderedDict([('loc', 'faq'), ('changefreq', 'yearly'), ('priority', '0.5')]),
]
SITEMAP_USER_CONFIG = OrderedDict([('loc', ''), ('changefreq', 'yearly'), ('priority', '0.5')])
SITEMAP_NODE_CONFIG = OrderedDict([('loc', ''), ('lastmod', ''), ('changefreq', 'monthly'), ('priority', '0.5')])
SITEMAP_PREPRINT_CONFIG = OrderedDict([('loc', ''), ('lastmod', ''), ('changefreq', 'yearly'), ('priority', '0.5')])
SITEMAP_PREPRINT_FILE_CONFIG = OrderedDict([('loc', ''), ('lastmod', ''), ('changefreq', 'yearly'), ('priority', '0.5')])
# For preventing indexing of QA nodes by Elastic and SHARE
DO_NOT_INDEX_LIST = {
'tags': ['qatest', 'qa test'],
'titles': ['Bulk stress 201', 'Bulk stress 202', 'OSF API Registration test'],
}
CUSTOM_CITATIONS = {
'bluebook-law-review': 'bluebook',
'bluebook2': 'bluebook',
'bluebook-inline': 'bluebook'
}
#Email templates logo
OSF_LOGO = 'osf_logo'
OSF_PREPRINTS_LOGO = 'osf_preprints'
OSF_MEETINGS_LOGO = 'osf_meetings'
OSF_PREREG_LOGO = 'osf_prereg'
OSF_REGISTRIES_LOGO = 'osf_registries'
OSF_LOGO_LIST = [OSF_LOGO, OSF_PREPRINTS_LOGO, OSF_MEETINGS_LOGO, OSF_PREREG_LOGO, OSF_REGISTRIES_LOGO]
FOOTER_LINKS = {
'terms': 'https://github.com/CenterForOpenScience/centerforopenscience.org/blob/master/TERMS_OF_USE.md',
'privacyPolicy': 'https://github.com/CenterForOpenScience/centerforopenscience.org/blob/master/PRIVACY_POLICY.md',
'cookies': 'https://github.com/CenterForOpenScience/centerforopenscience.org/blob/master/PRIVACY_POLICY.md#f-cookies',
'cos': 'https://cos.io',
'statusPage': 'https://status.cos.io/',
'apiDocs': 'https://developer.osf.io/',
'topGuidelines': 'http://cos.io/top/',
'rpp': 'https://osf.io/ezcuj/wiki/home/',
'rpcb': 'https://osf.io/e81xl/wiki/home/',
'twitter': 'http://twitter.com/OSFramework',
'facebook': 'https://www.facebook.com/CenterForOpenScience/',
'googleGroup': 'https://groups.google.com/forum/#!forum/openscienceframework',
'github': 'https://www.github.com/centerforopenscience',
}
| [] | [] | [
"RABBITMQ_VHOST",
"RABBITMQ_HOST",
"BROKER_URL",
"RABBITMQ_USERNAME",
"RABBITMQ_PORT",
"RABBITMQ_PASSWORD"
] | [] | ["RABBITMQ_VHOST", "RABBITMQ_HOST", "BROKER_URL", "RABBITMQ_USERNAME", "RABBITMQ_PORT", "RABBITMQ_PASSWORD"] | python | 6 | 0 | |
db/events/purge.go | package events
import (
"context"
"database/sql"
"os"
"strconv"
"strings"
"time"
"github.com/packethost/pkg/log"
"github.com/pkg/errors"
)
const defaultEventsTTL = "60m"
// Purge periodically checks the events table and
// purges the events that have passed the defined EVENTS_TTL
func Purge(db *sql.DB, logger log.Logger) error {
env := os.Getenv("EVENTS_TTL")
if env == "" {
env = defaultEventsTTL
}
val := strings.TrimRight(env, "m")
ttl, err := strconv.Atoi(val)
if err != nil {
return err
}
ticker := time.NewTicker(time.Duration(ttl) * time.Minute)
for range ticker.C {
then := time.Now().Local().Add(time.Duration(int64(-ttl) * int64(time.Minute)))
tx, err := db.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelSerializable})
if err != nil {
return errors.Wrap(err, "BEGIN transaction")
}
_, err = tx.Exec("DELETE FROM events WHERE created_at <= $1;", then)
if err != nil {
return errors.Wrap(err, "DELETE")
}
logger.Info("purging events")
err = tx.Commit()
if err != nil {
return errors.Wrap(err, "COMMIT")
}
}
return nil
}
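// Illustrative wiring only (not part of this package): because Purge blocks on
// its ticker loop, callers typically start it in a goroutine once a *sql.DB and
// a log.Logger have been constructed elsewhere:
//
//	go func() {
//		if err := Purge(db, logger); err != nil {
//			logger.Error(err)
//		}
//	}()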
| [
"\"EVENTS_TTL\""
] | [] | [
"EVENTS_TTL"
] | [] | ["EVENTS_TTL"] | go | 1 | 0 | |
src/sasctl/utils/cli.py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import inspect
import json
import logging
import os
import pkgutil
import warnings
from collections import namedtuple, defaultdict
from importlib import import_module
from pprint import pprint
ArgInfo = namedtuple('ArgInfo', ['name', 'type', 'required', 'default', 'doc'])
def sasctl_command(name, subname=None):
"""Decorator that tags the function as being usable from the command line.
Parameters
----------
name : str
the name of the command that will be shown on the command line.
subname : str
the name of the service that the command will be listed under
Returns
-------
function
Examples
--------
Define a command called 'cmd' not associated with a service
>>> @sasctl_command('cmd')
>>> def func():
...
Define a command called 'cmd' associated with the 'svc' service
>>> @sasctl_command('svc', 'cmd')
>>> def func():
...
Define a command and allow its name and service to be auto-assigned
>>> @sasctl_command
>>> def func():
...
"""
def decorator(func):
if isinstance(name, str):
if isinstance(subname, str):
command_name = subname
service_name = name
else:
command_name = name
service_name = subname
else:
command_name = func.__name__
if any(
command_name.startswith(x)
for x in ['list_', 'update_', 'get_', 'create_', 'delete_']
):
parts = command_name.split('_')
command_name = parts[0]
service_name = parts[-1]
else:
service_name = subname
def parse_args():
"""Retrieve argument metadata from function signature and docstring."""
arg_spec = inspect.getargspec(func)
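# Arguments without a default value are flagged as required; the defaults list is
# left-padded with None so it lines up index-for-index with arg_spec.args.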
defaults = list(arg_spec.defaults) if arg_spec.defaults is not None else []
required = [True] * (len(arg_spec.args) - len(defaults)) + [False] * len(
defaults
)
defaults = [None] * (len(arg_spec.args) - len(defaults)) + defaults
types = []
help_doc = []
doc = inspect.getdoc(func)
if doc and 'Parameters\n' in doc:
doc_lines = doc[doc.find('Parameters\n') :].splitlines()
doc_lines.pop(0) # First line is "Parameters"
if doc_lines and doc_lines[0].startswith('---'):
doc_lines.pop(
0
) # Discard ----------- line under "Parameters" heading
while doc_lines:
var = doc_lines.pop(0)
if var.startswith('Returns') or var.strip() == '':
break
if ':' in var:
types.append(var.split(':')[-1].strip())
else:
types.append('str')
if doc_lines and doc_lines[0].startswith(' '):
help_doc.append(doc_lines.pop(0).strip())
else:
help_doc.append('')
else:
types = ['str'] * len(arg_spec.args)
help_doc = [None] * len(arg_spec.args)
return [
ArgInfo(n, t, r, d, o)
for n, t, r, d, o in zip(
arg_spec.args, types, required, defaults, help_doc
)
]
func._cli_command = command_name
func._cli_service = service_name
func._cli_arguments = parse_args
return func
if callable(name):
# allow direct decoration without arguments
return decorator(name)
return decorator
def _find_services(module='sasctl'):
"""Recursively find all functions in all modules that have been decorated as CLI commands."""
m = __import__(module, fromlist=['']) # returns a module
def find_recurse(module, services):
for obj in dir(module):
obj = getattr(module, obj)
source_module = getattr(obj, '__module__', type(obj).__module__)
# Module-level functions that are tagged as commands
if hasattr(obj, '_cli_command') and hasattr(obj, '_cli_service'):
services[obj._cli_service][obj._cli_command] = obj
# Check methods on service classes
elif source_module.startswith('sasctl._services'):
for atr in dir(obj):
atr = getattr(obj, atr)
if hasattr(atr, '_cli_command') and hasattr(atr, '_cli_service'):
services[atr._cli_service][atr._cli_command] = atr
# recurse into submodules
submodules = pkgutil.iter_modules(getattr(module, '__path__', []))
for submodule in submodules:
# ModuleInfo returned py 3.6 has .name
# Tuple of (module_loader, name, ispkg) returned by older versions
submodule_name = getattr(submodule, 'name', submodule[1])
# TODO: Temporary until pzmm fully merged with sasctl
if submodule_name == 'pzmm':
continue
submodule = import_module('.' + submodule_name, package=module.__name__)
# if hasattr(submodule, 'name'):
# # ModuleInfo returned py 3.6
# submodule = import_module('.' + submodule.name, package=module.__name__)
# else:
# # Tuple of (module_loader, name, ispkg) returned by older versions
# submodule = import_module('.' + submodule[1], package=module.__name__)
services = find_recurse(submodule, services)
return services
services = find_recurse(m, defaultdict(dict))
return services
def _get_func_description(func):
description = getattr(func, '__doc__', '')
lines = description.split('\n')
if lines:
return lines[0]
def _build_parser(services):
from sasctl import __version__
# TODO: Set command docstring
# Create standard, top-level arguments
parser = argparse.ArgumentParser(
prog='sasctl', description='sasctl interacts with a SAS Viya environment.'
)
parser.add_argument(
'-k', '--insecure', action='store_true', help='skip SSL verification'
)
parser.add_argument(
'-f', '--format', choices=['json'], default='json', help='output format'
)
parser.add_argument('-v', '--verbose', action='count')
parser.add_argument(
'--version', action='version', version='%(prog)s ' + __version__
)
subparsers = parser.add_subparsers(title='service', dest='service')
subparsers.required = True
for service, commands in services.items():
service_parser = subparsers.add_parser(service)
service_subparser = service_parser.add_subparsers(
title='command', dest='command'
)
service_subparser.required = True
# Add the command and arguments for each command
for command in commands:
func = services[service][command]
cmd_parser = service_subparser.add_parser(
command, help=_get_func_description(func)
)
for arg in func._cli_arguments():
if arg.name in ('self', 'cls'):
continue
if arg.required:
cmd_parser.add_argument(arg.name, help=arg.doc)
else:
cmd_parser.add_argument(
'--' + arg.name,
required=arg.required,
default=arg.default,
help=arg.doc,
)
return parser
def main(args=None):
"""Main entry point when executed as a command line utility."""
from sasctl import Session, current_session
# Find all services and associated commands
services = _find_services()
parser = _build_parser(services)
args = parser.parse_args(args)
if args.verbose is None or args.verbose == 0:
lvl = logging.WARNING
elif args.verbose == 1:
lvl = logging.INFO
else:
lvl = logging.DEBUG
handler = logging.StreamHandler()
handler.setLevel(lvl)
logging.getLogger('sasctl.core').addHandler(handler)
logging.getLogger('sasctl.core').setLevel(lvl)
warnings.simplefilter('ignore')
func = services[args.service][args.command]
kwargs = vars(args).copy()
# Remove args that shouldn't be passed to the underlying command
for k in ['command', 'service', 'insecure', 'verbose', 'format']:
kwargs.pop(k, None)
username = os.environ.get('SASCTL_USER_NAME')
password = os.environ.get('SASCTL_PASSWORD')
server = os.environ.get('SASCTL_SERVER_NAME')
if server is None:
parser.error(
"Hostname must be specified in the 'SASCTL_SERVER_NAME' environment variable."
)
verify_ssl = not args.insecure
try:
# current_session() should never be set when executing from the
# command line but it allows us to provide a pre-created session
# during testing
with current_session() or Session(
server, username, password, verify_ssl=verify_ssl
):
result = func(**kwargs)
if isinstance(result, list):
pprint([str(x) for x in result])
elif isinstance(result, dict) and args.format == 'json':
print(json.dumps(result, indent=2))
else:
pprint(result)
except RuntimeError as e:
parser.error(e)
| [] | [] | ["SASCTL_SERVER_NAME", "SASCTL_USER_NAME", "SASCTL_PASSWORD"] | [] | ["SASCTL_SERVER_NAME", "SASCTL_USER_NAME", "SASCTL_PASSWORD"] | python | 3 | 0 |
cmd/agi/main.go | // Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The gapid command launches the GAPID UI. It looks for the JVM (bundled or
// from the system), the GAPIC JAR (bundled or from the build output) and
// launches GAPIC with the correct JVM flags and environment variables.
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
)
const (
versionPrefix = `version "`
googleInfix = "-google-"
minJavaMajor = 1
minJavaMinor = 8
)
type config struct {
cwd string
vm string
vmArgs []string
gapic string
args []string
help bool
console bool
verbose bool
}
func main() {
os.Exit(run())
}
func run() int {
c := newConfig()
if c.console {
createConsole()
if runtime.GOOS == "windows" {
defer func() {
fmt.Println()
fmt.Println("Press enter to continue")
os.Stdin.Read(make([]byte, 1))
}()
}
}
if c.help {
defer func() {
fmt.Println()
fmt.Println("Launcher Flags:")
fmt.Println(" --jar Path to the gapic JAR to use")
fmt.Println(" --vm Path to the JVM to use")
fmt.Println(" --vmarg Extra argument for the JVM (repeatable)")
fmt.Println(" --console Run AGI inside a terminal console")
fmt.Println(" --verbose-startup Log verbosely in the launcher")
}()
}
if err := c.locateCWD(); err != nil {
fmt.Println(err)
return 1
}
if err := c.locateVM(); err != nil {
fmt.Println(err)
if !c.verbose {
fmt.Println("Use --verbose-startup for additional details")
}
return 1
}
if err := c.locateGAPIC(); err != nil {
fmt.Println(err)
return 1
}
fmt.Println("Starting", c.vm, c.gapic)
cmd := exec.Command(c.vm, append(append(c.vmArgs, "-jar", c.gapic), c.args...)...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Env = append(os.Environ(), "GAPID="+c.cwd)
if runtime.GOOS == "linux" {
cmd.Env = append(cmd.Env, "LIBOVERLAY_SCROLLBAR=0")
cmd.Env = append(cmd.Env, "GTK_OVERLAY_SCROLLING=0")
}
// If run via 'bazel run', use the shell's CWD, not bazel's.
if cwd := os.Getenv("BUILD_WORKING_DIRECTORY"); cwd != "" {
cmd.Dir = cwd
}
if err := cmd.Run(); err != nil {
if _, ok := err.(*exec.ExitError); !ok {
fmt.Println("Failed to start GAPIC:", err)
}
return 1
}
return 0
}
func newConfig() *config {
c := &config{}
// Doing our own flag handling (rather than using go's flag package) to avoid
// it attempting to parse the GAPIC flags, which may be in a different format.
// This loop simply looks for the launcher flags, but hands everything else to
// GAPIC verbatim.
args := os.Args[1:]
for i := 0; i < len(args); i++ {
switch {
case args[i] == "--jar" && i < len(args)-1:
i++
c.gapic = args[i]
case args[i] == "--vm" && i < len(args)-1:
i++
c.vm = args[i]
case args[i] == "--vmarg" && i < len(args)-1:
i++
c.vmArgs = append(c.vmArgs, args[i])
case args[i] == "--console":
c.console = true
case args[i] == "--verbose-startup":
c.verbose = true
default:
c.help = c.help || args[i] == "--help" || args[i] == "--fullhelp"
c.args = append(c.args, args[i])
}
}
c.console = c.console || c.help
if runtime.GOOS == "darwin" {
c.vmArgs = append(c.vmArgs, "-XstartOnFirstThread")
}
return c
}
func (c *config) logIfVerbose(args ...interface{}) {
if c.verbose {
fmt.Println(args...)
}
}
func (c *config) locateCWD() error {
cwd, err := os.Executable()
if err != nil {
return err
}
cwd, err = filepath.EvalSymlinks(cwd)
if err == nil {
c.cwd = filepath.Dir(cwd)
c.logIfVerbose("CWD:", c.cwd)
}
return err
}
func (c *config) locateVM() error {
if c.vm != "" {
if c.checkVM(c.vm, false) {
return nil
}
if runtime.GOOS == "windows" && c.checkVM(c.vm+".exe", false) {
c.vm += ".exe"
return nil
}
if java := c.javaInHome(c.vm); c.checkVM(java, false) {
c.vm = java
return nil
}
return fmt.Errorf("JVM '%s' not found/usable", c.vm)
}
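// No JVM was given via --vm: probe the bundled JRE, the default Linux OpenJDK 8 path,
// JAVA_HOME and finally the system PATH, in that order.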
if java := c.javaInHome(filepath.Join(c.cwd, "jre")); c.checkVM(java, true) {
c.vm = java
return nil
}
if runtime.GOOS == "linux" {
if java := "/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java"; c.checkVM(java, true) {
c.vm = java
return nil
}
}
if home := os.Getenv("JAVA_HOME"); home != "" {
if java := c.javaInHome(home); c.checkVM(java, true) {
c.vm = java
return nil
}
}
if java, err := exec.LookPath(c.javaExecutable()); err == nil && c.checkVM(java, true) {
c.vm = java
return nil
}
return fmt.Errorf("No suitable JVM found. A JRE >= %d.%d is required.", minJavaMajor, minJavaMinor)
}
func (c *config) javaExecutable() string {
if runtime.GOOS == "windows" {
if c.console {
return "java.exe"
}
return "javaw.exe"
}
return "java"
}
func (c *config) javaInHome(home string) string {
return filepath.Join(home, "bin", c.javaExecutable())
}
func (c *config) checkVM(java string, checkVersion bool) bool {
if stat, err := os.Stat(java); err != nil || stat.IsDir() {
c.logIfVerbose("Not using " + java + ": not a file")
return false
}
if !checkVersion {
return true
}
version, err := exec.Command(java, "-version").CombinedOutput()
if err != nil {
c.logIfVerbose("Not using " + java + ": failed to get version info")
return false
}
versionStr := string(version)
// Blacklist the Google custom JDKs.
if p := strings.Index(versionStr, googleInfix); p >= 0 {
c.logIfVerbose("Not using " + java + ": is a Google JDK (go/gapid-jdk)")
return false
}
// Looks for the pattern: <product> version "<major>.<minor>.<micro><build>"
// Not using regular expressions to avoid binary bloat.
if p := strings.Index(versionStr, versionPrefix); p >= 0 {
p += len(versionPrefix)
if q := strings.Index(versionStr[p:], "."); q > 0 {
if r := strings.Index(versionStr[p+q+1:], "."); r > 0 {
major, _ := strconv.Atoi(versionStr[p : p+q])
minor, _ := strconv.Atoi(versionStr[p+q+1 : p+q+r+1])
useIt := major > minJavaMajor || (major == minJavaMajor && minor >= minJavaMinor)
if !useIt {
c.logIfVerbose("Not using " + java + ": unsupported version")
}
return useIt
}
}
}
c.logIfVerbose("Not using " + java + ": failed to parse version")
return false
}
func (c *config) locateGAPIC() error {
gapic := c.gapic
if gapic == "" {
gapic = filepath.Join(c.cwd, "lib", "gapic.jar")
}
if abs, err := filepath.Abs(gapic); err == nil {
gapic = abs
}
if _, err := os.Stat(gapic); !os.IsNotExist(err) {
c.gapic = gapic
return nil
}
return fmt.Errorf("GAPIC JAR '%s' not found", gapic)
}
| ["\"BUILD_WORKING_DIRECTORY\"", "\"JAVA_HOME\""] | [] | ["JAVA_HOME", "BUILD_WORKING_DIRECTORY"] | [] | ["JAVA_HOME", "BUILD_WORKING_DIRECTORY"] | go | 2 | 0 |
cmd/newmanExecute_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type newmanExecuteOptions struct {
NewmanCollection string `json:"newmanCollection,omitempty"`
NewmanRunCommand string `json:"newmanRunCommand,omitempty"`
RunOptions []string `json:"runOptions,omitempty"`
NewmanInstallCommand string `json:"newmanInstallCommand,omitempty"`
NewmanEnvironment string `json:"newmanEnvironment,omitempty"`
NewmanGlobals string `json:"newmanGlobals,omitempty"`
FailOnError bool `json:"failOnError,omitempty"`
CfAppsWithSecrets []string `json:"cfAppsWithSecrets,omitempty"`
}
type newmanExecuteInflux struct {
step_data struct {
fields struct {
newman bool
}
tags struct {
}
}
}
func (i *newmanExecuteInflux) persist(path, resourceName string) {
measurementContent := []struct {
measurement string
valType string
name string
value interface{}
}{
{valType: config.InfluxField, measurement: "step_data", name: "newman", value: i.step_data.fields.newman},
}
errCount := 0
for _, metric := range measurementContent {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(metric.measurement, fmt.Sprintf("%vs", metric.valType), metric.name), metric.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting influx environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Influx environment")
}
}
// NewmanExecuteCommand Installs newman and executes specified newman collections.
func NewmanExecuteCommand() *cobra.Command {
const STEP_NAME = "newmanExecute"
metadata := newmanExecuteMetadata()
var stepConfig newmanExecuteOptions
var startTime time.Time
var influx newmanExecuteInflux
var logCollector *log.CollectorHook
var createNewmanExecuteCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Installs newman and executes specified newman collections.",
Long: `This script executes [Postman](https://www.getpostman.com) tests from a collection via the [Newman](https://www.getpostman.com/docs/v6/postman/collection_runs/command_line_integration_with_newman) command line tool.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
influx.persist(GeneralConfig.EnvRootPath, "influx")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
newmanExecute(stepConfig, &telemetryData, &influx)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addNewmanExecuteFlags(createNewmanExecuteCmd, &stepConfig)
return createNewmanExecuteCmd
}
func addNewmanExecuteFlags(cmd *cobra.Command, stepConfig *newmanExecuteOptions) {
cmd.Flags().StringVar(&stepConfig.NewmanCollection, "newmanCollection", `**/*.postman_collection.json`, "The test collection that should be executed. This could also be a file pattern.")
cmd.Flags().StringVar(&stepConfig.NewmanRunCommand, "newmanRunCommand", os.Getenv("PIPER_newmanRunCommand"), "+++ Deprecated +++ Please use list parameter `runOptions` instead.")
cmd.Flags().StringSliceVar(&stepConfig.RunOptions, "runOptions", []string{`run`, `{{.NewmanCollection}}`, `--reporters`, `cli,junit,html`, `--reporter-junit-export`, `target/newman/TEST-{{.CollectionDisplayName}}.xml`, `--reporter-html-export`, `target/newman/TEST-{{.CollectionDisplayName}}.html`}, "The newman command that will be executed inside the docker container.")
cmd.Flags().StringVar(&stepConfig.NewmanInstallCommand, "newmanInstallCommand", `npm install newman newman-reporter-html --global --quiet`, "The shell command that will be executed inside the docker container to install Newman.")
cmd.Flags().StringVar(&stepConfig.NewmanEnvironment, "newmanEnvironment", os.Getenv("PIPER_newmanEnvironment"), "Specify an environment file path or URL. Environments provide a set of variables that one can use within collections.")
cmd.Flags().StringVar(&stepConfig.NewmanGlobals, "newmanGlobals", os.Getenv("PIPER_newmanGlobals"), "Specify the file path or URL for global variables. Global variables are similar to environment variables but have a lower precedence and can be overridden by environment variables having the same name.")
cmd.Flags().BoolVar(&stepConfig.FailOnError, "failOnError", true, "Defines the behavior, in case tests fail.")
cmd.Flags().StringSliceVar(&stepConfig.CfAppsWithSecrets, "cfAppsWithSecrets", []string{}, "List of CloudFoundry apps with secrets")
}
// retrieve step metadata
func newmanExecuteMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "newmanExecute",
Aliases: []config.Alias{},
Description: "Installs newman and executes specified newman collections.",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Resources: []config.StepResources{
{Name: "tests", Type: "stash"},
},
Parameters: []config.StepParameters{
{
Name: "newmanCollection",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `**/*.postman_collection.json`,
},
{
Name: "newmanRunCommand",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_newmanRunCommand"),
},
{
Name: "runOptions",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{`run`, `{{.NewmanCollection}}`, `--reporters`, `cli,junit,html`, `--reporter-junit-export`, `target/newman/TEST-{{.CollectionDisplayName}}.xml`, `--reporter-html-export`, `target/newman/TEST-{{.CollectionDisplayName}}.html`},
},
{
Name: "newmanInstallCommand",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `npm install newman newman-reporter-html --global --quiet`,
},
{
Name: "newmanEnvironment",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_newmanEnvironment"),
},
{
Name: "newmanGlobals",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_newmanGlobals"),
},
{
Name: "failOnError",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: true,
},
{
Name: "cfAppsWithSecrets",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{},
},
},
},
Containers: []config.Container{
{Name: "newman", Image: "node:lts-stretch", WorkingDir: "/home/node"},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "influx",
Type: "influx",
Parameters: []map[string]interface{}{
{"Name": "step_data"}, {"fields": []map[string]string{{"name": "newman"}}},
},
},
},
},
},
}
return theMetaData
}
| ["\"PIPER_newmanRunCommand\"", "\"PIPER_newmanEnvironment\"", "\"PIPER_newmanGlobals\"", "\"PIPER_newmanRunCommand\"", "\"PIPER_newmanEnvironment\"", "\"PIPER_newmanGlobals\""] | [] | ["PIPER_newmanRunCommand", "PIPER_newmanGlobals", "PIPER_newmanEnvironment"] | [] | ["PIPER_newmanRunCommand", "PIPER_newmanGlobals", "PIPER_newmanEnvironment"] | go | 3 | 0 |
frontend/src/web.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import os
import collections
import api
import monitorfrontend
import tablesfrontend
import sprocsfrontend
import indexesfrontend
import report
import performance
import hosts
import export
import hostsfrontend
import welcomefrontend
import datadb
import tplE
import yaml
from argparse import ArgumentParser
from cherrypy._cpdispatch import Dispatcher
class Healthcheck(object):
def default(self, *args, **kwargs):
return {}
default.exposed = True
class HostIdAndShortnameDispatcher(Dispatcher):
def __call__(self, path_info):
splits = path_info.split('/')
if len(splits) > 1 and splits[1]:
if splits[1].isdigit() or splits[1] in hosts.getAllHostUinamesSorted():
return Dispatcher.__call__(self, '/host' + path_info)
return Dispatcher.__call__(self, path_info.lower())
def main():
parser = ArgumentParser(description='PGObserver Frontend')
parser.add_argument('-c', '--config', help='Path to yaml config file with datastore connect details. See pgobserver_frontend.example.yaml for a sample file. \
Certain values can be overridden by ENV vars PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]')
parser.add_argument('--s3-config-path', help='Path style S3 URL to a key that holds the config file. Or PGOBS_CONFIG_S3_BUCKET env. var',
metavar='https://s3-region.amazonaws.com/x/y/file.yaml',
default=os.getenv('PGOBS_CONFIG_S3_BUCKET'))
parser.add_argument('-p', '--port', help='Web server port. Overrides value from config file', type=int)
args = parser.parse_args()
settings = collections.defaultdict(dict)
if args.s3_config_path: # S3 has precedence if specified
import aws_s3_configreader
settings = aws_s3_configreader.get_config_as_dict_from_s3_file(args.s3_config_path)
elif args.config:
args.config = os.path.expanduser(args.config)
if not os.path.exists(args.config):
print 'WARNING. Config file {} not found! exiting...'.format(args.config)
return
print "trying to read config file from {}".format(args.config)
with open(args.config, 'rb') as fd:
settings = yaml.load(fd)
# Make env vars overwrite yaml file, to run via docker without changing config file
settings['database']['host'] = (os.getenv('PGOBS_HOST') or settings['database'].get('host'))
settings['database']['port'] = (os.getenv('PGOBS_PORT') or settings['database'].get('port') or 5432)
settings['database']['name'] = (os.getenv('PGOBS_DATABASE') or settings['database'].get('name'))
settings['database']['frontend_user'] = (os.getenv('PGOBS_USER') or settings['database'].get('frontend_user'))
settings['database']['frontend_password'] = (os.getenv('PGOBS_PASSWORD') or settings['database'].get('frontend_password'))
if not (settings['database'].get('host') and settings['database'].get('name') and settings['database'].get('frontend_user')):
print 'Mandatory datastore connect details missing!'
print 'Check --config input or environment variables: PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]'
print ''
parser.print_help()
return
conn_string = ' '.join((
'dbname=' + settings['database']['name'],
'host=' + settings['database']['host'],
'user=' + settings['database']['frontend_user'],
'port=' + str(settings['database']['port']),
))
print 'Setting connection string to ... ' + conn_string
# finished printing conn_string to the world; the password can be appended now
conn_string = conn_string + ' password=' + settings['database']['frontend_password']
datadb.setConnectionString(conn_string)
current_dir = os.path.dirname(os.path.abspath(__file__))
conf = {
'global': {'server.socket_host': '0.0.0.0', 'server.socket_port': args.port or settings.get('frontend',
{}).get('port') or 8080},
'/': {'tools.staticdir.root': current_dir, 'request.dispatch': HostIdAndShortnameDispatcher()},
'/healthcheck': {'tools.sessions.on': False},
'/static': {'tools.staticdir.dir': 'static', 'tools.staticdir.on': True, 'tools.sessions.on': False},
'/manifest.info': {'tools.staticfile.on': True, 'tools.staticfile.filename': os.path.join(current_dir, '..',
'MANIFEST.MF'), 'tools.auth_basic.on': False, 'tools.sessions.on': False},
}
tplE.setup(settings) # setup of global variables and host data for usage in views
root = welcomefrontend.WelcomeFrontend()
root.host = monitorfrontend.MonitorFrontend()
root.report = report.Report()
root.export = export.Export()
root.perftables = performance.PerfTables()
root.perfapi = performance.PerfApi()
root.perfindexes = performance.PerfIndexes()
root.perfschemas = performance.PerfUnusedSchemas()
root.perflocks = performance.PerfLocksReport()
root.perfstatstatements = performance.PerfStatStatementsReport()
root.perfbloat = performance.PerfBloat()
root.sprocs = sprocsfrontend.SprocFrontend()
root.tables = tablesfrontend.TableFrontend()
root.indexes = indexesfrontend.IndexesFrontend()
root.hosts = hostsfrontend.HostsFrontend()
root.api = api.Root(root) # JSON api exposure, enabling integration with other monitoring tools
root.healthcheck = Healthcheck()
if settings.get('oauth', {}).get('enable_oauth', False):
print 'switching on oauth ...'
import oauth
root.oauth = oauth.Oauth(settings['oauth'])
cherrypy.config.update({'tools.oauthtool.on': True, 'tools.sessions.on': True,
'tools.sessions.timeout': settings['oauth'].get('session_timeout', 43200)})
cherrypy.quickstart(root, config=conf)
if __name__ == '__main__':
main()
| [] | [] | ["PGOBS_HOST", "PGOBS_PASSWORD", "PGOBS_PORT", "PGOBS_DATABASE", "PGOBS_CONFIG_S3_BUCKET", "PGOBS_USER"] | [] | ["PGOBS_HOST", "PGOBS_PASSWORD", "PGOBS_PORT", "PGOBS_DATABASE", "PGOBS_CONFIG_S3_BUCKET", "PGOBS_USER"] | python | 6 | 0 |
webhook_test.go | // © Copyright 2016 GREAT BEYOND AB
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mailchimp
import (
"context"
"os"
check "gopkg.in/check.v1"
t "github.com/greatbeyond/mailchimp/testing"
)
var _ = check.Suite(&WebhookTestSuite{})
type WebhookTestSuite struct {
client *Client
server *t.MockServer
ctx context.Context
}
func (s *WebhookTestSuite) SetUpSuite(c *check.C) {}
func (s *WebhookTestSuite) SetUpTest(c *check.C) {
s.server = t.NewMockServer()
s.server.SetChecker(c)
s.client = NewClient()
s.client.HTTPClient = s.server.HTTPClient
s.ctx = NewContextWithToken(context.Background(), os.Getenv("MAILCHIMP_TEST_TOKEN"))
// We need http to use the mock server
s.ctx = NewContextWithURL(s.ctx, "http://us13.api.mailchimp.com/3.0/")
}
func (s *WebhookTestSuite) TearDownTest(c *check.C) {}
func (s *WebhookTestSuite) Skip_CreateWebhook(c *check.C) {
createWebhookResponse, err := s.client.CreateWebhook(s.ctx, &CreateWebhook{
ListID: "1",
URL: "http://test.url/webhook",
Events: &WebhookEvents{
Subscribe: true,
Unsubscribe: true,
},
Sources: &WebhookSources{
User: true,
},
})
c.Assert(err, check.IsNil)
c.Assert(createWebhookResponse, check.NotNil)
}
func (s *WebhookTestSuite) Skip_GetWebhook(c *check.C) {
getWebhookResponse, err := s.client.GetWebhook(s.ctx, "1", "2")
c.Assert(err, check.IsNil)
c.Assert(getWebhookResponse, check.NotNil)
}
func (s *WebhookTestSuite) Skip_GetWebhooks(c *check.C) {
getWebhooksResponse, err := s.client.GetWebhooks(s.ctx, "1")
c.Assert(err, check.IsNil)
c.Assert(getWebhooksResponse, check.NotNil)
}
func (s *WebhookTestSuite) Skip_DeleteWebhook(c *check.C) {
getWebhookResponse, err := s.client.GetWebhook(s.ctx, "1", "2")
c.Assert(err, check.IsNil)
c.Assert(getWebhookResponse, check.NotNil)
err = getWebhookResponse.DeleteWebhook(s.ctx)
c.Assert(err, check.IsNil)
}
| ["\"MAILCHIMP_TEST_TOKEN\""] | [] | ["MAILCHIMP_TEST_TOKEN"] | [] | ["MAILCHIMP_TEST_TOKEN"] | go | 1 | 0 |
clients/google-api-services-cloudfunctions/v1/1.31.0/com/google/api/services/cloudfunctions/v1/CloudFunctions.java | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.cloudfunctions.v1;
/**
* Service definition for CloudFunctions (v1).
*
* <p>
* Manages lightweight user-provided functions executed in response to events.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://cloud.google.com/functions" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link CloudFunctionsRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
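 * <p>
 * A minimal usage sketch (the {@code transport}, {@code jsonFactory} and {@code requestInitializer}
 * values are assumed to be created elsewhere, for example via
 * {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()} and your
 * preferred credential setup; the application name and operation id below are placeholders):
 * </p>
 * <pre>{@code
 * // Build a client for the Cloud Functions API and poll a long-running operation.
 * CloudFunctions cloudfunctions =
 *     new CloudFunctions.Builder(transport, jsonFactory, requestInitializer)
 *         .setApplicationName("my-application")
 *         .build();
 * Operation operation =
 *     cloudfunctions.operations().get("operations/example-operation-id").execute();
 * }</pre>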
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class CloudFunctions extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.31.0 of the Cloud Functions API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://cloudfunctions.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://cloudfunctions.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public CloudFunctions(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
/**
* @param builder builder
*/
CloudFunctions(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Operations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudFunctions cloudfunctions = new CloudFunctions(...);}
* {@code CloudFunctions.Operations.List request = cloudfunctions.operations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Operations operations() {
return new Operations();
}
/**
* The "operations" collection of methods.
*/
public class Operations {
/**
* Gets the latest state of a long-running operation. Clients can use this method to poll the
* operation result at intervals as recommended by the API service.
*
* Create a request for the method "operations.get".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name The name of the operation resource.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.Operation> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^operations/[^/]+$");
/**
* Gets the latest state of a long-running operation. Clients can use this method to poll the
* operation result at intervals as recommended by the API service.
*
* Create a request for the method "operations.get".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the operation resource.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudFunctions.this, "GET", REST_PATH, null, com.google.api.services.cloudfunctions.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation resource. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation resource.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation resource. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists operations that match the specified filter in the request. If the server doesn't support
* this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override
* the binding to use different resource name schemes, such as `users/operations`. To override the
* binding, API services can add a binding such as `"/v1/{name=users}/operations"` to their service
* configuration. For backwards compatibility, the default name includes the operations collection
* id, however overriding users must ensure the name binding is the parent resource, without the
* operations collection id.
*
* Create a request for the method "operations.list".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @return the request
*/
public List list() throws java.io.IOException {
List result = new List();
initialize(result);
return result;
}
public class List extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.ListOperationsResponse> {
private static final String REST_PATH = "v1/operations";
/**
* Lists operations that match the specified filter in the request. If the server doesn't support
* this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to
* override the binding to use different resource name schemes, such as `users/operations`. To
* override the binding, API services can add a binding such as `"/v1/{name=users}/operations"` to
* their service configuration. For backwards compatibility, the default name includes the
* operations collection id, however overriding users must ensure the name binding is the parent
* resource, without the operations collection id.
*
* Create a request for the method "operations.list".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(CloudFunctions.this, "GET", REST_PATH, null, com.google.api.services.cloudfunctions.v1.model.ListOperationsResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. A filter for matching the requested operations. The supported formats of *filter*
* are: To query for a specific function: project:*,location:*,function:* To query for all of
* the latest operations for a project: project:*,latest:true
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Required. A filter for matching the requested operations. The supported formats of *filter* are: To
query for a specific function: project:*,location:*,function:* To query for all of the latest
operations for a project: project:*,latest:true
*/
public java.lang.String getFilter() {
return filter;
}
/**
* Required. A filter for matching the requested operations. The supported formats of *filter*
* are: To query for a specific function: project:*,location:*,function:* To query for all of
* the latest operations for a project: project:*,latest:true
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** Must not be set. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Must not be set.
*/
public java.lang.String getName() {
return name;
}
/** Must not be set. */
public List setName(java.lang.String name) {
this.name = name;
return this;
}
/**
* The maximum number of records that should be returned. Requested page size cannot exceed
* 100. If not set, the default page size is 100. Pagination is only supported when querying
* for a specific function.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of records that should be returned. Requested page size cannot exceed 100. If
not set, the default page size is 100. Pagination is only supported when querying for a specific
function.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* The maximum number of records that should be returned. Requested page size cannot exceed
* 100. If not set, the default page size is 100. Pagination is only supported when querying
* for a specific function.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* Token identifying which result to start with, which is returned by a previous list call.
* Pagination is only supported when querying for a specific function.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Token identifying which result to start with, which is returned by a previous list call. Pagination
is only supported when querying for a specific function.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* Token identifying which result to start with, which is returned by a previous list call.
* Pagination is only supported when querying for a specific function.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Projects collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudFunctions cloudfunctions = new CloudFunctions(...);}
* {@code CloudFunctions.Projects.List request = cloudfunctions.projects().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Projects projects() {
return new Projects();
}
/**
* The "projects" collection of methods.
*/
public class Projects {
/**
* An accessor for creating requests from the Locations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudFunctions cloudfunctions = new CloudFunctions(...);}
* {@code CloudFunctions.Locations.List request = cloudfunctions.locations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Locations locations() {
return new Locations();
}
/**
* The "locations" collection of methods.
*/
public class Locations {
/**
* Lists information about the supported locations for this service.
*
* Create a request for the method "locations.list".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param name The resource that owns the locations collection, if applicable.
* @return the request
*/
public List list(java.lang.String name) throws java.io.IOException {
List result = new List(name);
initialize(result);
return result;
}
public class List extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.ListLocationsResponse> {
private static final String REST_PATH = "v1/{+name}/locations";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+$");
/**
* Lists information about the supported locations for this service.
*
* Create a request for the method "locations.list".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The resource that owns the locations collection, if applicable.
* @since 1.13
*/
protected List(java.lang.String name) {
super(CloudFunctions.this, "GET", REST_PATH, null, com.google.api.services.cloudfunctions.v1.model.ListLocationsResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** The resource that owns the locations collection, if applicable. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The resource that owns the locations collection, if applicable.
*/
public java.lang.String getName() {
return name;
}
/** The resource that owns the locations collection, if applicable. */
public List setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+$");
}
this.name = name;
return this;
}
/** The standard list filter. */
@com.google.api.client.util.Key
private java.lang.String filter;
/** The standard list filter.
*/
public java.lang.String getFilter() {
return filter;
}
/** The standard list filter. */
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** The standard list page size. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The standard list page size.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The standard list page size. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The standard list page token. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The standard list page token.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The standard list page token. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* An accessor for creating requests from the Functions collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudFunctions cloudfunctions = new CloudFunctions(...);}
* {@code CloudFunctions.Functions.List request = cloudfunctions.functions().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Functions functions() {
return new Functions();
}
/**
* The "functions" collection of methods.
*/
public class Functions {
/**
* Synchronously invokes a deployed Cloud Function. To be used for testing purposes as very limited
* traffic is allowed. For more information on the actual limits, refer to [Rate
* Limits](https://cloud.google.com/functions/quotas#rate_limits).
*
* Create a request for the method "functions.call".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Call#execute()} method to invoke the remote operation.
*
* @param name Required. The name of the function to be called.
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.CallFunctionRequest}
* @return the request
*/
public Call call(java.lang.String name, com.google.api.services.cloudfunctions.v1.model.CallFunctionRequest content) throws java.io.IOException {
Call result = new Call(name, content);
initialize(result);
return result;
}
public class Call extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.CallFunctionResponse> {
private static final String REST_PATH = "v1/{+name}:call";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
/**
* Synchronously invokes a deployed Cloud Function. To be used for testing purposes as very
* limited traffic is allowed. For more information on the actual limits, refer to [Rate
* Limits](https://cloud.google.com/functions/quotas#rate_limits).
*
* Create a request for the method "functions.call".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Call#execute()} method to invoke the remote operation. <p>
* {@link Call#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The name of the function to be called.
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.CallFunctionRequest}
* @since 1.13
*/
protected Call(java.lang.String name, com.google.api.services.cloudfunctions.v1.model.CallFunctionRequest content) {
super(CloudFunctions.this, "POST", REST_PATH, content, com.google.api.services.cloudfunctions.v1.model.CallFunctionResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
}
@Override
public Call set$Xgafv(java.lang.String $Xgafv) {
return (Call) super.set$Xgafv($Xgafv);
}
@Override
public Call setAccessToken(java.lang.String accessToken) {
return (Call) super.setAccessToken(accessToken);
}
@Override
public Call setAlt(java.lang.String alt) {
return (Call) super.setAlt(alt);
}
@Override
public Call setCallback(java.lang.String callback) {
return (Call) super.setCallback(callback);
}
@Override
public Call setFields(java.lang.String fields) {
return (Call) super.setFields(fields);
}
@Override
public Call setKey(java.lang.String key) {
return (Call) super.setKey(key);
}
@Override
public Call setOauthToken(java.lang.String oauthToken) {
return (Call) super.setOauthToken(oauthToken);
}
@Override
public Call setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Call) super.setPrettyPrint(prettyPrint);
}
@Override
public Call setQuotaUser(java.lang.String quotaUser) {
return (Call) super.setQuotaUser(quotaUser);
}
@Override
public Call setUploadType(java.lang.String uploadType) {
return (Call) super.setUploadType(uploadType);
}
@Override
public Call setUploadProtocol(java.lang.String uploadProtocol) {
return (Call) super.setUploadProtocol(uploadProtocol);
}
/** Required. The name of the function to be called. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The name of the function to be called.
*/
public java.lang.String getName() {
return name;
}
/** Required. The name of the function to be called. */
public Call setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Call set(String parameterName, Object value) {
return (Call) super.set(parameterName, value);
}
}
/**
* Creates a new function. If a function with the given name already exists in the specified
* project, the long running operation will return `ALREADY_EXISTS` error.
*
* Create a request for the method "functions.create".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param location Required. The project and location in which the function should be created, specified in the format
* `projects/locations`
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.CloudFunction}
* @return the request
*/
public Create create(java.lang.String location, com.google.api.services.cloudfunctions.v1.model.CloudFunction content) throws java.io.IOException {
Create result = new Create(location, content);
initialize(result);
return result;
}
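// Illustrative usage sketch (editor-added, not generated code): creating a function and
// waiting on the returned long-running Operation. The runtime, trigger, and resource names
// below are placeholder assumptions.
//
//   CloudFunction fn = new CloudFunction()
//       .setName("projects/my-project/locations/us-central1/functions/my-function")
//       .setRuntime("nodejs16")
//       .setEntryPoint("helloWorld")
//       .setHttpsTrigger(new HttpsTrigger());
//   Operation op = cloudfunctions.projects().locations().functions()
//       .create("projects/my-project/locations/us-central1", fn)
//       .execute();
//   // Poll the returned operation until op.getDone() is true.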
public class Create extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.Operation> {
private static final String REST_PATH = "v1/{+location}/functions";
private final java.util.regex.Pattern LOCATION_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Creates a new function. If a function with the given name already exists in the specified
* project, the long running operation will return `ALREADY_EXISTS` error.
*
* Create a request for the method "functions.create".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
* <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param location Required. The project and location in which the function should be created, specified in the format
* `projects/locations`
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.CloudFunction}
* @since 1.13
*/
protected Create(java.lang.String location, com.google.api.services.cloudfunctions.v1.model.CloudFunction content) {
super(CloudFunctions.this, "POST", REST_PATH, content, com.google.api.services.cloudfunctions.v1.model.Operation.class);
this.location = com.google.api.client.util.Preconditions.checkNotNull(location, "Required parameter location must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(LOCATION_PATTERN.matcher(location).matches(),
"Parameter location must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. The project and location in which the function should be created, specified
* in the format `projects/locations`
*/
@com.google.api.client.util.Key
private java.lang.String location;
/** Required. The project and location in which the function should be created, specified in the format
`projects/locations`
*/
public java.lang.String getLocation() {
return location;
}
/**
* Required. The project and location in which the function should be created, specified
* in the format `projects/locations`
*/
public Create setLocation(java.lang.String location) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(LOCATION_PATTERN.matcher(location).matches(),
"Parameter location must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.location = location;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Deletes a function with the given name from the specified project. If the given function is used
* by some trigger, the trigger will be updated to remove this function.
*
* Create a request for the method "functions.delete".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param name Required. The name of the function which should be deleted.
* @return the request
*/
public Delete delete(java.lang.String name) throws java.io.IOException {
Delete result = new Delete(name);
initialize(result);
return result;
}
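// Illustrative usage sketch (editor-added, not generated code): deleting a function. The
// accessor chain and resource name below are placeholder assumptions.
//
//   Operation op = cloudfunctions.projects().locations().functions()
//       .delete("projects/my-project/locations/us-central1/functions/my-function")
//       .execute();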
public class Delete extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.Operation> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
/**
* Deletes a function with the given name from the specified project. If the given function is
* used by some trigger, the trigger will be updated to remove this function.
*
* Create a request for the method "functions.delete".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The name of the function which should be deleted.
* @since 1.13
*/
protected Delete(java.lang.String name) {
super(CloudFunctions.this, "DELETE", REST_PATH, null, com.google.api.services.cloudfunctions.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Required. The name of the function which should be deleted. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The name of the function which should be deleted.
*/
public java.lang.String getName() {
return name;
}
/** Required. The name of the function which should be deleted. */
public Delete setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Returns a signed URL for downloading deployed function source code. The URL is only valid for a
* limited period and should be used within minutes after generation. For more information about the
* signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls
*
* Create a request for the method "functions.generateDownloadUrl".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link GenerateDownloadUrl#execute()} method to invoke the remote
* operation.
*
* @param name The name of the function for which a source code Google Cloud Storage signed URL should be generated.
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.GenerateDownloadUrlRequest}
* @return the request
*/
public GenerateDownloadUrl generateDownloadUrl(java.lang.String name, com.google.api.services.cloudfunctions.v1.model.GenerateDownloadUrlRequest content) throws java.io.IOException {
GenerateDownloadUrl result = new GenerateDownloadUrl(name, content);
initialize(result);
return result;
}
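// Illustrative usage sketch (editor-added, not generated code): fetching a short-lived signed
// URL for the deployed source archive. The accessor chain and function name below are
// placeholder assumptions.
//
//   GenerateDownloadUrlResponse resp = cloudfunctions.projects().locations().functions()
//       .generateDownloadUrl("projects/my-project/locations/us-central1/functions/my-function",
//           new GenerateDownloadUrlRequest())
//       .execute();
//   String signedUrl = resp.getDownloadUrl();   // valid only for a limited period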
public class GenerateDownloadUrl extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.GenerateDownloadUrlResponse> {
private static final String REST_PATH = "v1/{+name}:generateDownloadUrl";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
/**
* Returns a signed URL for downloading deployed function source code. The URL is only valid for a
* limited period and should be used within minutes after generation. For more information about
* the signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls
*
* Create a request for the method "functions.generateDownloadUrl".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link GenerateDownloadUrl#execute()} method to invoke the remote
* operation. <p> {@link GenerateDownloadUrl#initialize(com.google.api.client.googleapis.services.
* AbstractGoogleClientRequest)} must be called to initialize this instance immediately after
* invoking the constructor. </p>
*
* @param name The name of the function for which a source code Google Cloud Storage signed URL should be generated.
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.GenerateDownloadUrlRequest}
* @since 1.13
*/
protected GenerateDownloadUrl(java.lang.String name, com.google.api.services.cloudfunctions.v1.model.GenerateDownloadUrlRequest content) {
super(CloudFunctions.this, "POST", REST_PATH, content, com.google.api.services.cloudfunctions.v1.model.GenerateDownloadUrlResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
}
@Override
public GenerateDownloadUrl set$Xgafv(java.lang.String $Xgafv) {
return (GenerateDownloadUrl) super.set$Xgafv($Xgafv);
}
@Override
public GenerateDownloadUrl setAccessToken(java.lang.String accessToken) {
return (GenerateDownloadUrl) super.setAccessToken(accessToken);
}
@Override
public GenerateDownloadUrl setAlt(java.lang.String alt) {
return (GenerateDownloadUrl) super.setAlt(alt);
}
@Override
public GenerateDownloadUrl setCallback(java.lang.String callback) {
return (GenerateDownloadUrl) super.setCallback(callback);
}
@Override
public GenerateDownloadUrl setFields(java.lang.String fields) {
return (GenerateDownloadUrl) super.setFields(fields);
}
@Override
public GenerateDownloadUrl setKey(java.lang.String key) {
return (GenerateDownloadUrl) super.setKey(key);
}
@Override
public GenerateDownloadUrl setOauthToken(java.lang.String oauthToken) {
return (GenerateDownloadUrl) super.setOauthToken(oauthToken);
}
@Override
public GenerateDownloadUrl setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GenerateDownloadUrl) super.setPrettyPrint(prettyPrint);
}
@Override
public GenerateDownloadUrl setQuotaUser(java.lang.String quotaUser) {
return (GenerateDownloadUrl) super.setQuotaUser(quotaUser);
}
@Override
public GenerateDownloadUrl setUploadType(java.lang.String uploadType) {
return (GenerateDownloadUrl) super.setUploadType(uploadType);
}
@Override
public GenerateDownloadUrl setUploadProtocol(java.lang.String uploadProtocol) {
return (GenerateDownloadUrl) super.setUploadProtocol(uploadProtocol);
}
/**
* The name of the function for which a source code Google Cloud Storage signed URL
* should be generated.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the function for which a source code Google Cloud Storage signed URL should be generated.
*/
public java.lang.String getName() {
return name;
}
/**
* The name of the function for which a source code Google Cloud Storage signed URL
* should be generated.
*/
public GenerateDownloadUrl setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public GenerateDownloadUrl set(String parameterName, Object value) {
return (GenerateDownloadUrl) super.set(parameterName, value);
}
}
/**
* Returns a signed URL for uploading a function source code. For more information about the signed
* URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls. Once the
* function source code upload is complete, the used signed URL should be provided in CreateFunction
* or UpdateFunction request as a reference to the function source code. When uploading source code
* to the generated signed URL, please follow these restrictions: * Source file type should be a zip
* file. * Source file size should not exceed the 100MB limit. * No credentials should be attached - the
* signed URLs provide access to the target bucket using internal service identity; if credentials
* were attached, the identity from the credentials would be used, but that identity does not have
* permissions to upload files to the URL. When making an HTTP PUT request, these two headers need to
* be specified: * `content-type: application/zip` * `x-goog-content-length-range: 0,104857600` And
* this header SHOULD NOT be specified: * `Authorization: Bearer YOUR_TOKEN`
*
* Create a request for the method "functions.generateUploadUrl".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link GenerateUploadUrl#execute()} method to invoke the remote
* operation.
*
* @param parent The project and location in which the Google Cloud Storage signed URL should be generated, specified
* in the format `projects/locations`.
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.GenerateUploadUrlRequest}
* @return the request
*/
public GenerateUploadUrl generateUploadUrl(java.lang.String parent, com.google.api.services.cloudfunctions.v1.model.GenerateUploadUrlRequest content) throws java.io.IOException {
GenerateUploadUrl result = new GenerateUploadUrl(parent, content);
initialize(result);
return result;
}
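// Illustrative usage sketch (editor-added, not generated code): obtaining a signed upload URL
// and then PUTting the zipped source to it with the headers documented above. The accessor
// chain and resource names below are placeholder assumptions.
//
//   GenerateUploadUrlResponse resp = cloudfunctions.projects().locations().functions()
//       .generateUploadUrl("projects/my-project/locations/us-central1",
//           new GenerateUploadUrlRequest())
//       .execute();
//   String uploadUrl = resp.getUploadUrl();
//   // PUT the zip to uploadUrl with "content-type: application/zip" and
//   // "x-goog-content-length-range: 0,104857600", then reference it via
//   // CloudFunction.setSourceUploadUrl(uploadUrl) in a Create or Patch request.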
public class GenerateUploadUrl extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.GenerateUploadUrlResponse> {
private static final String REST_PATH = "v1/{+parent}/functions:generateUploadUrl";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Returns a signed URL for uploading a function source code. For more information about the
* signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls. Once
* the function source code upload is complete, the used signed URL should be provided in
* CreateFunction or UpdateFunction request as a reference to the function source code. When
* uploading source code to the generated signed URL, please follow these restrictions: * Source
* file type should be a zip file. * Source file size should not exceed the 100MB limit. * No
* credentials should be attached - the signed URLs provide access to the target bucket using
* internal service identity; if credentials were attached, the identity from the credentials
* would be used, but that identity does not have permissions to upload files to the URL. When
* making an HTTP PUT request, these two headers need to be specified: * `content-type:
* application/zip` * `x-goog-content-length-range: 0,104857600` And this header SHOULD NOT be
* specified: * `Authorization: Bearer YOUR_TOKEN`
*
* Create a request for the method "functions.generateUploadUrl".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link GenerateUploadUrl#execute()} method to invoke the remote
* operation. <p> {@link GenerateUploadUrl#initialize(com.google.api.client.googleapis.services.Ab
* stractGoogleClientRequest)} must be called to initialize this instance immediately after
* invoking the constructor. </p>
*
* @param parent The project and location in which the Google Cloud Storage signed URL should be generated, specified
* in the format `projects/locations`.
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.GenerateUploadUrlRequest}
* @since 1.13
*/
protected GenerateUploadUrl(java.lang.String parent, com.google.api.services.cloudfunctions.v1.model.GenerateUploadUrlRequest content) {
super(CloudFunctions.this, "POST", REST_PATH, content, com.google.api.services.cloudfunctions.v1.model.GenerateUploadUrlResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public GenerateUploadUrl set$Xgafv(java.lang.String $Xgafv) {
return (GenerateUploadUrl) super.set$Xgafv($Xgafv);
}
@Override
public GenerateUploadUrl setAccessToken(java.lang.String accessToken) {
return (GenerateUploadUrl) super.setAccessToken(accessToken);
}
@Override
public GenerateUploadUrl setAlt(java.lang.String alt) {
return (GenerateUploadUrl) super.setAlt(alt);
}
@Override
public GenerateUploadUrl setCallback(java.lang.String callback) {
return (GenerateUploadUrl) super.setCallback(callback);
}
@Override
public GenerateUploadUrl setFields(java.lang.String fields) {
return (GenerateUploadUrl) super.setFields(fields);
}
@Override
public GenerateUploadUrl setKey(java.lang.String key) {
return (GenerateUploadUrl) super.setKey(key);
}
@Override
public GenerateUploadUrl setOauthToken(java.lang.String oauthToken) {
return (GenerateUploadUrl) super.setOauthToken(oauthToken);
}
@Override
public GenerateUploadUrl setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GenerateUploadUrl) super.setPrettyPrint(prettyPrint);
}
@Override
public GenerateUploadUrl setQuotaUser(java.lang.String quotaUser) {
return (GenerateUploadUrl) super.setQuotaUser(quotaUser);
}
@Override
public GenerateUploadUrl setUploadType(java.lang.String uploadType) {
return (GenerateUploadUrl) super.setUploadType(uploadType);
}
@Override
public GenerateUploadUrl setUploadProtocol(java.lang.String uploadProtocol) {
return (GenerateUploadUrl) super.setUploadProtocol(uploadProtocol);
}
/**
* The project and location in which the Google Cloud Storage signed URL should be
* generated, specified in the format `projects/locations`.
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** The project and location in which the Google Cloud Storage signed URL should be generated,
specified in the format `projects/locations`.
*/
public java.lang.String getParent() {
return parent;
}
/**
* The project and location in which the Google Cloud Storage signed URL should be
* generated, specified in the format `projects/locations`.
*/
public GenerateUploadUrl setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.parent = parent;
return this;
}
@Override
public GenerateUploadUrl set(String parameterName, Object value) {
return (GenerateUploadUrl) super.set(parameterName, value);
}
}
/**
* Returns a function with the given name from the requested project.
*
* Create a request for the method "functions.get".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Required. The name of the function whose details should be obtained.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
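// Illustrative usage sketch (editor-added, not generated code): reading a function's current
// definition. The accessor chain and resource name below are placeholder assumptions.
//
//   CloudFunction fn = cloudfunctions.projects().locations().functions()
//       .get("projects/my-project/locations/us-central1/functions/my-function")
//       .execute();
//   String status = fn.getStatus();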
public class Get extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.CloudFunction> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
/**
* Returns a function with the given name from the requested project.
*
* Create a request for the method "functions.get".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The name of the function whose details should be obtained.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudFunctions.this, "GET", REST_PATH, null, com.google.api.services.cloudfunctions.v1.model.CloudFunction.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The name of the function whose details should be obtained. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The name of the function whose details should be obtained.
*/
public java.lang.String getName() {
return name;
}
/** Required. The name of the function whose details should be obtained. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Gets the IAM access control policy for a function. Returns an empty policy if the function exists
* and does not have a policy set.
*
* Create a request for the method "functions.getIamPolicy".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote
* operation.
*
* @param resource REQUIRED: The resource for which the policy is being requested. See the operation documentation for
* the appropriate value for this field.
* @return the request
*/
public GetIamPolicy getIamPolicy(java.lang.String resource) throws java.io.IOException {
GetIamPolicy result = new GetIamPolicy(resource);
initialize(result);
return result;
}
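// Illustrative usage sketch (editor-added, not generated code): reading the IAM policy and
// requesting policy format version 3 via the optional parameter defined below. The accessor
// chain and resource name are placeholder assumptions.
//
//   Policy policy = cloudfunctions.projects().locations().functions()
//       .getIamPolicy("projects/my-project/locations/us-central1/functions/my-function")
//       .setOptionsRequestedPolicyVersion(3)
//       .execute();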
public class GetIamPolicy extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.Policy> {
private static final String REST_PATH = "v1/{+resource}:getIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
/**
* Gets the IAM access control policy for a function. Returns an empty policy if the function
* exists and does not have a policy set.
*
* Create a request for the method "functions.getIamPolicy".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote
* operation. <p> {@link
* GetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being requested. See the operation documentation for
* the appropriate value for this field.
* @since 1.13
*/
protected GetIamPolicy(java.lang.String resource) {
super(CloudFunctions.this, "GET", REST_PATH, null, com.google.api.services.cloudfunctions.v1.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetIamPolicy set$Xgafv(java.lang.String $Xgafv) {
return (GetIamPolicy) super.set$Xgafv($Xgafv);
}
@Override
public GetIamPolicy setAccessToken(java.lang.String accessToken) {
return (GetIamPolicy) super.setAccessToken(accessToken);
}
@Override
public GetIamPolicy setAlt(java.lang.String alt) {
return (GetIamPolicy) super.setAlt(alt);
}
@Override
public GetIamPolicy setCallback(java.lang.String callback) {
return (GetIamPolicy) super.setCallback(callback);
}
@Override
public GetIamPolicy setFields(java.lang.String fields) {
return (GetIamPolicy) super.setFields(fields);
}
@Override
public GetIamPolicy setKey(java.lang.String key) {
return (GetIamPolicy) super.setKey(key);
}
@Override
public GetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (GetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public GetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public GetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (GetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public GetIamPolicy setUploadType(java.lang.String uploadType) {
return (GetIamPolicy) super.setUploadType(uploadType);
}
@Override
public GetIamPolicy setUploadProtocol(java.lang.String uploadProtocol) {
return (GetIamPolicy) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy is being requested. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being requested. See the operation documentation for
the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being requested. See the operation
* documentation for the appropriate value for this field.
*/
public GetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
this.resource = resource;
return this;
}
/**
* Optional. The policy format version to be returned. Valid values are 0, 1, and 3.
* Requests specifying an invalid value will be rejected. Requests for policies with any
* conditional bindings must specify version 3. Policies without any conditional bindings
* may specify any valid value or leave the field unset. To learn which resources support
* conditions in their IAM policies, see the [IAM
* documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
@com.google.api.client.util.Key("options.requestedPolicyVersion")
private java.lang.Integer optionsRequestedPolicyVersion;
/** Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests
specifying an invalid value will be rejected. Requests for policies with any conditional bindings
must specify version 3. Policies without any conditional bindings may specify any valid value or
leave the field unset. To learn which resources support conditions in their IAM policies, see the
[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
public java.lang.Integer getOptionsRequestedPolicyVersion() {
return optionsRequestedPolicyVersion;
}
/**
* Optional. The policy format version to be returned. Valid values are 0, 1, and 3.
* Requests specifying an invalid value will be rejected. Requests for policies with any
* conditional bindings must specify version 3. Policies without any conditional bindings
* may specify any valid value or leave the field unset. To learn which resources support
* conditions in their IAM policies, see the [IAM
* documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
public GetIamPolicy setOptionsRequestedPolicyVersion(java.lang.Integer optionsRequestedPolicyVersion) {
this.optionsRequestedPolicyVersion = optionsRequestedPolicyVersion;
return this;
}
@Override
public GetIamPolicy set(String parameterName, Object value) {
return (GetIamPolicy) super.set(parameterName, value);
}
}
/**
* Returns a list of functions that belong to the requested project.
*
* Create a request for the method "functions.list".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent The project and location from which the function should be listed, specified in the format
* `projects/locations` If you want to list functions in all locations, use "-" in place of a
* location. When listing functions in all locations, if one or more location(s) are
* unreachable, the response will contain functions from all reachable locations along with
* the names of any unreachable locations.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
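// Illustrative usage sketch (editor-added, not generated code): listing functions across all
// locations ("-") and paging with the pageSize/pageToken parameters defined below. The
// accessor chain and project name are placeholder assumptions.
//
//   ListFunctionsResponse page = cloudfunctions.projects().locations().functions()
//       .list("projects/my-project/locations/-")
//       .setPageSize(50)
//       .execute();
//   // Repeat with .setPageToken(page.getNextPageToken()) while a next page token is returned.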
public class List extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.ListFunctionsResponse> {
private static final String REST_PATH = "v1/{+parent}/functions";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Returns a list of functions that belong to the requested project.
*
* Create a request for the method "functions.list".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent The project and location from which the function should be listed, specified in the format
* `projects/locations` If you want to list functions in all locations, use "-" in place of a
* location. When listing functions in all locations, if one or more location(s) are
* unreachable, the response will contain functions from all reachable locations along with
* the names of any unreachable locations.
* @since 1.13
*/
protected List(java.lang.String parent) {
super(CloudFunctions.this, "GET", REST_PATH, null, com.google.api.services.cloudfunctions.v1.model.ListFunctionsResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* The project and location from which the function should be listed, specified in the
* format `projects/locations` If you want to list functions in all locations, use "-" in
* place of a location. When listing functions in all locations, if one or more
* location(s) are unreachable, the response will contain functions from all reachable
* locations along with the names of any unreachable locations.
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** The project and location from which the function should be listed, specified in the format
`projects/locations` If you want to list functions in all locations, use "-" in place of a
location. When listing functions in all locations, if one or more location(s) are unreachable, the
response will contain functions from all reachable locations along with the names of any
unreachable locations.
*/
public java.lang.String getParent() {
return parent;
}
/**
* The project and location from which the function should be listed, specified in the
* format `projects/locations` If you want to list functions in all locations, use "-" in
* place of a location. When listing functions in all locations, if one or more
* location(s) are unreachable, the response will contain functions from all reachable
* locations along with the names of any unreachable locations.
*/
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.parent = parent;
return this;
}
/** Maximum number of functions to return per call. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Maximum number of functions to return per call.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** Maximum number of functions to return per call. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* The value returned by the last `ListFunctionsResponse`; indicates that this is a
* continuation of a prior `ListFunctions` call, and that the system should return the
* next page of data.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The value returned by the last `ListFunctionsResponse`; indicates that this is a continuation of a
prior `ListFunctions` call, and that the system should return the next page of data.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* The value returned by the last `ListFunctionsResponse`; indicates that this is a
* continuation of a prior `ListFunctions` call, and that the system should return the
* next page of data.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Updates an existing function.
*
* Create a request for the method "functions.patch".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param name A user-defined name of the function. Function names must be unique globally and match pattern
* `projects/locations/functions`
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.CloudFunction}
* @return the request
*/
public Patch patch(java.lang.String name, com.google.api.services.cloudfunctions.v1.model.CloudFunction content) throws java.io.IOException {
Patch result = new Patch(name, content);
initialize(result);
return result;
}
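// Illustrative usage sketch (editor-added, not generated code): updating a single field with
// an update mask. The accessor chain, the field chosen, and the resource name below are
// placeholder assumptions.
//
//   CloudFunction patch = new CloudFunction().setAvailableMemoryMb(512);
//   Operation op = cloudfunctions.projects().locations().functions()
//       .patch("projects/my-project/locations/us-central1/functions/my-function", patch)
//       .setUpdateMask("availableMemoryMb")
//       .execute();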
public class Patch extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.Operation> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
/**
* Updates an existing function.
*
* Create a request for the method "functions.patch".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
* <p> {@link
* Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name A user-defined name of the function. Function names must be unique globally and match pattern
* `projects/locations/functions`
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.CloudFunction}
* @since 1.13
*/
protected Patch(java.lang.String name, com.google.api.services.cloudfunctions.v1.model.CloudFunction content) {
super(CloudFunctions.this, "PATCH", REST_PATH, content, com.google.api.services.cloudfunctions.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
}
@Override
public Patch set$Xgafv(java.lang.String $Xgafv) {
return (Patch) super.set$Xgafv($Xgafv);
}
@Override
public Patch setAccessToken(java.lang.String accessToken) {
return (Patch) super.setAccessToken(accessToken);
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setCallback(java.lang.String callback) {
return (Patch) super.setCallback(callback);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUploadType(java.lang.String uploadType) {
return (Patch) super.setUploadType(uploadType);
}
@Override
public Patch setUploadProtocol(java.lang.String uploadProtocol) {
return (Patch) super.setUploadProtocol(uploadProtocol);
}
/**
* A user-defined name of the function. Function names must be unique globally and match
* pattern `projects/locations/functions`
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** A user-defined name of the function. Function names must be unique globally and match pattern
`projects/locations/functions`
*/
public java.lang.String getName() {
return name;
}
/**
* A user-defined name of the function. Function names must be unique globally and match
* pattern `projects/locations/functions`
*/
public Patch setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
this.name = name;
return this;
}
/** Required list of fields to be updated in this request. */
@com.google.api.client.util.Key
private String updateMask;
/** Required list of fields to be updated in this request.
*/
public String getUpdateMask() {
return updateMask;
}
/** Required list of fields to be updated in this request. */
public Patch setUpdateMask(String updateMask) {
this.updateMask = updateMask;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
/**
* Sets the IAM access control policy on the specified function. Replaces any existing policy.
*
* Create a request for the method "functions.setIamPolicy".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote
* operation.
*
* @param resource REQUIRED: The resource for which the policy is being specified. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.SetIamPolicyRequest}
* @return the request
*/
public SetIamPolicy setIamPolicy(java.lang.String resource, com.google.api.services.cloudfunctions.v1.model.SetIamPolicyRequest content) throws java.io.IOException {
SetIamPolicy result = new SetIamPolicy(resource, content);
initialize(result);
return result;
}
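// Illustrative usage sketch (editor-added, not generated code): replacing the policy so the
// function allows unauthenticated invocation. The accessor chain, role/member choice, and
// resource name are placeholder assumptions; note this overwrites any existing policy.
//
//   Binding invoker = new Binding()
//       .setRole("roles/cloudfunctions.invoker")
//       .setMembers(java.util.Collections.singletonList("allUsers"));
//   Policy policy = new Policy().setBindings(java.util.Collections.singletonList(invoker));
//   Policy updated = cloudfunctions.projects().locations().functions()
//       .setIamPolicy("projects/my-project/locations/us-central1/functions/my-function",
//           new SetIamPolicyRequest().setPolicy(policy))
//       .execute();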
public class SetIamPolicy extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.Policy> {
private static final String REST_PATH = "v1/{+resource}:setIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
/**
* Sets the IAM access control policy on the specified function. Replaces any existing policy.
*
* Create a request for the method "functions.setIamPolicy".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote
* operation. <p> {@link
* SetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being specified. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.SetIamPolicyRequest}
* @since 1.13
*/
protected SetIamPolicy(java.lang.String resource, com.google.api.services.cloudfunctions.v1.model.SetIamPolicyRequest content) {
super(CloudFunctions.this, "POST", REST_PATH, content, com.google.api.services.cloudfunctions.v1.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
}
@Override
public SetIamPolicy set$Xgafv(java.lang.String $Xgafv) {
return (SetIamPolicy) super.set$Xgafv($Xgafv);
}
@Override
public SetIamPolicy setAccessToken(java.lang.String accessToken) {
return (SetIamPolicy) super.setAccessToken(accessToken);
}
@Override
public SetIamPolicy setAlt(java.lang.String alt) {
return (SetIamPolicy) super.setAlt(alt);
}
@Override
public SetIamPolicy setCallback(java.lang.String callback) {
return (SetIamPolicy) super.setCallback(callback);
}
@Override
public SetIamPolicy setFields(java.lang.String fields) {
return (SetIamPolicy) super.setFields(fields);
}
@Override
public SetIamPolicy setKey(java.lang.String key) {
return (SetIamPolicy) super.setKey(key);
}
@Override
public SetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (SetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public SetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (SetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public SetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (SetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public SetIamPolicy setUploadType(java.lang.String uploadType) {
return (SetIamPolicy) super.setUploadType(uploadType);
}
@Override
public SetIamPolicy setUploadProtocol(java.lang.String uploadProtocol) {
return (SetIamPolicy) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy is being specified. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being specified. See the operation documentation for
the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being specified. See the operation
* documentation for the appropriate value for this field.
*/
public SetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public SetIamPolicy set(String parameterName, Object value) {
return (SetIamPolicy) super.set(parameterName, value);
}
}
/**
* Tests the specified permissions against the IAM access control policy for a function. If the
* function does not exist, this will return an empty set of permissions, not a NOT_FOUND error.
*
* Create a request for the method "functions.testIamPermissions".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote
* operation.
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.TestIamPermissionsRequest}
* @return the request
*/
public TestIamPermissions testIamPermissions(java.lang.String resource, com.google.api.services.cloudfunctions.v1.model.TestIamPermissionsRequest content) throws java.io.IOException {
TestIamPermissions result = new TestIamPermissions(resource, content);
initialize(result);
return result;
}
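// Illustrative usage sketch (editor-added, not generated code): checking which of the supplied
// permissions the caller holds on the function. The accessor chain, permission strings, and
// resource name below are placeholder assumptions.
//
//   TestIamPermissionsResponse resp = cloudfunctions.projects().locations().functions()
//       .testIamPermissions("projects/my-project/locations/us-central1/functions/my-function",
//           new TestIamPermissionsRequest().setPermissions(java.util.Arrays.asList(
//               "cloudfunctions.functions.get", "cloudfunctions.functions.update")))
//       .execute();
//   java.util.List<String> granted = resp.getPermissions();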
public class TestIamPermissions extends CloudFunctionsRequest<com.google.api.services.cloudfunctions.v1.model.TestIamPermissionsResponse> {
private static final String REST_PATH = "v1/{+resource}:testIamPermissions";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
/**
* Tests the specified permissions against the IAM access control policy for a function. If the
* function does not exist, this will return an empty set of permissions, not a NOT_FOUND error.
*
* Create a request for the method "functions.testIamPermissions".
*
* This request holds the parameters needed by the cloudfunctions server. After setting any
* optional parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote
* operation. <p> {@link TestIamPermissions#initialize(com.google.api.client.googleapis.services.A
* bstractGoogleClientRequest)} must be called to initialize this instance immediately after
* invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
* @param content the {@link com.google.api.services.cloudfunctions.v1.model.TestIamPermissionsRequest}
* @since 1.13
*/
protected TestIamPermissions(java.lang.String resource, com.google.api.services.cloudfunctions.v1.model.TestIamPermissionsRequest content) {
super(CloudFunctions.this, "POST", REST_PATH, content, com.google.api.services.cloudfunctions.v1.model.TestIamPermissionsResponse.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
}
@Override
public TestIamPermissions set$Xgafv(java.lang.String $Xgafv) {
return (TestIamPermissions) super.set$Xgafv($Xgafv);
}
@Override
public TestIamPermissions setAccessToken(java.lang.String accessToken) {
return (TestIamPermissions) super.setAccessToken(accessToken);
}
@Override
public TestIamPermissions setAlt(java.lang.String alt) {
return (TestIamPermissions) super.setAlt(alt);
}
@Override
public TestIamPermissions setCallback(java.lang.String callback) {
return (TestIamPermissions) super.setCallback(callback);
}
@Override
public TestIamPermissions setFields(java.lang.String fields) {
return (TestIamPermissions) super.setFields(fields);
}
@Override
public TestIamPermissions setKey(java.lang.String key) {
return (TestIamPermissions) super.setKey(key);
}
@Override
public TestIamPermissions setOauthToken(java.lang.String oauthToken) {
return (TestIamPermissions) super.setOauthToken(oauthToken);
}
@Override
public TestIamPermissions setPrettyPrint(java.lang.Boolean prettyPrint) {
return (TestIamPermissions) super.setPrettyPrint(prettyPrint);
}
@Override
public TestIamPermissions setQuotaUser(java.lang.String quotaUser) {
return (TestIamPermissions) super.setQuotaUser(quotaUser);
}
@Override
public TestIamPermissions setUploadType(java.lang.String uploadType) {
return (TestIamPermissions) super.setUploadType(uploadType);
}
@Override
public TestIamPermissions setUploadProtocol(java.lang.String uploadProtocol) {
return (TestIamPermissions) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See the
* operation documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy detail is being requested. See the operation
documentation for the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See the
* operation documentation for the appropriate value for this field.
*/
public TestIamPermissions setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/functions/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public TestIamPermissions set(String parameterName, Object value) {
return (TestIamPermissions) super.set(parameterName, value);
}
}
}
}
}
/**
* Builder for {@link CloudFunctions}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
/** Builds a new instance of {@link CloudFunctions}. */
@Override
public CloudFunctions build() {
return new CloudFunctions(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link CloudFunctionsRequestInitializer}.
*
* @since 1.12
*/
public Builder setCloudFunctionsRequestInitializer(
CloudFunctionsRequestInitializer cloudfunctionsRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(cloudfunctionsRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
| [
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
] | [] | [
"GOOGLE_API_USE_MTLS_ENDPOINT"
] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT"] | java | 1 | 0 | |
integration/integration_test.go | // This is the main file that sets up integration tests using go-check.
package integration
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"text/template"
"github.com/containous/traefik/pkg/log"
"github.com/fatih/structs"
"github.com/go-check/check"
compose "github.com/libkermit/compose/check"
checker "github.com/vdemeester/shakers"
)
var integration = flag.Bool("integration", false, "run integration tests")
var container = flag.Bool("container", false, "run container integration tests")
var host = flag.Bool("host", false, "run host integration tests")
var showLog = flag.Bool("tlog", false, "always show Traefik logs")
func Test(t *testing.T) {
check.TestingT(t)
}
func init() {
flag.Parse()
if !*integration {
log.Info("Integration tests disabled.")
return
}
if *container {
// tests launched from a container
check.Suite(&AccessLogSuite{})
check.Suite(&AcmeSuite{})
check.Suite(&DockerComposeSuite{})
check.Suite(&DockerSuite{})
check.Suite(&ErrorPagesSuite{})
check.Suite(&FileSuite{})
check.Suite(&GRPCSuite{})
check.Suite(&HealthCheckSuite{})
check.Suite(&HeadersSuite{})
check.Suite(&HostResolverSuite{})
check.Suite(&HTTPSSuite{})
check.Suite(&KeepAliveSuite{})
check.Suite(&LogRotationSuite{})
check.Suite(&MarathonSuite{})
check.Suite(&MarathonSuite15{})
// TODO: disable temporarily
// check.Suite(&RateLimitSuite{})
check.Suite(&RestSuite{})
check.Suite(&RetrySuite{})
check.Suite(&SimpleSuite{})
check.Suite(&TimeoutSuite{})
check.Suite(&TLSClientHeadersSuite{})
check.Suite(&TracingSuite{})
check.Suite(&WebsocketSuite{})
}
if *host {
// tests launched from the host
check.Suite(&K8sSuite{})
check.Suite(&ProxyProtocolSuite{})
check.Suite(&TCPSuite{})
}
}
var traefikBinary = "../dist/traefik"
type BaseSuite struct {
composeProject *compose.Project
}
func (s *BaseSuite) TearDownSuite(c *check.C) {
// shutdown and delete compose project
if s.composeProject != nil {
s.composeProject.Stop(c)
}
}
func (s *BaseSuite) createComposeProject(c *check.C, name string) {
projectName := fmt.Sprintf("integration-test-%s", name)
composeFile := fmt.Sprintf("resources/compose/%s.yml", name)
addrs, err := net.InterfaceAddrs()
c.Assert(err, checker.IsNil)
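	// Pick the first non-loopback IPv4 address of this host and expose it to the
	// compose project through the DOCKER_HOST_IP environment variable.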
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
c.Assert(err, checker.IsNil)
if !ip.IsLoopback() && ip.To4() != nil {
os.Setenv("DOCKER_HOST_IP", ip.String())
break
}
}
s.composeProject = compose.CreateProject(c, projectName, composeFile)
}
func withConfigFile(file string) string {
return "--configFile=" + file
}
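// cmdTraefik builds an exec.Cmd for the traefik binary with the given arguments and
// returns it together with a buffer that captures its combined stdout and stderr.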
func (s *BaseSuite) cmdTraefik(args ...string) (*exec.Cmd, *bytes.Buffer) {
cmd := exec.Command(traefikBinary, args...)
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
return cmd, &out
}
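// traefikCmd wraps cmdTraefik and also returns a helper that dumps the captured
// Traefik log when the test failed or the -tlog flag was set.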
func (s *BaseSuite) traefikCmd(args ...string) (*exec.Cmd, func(*check.C)) {
cmd, out := s.cmdTraefik(args...)
return cmd, func(c *check.C) {
if c.Failed() || *showLog {
s.displayTraefikLog(c, out)
}
}
}
func (s *BaseSuite) displayTraefikLog(c *check.C, output *bytes.Buffer) {
if output == nil || output.Len() == 0 {
log.Infof("%s: No Traefik logs.", c.TestName())
} else {
log.Infof("%s: Traefik logs: ", c.TestName())
log.Infof(output.String())
}
}
func (s *BaseSuite) getDockerHost() string {
dockerHost := os.Getenv("DOCKER_HOST")
if dockerHost == "" {
// Default docker socket
dockerHost = "unix:///var/run/docker.sock"
}
return dockerHost
}
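// adaptFile renders the Go template at path into a temporary file created in the same
// directory, passing the fields of tempObjects (plus a SelfFilename entry) as template
// data, and returns the temporary file's path.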
func (s *BaseSuite) adaptFile(c *check.C, path string, tempObjects interface{}) string {
// Load file
tmpl, err := template.ParseFiles(path)
c.Assert(err, checker.IsNil)
folder, prefix := filepath.Split(path)
tmpFile, err := ioutil.TempFile(folder, strings.TrimSuffix(prefix, filepath.Ext(prefix))+"_*"+filepath.Ext(prefix))
c.Assert(err, checker.IsNil)
defer tmpFile.Close()
model := structs.Map(tempObjects)
model["SelfFilename"] = tmpFile.Name()
err = tmpl.ExecuteTemplate(tmpFile, prefix, model)
c.Assert(err, checker.IsNil)
err = tmpFile.Sync()
c.Assert(err, checker.IsNil)
return tmpFile.Name()
}
| [
"\"DOCKER_HOST\""
] | [] | [
"DOCKER_HOST"
] | [] | ["DOCKER_HOST"] | go | 1 | 0 | |
vendor/golang.org/x/build/cmd/release/release.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TODO(adg): add flag so that we can choose to run make.bash only
// Command release builds a Go release.
package main
import (
"archive/tar"
"archive/zip"
"bytes"
"compress/gzip"
"flag"
"fmt"
gobuild "go/build"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"golang.org/x/build"
"golang.org/x/build/buildenv"
"golang.org/x/build/buildlet"
"golang.org/x/build/dashboard"
)
var (
target = flag.String("target", "", "If specified, build specific target platform ('linux-amd64')")
watch = flag.Bool("watch", false, "Watch the build. Only compatible with -target")
rev = flag.String("rev", "", "Go revision to build")
toolsRev = flag.String("tools", "", "Tools revision to build")
tourRev = flag.String("tour", "master", "Tour revision to include")
blogRev = flag.String("blog", "master", "Blog revision to include")
netRev = flag.String("net", "master", "Net revision to include")
version = flag.String("version", "", "Version string (go1.5.2)")
user = flag.String("user", username(), "coordinator username, appended to 'user-'")
skipTests = flag.Bool("skip_tests", false, "skip tests; run make.bash instead of all.bash (only use if you ran trybots first)")
uploadMode = flag.Bool("upload", false, "Upload files (exclusive to all other flags)")
)
var (
coordClient *buildlet.CoordinatorClient
buildEnv *buildenv.Environment
)
func main() {
flag.Parse()
if *uploadMode {
if err := upload(flag.Args()); err != nil {
log.Fatal(err)
}
return
}
if err := findReleaselet(); err != nil {
log.Fatalf("couldn't find releaselet source: %v", err)
}
if *rev == "" {
log.Fatal("must specify -rev flag")
}
if *toolsRev == "" {
log.Fatal("must specify -tools flag")
}
if *version == "" {
log.Fatal("must specify -version flag")
}
coordClient = coordinatorClient()
buildEnv = buildenv.Production
var wg sync.WaitGroup
for _, b := range builds {
b := b
if *target != "" && b.String() != *target {
continue
}
b.logf("Start.")
wg.Add(1)
go func() {
defer wg.Done()
if err := b.make(); err != nil {
b.logf("Error: %v", err)
} else {
b.logf("Done.")
}
}()
}
wg.Wait()
}
var releaselet = "releaselet.go"
func findReleaselet() error {
// First try the working directory.
if _, err := os.Stat(releaselet); err == nil {
return nil
}
// Then, try to locate the release command in the workspace.
const importPath = "golang.org/x/build/cmd/release"
pkg, err := gobuild.Import(importPath, "", gobuild.FindOnly)
if err != nil {
return fmt.Errorf("finding %q: %v", importPath, err)
}
r := filepath.Join(pkg.Dir, releaselet)
if _, err := os.Stat(r); err != nil {
return err
}
releaselet = r
return nil
}
type Build struct {
OS, Arch string
Source bool
Race bool // Build race detector.
Builder string // Key for dashboard.Builders.
Goarm int // GOARM value if set.
}
func (b *Build) String() string {
if b.Source {
return "src"
}
if b.Goarm != 0 {
return fmt.Sprintf("%v-%vv%vl", b.OS, b.Arch, b.Goarm)
}
return fmt.Sprintf("%v-%v", b.OS, b.Arch)
}
func (b *Build) toolDir() string { return "go/pkg/tool/" + b.OS + "_" + b.Arch }
func (b *Build) logf(format string, args ...interface{}) {
format = fmt.Sprintf("%v: %s", b, format)
log.Printf(format, args...)
}
var builds = []*Build{
{
Source: true,
Builder: "linux-amd64",
},
{
OS: "linux",
Arch: "386",
Builder: "linux-386",
},
{
OS: "linux",
Arch: "arm",
Builder: "linux-arm",
Goarm: 6, // for compatibility with all Raspberry Pi models.
},
{
OS: "linux",
Arch: "amd64",
Race: true,
Builder: "linux-amd64",
},
{
OS: "freebsd",
Arch: "386",
Builder: "freebsd-386-gce101",
},
{
OS: "freebsd",
Arch: "amd64",
Race: true,
Builder: "freebsd-amd64-gce101",
},
{
OS: "windows",
Arch: "386",
Builder: "windows-386-gce",
},
{
OS: "windows",
Arch: "amd64",
Race: true,
Builder: "windows-amd64-gce",
},
{
OS: "darwin",
Arch: "amd64",
Race: true,
Builder: "darwin-amd64-10_10",
},
}
const (
toolsRepo = "golang.org/x/tools"
blogRepo = "golang.org/x/blog"
tourRepo = "golang.org/x/tour"
)
var toolPaths = []string{
"golang.org/x/tools/cmd/godoc",
"golang.org/x/tour/gotour",
}
var preBuildCleanFiles = []string{
".gitattributes",
".gitignore",
".hgignore",
".hgtags",
"misc/dashboard",
"misc/makerelease",
}
var postBuildCleanFiles = []string{
"VERSION.cache",
"pkg/bootstrap",
"src/cmd/api",
}
func (b *Build) buildlet() (*buildlet.Client, error) {
b.logf("Creating buildlet.")
bc, err := coordClient.CreateBuildlet(b.Builder)
if err != nil {
return nil, err
}
return bc, nil
}
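// make runs a full build for b: it provisions a buildlet, pushes the Go and x/ repo
// sources, writes the VERSION file, runs make.bash or all.bash with the release
// environment, installs the bundled tools, runs the releaselet, and downloads the
// resulting tar.gz, zip, pkg, or msi artifact.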
func (b *Build) make() error {
bc, ok := dashboard.Builders[b.Builder]
if !ok {
return fmt.Errorf("unknown builder: %v", bc)
}
client, err := b.buildlet()
if err != nil {
return err
}
defer client.Close()
work, err := client.WorkDir()
if err != nil {
return err
}
// Push source to VM
b.logf("Pushing source to VM.")
const (
goDir = "go"
goPath = "gopath"
go14 = "go1.4"
)
for _, r := range []struct {
repo, rev string
}{
{"go", *rev},
{"tools", *toolsRev},
{"blog", *blogRev},
{"tour", *tourRev},
{"net", *netRev},
} {
if b.Source && r.repo != "go" {
continue
}
dir := goDir
if r.repo != "go" {
dir = goPath + "/src/golang.org/x/" + r.repo
}
tar := "https://go.googlesource.com/" + r.repo + "/+archive/" + r.rev + ".tar.gz"
if err := client.PutTarFromURL(tar, dir); err != nil {
return err
}
}
if u := bc.GoBootstrapURL(buildEnv); u != "" && !b.Source {
b.logf("Installing go1.4.")
if err := client.PutTarFromURL(u, go14); err != nil {
return err
}
}
// Write out version file.
b.logf("Writing VERSION file.")
if err := client.Put(strings.NewReader(*version), "go/VERSION", 0644); err != nil {
return err
}
b.logf("Cleaning goroot (pre-build).")
if err := client.RemoveAll(addPrefix(goDir, preBuildCleanFiles)...); err != nil {
return err
}
if b.Source {
b.logf("Skipping build.")
return b.fetchTarball(client)
}
// Set up build environment.
sep := "/"
if b.OS == "windows" {
sep = "\\"
}
env := append(bc.Env(),
"GOROOT_FINAL="+bc.GorootFinal(),
"GOROOT="+work+sep+goDir,
"GOPATH="+work+sep+goPath,
"GOBIN=",
)
if b.Goarm > 0 {
env = append(env, fmt.Sprintf("GOARM=%d", b.Goarm))
}
// Execute build
b.logf("Building.")
out := new(bytes.Buffer)
script := bc.AllScript()
scriptArgs := bc.AllScriptArgs()
if *skipTests {
script = bc.MakeScript()
scriptArgs = bc.MakeScriptArgs()
}
all := filepath.Join(goDir, script)
var execOut io.Writer = out
if *watch && *target != "" {
execOut = io.MultiWriter(out, os.Stdout)
}
remoteErr, err := client.Exec(all, buildlet.ExecOpts{
Output: execOut,
ExtraEnv: env,
Args: scriptArgs,
})
if err != nil {
return err
}
if remoteErr != nil {
return fmt.Errorf("Build failed: %v\nOutput:\n%v", remoteErr, out)
}
goCmd := path.Join(goDir, "bin/go")
if b.OS == "windows" {
goCmd += ".exe"
}
runGo := func(args ...string) error {
out := new(bytes.Buffer)
var execOut io.Writer = out
if *watch && *target != "" {
execOut = io.MultiWriter(out, os.Stdout)
}
remoteErr, err := client.Exec(goCmd, buildlet.ExecOpts{
Output: execOut,
Dir: ".", // root of buildlet work directory
Args: args,
ExtraEnv: env,
})
if err != nil {
return err
}
if remoteErr != nil {
return fmt.Errorf("go %v: %v\n%s", strings.Join(args, " "), remoteErr, out)
}
return nil
}
if b.Race {
b.logf("Building race detector.")
if err := runGo("install", "-race", "std"); err != nil {
return err
}
}
b.logf("Building %v.", strings.Join(toolPaths, ", "))
if err := runGo(append([]string{"install"}, toolPaths...)...); err != nil {
return err
}
b.logf("Cleaning goroot (post-build).")
if err := client.RemoveAll(addPrefix(goDir, postBuildCleanFiles)...); err != nil {
return err
}
if err := client.RemoveAll(b.toolDir() + "/api"); err != nil {
return err
}
b.logf("Pushing and running releaselet.")
f, err := os.Open(releaselet)
if err != nil {
return err
}
err = client.Put(f, "releaselet.go", 0666)
f.Close()
if err != nil {
return err
}
if err := runGo("run", "releaselet.go"); err != nil {
return err
}
cleanFiles := []string{"releaselet.go", goPath, go14}
switch b.OS {
case "darwin":
filename := *version + "." + b.String() + ".pkg"
if err := b.fetchFile(client, filename, "pkg"); err != nil {
return err
}
cleanFiles = append(cleanFiles, "pkg")
case "windows":
filename := *version + "." + b.String() + ".msi"
if err := b.fetchFile(client, filename, "msi"); err != nil {
return err
}
cleanFiles = append(cleanFiles, "msi")
}
// Need to delete everything except the final "go" directory,
// as we make the tarball relative to workdir.
b.logf("Cleaning workdir.")
if err := client.RemoveAll(cleanFiles...); err != nil {
return err
}
if b.OS == "windows" {
return b.fetchZip(client)
}
return b.fetchTarball(client)
}
func (b *Build) fetchTarball(client *buildlet.Client) error {
b.logf("Downloading tarball.")
tgz, err := client.GetTar(".")
if err != nil {
return err
}
filename := *version + "." + b.String() + ".tar.gz"
return b.writeFile(filename, tgz)
}
func (b *Build) fetchZip(client *buildlet.Client) error {
b.logf("Downloading tarball and re-compressing as zip.")
tgz, err := client.GetTar(".")
if err != nil {
return err
}
defer tgz.Close()
filename := *version + "." + b.String() + ".zip"
f, err := os.Create(filename)
if err != nil {
return err
}
if err := tgzToZip(f, tgz); err != nil {
f.Close()
return err
}
if err := f.Close(); err != nil {
return err
}
b.logf("Wrote %q.", filename)
return nil
}
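// tgzToZip re-encodes the gzip-compressed tar stream read from r as a zip archive
// written to w, storing (rather than deflating) directories and already-compressed
// image files.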
func tgzToZip(w io.Writer, r io.Reader) error {
zr, err := gzip.NewReader(r)
if err != nil {
return err
}
tr := tar.NewReader(zr)
zw := zip.NewWriter(w)
for {
th, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
fi := th.FileInfo()
zh, err := zip.FileInfoHeader(fi)
if err != nil {
return err
}
zh.Name = th.Name // for the full path
switch strings.ToLower(path.Ext(zh.Name)) {
case ".jpg", ".jpeg", ".png", ".gif":
// Don't re-compress already compressed files.
zh.Method = zip.Store
default:
zh.Method = zip.Deflate
}
if fi.IsDir() {
zh.Method = zip.Store
}
w, err := zw.CreateHeader(zh)
if err != nil {
return err
}
if fi.IsDir() {
continue
}
if _, err := io.Copy(w, tr); err != nil {
return err
}
}
return zw.Close()
}
// fetchFile fetches the specified directory from the given buildlet, and
// writes the first file it finds in that directory to dest.
func (b *Build) fetchFile(client *buildlet.Client, dest, dir string) error {
b.logf("Downloading file from %q.", dir)
tgz, err := client.GetTar(dir)
if err != nil {
return err
}
defer tgz.Close()
zr, err := gzip.NewReader(tgz)
if err != nil {
return err
}
tr := tar.NewReader(zr)
for {
h, err := tr.Next()
if err == io.EOF {
return io.ErrUnexpectedEOF
}
if err != nil {
return err
}
if !h.FileInfo().IsDir() {
break
}
}
return b.writeFile(dest, tr)
}
func (b *Build) writeFile(name string, r io.Reader) error {
f, err := os.Create(name)
if err != nil {
return err
}
if _, err := io.Copy(f, r); err != nil {
f.Close()
return err
}
if err := f.Close(); err != nil {
return err
}
b.logf("Wrote %q.", name)
return nil
}
func addPrefix(prefix string, in []string) []string {
var out []string
for _, s := range in {
out = append(out, path.Join(prefix, s))
}
return out
}
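// coordinatorClient returns a client for the production build coordinator,
// authenticated as "user-"+*user with the locally stored token.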
func coordinatorClient() *buildlet.CoordinatorClient {
return &buildlet.CoordinatorClient{
Auth: buildlet.UserPass{
Username: "user-" + *user,
Password: userToken(),
},
Instance: build.ProdCoordinator,
}
}
func homeDir() string {
if runtime.GOOS == "windows" {
return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
}
return os.Getenv("HOME")
}
func configDir() string {
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "Gomote")
}
if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
return filepath.Join(xdg, "gomote")
}
return filepath.Join(homeDir(), ".config", "gomote")
}
func username() string {
if runtime.GOOS == "windows" {
return os.Getenv("USERNAME")
}
return os.Getenv("USER")
}
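// userToken reads the coordinator token for the current user from the gomote config
// directory (file "user-<user>.token") and returns it with surrounding whitespace trimmed.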
func userToken() string {
if *user == "" {
panic("userToken called with user flag empty")
}
keyDir := configDir()
baseFile := "user-" + *user + ".token"
tokenFile := filepath.Join(keyDir, baseFile)
slurp, err := ioutil.ReadFile(tokenFile)
if os.IsNotExist(err) {
log.Printf("Missing file %s for user %q. Change --user or obtain a token and place it there.",
tokenFile, *user)
}
if err != nil {
log.Fatal(err)
}
return strings.TrimSpace(string(slurp))
}
| [
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"HOME\"",
"\"APPDATA\"",
"\"XDG_CONFIG_HOME\"",
"\"USERNAME\"",
"\"USER\""
] | [] | [
"USERNAME",
"APPDATA",
"HOMEPATH",
"HOMEDRIVE",
"USER",
"HOME",
"XDG_CONFIG_HOME"
] | [] | ["USERNAME", "APPDATA", "HOMEPATH", "HOMEDRIVE", "USER", "HOME", "XDG_CONFIG_HOME"] | go | 7 | 0 | |
server/server_test.go | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"database/sql"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"testing"
"time"
"github.com/go-sql-driver/mysql"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
tmysql "github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/versioninfo"
"go.uber.org/zap"
)
var (
regression = true
)
func TestT(t *testing.T) {
defaultConfig := config.NewConfig()
globalConfig := config.GetGlobalConfig()
	// Test for issue 22162: the global config shouldn't be changed by other packages' init functions.
if !reflect.DeepEqual(defaultConfig, globalConfig) {
t.Fatalf("%#v != %#v\n", defaultConfig, globalConfig)
}
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
err := logutil.InitZapLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
if err != nil {
t.Fatal(err)
}
TestingT(t)
}
type configOverrider func(*mysql.Config)
// testServerClient holds the server connection parameters and provides several
// methods to communicate with the server and run tests.
type testServerClient struct {
port uint
statusPort uint
statusScheme string
}
// newTestServerClient returns a testServerClient with a unique address.
func newTestServerClient() *testServerClient {
return &testServerClient{
port: 0,
statusPort: 0,
statusScheme: "http",
}
}
// statusURL returns the full URL of a status path.
func (cli *testServerClient) statusURL(path string) string {
return fmt.Sprintf("%s://localhost:%d%s", cli.statusScheme, cli.statusPort, path)
}
// fetchStatus exec http.Get to server status port
func (cli *testServerClient) fetchStatus(path string) (*http.Response, error) {
return http.Get(cli.statusURL(path))
}
// postStatus performs an HTTP POST against the given path on the server status port.
func (cli *testServerClient) postStatus(path, contentType string, body io.Reader) (*http.Response, error) {
return http.Post(cli.statusURL(path), contentType, body)
}
// formStatus posts a form request to the server status address.
func (cli *testServerClient) formStatus(path string, data url.Values) (*http.Response, error) {
return http.PostForm(cli.statusURL(path), data)
}
// getDSN generates a DSN string for MySQL connection.
func (cli *testServerClient) getDSN(overriders ...configOverrider) string {
config := mysql.NewConfig()
config.User = "root"
config.Net = "tcp"
config.Addr = fmt.Sprintf("127.0.0.1:%d", cli.port)
config.DBName = "test"
config.Params = make(map[string]string)
for _, overrider := range overriders {
if overrider != nil {
overrider(config)
}
}
return config.FormatDSN()
}
// runTests runs tests using the default database `test`.
func (cli *testServerClient) runTests(c *C, overrider configOverrider, tests ...func(dbt *DBTest)) {
db, err := sql.Open("mysql", cli.getDSN(overrider))
c.Assert(err, IsNil, Commentf("Error connecting"))
defer func() {
err := db.Close()
c.Assert(err, IsNil)
}()
_, err = db.Exec("DROP TABLE IF EXISTS test")
c.Assert(err, IsNil)
dbt := &DBTest{c, db}
for _, test := range tests {
test(dbt)
		// drop the table between tests; a failure here is intentionally ignored
_, _ = dbt.db.Exec("DROP TABLE IF EXISTS test")
}
}
// runTestsOnNewDB runs tests using a specified database which will be created before the test and destroyed after the test.
func (cli *testServerClient) runTestsOnNewDB(c *C, overrider configOverrider, dbName string, tests ...func(dbt *DBTest)) {
dsn := cli.getDSN(overrider, func(config *mysql.Config) {
config.DBName = ""
})
db, err := sql.Open("mysql", dsn)
c.Assert(err, IsNil, Commentf("Error connecting"))
defer func() {
err := db.Close()
c.Assert(err, IsNil)
}()
_, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS `%s`;", dbName))
if err != nil {
fmt.Println(err)
}
c.Assert(err, IsNil, Commentf("Error drop database %s: %s", dbName, err))
_, err = db.Exec(fmt.Sprintf("CREATE DATABASE `%s`;", dbName))
c.Assert(err, IsNil, Commentf("Error create database %s: %s", dbName, err))
defer func() {
_, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS `%s`;", dbName))
c.Assert(err, IsNil, Commentf("Error drop database %s: %s", dbName, err))
}()
_, err = db.Exec(fmt.Sprintf("USE `%s`;", dbName))
c.Assert(err, IsNil, Commentf("Error use database %s: %s", dbName, err))
dbt := &DBTest{c, db}
for _, test := range tests {
test(dbt)
		// ignore a possible "No database selected" error during cleanup
_, _ = dbt.db.Exec("DROP TABLE IF EXISTS test")
}
}
type DBTest struct {
*C
db *sql.DB
}
func (dbt *DBTest) fail(method, query string, err error) {
if len(query) > 300 {
query = "[query too large to print]"
}
dbt.Fatalf("Error on %s %s: %s", method, query, err.Error())
}
func (dbt *DBTest) mustPrepare(query string) *sql.Stmt {
stmt, err := dbt.db.Prepare(query)
dbt.Assert(err, IsNil, Commentf("Prepare %s", query))
return stmt
}
func (dbt *DBTest) mustExecPrepared(stmt *sql.Stmt, args ...interface{}) sql.Result {
res, err := stmt.Exec(args...)
dbt.Assert(err, IsNil, Commentf("Execute prepared with args: %s", args))
return res
}
func (dbt *DBTest) mustQueryPrepared(stmt *sql.Stmt, args ...interface{}) *sql.Rows {
rows, err := stmt.Query(args...)
dbt.Assert(err, IsNil, Commentf("Query prepared with args: %s", args))
return rows
}
func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) {
res, err := dbt.db.Exec(query, args...)
dbt.Assert(err, IsNil, Commentf("Exec %s", query))
return res
}
func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) {
rows, err := dbt.db.Query(query, args...)
dbt.Assert(err, IsNil, Commentf("Query %s", query))
return rows
}
func (dbt *DBTest) mustQueryRows(query string, args ...interface{}) {
rows := dbt.mustQuery(query, args...)
dbt.Assert(rows.Next(), IsTrue)
rows.Close()
}
func (cli *testServerClient) runTestRegression(c *C, overrider configOverrider, dbName string) {
cli.runTestsOnNewDB(c, overrider, dbName, func(dbt *DBTest) {
// Show the user
dbt.mustExec("select user()")
// Create Table
dbt.mustExec("CREATE TABLE test (val TINYINT)")
// Test for unexpected data
var out bool
rows := dbt.mustQuery("SELECT * FROM test")
dbt.Assert(rows.Next(), IsFalse, Commentf("unexpected data in empty table"))
// Create Data
res := dbt.mustExec("INSERT INTO test VALUES (1)")
// res := dbt.mustExec("INSERT INTO test VALUES (?)", 1)
count, err := res.RowsAffected()
dbt.Assert(err, IsNil)
dbt.Check(count, Equals, int64(1))
id, err := res.LastInsertId()
dbt.Assert(err, IsNil)
dbt.Check(id, Equals, int64(0))
// Read
rows = dbt.mustQuery("SELECT val FROM test")
if rows.Next() {
err = rows.Scan(&out)
c.Assert(err, IsNil)
dbt.Check(out, IsTrue)
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
} else {
dbt.Error("no data")
}
rows.Close()
// Update
res = dbt.mustExec("UPDATE test SET val = 0 WHERE val = ?", 1)
count, err = res.RowsAffected()
dbt.Assert(err, IsNil)
dbt.Check(count, Equals, int64(1))
// Check Update
rows = dbt.mustQuery("SELECT val FROM test")
if rows.Next() {
err = rows.Scan(&out)
c.Assert(err, IsNil)
dbt.Check(out, IsFalse)
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
} else {
dbt.Error("no data")
}
rows.Close()
// Delete
res = dbt.mustExec("DELETE FROM test WHERE val = 0")
// res = dbt.mustExec("DELETE FROM test WHERE val = ?", 0)
count, err = res.RowsAffected()
dbt.Assert(err, IsNil)
dbt.Check(count, Equals, int64(1))
// Check for unexpected rows
res = dbt.mustExec("DELETE FROM test")
count, err = res.RowsAffected()
dbt.Assert(err, IsNil)
dbt.Check(count, Equals, int64(0))
dbt.mustQueryRows("SELECT 1")
var b = make([]byte, 0)
if err := dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
dbt.Fatal(err)
}
if b == nil {
dbt.Error("nil echo from non-nil input")
}
})
}
func (cli *testServerClient) runTestPrepareResultFieldType(t *C) {
var param int64 = 83
cli.runTests(t, nil, func(dbt *DBTest) {
stmt, err := dbt.db.Prepare(`SELECT ?`)
if err != nil {
dbt.Fatal(err)
}
defer stmt.Close()
row := stmt.QueryRow(param)
var result int64
err = row.Scan(&result)
if err != nil {
dbt.Fatal(err)
}
if result != param {
dbt.Fatal("Unexpected result value")
}
})
}
func (cli *testServerClient) runTestSpecialType(t *C) {
cli.runTestsOnNewDB(t, nil, "SpecialType", func(dbt *DBTest) {
dbt.mustExec("create table test (a decimal(10, 5), b datetime, c time, d bit(8))")
dbt.mustExec("insert test values (1.4, '2012-12-21 12:12:12', '4:23:34', b'1000')")
rows := dbt.mustQuery("select * from test where a > ?", 0)
t.Assert(rows.Next(), IsTrue)
var outA float64
var outB, outC string
var outD []byte
err := rows.Scan(&outA, &outB, &outC, &outD)
t.Assert(err, IsNil)
t.Assert(outA, Equals, 1.4)
t.Assert(outB, Equals, "2012-12-21 12:12:12")
t.Assert(outC, Equals, "04:23:34")
t.Assert(outD, BytesEquals, []byte{8})
})
}
func (cli *testServerClient) runTestClientWithCollation(t *C) {
cli.runTests(t, func(config *mysql.Config) {
config.Collation = "utf8mb4_general_ci"
}, func(dbt *DBTest) {
var name, charset, collation string
// check session variable collation_connection
rows := dbt.mustQuery("show variables like 'collation_connection'")
t.Assert(rows.Next(), IsTrue)
err := rows.Scan(&name, &collation)
t.Assert(err, IsNil)
t.Assert(collation, Equals, "utf8mb4_general_ci")
// check session variable character_set_client
rows = dbt.mustQuery("show variables like 'character_set_client'")
t.Assert(rows.Next(), IsTrue)
err = rows.Scan(&name, &charset)
t.Assert(err, IsNil)
t.Assert(charset, Equals, "utf8mb4")
// check session variable character_set_results
rows = dbt.mustQuery("show variables like 'character_set_results'")
t.Assert(rows.Next(), IsTrue)
err = rows.Scan(&name, &charset)
t.Assert(err, IsNil)
t.Assert(charset, Equals, "utf8mb4")
// check session variable character_set_connection
rows = dbt.mustQuery("show variables like 'character_set_connection'")
t.Assert(rows.Next(), IsTrue)
err = rows.Scan(&name, &charset)
t.Assert(err, IsNil)
t.Assert(charset, Equals, "utf8mb4")
})
}
func (cli *testServerClient) runTestPreparedString(t *C) {
cli.runTestsOnNewDB(t, nil, "PreparedString", func(dbt *DBTest) {
dbt.mustExec("create table test (a char(10), b char(10))")
dbt.mustExec("insert test values (?, ?)", "abcdeabcde", "abcde")
rows := dbt.mustQuery("select * from test where 1 = ?", 1)
t.Assert(rows.Next(), IsTrue)
var outA, outB string
err := rows.Scan(&outA, &outB)
t.Assert(err, IsNil)
t.Assert(outA, Equals, "abcdeabcde")
t.Assert(outB, Equals, "abcde")
})
}
// runTestPreparedTimestamp does not really cover the binary timestamp format, because the MySQL
// driver for Go does not use that format: it converts the timestamp to a string instead.
// This case guarantees that path still works.
func (cli *testServerClient) runTestPreparedTimestamp(t *C) {
cli.runTestsOnNewDB(t, nil, "prepared_timestamp", func(dbt *DBTest) {
dbt.mustExec("create table test (a timestamp, b time)")
dbt.mustExec("set time_zone='+00:00'")
insertStmt := dbt.mustPrepare("insert test values (?, ?)")
defer insertStmt.Close()
vts := time.Unix(1, 1)
vt := time.Unix(-1, 1)
dbt.mustExecPrepared(insertStmt, vts, vt)
selectStmt := dbt.mustPrepare("select * from test where a = ? and b = ?")
defer selectStmt.Close()
rows := dbt.mustQueryPrepared(selectStmt, vts, vt)
t.Assert(rows.Next(), IsTrue)
var outA, outB string
err := rows.Scan(&outA, &outB)
t.Assert(err, IsNil)
t.Assert(outA, Equals, "1970-01-01 00:00:01")
t.Assert(outB, Equals, "23:59:59")
})
}
func (cli *testServerClient) runTestLoadDataWithSelectIntoOutfile(c *C, server *Server) {
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "SelectIntoOutfile", func(dbt *DBTest) {
dbt.mustExec("create table t (i int, r real, d decimal(10, 5), s varchar(100), dt datetime, ts timestamp, j json)")
dbt.mustExec("insert into t values (1, 1.1, 0.1, 'a', '2000-01-01', '01:01:01', '[1]')")
dbt.mustExec("insert into t values (2, 2.2, 0.2, 'b', '2000-02-02', '02:02:02', '[1,2]')")
dbt.mustExec("insert into t values (null, null, null, null, '2000-03-03', '03:03:03', '[1,2,3]')")
dbt.mustExec("insert into t values (4, 4.4, 0.4, 'd', null, null, null)")
outfile := filepath.Join(os.TempDir(), fmt.Sprintf("select_into_outfile_%v_%d.csv", time.Now().UnixNano(), rand.Int()))
// On windows use fmt.Sprintf("%q") to escape \ for SQL,
// outfile may be 'C:\Users\genius\AppData\Local\Temp\select_into_outfile_1582732846769492000_8074605509026837941.csv'
// Without quote, after SQL escape it would become:
// 'C:UsersgeniusAppDataLocalTempselect_into_outfile_1582732846769492000_8074605509026837941.csv'
dbt.mustExec(fmt.Sprintf("select * from t into outfile %q", outfile))
defer func() {
c.Assert(os.Remove(outfile), IsNil)
}()
dbt.mustExec("create table t1 (i int, r real, d decimal(10, 5), s varchar(100), dt datetime, ts timestamp, j json)")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t1", outfile))
fetchResults := func(table string) [][]interface{} {
var res [][]interface{}
row := dbt.mustQuery("select * from " + table + " order by i")
for row.Next() {
r := make([]interface{}, 7)
c.Assert(row.Scan(&r[0], &r[1], &r[2], &r[3], &r[4], &r[5], &r[6]), IsNil)
res = append(res, r)
}
c.Assert(row.Close(), IsNil)
return res
}
res := fetchResults("t")
res1 := fetchResults("t1")
c.Assert(len(res), Equals, len(res1))
for i := range res {
for j := range res[i] {
// using Sprintf to avoid some uncomparable types
c.Assert(fmt.Sprintf("%v", res[i][j]), Equals, fmt.Sprintf("%v", res1[i][j]))
}
}
})
}
func (cli *testServerClient) runTestLoadDataForSlowLog(c *C, server *Server) {
path := "/tmp/load_data_test.csv"
fp, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
defer func() {
err = fp.Close()
c.Assert(err, IsNil)
err = os.Remove(path)
c.Assert(err, IsNil)
}()
_, err = fp.WriteString(
"1 1\n" +
"2 2\n" +
"3 3\n" +
"4 4\n" +
"5 5\n")
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "load_data_slow_query", func(dbt *DBTest) {
dbt.mustExec("create table t_slow (a int key, b int)")
defer func() {
dbt.mustExec("set tidb_slow_log_threshold=300;")
dbt.mustExec("set @@global.tidb_enable_stmt_summary=0")
}()
dbt.mustExec("set tidb_slow_log_threshold=0;")
dbt.mustExec("set @@global.tidb_enable_stmt_summary=1")
query := fmt.Sprintf("load data local infile %q into table t_slow", path)
dbt.mustExec(query)
dbt.mustExec("insert ignore into t_slow values (1,1);")
checkPlan := func(rows *sql.Rows, expectPlan string) {
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
var plan sql.NullString
err = rows.Scan(&plan)
dbt.Check(err, IsNil)
planStr := strings.ReplaceAll(plan.String, "\t", " ")
planStr = strings.ReplaceAll(planStr, "\n", " ")
c.Assert(planStr, Matches, expectPlan)
}
// Test for record slow log for load data statement.
rows := dbt.mustQuery(fmt.Sprintf("select plan from information_schema.slow_query where query like 'load data local infile %% into table t_slow;' order by time desc limit 1"))
expectedPlan := ".*LoadData.* time.* loops.* prepare.* check_insert.* mem_insert_time:.* prefetch.* rpc.* commit_txn.*"
checkPlan(rows, expectedPlan)
// Test for record statements_summary for load data statement.
rows = dbt.mustQuery(fmt.Sprintf("select plan from information_schema.STATEMENTS_SUMMARY where QUERY_SAMPLE_TEXT like 'load data local infile %%' limit 1"))
checkPlan(rows, expectedPlan)
// Test log normal statement after executing load date.
rows = dbt.mustQuery(fmt.Sprintf("select plan from information_schema.slow_query where query = 'insert ignore into t_slow values (1,1);' order by time desc limit 1"))
expectedPlan = ".*Insert.* time.* loops.* prepare.* check_insert.* mem_insert_time:.* prefetch.* rpc.*"
checkPlan(rows, expectedPlan)
})
}
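// prepareLoadDataFile truncates the file at path and writes each given row as a
// tab-separated line.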
func (cli *testServerClient) prepareLoadDataFile(c *C, path string, rows ...string) {
fp, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
defer func() {
err = fp.Close()
c.Assert(err, IsNil)
}()
for _, row := range rows {
fields := strings.Split(row, " ")
_, err = fp.WriteString(strings.Join(fields, "\t"))
_, err = fp.WriteString("\n")
}
c.Assert(err, IsNil)
}
func (cli *testServerClient) runTestLoadDataAutoRandom(c *C) {
path := "/tmp/load_data_txn_error.csv"
fp, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
defer func() {
_ = os.Remove(path)
}()
cksum1 := 0
cksum2 := 0
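	// Write 50000 random rows and keep running XOR checksums of both columns so the
	// loaded data can be verified against bit_xor() afterwards.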
for i := 0; i < 50000; i++ {
n1 := rand.Intn(1000)
n2 := rand.Intn(1000)
str1 := strconv.Itoa(n1)
str2 := strconv.Itoa(n2)
row := str1 + "\t" + str2
_, err := fp.WriteString(row)
c.Assert(err, IsNil)
_, err = fp.WriteString("\n")
c.Assert(err, IsNil)
if i == 0 {
cksum1 = n1
cksum2 = n2
} else {
cksum1 = cksum1 ^ n1
cksum2 = cksum2 ^ n2
}
}
err = fp.Close()
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "load_data_batch_dml", func(dbt *DBTest) {
		// Set the batch size, and check whether load data hits an invalid-transaction error.
dbt.mustExec("set @@session.tidb_dml_batch_size = 128")
dbt.mustExec("drop table if exists t")
dbt.mustExec("create table t(c1 bigint auto_random primary key, c2 bigint, c3 bigint)")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t (c2, c3)", path))
rows := dbt.mustQuery("select count(*) from t")
cli.checkRows(c, rows, "50000")
rows = dbt.mustQuery("select bit_xor(c2), bit_xor(c3) from t")
res := strconv.Itoa(cksum1)
res = res + " "
res = res + strconv.Itoa(cksum2)
cli.checkRows(c, rows, res)
})
}
func (cli *testServerClient) runTestLoadDataAutoRandomWithSpecialTerm(c *C) {
path := "/tmp/load_data_txn_error_term.csv"
fp, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
defer func() {
_ = os.Remove(path)
}()
cksum1 := 0
cksum2 := 0
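	// Same XOR-checksum scheme, but fields are enclosed in single quotes and rows are
	// separated by '|' instead of newlines.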
for i := 0; i < 50000; i++ {
n1 := rand.Intn(1000)
n2 := rand.Intn(1000)
str1 := strconv.Itoa(n1)
str2 := strconv.Itoa(n2)
row := "'" + str1 + "','" + str2 + "'"
_, err := fp.WriteString(row)
c.Assert(err, IsNil)
if i != 49999 {
_, err = fp.WriteString("|")
}
c.Assert(err, IsNil)
if i == 0 {
cksum1 = n1
cksum2 = n2
} else {
cksum1 = cksum1 ^ n1
cksum2 = cksum2 ^ n2
}
}
err = fp.Close()
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params = map[string]string{"sql_mode": "''"}
}, "load_data_batch_dml", func(dbt *DBTest) {
		// Set the batch size, and check whether load data hits an invalid-transaction error.
dbt.mustExec("set @@session.tidb_dml_batch_size = 128")
dbt.mustExec("drop table if exists t1")
dbt.mustExec("create table t1(c1 bigint auto_random primary key, c2 bigint, c3 bigint)")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t1 fields terminated by ',' enclosed by '\\'' lines terminated by '|' (c2, c3)", path))
rows := dbt.mustQuery("select count(*) from t1")
cli.checkRows(c, rows, "50000")
rows = dbt.mustQuery("select bit_xor(c2), bit_xor(c3) from t1")
res := strconv.Itoa(cksum1)
res = res + " "
res = res + strconv.Itoa(cksum2)
cli.checkRows(c, rows, res)
})
}
func (cli *testServerClient) runTestLoadDataForListPartition(c *C) {
path := "/tmp/load_data_list_partition.csv"
defer func() {
_ = os.Remove(path)
}()
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "load_data_list_partition", func(dbt *DBTest) {
dbt.mustExec("set @@session.tidb_enable_list_partition = ON")
dbt.mustExec(`create table t (id int, name varchar(10),
unique index idx (id)) partition by list (id) (
partition p0 values in (3,5,6,9,17),
partition p1 values in (1,2,10,11,19,20),
partition p2 values in (4,12,13,14,18),
partition p3 values in (7,8,15,16,null)
);`)
// Test load data into 1 partition.
cli.prepareLoadDataFile(c, path, "1 a", "2 b")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t", path))
rows := dbt.mustQuery("select * from t partition(p1) order by id")
cli.checkRows(c, rows, "1 a", "2 b")
// Test load data into multi-partitions.
dbt.mustExec("delete from t")
cli.prepareLoadDataFile(c, path, "1 a", "3 c", "4 e")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t", path))
rows = dbt.mustQuery("select * from t order by id")
cli.checkRows(c, rows, "1 a", "3 c", "4 e")
// Test load data meet duplicate error.
cli.prepareLoadDataFile(c, path, "1 x", "2 b", "2 x", "7 a")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t", path))
rows = dbt.mustQuery("show warnings")
cli.checkRows(c, rows,
"Warning 1062 Duplicate entry '1' for key 'idx'",
"Warning 1062 Duplicate entry '2' for key 'idx'")
rows = dbt.mustQuery("select * from t order by id")
cli.checkRows(c, rows, "1 a", "2 b", "3 c", "4 e", "7 a")
// Test load data meet no partition warning.
cli.prepareLoadDataFile(c, path, "5 a", "100 x")
_, err := dbt.db.Exec(fmt.Sprintf("load data local infile %q into table t", path))
c.Assert(err, IsNil)
rows = dbt.mustQuery("show warnings")
cli.checkRows(c, rows, "Warning 1526 Table has no partition for value 100")
rows = dbt.mustQuery("select * from t order by id")
cli.checkRows(c, rows, "1 a", "2 b", "3 c", "4 e", "5 a", "7 a")
})
}
func (cli *testServerClient) runTestLoadDataForListPartition2(c *C) {
path := "/tmp/load_data_list_partition.csv"
defer func() {
_ = os.Remove(path)
}()
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "load_data_list_partition", func(dbt *DBTest) {
dbt.mustExec("set @@session.tidb_enable_list_partition = ON")
dbt.mustExec(`create table t (id int, name varchar(10),b int generated always as (length(name)+1) virtual,
unique index idx (id,b)) partition by list (id*2 + b*b + b*b - b*b*2 - abs(id)) (
partition p0 values in (3,5,6,9,17),
partition p1 values in (1,2,10,11,19,20),
partition p2 values in (4,12,13,14,18),
partition p3 values in (7,8,15,16,null)
);`)
// Test load data into 1 partition.
cli.prepareLoadDataFile(c, path, "1 a", "2 b")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t (id,name)", path))
rows := dbt.mustQuery("select id,name from t partition(p1) order by id")
cli.checkRows(c, rows, "1 a", "2 b")
// Test load data into multi-partitions.
dbt.mustExec("delete from t")
cli.prepareLoadDataFile(c, path, "1 a", "3 c", "4 e")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t (id,name)", path))
rows = dbt.mustQuery("select id,name from t order by id")
cli.checkRows(c, rows, "1 a", "3 c", "4 e")
// Test load data meet duplicate error.
cli.prepareLoadDataFile(c, path, "1 x", "2 b", "2 x", "7 a")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t (id,name)", path))
rows = dbt.mustQuery("show warnings")
cli.checkRows(c, rows,
"Warning 1062 Duplicate entry '1-2' for key 'idx'",
"Warning 1062 Duplicate entry '2-2' for key 'idx'")
rows = dbt.mustQuery("select id,name from t order by id")
cli.checkRows(c, rows, "1 a", "2 b", "3 c", "4 e", "7 a")
// Test load data meet no partition warning.
cli.prepareLoadDataFile(c, path, "5 a", "100 x")
_, err := dbt.db.Exec(fmt.Sprintf("load data local infile %q into table t (id,name)", path))
c.Assert(err, IsNil)
rows = dbt.mustQuery("show warnings")
cli.checkRows(c, rows, "Warning 1526 Table has no partition for value 100")
rows = dbt.mustQuery("select id,name from t order by id")
cli.checkRows(c, rows, "1 a", "2 b", "3 c", "4 e", "5 a", "7 a")
})
}
func (cli *testServerClient) runTestLoadDataForListColumnPartition(c *C) {
path := "/tmp/load_data_list_partition.csv"
defer func() {
_ = os.Remove(path)
}()
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "load_data_list_partition", func(dbt *DBTest) {
dbt.mustExec("set @@session.tidb_enable_list_partition = ON")
dbt.mustExec(`create table t (id int, name varchar(10),
unique index idx (id)) partition by list columns (id) (
partition p0 values in (3,5,6,9,17),
partition p1 values in (1,2,10,11,19,20),
partition p2 values in (4,12,13,14,18),
partition p3 values in (7,8,15,16,null)
);`)
// Test load data into 1 partition.
cli.prepareLoadDataFile(c, path, "1 a", "2 b")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t", path))
rows := dbt.mustQuery("select * from t partition(p1) order by id")
cli.checkRows(c, rows, "1 a", "2 b")
// Test load data into multi-partitions.
dbt.mustExec("delete from t")
cli.prepareLoadDataFile(c, path, "1 a", "3 c", "4 e")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t", path))
rows = dbt.mustQuery("select * from t order by id")
cli.checkRows(c, rows, "1 a", "3 c", "4 e")
// Test load data meet duplicate error.
cli.prepareLoadDataFile(c, path, "1 x", "2 b", "2 x", "7 a")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t", path))
rows = dbt.mustQuery("show warnings")
cli.checkRows(c, rows,
"Warning 1062 Duplicate entry '1' for key 'idx'",
"Warning 1062 Duplicate entry '2' for key 'idx'")
rows = dbt.mustQuery("select * from t order by id")
cli.checkRows(c, rows, "1 a", "2 b", "3 c", "4 e", "7 a")
// Test load data meet no partition warning.
cli.prepareLoadDataFile(c, path, "5 a", "100 x")
_, err := dbt.db.Exec(fmt.Sprintf("load data local infile %q into table t", path))
c.Assert(err, IsNil)
rows = dbt.mustQuery("show warnings")
cli.checkRows(c, rows, "Warning 1526 Table has no partition for value from column_list")
rows = dbt.mustQuery("select id,name from t order by id")
cli.checkRows(c, rows, "1 a", "2 b", "3 c", "4 e", "5 a", "7 a")
})
}
func (cli *testServerClient) runTestLoadDataForListColumnPartition2(c *C) {
path := "/tmp/load_data_list_partition.csv"
defer func() {
_ = os.Remove(path)
}()
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "load_data_list_partition", func(dbt *DBTest) {
dbt.mustExec("set @@session.tidb_enable_list_partition = ON")
dbt.mustExec(`create table t (location varchar(10), id int, a int, unique index idx (location,id)) partition by list columns (location,id) (
partition p_west values in (('w', 1),('w', 2),('w', 3),('w', 4)),
partition p_east values in (('e', 5),('e', 6),('e', 7),('e', 8)),
partition p_north values in (('n', 9),('n',10),('n',11),('n',12)),
partition p_south values in (('s',13),('s',14),('s',15),('s',16))
);`)
// Test load data into 1 partition.
cli.prepareLoadDataFile(c, path, "w 1 1", "w 2 2")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t", path))
rows := dbt.mustQuery("select * from t partition(p_west) order by id")
cli.checkRows(c, rows, "w 1 1", "w 2 2")
// Test load data into multi-partitions.
dbt.mustExec("delete from t")
cli.prepareLoadDataFile(c, path, "w 1 1", "e 5 5", "n 9 9")
dbt.mustExec(fmt.Sprintf("load data local infile %q into table t", path))
rows = dbt.mustQuery("select * from t order by id")
cli.checkRows(c, rows, "w 1 1", "e 5 5", "n 9 9")
// Test load data meet duplicate error.
cli.prepareLoadDataFile(c, path, "w 1 2", "w 2 2")
_, err := dbt.db.Exec(fmt.Sprintf("load data local infile %q into table t", path))
c.Assert(err, IsNil)
rows = dbt.mustQuery("show warnings")
cli.checkRows(c, rows, "Warning 1062 Duplicate entry 'w-1' for key 'idx'")
rows = dbt.mustQuery("select * from t order by id")
cli.checkRows(c, rows, "w 1 1", "w 2 2", "e 5 5", "n 9 9")
// Test load data meet no partition warning.
cli.prepareLoadDataFile(c, path, "w 3 3", "w 5 5", "e 8 8")
_, err = dbt.db.Exec(fmt.Sprintf("load data local infile %q into table t", path))
c.Assert(err, IsNil)
rows = dbt.mustQuery("show warnings")
cli.checkRows(c, rows, "Warning 1526 Table has no partition for value from column_list")
cli.prepareLoadDataFile(c, path, "x 1 1", "w 1 1")
_, err = dbt.db.Exec(fmt.Sprintf("load data local infile %q into table t", path))
c.Assert(err, IsNil)
rows = dbt.mustQuery("show warnings")
cli.checkRows(c, rows,
"Warning 1526 Table has no partition for value from column_list",
"Warning 1062 Duplicate entry 'w-1' for key 'idx'")
rows = dbt.mustQuery("select * from t order by id")
cli.checkRows(c, rows, "w 1 1", "w 2 2", "w 3 3", "e 5 5", "e 8 8", "n 9 9")
})
}
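// checkRows drains rows, renders each row's raw column values separated by single
// spaces (nil values as "<nil>"), and asserts the joined result equals expectedRows.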
func (cli *testServerClient) checkRows(c *C, rows *sql.Rows, expectedRows ...string) {
buf := bytes.NewBuffer(nil)
result := make([]string, 0, 2)
for rows.Next() {
cols, err := rows.Columns()
c.Assert(err, IsNil)
rawResult := make([][]byte, len(cols))
dest := make([]interface{}, len(cols))
for i := range rawResult {
dest[i] = &rawResult[i]
}
err = rows.Scan(dest...)
c.Assert(err, IsNil)
buf.Reset()
for i, raw := range rawResult {
if i > 0 {
buf.WriteString(" ")
}
if raw == nil {
buf.WriteString("<nil>")
} else {
buf.WriteString(string(raw))
}
}
result = append(result, buf.String())
}
c.Assert(strings.Join(result, "\n"), Equals, strings.Join(expectedRows, "\n"))
}
func (cli *testServerClient) runTestLoadData(c *C, server *Server) {
// create a file and write data.
path := "/tmp/load_data_test.csv"
fp, err := os.Create(path)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
defer func() {
err = fp.Close()
c.Assert(err, IsNil)
err = os.Remove(path)
c.Assert(err, IsNil)
}()
_, err = fp.WriteString("\n" +
"xxx row1_col1 - row1_col2 1abc\n" +
"xxx row2_col1 - row2_col2 \n" +
"xxxy row3_col1 - row3_col2 \n" +
"xxx row4_col1 - 900\n" +
"xxx row5_col1 - row5_col3")
c.Assert(err, IsNil)
originalTxnTotalSizeLimit := kv.TxnTotalSizeLimit
// If the MemBuffer can't be committed once in each batch, it will return an error like "transaction is too large".
kv.TxnTotalSizeLimit = 10240
defer func() { kv.TxnTotalSizeLimit = originalTxnTotalSizeLimit }()
// support ClientLocalFiles capability
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "LoadData", func(dbt *DBTest) {
dbt.mustExec("set @@tidb_dml_batch_size = 3")
dbt.mustExec("create table test (a varchar(255), b varchar(255) default 'default value', c int not null auto_increment, primary key(c))")
dbt.mustExec("create view v1 as select 1")
dbt.mustExec("create sequence s1")
// can't insert into views (in TiDB) or sequences. issue #20880
_, err = dbt.db.Exec("load data local infile '/tmp/load_data_test.csv' into table v1")
dbt.Assert(err, NotNil)
dbt.Assert(err.Error(), Equals, "Error 1105: can only load data into base tables")
_, err = dbt.db.Exec("load data local infile '/tmp/load_data_test.csv' into table s1")
dbt.Assert(err, NotNil)
dbt.Assert(err.Error(), Equals, "Error 1105: can only load data into base tables")
rs, err1 := dbt.db.Exec("load data local infile '/tmp/load_data_test.csv' into table test")
dbt.Assert(err1, IsNil)
lastID, err1 := rs.LastInsertId()
dbt.Assert(err1, IsNil)
dbt.Assert(lastID, Equals, int64(1))
affectedRows, err1 := rs.RowsAffected()
dbt.Assert(err1, IsNil)
dbt.Assert(affectedRows, Equals, int64(5))
var (
a string
b string
bb sql.NullString
cc int
)
rows := dbt.mustQuery("select * from test")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &bb, &cc)
dbt.Check(err, IsNil)
dbt.Check(a, DeepEquals, "")
dbt.Check(bb.String, DeepEquals, "")
dbt.Check(cc, DeepEquals, 1)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &cc)
c.Assert(err, IsNil)
dbt.Check(a, DeepEquals, "xxx row2_col1")
dbt.Check(b, DeepEquals, "- row2_col2")
dbt.Check(cc, DeepEquals, 2)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &cc)
c.Assert(err, IsNil)
dbt.Check(a, DeepEquals, "xxxy row3_col1")
dbt.Check(b, DeepEquals, "- row3_col2")
dbt.Check(cc, DeepEquals, 3)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &cc)
c.Assert(err, IsNil)
dbt.Check(a, DeepEquals, "xxx row4_col1")
dbt.Check(b, DeepEquals, "- ")
dbt.Check(cc, DeepEquals, 4)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &cc)
c.Assert(err, IsNil)
dbt.Check(a, DeepEquals, "xxx row5_col1")
dbt.Check(b, DeepEquals, "- ")
dbt.Check(cc, DeepEquals, 5)
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
rows.Close()
		// specify the fields and lines clauses
dbt.mustExec("delete from test")
dbt.mustExec("set @@tidb_dml_batch_size = 3")
rs, err = dbt.db.Exec("load data local infile '/tmp/load_data_test.csv' into table test fields terminated by '\t- ' lines starting by 'xxx ' terminated by '\n'")
dbt.Assert(err, IsNil)
lastID, err = rs.LastInsertId()
dbt.Assert(err, IsNil)
dbt.Assert(lastID, Equals, int64(6))
affectedRows, err = rs.RowsAffected()
dbt.Assert(err, IsNil)
dbt.Assert(affectedRows, Equals, int64(4))
rows = dbt.mustQuery("select * from test")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &cc)
c.Assert(err, IsNil)
dbt.Check(a, DeepEquals, "row1_col1")
dbt.Check(b, DeepEquals, "row1_col2\t1abc")
dbt.Check(cc, DeepEquals, 6)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &cc)
c.Assert(err, IsNil)
dbt.Check(a, DeepEquals, "row2_col1")
dbt.Check(b, DeepEquals, "row2_col2\t")
dbt.Check(cc, DeepEquals, 7)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &cc)
c.Assert(err, IsNil)
dbt.Check(a, DeepEquals, "row4_col1")
dbt.Check(b, DeepEquals, "\t\t900")
dbt.Check(cc, DeepEquals, 8)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &cc)
c.Assert(err, IsNil)
dbt.Check(a, DeepEquals, "row5_col1")
dbt.Check(b, DeepEquals, "\trow5_col3")
dbt.Check(cc, DeepEquals, 9)
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
		// infile larger than one packet (16K)
dbt.mustExec("delete from test")
_, err = fp.WriteString("\n")
dbt.Assert(err, IsNil)
for i := 6; i <= 800; i++ {
_, err = fp.WriteString(fmt.Sprintf("xxx row%d_col1 - row%d_col2\n", i, i))
dbt.Assert(err, IsNil)
}
dbt.mustExec("set @@tidb_dml_batch_size = 3")
rs, err = dbt.db.Exec("load data local infile '/tmp/load_data_test.csv' into table test fields terminated by '\t- ' lines starting by 'xxx ' terminated by '\n'")
dbt.Assert(err, IsNil)
lastID, err = rs.LastInsertId()
dbt.Assert(err, IsNil)
dbt.Assert(lastID, Equals, int64(10))
affectedRows, err = rs.RowsAffected()
dbt.Assert(err, IsNil)
dbt.Assert(affectedRows, Equals, int64(799))
rows = dbt.mustQuery("select * from test")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
		// lines terminated by '' is not supported
dbt.mustExec("set @@tidb_dml_batch_size = 3")
_, err = dbt.db.Exec("load data local infile '/tmp/load_data_test.csv' into table test lines terminated by ''")
dbt.Assert(err, NotNil)
// infile doesn't exist
dbt.mustExec("set @@tidb_dml_batch_size = 3")
_, err = dbt.db.Exec("load data local infile '/tmp/nonexistence.csv' into table test")
dbt.Assert(err, NotNil)
})
err = fp.Close()
c.Assert(err, IsNil)
err = os.Remove(path)
c.Assert(err, IsNil)
fp, err = os.Create(path)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
// Test mixed unenclosed and enclosed fields.
_, err = fp.WriteString(
"\"abc\",123\n" +
"def,456,\n" +
"hig,\"789\",")
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "LoadData", func(dbt *DBTest) {
dbt.mustExec("create table test (str varchar(10) default null, i int default null)")
dbt.mustExec("set @@tidb_dml_batch_size = 3")
_, err1 := dbt.db.Exec(`load data local infile '/tmp/load_data_test.csv' into table test FIELDS TERMINATED BY ',' enclosed by '"'`)
dbt.Assert(err1, IsNil)
var (
str string
id int
)
rows := dbt.mustQuery("select * from test")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&str, &id)
dbt.Check(err, IsNil)
dbt.Check(str, DeepEquals, "abc")
dbt.Check(id, DeepEquals, 123)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&str, &id)
c.Assert(err, IsNil)
dbt.Check(str, DeepEquals, "def")
dbt.Check(id, DeepEquals, 456)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&str, &id)
c.Assert(err, IsNil)
dbt.Check(str, DeepEquals, "hig")
dbt.Check(id, DeepEquals, 789)
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
dbt.mustExec("delete from test")
})
err = fp.Close()
c.Assert(err, IsNil)
err = os.Remove(path)
c.Assert(err, IsNil)
fp, err = os.Create(path)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
// Test irregular csv file.
_, err = fp.WriteString(
`,\N,NULL,,` + "\n" +
"00,0,000000,,\n" +
`2003-03-03, 20030303,030303,\N` + "\n")
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "LoadData", func(dbt *DBTest) {
dbt.mustExec("create table test (a date, b date, c date not null, d date)")
dbt.mustExec("set @@tidb_dml_batch_size = 3")
_, err1 := dbt.db.Exec(`load data local infile '/tmp/load_data_test.csv' into table test FIELDS TERMINATED BY ','`)
dbt.Assert(err1, IsNil)
var (
a sql.NullString
b sql.NullString
d sql.NullString
c sql.NullString
)
rows := dbt.mustQuery("select * from test")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &c, &d)
dbt.Check(err, IsNil)
dbt.Check(a.String, Equals, "0000-00-00")
dbt.Check(b.String, Equals, "")
dbt.Check(c.String, Equals, "0000-00-00")
dbt.Check(d.String, Equals, "0000-00-00")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &c, &d)
dbt.Check(err, IsNil)
dbt.Check(a.String, Equals, "0000-00-00")
dbt.Check(b.String, Equals, "0000-00-00")
dbt.Check(c.String, Equals, "0000-00-00")
dbt.Check(d.String, Equals, "0000-00-00")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &c, &d)
dbt.Check(err, IsNil)
dbt.Check(a.String, Equals, "2003-03-03")
dbt.Check(b.String, Equals, "2003-03-03")
dbt.Check(c.String, Equals, "2003-03-03")
dbt.Check(d.String, Equals, "")
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
dbt.mustExec("delete from test")
})
err = fp.Close()
c.Assert(err, IsNil)
err = os.Remove(path)
c.Assert(err, IsNil)
fp, err = os.Create(path)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
// Test double enclosed.
_, err = fp.WriteString(
`"field1","field2"` + "\n" +
`"a""b","cd""ef"` + "\n" +
`"a"b",c"d"e` + "\n")
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "LoadData", func(dbt *DBTest) {
dbt.mustExec("create table test (a varchar(20), b varchar(20))")
dbt.mustExec("set @@tidb_dml_batch_size = 3")
_, err1 := dbt.db.Exec(`load data local infile '/tmp/load_data_test.csv' into table test FIELDS TERMINATED BY ',' enclosed by '"'`)
dbt.Assert(err1, IsNil)
var (
a sql.NullString
b sql.NullString
)
rows := dbt.mustQuery("select * from test")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b)
dbt.Check(err, IsNil)
dbt.Check(a.String, Equals, "field1")
dbt.Check(b.String, Equals, "field2")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b)
c.Assert(err, IsNil)
dbt.Check(a.String, Equals, `a"b`)
dbt.Check(b.String, Equals, `cd"ef`)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b)
c.Assert(err, IsNil)
dbt.Check(a.String, Equals, `a"b`)
dbt.Check(b.String, Equals, `c"d"e`)
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
dbt.mustExec("delete from test")
})
err = fp.Close()
c.Assert(err, IsNil)
err = os.Remove(path)
c.Assert(err, IsNil)
fp, err = os.Create(path)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
// Test OPTIONALLY
_, err = fp.WriteString(
`"a,b,c` + "\n" +
`"1",2,"3"` + "\n")
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
}, "LoadData", func(dbt *DBTest) {
dbt.mustExec("create table test (id INT NOT NULL PRIMARY KEY, b INT, c varchar(10))")
dbt.mustExec("set @@tidb_dml_batch_size = 3")
_, err1 := dbt.db.Exec(`load data local infile '/tmp/load_data_test.csv' into table test FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '\"' IGNORE 1 LINES`)
dbt.Assert(err1, IsNil)
var (
a int
b int
c sql.NullString
)
rows := dbt.mustQuery("select * from test")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &c)
dbt.Check(err, IsNil)
dbt.Check(a, Equals, 1)
dbt.Check(b, Equals, 2)
dbt.Check(c.String, Equals, "3")
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
dbt.mustExec("delete from test")
})
// test with the ClientLocalFiles capability disabled
server.capability ^= tmysql.ClientLocalFiles
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
}, "LoadData", func(dbt *DBTest) {
dbt.mustExec("create table test (a varchar(255), b varchar(255) default 'default value', c int not null auto_increment, primary key(c))")
dbt.mustExec("set @@tidb_dml_batch_size = 3")
_, err = dbt.db.Exec("load data local infile '/tmp/load_data_test.csv' into table test")
dbt.Assert(err, NotNil)
checkErrorCode(c, err, errno.ErrNotAllowedCommand)
})
server.capability |= tmysql.ClientLocalFiles
err = fp.Close()
c.Assert(err, IsNil)
err = os.Remove(path)
c.Assert(err, IsNil)
fp, err = os.Create(path)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
// Test load data with a small tidb_dml_batch_size and the commit-error failpoint
_, err = fp.WriteString(
`1,2` + "\n" +
`3,4` + "\n")
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "LoadData", func(dbt *DBTest) {
dbt.mustExec("drop table if exists pn")
dbt.mustExec("create table pn (c1 int, c2 int)")
dbt.mustExec("set @@tidb_dml_batch_size = 1")
_, err1 := dbt.db.Exec(`load data local infile '/tmp/load_data_test.csv' into table pn FIELDS TERMINATED BY ','`)
dbt.Assert(err1, IsNil)
var (
a int
b int
)
rows := dbt.mustQuery("select * from pn")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b)
dbt.Check(err, IsNil)
dbt.Check(a, Equals, 1)
dbt.Check(b, Equals, 2)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b)
dbt.Check(err, IsNil)
dbt.Check(a, Equals, 3)
dbt.Check(b, Equals, 4)
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
// fail error processing test
dbt.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/commitOneTaskErr", "return"), IsNil)
_, err1 = dbt.db.Exec(`load data local infile '/tmp/load_data_test.csv' into table pn FIELDS TERMINATED BY ','`)
mysqlErr, ok := err1.(*mysql.MySQLError)
dbt.Assert(ok, IsTrue)
dbt.Assert(mysqlErr.Message, Equals, "mock commit one task error")
dbt.Assert(failpoint.Disable("github.com/pingcap/tidb/executor/commitOneTaskErr"), IsNil)
dbt.mustExec("drop table if exists pn")
})
err = fp.Close()
c.Assert(err, IsNil)
err = os.Remove(path)
c.Assert(err, IsNil)
fp, err = os.Create(path)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
// Test Column List Specification
_, err = fp.WriteString(
`1,2` + "\n" +
`3,4` + "\n")
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "LoadData", func(dbt *DBTest) {
dbt.mustExec("drop table if exists pn")
dbt.mustExec("create table pn (c1 int, c2 int)")
dbt.mustExec("set @@tidb_dml_batch_size = 1")
_, err1 := dbt.db.Exec(`load data local infile '/tmp/load_data_test.csv' into table pn FIELDS TERMINATED BY ',' (c1, c2)`)
dbt.Assert(err1, IsNil)
var (
a int
b int
)
rows := dbt.mustQuery("select * from pn")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b)
dbt.Check(err, IsNil)
dbt.Check(a, Equals, 1)
dbt.Check(b, Equals, 2)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b)
dbt.Check(err, IsNil)
dbt.Check(a, Equals, 3)
dbt.Check(b, Equals, 4)
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
dbt.mustExec("drop table if exists pn")
})
err = fp.Close()
c.Assert(err, IsNil)
err = os.Remove(path)
c.Assert(err, IsNil)
fp, err = os.Create(path)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
// Test Column List Specification
_, err = fp.WriteString(
`1,2,3` + "\n" +
`4,5,6` + "\n")
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "LoadData", func(dbt *DBTest) {
dbt.mustExec("drop table if exists pn")
dbt.mustExec("create table pn (c1 int, c2 int, c3 int)")
dbt.mustExec("set @@tidb_dml_batch_size = 1")
_, err1 := dbt.db.Exec(`load data local infile '/tmp/load_data_test.csv' into table pn FIELDS TERMINATED BY ',' (c1, @dummy)`)
dbt.Assert(err1, IsNil)
var (
a int
b sql.NullString
c sql.NullString
)
rows := dbt.mustQuery("select * from pn")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &c)
dbt.Check(err, IsNil)
dbt.Check(a, Equals, 1)
dbt.Check(b.String, Equals, "")
dbt.Check(c.String, Equals, "")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &c)
dbt.Check(err, IsNil)
dbt.Check(a, Equals, 4)
dbt.Check(b.String, Equals, "")
dbt.Check(c.String, Equals, "")
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
dbt.mustExec("drop table if exists pn")
})
err = fp.Close()
c.Assert(err, IsNil)
err = os.Remove(path)
c.Assert(err, IsNil)
fp, err = os.Create(path)
c.Assert(err, IsNil)
c.Assert(fp, NotNil)
// Test Input Preprocessing
_, err = fp.WriteString(
`1,2,3` + "\n" +
`4,5,6` + "\n")
c.Assert(err, IsNil)
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
}, "LoadData", func(dbt *DBTest) {
dbt.mustExec("drop table if exists pn")
dbt.mustExec("create table pn (c1 int, c2 int, c3 int)")
dbt.mustExec("set @@tidb_dml_batch_size = 1")
_, err1 := dbt.db.Exec(`load data local infile '/tmp/load_data_test.csv' into table pn FIELDS TERMINATED BY ',' (c1, @val1, @val2) SET c3 = @val2 * 100, c2 = CAST(@val1 AS UNSIGNED)`)
dbt.Assert(err1, IsNil)
var (
a int
b int
c int
)
rows := dbt.mustQuery("select * from pn")
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &c)
dbt.Check(err, IsNil)
dbt.Check(a, Equals, 1)
dbt.Check(b, Equals, 2)
dbt.Check(c, Equals, 300)
dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data"))
err = rows.Scan(&a, &b, &c)
dbt.Check(err, IsNil)
dbt.Check(a, Equals, 4)
dbt.Check(b, Equals, 5)
dbt.Check(c, Equals, 600)
dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data"))
dbt.mustExec("drop table if exists pn")
})
}
func (cli *testServerClient) runTestConcurrentUpdate(c *C) {
dbName := "Concurrent"
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.Params["sql_mode"] = "''"
}, dbName, func(dbt *DBTest) {
dbt.mustExec("drop table if exists test2")
dbt.mustExec("create table test2 (a int, b int)")
dbt.mustExec("insert test2 values (1, 1)")
dbt.mustExec("set @@tidb_disable_txn_auto_retry = 0")
txn1, err := dbt.db.Begin()
c.Assert(err, IsNil)
_, err = txn1.Exec(fmt.Sprintf("USE `%s`;", dbName))
c.Assert(err, IsNil)
txn2, err := dbt.db.Begin()
c.Assert(err, IsNil)
_, err = txn2.Exec(fmt.Sprintf("USE `%s`;", dbName))
c.Assert(err, IsNil)
_, err = txn2.Exec("update test2 set a = a + 1 where b = 1")
c.Assert(err, IsNil)
err = txn2.Commit()
c.Assert(err, IsNil)
_, err = txn1.Exec("update test2 set a = a + 1 where b = 1")
c.Assert(err, IsNil)
err = txn1.Commit()
c.Assert(err, IsNil)
})
}
func (cli *testServerClient) runTestExplainForConn(c *C) {
cli.runTestsOnNewDB(c, nil, "explain_for_conn", func(dbt *DBTest) {
dbt.mustExec("drop table if exists t")
dbt.mustExec("create table t (a int key, b int)")
dbt.mustExec("insert t values (1, 1)")
rows := dbt.mustQuery("select connection_id();")
c.Assert(rows.Next(), IsTrue)
var connID int64
err := rows.Scan(&connID)
c.Assert(err, IsNil)
c.Assert(rows.Close(), IsNil)
dbt.mustQuery("select * from t where a=1")
rows = dbt.mustQuery("explain for connection " + strconv.Itoa(int(connID)))
c.Assert(rows.Next(), IsTrue)
row := make([]string, 9)
err = rows.Scan(&row[0], &row[1], &row[2], &row[3], &row[4], &row[5], &row[6], &row[7], &row[8])
c.Assert(err, IsNil)
c.Assert(strings.Join(row, ","), Matches, "Point_Get_1,1.00,1,root,table:t,time.*loop.*handle:1.*")
c.Assert(rows.Close(), IsNil)
})
}
func (cli *testServerClient) runTestErrorCode(c *C) {
cli.runTestsOnNewDB(c, nil, "ErrorCode", func(dbt *DBTest) {
dbt.mustExec("create table test (c int PRIMARY KEY);")
dbt.mustExec("insert into test values (1);")
txn1, err := dbt.db.Begin()
c.Assert(err, IsNil)
_, err = txn1.Exec("insert into test values(1)")
c.Assert(err, IsNil)
err = txn1.Commit()
checkErrorCode(c, err, errno.ErrDupEntry)
// Schema errors
txn2, err := dbt.db.Begin()
c.Assert(err, IsNil)
_, err = txn2.Exec("use db_not_exists;")
checkErrorCode(c, err, errno.ErrBadDB)
_, err = txn2.Exec("select * from tbl_not_exists;")
checkErrorCode(c, err, errno.ErrNoSuchTable)
_, err = txn2.Exec("create database test;")
// Make tests stable. Sometimes the error may be ErrInfoSchemaChanged.
checkErrorCode(c, err, errno.ErrDBCreateExists, errno.ErrInfoSchemaChanged)
_, err = txn2.Exec("create database aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;")
checkErrorCode(c, err, errno.ErrTooLongIdent, errno.ErrInfoSchemaChanged)
_, err = txn2.Exec("create table test (c int);")
checkErrorCode(c, err, errno.ErrTableExists, errno.ErrInfoSchemaChanged)
_, err = txn2.Exec("drop table unknown_table;")
checkErrorCode(c, err, errno.ErrBadTable, errno.ErrInfoSchemaChanged)
_, err = txn2.Exec("drop database unknown_db;")
checkErrorCode(c, err, errno.ErrDBDropExists, errno.ErrInfoSchemaChanged)
_, err = txn2.Exec("create table aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa (a int);")
checkErrorCode(c, err, errno.ErrTooLongIdent, errno.ErrInfoSchemaChanged)
_, err = txn2.Exec("create table long_column_table (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa int);")
checkErrorCode(c, err, errno.ErrTooLongIdent, errno.ErrInfoSchemaChanged)
_, err = txn2.Exec("alter table test add aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa int;")
checkErrorCode(c, err, errno.ErrTooLongIdent, errno.ErrInfoSchemaChanged)
// Optimizer errors
_, err = txn2.Exec("select *, * from test;")
checkErrorCode(c, err, errno.ErrInvalidWildCard)
_, err = txn2.Exec("select row(1, 2) > 1;")
checkErrorCode(c, err, errno.ErrOperandColumns)
_, err = txn2.Exec("select * from test order by row(c, c);")
checkErrorCode(c, err, errno.ErrOperandColumns)
// Variable errors
_, err = txn2.Exec("select @@unknown_sys_var;")
checkErrorCode(c, err, errno.ErrUnknownSystemVariable)
_, err = txn2.Exec("set @@unknown_sys_var='1';")
checkErrorCode(c, err, errno.ErrUnknownSystemVariable)
// Expression errors
_, err = txn2.Exec("select greatest(2);")
checkErrorCode(c, err, errno.ErrWrongParamcountToNativeFct)
})
}
func checkErrorCode(c *C, e error, codes ...uint16) {
me, ok := e.(*mysql.MySQLError)
c.Assert(ok, IsTrue, Commentf("err: %v", e))
if len(codes) == 1 {
c.Assert(me.Number, Equals, codes[0])
}
isMatchCode := false
for _, code := range codes {
if me.Number == code {
isMatchCode = true
break
}
}
c.Assert(isMatchCode, IsTrue, Commentf("got err %v, expected err codes %v", me, codes))
}
func (cli *testServerClient) runTestAuth(c *C) {
cli.runTests(c, nil, func(dbt *DBTest) {
dbt.mustExec(`CREATE USER 'authtest'@'%' IDENTIFIED BY '123';`)
dbt.mustExec(`CREATE ROLE 'authtest_r1'@'%';`)
dbt.mustExec(`GRANT ALL on test.* to 'authtest'`)
dbt.mustExec(`GRANT authtest_r1 to 'authtest'`)
dbt.mustExec(`SET DEFAULT ROLE authtest_r1 TO authtest`)
})
cli.runTests(c, func(config *mysql.Config) {
config.User = "authtest"
config.Passwd = "123"
}, func(dbt *DBTest) {
dbt.mustExec(`USE information_schema;`)
})
db, err := sql.Open("mysql", cli.getDSN(func(config *mysql.Config) {
config.User = "authtest"
config.Passwd = "456"
}))
c.Assert(err, IsNil)
_, err = db.Query("USE information_schema;")
c.Assert(err, NotNil, Commentf("Wrong password should be failed"))
err = db.Close()
c.Assert(err, IsNil)
// Test for loading active roles.
db, err = sql.Open("mysql", cli.getDSN(func(config *mysql.Config) {
config.User = "authtest"
config.Passwd = "123"
}))
c.Assert(err, IsNil)
rows, err := db.Query("select current_role;")
c.Assert(err, IsNil)
c.Assert(rows.Next(), IsTrue)
var outA string
err = rows.Scan(&outA)
c.Assert(err, IsNil)
c.Assert(outA, Equals, "`authtest_r1`@`%`")
err = db.Close()
c.Assert(err, IsNil)
// Test login use IP that not exists in mysql.user.
cli.runTests(c, nil, func(dbt *DBTest) {
dbt.mustExec(`CREATE USER 'authtest2'@'localhost' IDENTIFIED BY '123';`)
dbt.mustExec(`GRANT ALL on test.* to 'authtest2'@'localhost'`)
})
cli.runTests(c, func(config *mysql.Config) {
config.User = "authtest2"
config.Passwd = "123"
}, func(dbt *DBTest) {
dbt.mustExec(`USE information_schema;`)
})
}
func (cli *testServerClient) runTestIssue3662(c *C) {
db, err := sql.Open("mysql", cli.getDSN(func(config *mysql.Config) {
config.DBName = "non_existing_schema"
}))
c.Assert(err, IsNil)
go func() {
err := db.Close()
c.Assert(err, IsNil)
}()
// According to documentation, "Open may just validate its arguments without
// creating a connection to the database. To verify that the data source name
// is valid, call Ping."
err = db.Ping()
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Error 1049: Unknown database 'non_existing_schema'")
}
func (cli *testServerClient) runTestIssue3680(c *C) {
db, err := sql.Open("mysql", cli.getDSN(func(config *mysql.Config) {
config.User = "non_existing_user"
}))
c.Assert(err, IsNil)
go func() {
err := db.Close()
c.Assert(err, IsNil)
}()
// According to documentation, "Open may just validate its arguments without
// creating a connection to the database. To verify that the data source name
// is valid, call Ping."
err = db.Ping()
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Error 1045: Access denied for user 'non_existing_user'@'127.0.0.1' (using password: NO)")
}
func (cli *testServerClient) runTestIssue22646(c *C) {
cli.runTests(c, nil, func(dbt *DBTest) {
c1 := make(chan string, 1)
go func() {
dbt.mustExec(``) // empty query.
c1 <- "success"
}()
select {
case res := <-c1:
fmt.Println(res)
case <-time.After(30 * time.Second):
panic("read empty query statement timed out.")
}
})
}
func (cli *testServerClient) runTestIssue3682(c *C) {
cli.runTests(c, nil, func(dbt *DBTest) {
dbt.mustExec(`CREATE USER 'issue3682'@'%' IDENTIFIED BY '123';`)
dbt.mustExec(`GRANT ALL on test.* to 'issue3682'`)
dbt.mustExec(`GRANT ALL on mysql.* to 'issue3682'`)
})
cli.runTests(c, func(config *mysql.Config) {
config.User = "issue3682"
config.Passwd = "123"
}, func(dbt *DBTest) {
dbt.mustExec(`USE mysql;`)
})
db, err := sql.Open("mysql", cli.getDSN(func(config *mysql.Config) {
config.User = "issue3682"
config.Passwd = "wrong_password"
config.DBName = "non_existing_schema"
}))
c.Assert(err, IsNil)
go func() {
err := db.Close()
c.Assert(err, IsNil)
}()
err = db.Ping()
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Error 1045: Access denied for user 'issue3682'@'127.0.0.1' (using password: YES)")
}
func (cli *testServerClient) runTestDBNameEscape(c *C) {
cli.runTests(c, nil, func(dbt *DBTest) {
dbt.mustExec("CREATE DATABASE `aa-a`;")
})
cli.runTests(c, func(config *mysql.Config) {
config.DBName = "aa-a"
}, func(dbt *DBTest) {
dbt.mustExec(`USE mysql;`)
dbt.mustExec("DROP DATABASE `aa-a`")
})
}
func (cli *testServerClient) runTestResultFieldTableIsNull(c *C) {
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.Params["sql_mode"] = "''"
}, "ResultFieldTableIsNull", func(dbt *DBTest) {
dbt.mustExec("drop table if exists test;")
dbt.mustExec("create table test (c int);")
dbt.mustExec("explain select * from test;")
})
}
func (cli *testServerClient) runTestStatusAPI(c *C) {
resp, err := cli.fetchStatus("/status")
c.Assert(err, IsNil)
defer resp.Body.Close()
decoder := json.NewDecoder(resp.Body)
var data status
err = decoder.Decode(&data)
c.Assert(err, IsNil)
c.Assert(data.Version, Equals, tmysql.ServerVersion)
c.Assert(data.GitHash, Equals, versioninfo.TiDBGitHash)
}
// The golang sql driver (and most drivers) should have multi-statement
// disabled by default for security reasons. Let's ensure that the behavior
// is correct.
func (cli *testServerClient) runFailedTestMultiStatements(c *C) {
cli.runTestsOnNewDB(c, nil, "FailedMultiStatements", func(dbt *DBTest) {
// Default is now OFF in new installations.
// It is still WARN in upgrade installations (for now)
_, err := dbt.db.Exec("SELECT 1; SELECT 1; SELECT 2; SELECT 3;")
c.Assert(err.Error(), Equals, "Error 8130: client has multi-statement capability disabled. Run SET GLOBAL tidb_multi_statement_mode='ON' after you understand the security risk")
// Change to WARN (legacy mode)
dbt.mustExec("SET tidb_multi_statement_mode='WARN'")
dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ")
res := dbt.mustExec("INSERT INTO test VALUES (1, 1)")
count, err := res.RowsAffected()
c.Assert(err, IsNil, Commentf("res.RowsAffected() returned error"))
c.Assert(count, Equals, int64(1))
res = dbt.mustExec("UPDATE test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;")
count, err = res.RowsAffected()
c.Assert(err, IsNil, Commentf("res.RowsAffected() returned error"))
c.Assert(count, Equals, int64(1))
rows := dbt.mustQuery("show warnings")
cli.checkRows(c, rows, "Warning 8130 client has multi-statement capability disabled. Run SET GLOBAL tidb_multi_statement_mode='ON' after you understand the security risk")
var out int
rows = dbt.mustQuery("SELECT value FROM test WHERE id=1;")
if rows.Next() {
err = rows.Scan(&out)
c.Assert(err, IsNil)
c.Assert(out, Equals, 5)
if rows.Next() {
dbt.Error("unexpected data")
}
} else {
dbt.Error("no data")
}
// Change to ON = Fully supported, TiDB legacy. No warnings or Errors.
dbt.mustExec("SET tidb_multi_statement_mode='ON';")
dbt.mustExec("DROP TABLE IF EXISTS test")
dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ")
res = dbt.mustExec("INSERT INTO test VALUES (1, 1)")
count, err = res.RowsAffected()
c.Assert(err, IsNil, Commentf("res.RowsAffected() returned error"))
c.Assert(count, Equals, int64(1))
res = dbt.mustExec("update test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;")
count, err = res.RowsAffected()
c.Assert(err, IsNil, Commentf("res.RowsAffected() returned error"))
c.Assert(count, Equals, int64(1))
rows = dbt.mustQuery("SELECT value FROM test WHERE id=1;")
if rows.Next() {
err = rows.Scan(&out)
c.Assert(err, IsNil)
c.Assert(out, Equals, 5)
if rows.Next() {
dbt.Error("unexpected data")
}
} else {
dbt.Error("no data")
}
})
}
func (cli *testServerClient) runTestMultiStatements(c *C) {
cli.runTestsOnNewDB(c, func(config *mysql.Config) {
config.Params["multiStatements"] = "true"
}, "MultiStatements", func(dbt *DBTest) {
// Create Table
dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ")
// Create Data
res := dbt.mustExec("INSERT INTO test VALUES (1, 1)")
count, err := res.RowsAffected()
c.Assert(err, IsNil, Commentf("res.RowsAffected() returned error"))
c.Assert(count, Equals, int64(1))
// Update
res = dbt.mustExec("UPDATE test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;")
count, err = res.RowsAffected()
c.Assert(err, IsNil, Commentf("res.RowsAffected() returned error"))
c.Assert(count, Equals, int64(1))
// Read
var out int
rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;")
if rows.Next() {
err = rows.Scan(&out)
c.Assert(err, IsNil)
c.Assert(out, Equals, 5)
if rows.Next() {
dbt.Error("unexpected data")
}
} else {
dbt.Error("no data")
}
})
}
func (cli *testServerClient) runTestStmtCount(t *C) {
cli.runTestsOnNewDB(t, nil, "StatementCount", func(dbt *DBTest) {
originStmtCnt := getStmtCnt(string(cli.getMetrics(t)))
dbt.mustExec("create table test (a int)")
dbt.mustExec("insert into test values(1)")
dbt.mustExec("insert into test values(2)")
dbt.mustExec("insert into test values(3)")
dbt.mustExec("insert into test values(4)")
dbt.mustExec("insert into test values(5)")
dbt.mustExec("delete from test where a = 3")
dbt.mustExec("update test set a = 2 where a = 1")
dbt.mustExec("select * from test")
dbt.mustExec("select 2")
dbt.mustExec("prepare stmt1 from 'update test set a = 1 where a = 2'")
dbt.mustExec("execute stmt1")
dbt.mustExec("prepare stmt2 from 'select * from test'")
dbt.mustExec("execute stmt2")
dbt.mustExec("replace into test(a) values(6);")
currentStmtCnt := getStmtCnt(string(cli.getMetrics(t)))
t.Assert(currentStmtCnt["CreateTable"], Equals, originStmtCnt["CreateTable"]+1)
t.Assert(currentStmtCnt["Insert"], Equals, originStmtCnt["Insert"]+5)
t.Assert(currentStmtCnt["Delete"], Equals, originStmtCnt["Delete"]+1)
t.Assert(currentStmtCnt["Update"], Equals, originStmtCnt["Update"]+1)
t.Assert(currentStmtCnt["Select"], Equals, originStmtCnt["Select"]+2)
t.Assert(currentStmtCnt["Prepare"], Equals, originStmtCnt["Prepare"]+2)
t.Assert(currentStmtCnt["Execute"], Equals, originStmtCnt["Execute"]+2)
t.Assert(currentStmtCnt["Replace"], Equals, originStmtCnt["Replace"]+1)
})
}
func (cli *testServerClient) runTestTLSConnection(t *C, overrider configOverrider) error {
dsn := cli.getDSN(overrider)
db, err := sql.Open("mysql", dsn)
t.Assert(err, IsNil)
go func() {
err := db.Close()
t.Assert(err, IsNil)
}()
_, err = db.Exec("USE test")
if err != nil {
return errors.Annotate(err, "dsn:"+dsn)
}
return err
}
func (cli *testServerClient) runReloadTLS(t *C, overrider configOverrider, errorNoRollback bool) error {
db, err := sql.Open("mysql", cli.getDSN(overrider))
t.Assert(err, IsNil)
go func() {
err := db.Close()
t.Assert(err, IsNil)
}()
sql := "alter instance reload tls"
if errorNoRollback {
sql += " no rollback on error"
}
_, err = db.Exec(sql)
return err
}
func (cli *testServerClient) runTestSumAvg(c *C) {
cli.runTests(c, nil, func(dbt *DBTest) {
dbt.mustExec("create table sumavg (a int, b decimal, c double)")
dbt.mustExec("insert sumavg values (1, 1, 1)")
rows := dbt.mustQuery("select sum(a), sum(b), sum(c) from sumavg")
c.Assert(rows.Next(), IsTrue)
var outA, outB, outC float64
err := rows.Scan(&outA, &outB, &outC)
c.Assert(err, IsNil)
c.Assert(outA, Equals, 1.0)
c.Assert(outB, Equals, 1.0)
c.Assert(outC, Equals, 1.0)
rows = dbt.mustQuery("select avg(a), avg(b), avg(c) from sumavg")
c.Assert(rows.Next(), IsTrue)
err = rows.Scan(&outA, &outB, &outC)
c.Assert(err, IsNil)
c.Assert(outA, Equals, 1.0)
c.Assert(outB, Equals, 1.0)
c.Assert(outC, Equals, 1.0)
})
}
func (cli *testServerClient) getMetrics(t *C) []byte {
resp, err := cli.fetchStatus("/metrics")
t.Assert(err, IsNil)
content, err := ioutil.ReadAll(resp.Body)
t.Assert(err, IsNil)
err = resp.Body.Close()
t.Assert(err, IsNil)
return content
}
func getStmtCnt(content string) (stmtCnt map[string]int) {
stmtCnt = make(map[string]int)
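// Each matching metrics line looks like: tidb_executor_statement_total{type="Select"} 42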
r := regexp.MustCompile("tidb_executor_statement_total{type=\"([A-Z|a-z|-]+)\"} (\\d+)")
matchResult := r.FindAllStringSubmatch(content, -1)
for _, v := range matchResult {
cnt, _ := strconv.Atoi(v[2])
stmtCnt[v[1]] = cnt
}
return stmtCnt
}
const retryTime = 100
func (cli *testServerClient) waitUntilServerOnline() {
// connect server
retry := 0
for ; retry < retryTime; retry++ {
time.Sleep(time.Millisecond * 10)
db, err := sql.Open("mysql", cli.getDSN())
if err == nil {
err = db.Close()
if err != nil {
panic(err)
}
break
}
}
if retry == retryTime {
log.Fatal("failed to connect DB in every 10 ms", zap.Int("retryTime", retryTime))
}
for retry = 0; retry < retryTime; retry++ {
// fetch http status
resp, err := cli.fetchStatus("/status")
if err == nil {
_, err = ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
err = resp.Body.Close()
if err != nil {
panic(err)
}
break
}
time.Sleep(time.Millisecond * 10)
}
if retry == retryTime {
log.Fatal("failed to connect HTTP status in every 10 ms", zap.Int("retryTime", retryTime))
}
}
// Client errors are only incremented when using the TiDB Server protocol,
// and not internal SQL statements. Thus, this test is in the server-test suite.
func (cli *testServerClient) runTestInfoschemaClientErrors(t *C) {
cli.runTestsOnNewDB(t, nil, "clientErrors", func(dbt *DBTest) {
clientErrors := []struct {
stmt string
incrementWarnings bool
incrementErrors bool
errCode int
}{
{
stmt: "SELECT 0/0",
incrementWarnings: true,
errCode: 1365, // div by zero
},
{
stmt: "CREATE TABLE test_client_errors2 (a int primary key, b int primary key)",
incrementErrors: true,
errCode: 1068, // multiple pkeys
},
{
stmt: "gibberish",
incrementErrors: true,
errCode: 1064, // parse error
},
}
sources := []string{"client_errors_summary_global", "client_errors_summary_by_user", "client_errors_summary_by_host"}
for _, test := range clientErrors {
for _, tbl := range sources {
var errors, warnings int
rows := dbt.mustQuery("SELECT SUM(error_count), SUM(warning_count) FROM information_schema."+tbl+" WHERE error_number = ? GROUP BY error_number", test.errCode)
if rows.Next() {
rows.Scan(&errors, &warnings)
}
if test.incrementErrors {
errors++
}
if test.incrementWarnings {
warnings++
}
dbt.db.Query(test.stmt) // ignore results and errors (query table)
var newErrors, newWarnings int
rows = dbt.mustQuery("SELECT SUM(error_count), SUM(warning_count) FROM information_schema."+tbl+" WHERE error_number = ? GROUP BY error_number", test.errCode)
if rows.Next() {
rows.Scan(&newErrors, &newWarnings)
}
dbt.Check(newErrors, Equals, errors)
dbt.Check(newWarnings, Equals, warnings)
}
}
})
}
| [
"\"log_level\""
] | [] | [
"log_level"
] | [] | ["log_level"] | go | 1 | 0 | |
demos/TD3/GraphDrive-Hard(SR)/baseline.py | # Read this guide for how to use this script: https://medium.com/distributed-computing-with-ray/intro-to-rllib-example-environments-3a113f532c70
import os
os.environ["TUNE_RESULT_DIR"] = 'tmp/ray_results'
import multiprocessing
import json
import shutil
import ray
import time
from xarl.utils.workflow import train
from ray.rllib.agents.ddpg.td3 import TD3Trainer, TD3_DEFAULT_CONFIG
from environments import *
from ray.rllib.models import ModelCatalog
from xarl.models.ddpg import TFAdaptiveMultiHeadDDPG
# # ModelCatalog.register_custom_model("adaptive_multihead_network", TFAdaptiveMultiHeadDDPG)
# SELECT_ENV = "CescoDrive-V1"
SELECT_ENV = "GraphDrive-Hard-Sparse"
CONFIG = TD3_DEFAULT_CONFIG.copy()
CONFIG.update({
# "model": { # this is for GraphDrive and GridDrive
# "custom_model": "adaptive_multihead_network",
# },
# "preprocessor_pref": "rllib", # this prevents reward clipping on Atari and other weird issues when running from checkpoints
"seed": 42, # This makes experiments reproducible.
"rollout_fragment_length": 2**6, # Divide episodes into fragments of this many steps each during rollouts. Default is 1.
"train_batch_size": 2**8, # Number of transitions per train-batch. Default is: 100 for TD3, 256 for SAC and DDPG, 32 for DQN, 500 for APPO.
# "batch_mode": "truncate_episodes", # For some clustering schemes (e.g. extrinsic_reward, moving_best_extrinsic_reward, etc..) it has to be equal to 'complete_episodes', otherwise it can also be 'truncate_episodes'.
###########################
"prioritized_replay": True, # Whether to replay batches with the highest priority/importance/relevance for the agent.
'buffer_size': 2**14, # Size of the experience buffer. Default 50000
"prioritized_replay_alpha": 0.6,
"prioritized_replay_beta": 0.4, # The smaller, the stronger is over-sampling
"prioritized_replay_eps": 1e-6,
"learning_starts": 2**14, # How many steps of the model to sample before learning starts.
###########################
"gamma": 0.999, # We use an higher gamma to extend the MDP's horizon; optimal agency on GraphDrive requires a longer horizon.
"tau": 1e-4,
##################################
})
CONFIG["callbacks"] = CustomEnvironmentCallbacks
####################################################################################
####################################################################################
ray.shutdown()
ray.init(ignore_reinit_error=True)
train(TD3Trainer, CONFIG, SELECT_ENV, test_every_n_step=4e7, stop_training_after_n_step=4e7)
| [] | [] | [
"TUNE_RESULT_DIR"
] | [] | ["TUNE_RESULT_DIR"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PersonalWebsite2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
git-bark/src/git_bark/__main__.py | """Git repository information consists of:
1. Files in the `.git` repository directory
2. Files in the working tree
Backing up a git repository means persisting it in a way so it can
be recovered later. There are several possible approaches, with
different trade-offs:
1. `git bundle create` - the resulting bundle does not include modified files in the
working tree, `.git/hooks`, `.git/config`, `.git/info/*`, or the reflog.
2. `git clone --mirror` - this does not include repository-local files and is also
pickier about which objects it copies.
3. backup the working tree and `.git` file/directory - this will likely copy
build artifacts (there can be a lot). What build artifacts to exclude is going to
vary by project, and some backup applications (duplicity, rclone, restic) don't seem
to support directory-specific exclude files (like reading `.gitignore`).
4. backup just the `.git` file/directory - this does not include modified files
in the working tree.
This script improves the `.git` directory backup approach, saving all working-tree
modifications into the repository. The regular index is not modified, so in-progress
rebase/merge/staging is not interrupted.
Any ignored files will not be included.
"""
import argparse
import os
import subprocess
import sys
import tempfile
import textwrap
from contextlib import contextmanager
from pathlib import Path
from typing import Iterable, List, NamedTuple, Optional
class OutputPrintingError(subprocess.CalledProcessError):
def __init__(self, exc: subprocess.CalledProcessError):
super().__init__(exc.returncode, exc.cmd, exc.output, exc.stderr)
def __str__(self):
# Already has command and exit code/signal.
base_text = super().__str__()
fields = []
if self.output is not None:
printable_output = self.output.decode("utf-8", errors="replace")
output = textwrap.indent(printable_output, " ")
fields.append(f"output:\n{output}")
if self.stderr is not None:
printable_stderr = self.stderr.decode("utf-8", errors="replace")
stderr = textwrap.indent(printable_stderr, " ")
fields.append(f"stderr:\n{stderr}")
if fields:
field_text = textwrap.indent("\n".join(fields), " ")
return f"{base_text}\n{field_text}"
return base_text
@contextmanager
def traced_error_output():
try:
yield
except subprocess.CalledProcessError as e:
raise OutputPrintingError(e).with_traceback(e.__traceback__) from None
def git(args, **kwargs) -> subprocess.CompletedProcess:
args.insert(0, "git")
with traced_error_output():
return subprocess.run(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, **kwargs
)
def git_ref_exists(name: str) -> bool:
try:
git(["show-ref", "--quiet", "--verify", name])
except subprocess.CalledProcessError as e:
if e.returncode == 1:
return False
raise
else:
return True
# Any refs under refs/worktree are worktree-specific, and behavior is exactly like we'd
# expect:
# - per-worktree refs (worktrees/<name>/refs/... or refs/...)
# - per-worktree reflogs (worktrees/<name>/logs/... or logs/...)
BACKUP_REF = "refs/worktree/backup"
def backup() -> None:
"""Given a (possibly dirty) worktree, back it up to the repository.
"""
# Create a new tree object.
with tempfile.TemporaryDirectory() as d:
index_file = Path(d) / "INDEX"
env = os.environ.copy()
env.update({"GIT_INDEX_FILE": str(index_file)})
subprocess.run(["git", "add", "--all"], env=env, check=True)
result = git(["write-tree"], env=env)
tree_id = result.stdout.decode("utf-8").strip()
result = git(["cat-file", "tree", tree_id])
if not result.stdout.strip():
# We cannot check out an empty tree, so do nothing.
return
if git_ref_exists(BACKUP_REF):
result = git(["show", "--quiet", "--pretty=format:%T", BACKUP_REF])
current_tree_id = result.stdout.decode("utf-8").strip()
if current_tree_id == tree_id:
# If the backup is already current, don't make a new commit.
return
message = b"backup"
result = git(["commit-tree", "--no-gpg-sign", tree_id], input=message)
commit_id = result.stdout.decode("utf-8").strip()
git(["update-ref", "-m", "new backup", "--create-reflog", BACKUP_REF, commit_id])
def _parse_ls_files_z(output: bytes) -> Iterable[str]:
output = output.rstrip(b"\0")
if not output:
return
records = output.split(b"\0")
for record in records:
_perms, _hash, _stage, filename = record.decode("utf-8").split(maxsplit=3)
yield filename
class StatusEntry(NamedTuple):
state_1: str
state_2: str
path: str
orig_path: Optional[str]
MULTI_FIELD_STATES = {"C", "R"}
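# Copied ("C") and renamed ("R") entries carry an extra NUL-separated field holding the
# original path, e.g. a staged rename appears as b"R  new_name\x00old_name" in `git status -z`.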
def _parse_status_z(output: bytes) -> Iterable[StatusEntry]:
output = output.rstrip(b"\0")
# No change in index or working tree.
if not output:
return
fields = iter(output.split(b"\0"))
# field is the first part of an entry, i.e. XY PATH
for field in fields:
decoded_field = field.decode("utf-8")
state_1, state_2, path = decoded_field[0], decoded_field[1], decoded_field[3:]
orig_path = None
if state_1 in MULTI_FIELD_STATES or state_2 in MULTI_FIELD_STATES:
orig_path = next(fields).decode("utf-8")
yield StatusEntry(state_1, state_2, path, orig_path)
def restore() -> None:
"""Given a worktree that has been backed up, restore it.
"Backed up" means:
1. `backup` was executed on the worktree
2. git knows how to locate the repository
3. the current state of the provided `path` is that the copied `.git`
directory/file is present and any other files present in the worktree can be
disregarded in favor of the backed up working tree state.
"""
# Our goal is to support a worktree with no (consequential) contents. This means
# no added/updated/renamed/untracked files.
# git status alone doesn't let us differentiate between unmerged paths and paths that
# are different from the index, so we retrieve the unmerged paths separately.
result = git(["rev-parse", "--show-toplevel"])
worktree_dir = Path(result.stdout.decode("utf-8").rstrip("\n"))
# -z: NUL-terminated
result = git(["ls-files", "-z", "--unmerged"])
unmerged_paths = set(_parse_ls_files_z(result.stdout))
for path in unmerged_paths:
if (worktree_dir / path).exists():
raise RuntimeError("Cannot restore worktree with unresolved paths present")
# --no-optional-locks: Do not touch the index.
# -z: NUL-separated fields/NUL-terminated records.
result = git(["--no-optional-locks", "status", "-z"])
for entry in _parse_status_z(result.stdout):
if entry.path in unmerged_paths:
continue
# Relative to the index, all files must be considered deleted.
if entry.state_2 != "D":
raise RuntimeError("Working tree must not have any files!")
if not git_ref_exists(BACKUP_REF):
# No ref, no action.
return
with tempfile.TemporaryDirectory() as d:
index_file = Path(d) / "INDEX"
env = os.environ.copy()
# Temporarily set git index to a different location so we don't clobber the existing one.
env.update({"GIT_INDEX_FILE": str(index_file)})
git(["checkout", BACKUP_REF, "."], env=env)
def main(argv: List[str] = sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Backup in-progress git repository files"
)
subparsers = parser.add_subparsers(
description="command", dest="command", required=True
)
backup_parser = subparsers.add_parser("backup")
backup_parser.set_defaults(func=backup)
restore_parser = subparsers.add_parser("restore")
restore_parser.set_defaults(func=restore)
args = parser.parse_args(argv)
args.func()
if __name__ == "__main__":
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
uta/__init__.py | import pkg_resources
import logging
import os
import warnings
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from uta.exceptions import *
from uta import models
try:
__version__ = pkg_resources.get_distribution(__package__).version
except pkg_resources.DistributionNotFound as e:
warnings.warn(
"can't get __version__ because %s package isn't installed" % __package__, Warning)
__version__ = None
public_db_url = "postgresql://uta_public:[email protected]/uta"
default_db_url = os.environ.get("UTA_DB_URL", public_db_url)
def connect(db_url=default_db_url):
"""
Connect to a UTA database instance and return a UTA0 interface instance.
When called with an explicit db_url argument, that db_url is used for connecting.
When called without an explicit argument, the function default is
determined by the environment variable UTA_DB_URL if it exists, or
bdi.sources.uta0.public_db_url.
The format of the db_url is driver://user:pass@host/database (the same
as that used by SQLAlchemy). Examples:
A remote public postgresql database:
postgresql://uta_public:[email protected]/uta'
A local postgresql database:
postgresql://localhost/uta
A local SQLite database:
sqlite:////tmp/uta-0.0.5.db
SQLite database snapshots are available at:
`https://bitbucket.org/biocommons/uta/downloads`_
"""
# TODO: Verify schema version
engine = create_engine(db_url)
Session = sessionmaker(bind=engine)
session = Session()
logger = logging.getLogger(__name__)
logger.info("connected to " + db_url)
return session
# <LICENSE>
# Copyright 2014 UTA Contributors (https://bitbucket.org/biocommons/uta)
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
##
# http://www.apache.org/licenses/LICENSE-2.0
##
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
| [] | [] | [
"UTA_DB_URL"
] | [] | ["UTA_DB_URL"] | python | 1 | 0 | |
votepizza/votepizza/asgi.py | """
ASGI config for votepizza project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'votepizza.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
app/interface/main/activity/dao/like/dao_test.go | package like
import (
"flag"
"go-common/app/interface/main/activity/conf"
"os"
"testing"
)
var (
d *Dao
)
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.web-svr.activity")
flag.Set("conf_token", "22edc93e2998bf0cb0bbee661b03d41f")
flag.Set("tree_id", "2873")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../../cmd/activity-test.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
os.Exit(m.Run())
}
| [
"\"DEPLOY_ENV\""
] | [] | [
"DEPLOY_ENV"
] | [] | ["DEPLOY_ENV"] | go | 1 | 0 | |
src/syscall/syscall_unix_test.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package syscall_test
import (
"flag"
"fmt"
"internal/testenv"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"syscall"
"testing"
"time"
)
// Tests that below functions, structures and constants are consistent
// on all Unix-like systems.
func _() {
// program scheduling priority functions and constants
var (
_ func(int, int, int) error = syscall.Setpriority
_ func(int, int) (int, error) = syscall.Getpriority
)
const (
_ int = syscall.PRIO_USER
_ int = syscall.PRIO_PROCESS
_ int = syscall.PRIO_PGRP
)
// termios constants
const (
_ int = syscall.TCIFLUSH
_ int = syscall.TCIOFLUSH
_ int = syscall.TCOFLUSH
)
// fcntl file locking structure and constants
var (
_ = syscall.Flock_t{
Type: int16(0),
Whence: int16(0),
Start: int64(0),
Len: int64(0),
Pid: int32(0),
}
)
const (
_ = syscall.F_GETLK
_ = syscall.F_SETLK
_ = syscall.F_SETLKW
)
}
// TestFcntlFlock tests whether the file locking structure matches
// the calling convention of each kernel.
// On some Linux systems, glibc uses another set of values for the
// commands and translates them to the correct value that the kernel
// expects just before the actual fcntl syscall. As Go uses raw
// syscalls directly, it must use the real value, not the glibc value.
// Thus this test also verifies that the Flock_t structure can be
// roundtripped with F_SETLK and F_GETLK.
func TestFcntlFlock(t *testing.T) {
if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
t.Skip("skipping; no child processes allowed on iOS")
}
flock := syscall.Flock_t{
Type: syscall.F_WRLCK,
Start: 31415, Len: 271828, Whence: 1,
}
if os.Getenv("GO_WANT_HELPER_PROCESS") == "" {
// parent
tempDir, err := ioutil.TempDir("", "TestFcntlFlock")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
name := filepath.Join(tempDir, "TestFcntlFlock")
fd, err := syscall.Open(name, syscall.O_CREAT|syscall.O_RDWR|syscall.O_CLOEXEC, 0)
if err != nil {
t.Fatalf("Open failed: %v", err)
}
defer os.RemoveAll(tempDir)
defer syscall.Close(fd)
if err := syscall.Ftruncate(fd, 1<<20); err != nil {
t.Fatalf("Ftruncate(1<<20) failed: %v", err)
}
if err := syscall.FcntlFlock(uintptr(fd), syscall.F_SETLK, &flock); err != nil {
t.Fatalf("FcntlFlock(F_SETLK) failed: %v", err)
}
cmd := exec.Command(os.Args[0], "-test.run=^TestFcntlFlock$")
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
cmd.ExtraFiles = []*os.File{os.NewFile(uintptr(fd), name)}
out, err := cmd.CombinedOutput()
if len(out) > 0 || err != nil {
t.Fatalf("child process: %q, %v", out, err)
}
} else {
// child
got := flock
// make sure the child lock is conflicting with the parent lock
got.Start--
got.Len++
if err := syscall.FcntlFlock(3, syscall.F_GETLK, &got); err != nil {
t.Fatalf("FcntlFlock(F_GETLK) failed: %v", err)
}
flock.Pid = int32(syscall.Getppid())
// Linux kernel always set Whence to 0
flock.Whence = 0
if got.Type == flock.Type && got.Start == flock.Start && got.Len == flock.Len && got.Pid == flock.Pid && got.Whence == flock.Whence {
os.Exit(0)
}
t.Fatalf("FcntlFlock got %v, want %v", got, flock)
}
}
// TestPassFD tests passing a file descriptor over a Unix socket.
//
// This test involves both a parent and a child process. The parent
// process is invoked as a normal test, with "go test", which then
// runs the child process by running the current test binary with args
// "-test.run=^TestPassFD$" and an environment variable used to signal
// that the test should become the child process instead.
func TestPassFD(t *testing.T) {
testenv.MustHaveExec(t)
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
passFDChild()
return
}
tempDir, err := ioutil.TempDir("", "TestPassFD")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
if err != nil {
t.Fatalf("Socketpair: %v", err)
}
defer syscall.Close(fds[0])
defer syscall.Close(fds[1])
writeFile := os.NewFile(uintptr(fds[0]), "child-writes")
readFile := os.NewFile(uintptr(fds[1]), "parent-reads")
defer writeFile.Close()
defer readFile.Close()
cmd := exec.Command(os.Args[0], "-test.run=^TestPassFD$", "--", tempDir)
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
cmd.ExtraFiles = []*os.File{writeFile}
out, err := cmd.CombinedOutput()
if len(out) > 0 || err != nil {
t.Fatalf("child process: %q, %v", out, err)
}
c, err := net.FileConn(readFile)
if err != nil {
t.Fatalf("FileConn: %v", err)
}
defer c.Close()
uc, ok := c.(*net.UnixConn)
if !ok {
t.Fatalf("unexpected FileConn type; expected UnixConn, got %T", c)
}
buf := make([]byte, 32) // expect 1 byte
oob := make([]byte, 32) // expect 24 bytes
closeUnix := time.AfterFunc(5*time.Second, func() {
t.Logf("timeout reading from unix socket")
uc.Close()
})
_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
if err != nil {
t.Fatalf("ReadMsgUnix: %v", err)
}
closeUnix.Stop()
scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
if err != nil {
t.Fatalf("ParseSocketControlMessage: %v", err)
}
if len(scms) != 1 {
t.Fatalf("expected 1 SocketControlMessage; got scms = %#v", scms)
}
scm := scms[0]
gotFds, err := syscall.ParseUnixRights(&scm)
if err != nil {
t.Fatalf("syscall.ParseUnixRights: %v", err)
}
if len(gotFds) != 1 {
t.Fatalf("wanted 1 fd; got %#v", gotFds)
}
f := os.NewFile(uintptr(gotFds[0]), "fd-from-child")
defer f.Close()
got, err := ioutil.ReadAll(f)
want := "Hello from child process!\n"
if string(got) != want {
t.Errorf("child process ReadAll: %q, %v; want %q", got, err, want)
}
}
// passFDChild is the child process used by TestPassFD.
func passFDChild() {
defer os.Exit(0)
// Look for our fd. It should be fd 3, but we work around an fd leak
// bug here (https://golang.org/issue/2603) to let it be elsewhere.
var uc *net.UnixConn
for fd := uintptr(3); fd <= 10; fd++ {
f := os.NewFile(fd, "unix-conn")
var ok bool
netc, _ := net.FileConn(f)
uc, ok = netc.(*net.UnixConn)
if ok {
break
}
}
if uc == nil {
fmt.Println("failed to find unix fd")
return
}
// Make a file f to send to our parent process on uc.
// We make it in tempDir, which our parent will clean up.
flag.Parse()
tempDir := flag.Arg(0)
f, err := ioutil.TempFile(tempDir, "")
if err != nil {
fmt.Printf("TempFile: %v", err)
return
}
f.Write([]byte("Hello from child process!\n"))
f.Seek(0, io.SeekStart)
rights := syscall.UnixRights(int(f.Fd()))
dummyByte := []byte("x")
n, oobn, err := uc.WriteMsgUnix(dummyByte, rights, nil)
if err != nil {
fmt.Printf("WriteMsgUnix: %v", err)
return
}
if n != 1 || oobn != len(rights) {
fmt.Printf("WriteMsgUnix = %d, %d; want 1, %d", n, oobn, len(rights))
return
}
}
// TestUnixRightsRoundtrip tests that UnixRights, ParseSocketControlMessage,
// and ParseUnixRights are able to successfully round-trip lists of file descriptors.
func TestUnixRightsRoundtrip(t *testing.T) {
testCases := [...][][]int{
{{42}},
{{1, 2}},
{{3, 4, 5}},
{{}},
{{1, 2}, {3, 4, 5}, {}, {7}},
}
for _, testCase := range testCases {
b := []byte{}
var n int
for _, fds := range testCase {
// Last assignment to n wins
n = len(b) + syscall.CmsgLen(4*len(fds))
b = append(b, syscall.UnixRights(fds...)...)
}
// Truncate b
b = b[:n]
scms, err := syscall.ParseSocketControlMessage(b)
if err != nil {
t.Fatalf("ParseSocketControlMessage: %v", err)
}
if len(scms) != len(testCase) {
t.Fatalf("expected %v SocketControlMessage; got scms = %#v", len(testCase), scms)
}
for i, scm := range scms {
gotFds, err := syscall.ParseUnixRights(&scm)
if err != nil {
t.Fatalf("ParseUnixRights: %v", err)
}
wantFds := testCase[i]
if len(gotFds) != len(wantFds) {
t.Fatalf("expected %v fds, got %#v", len(wantFds), gotFds)
}
for j, fd := range gotFds {
if fd != wantFds[j] {
t.Fatalf("expected fd %v, got %v", wantFds[j], fd)
}
}
}
}
}
func TestRlimit(t *testing.T) {
var rlimit, zero syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
if err != nil {
t.Fatalf("Getrlimit: save failed: %v", err)
}
if zero == rlimit {
t.Fatalf("Getrlimit: save failed: got zero value %#v", rlimit)
}
set := rlimit
set.Cur = set.Max - 1
if runtime.GOOS == "darwin" && set.Cur > 10240 {
// The max file limit is 10240, even though
// the max returned by Getrlimit is 1<<63-1.
// This is OPEN_MAX in sys/syslimits.h.
set.Cur = 10240
}
err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &set)
if err != nil {
t.Fatalf("Setrlimit: set failed: %#v %v", set, err)
}
var get syscall.Rlimit
err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &get)
if err != nil {
t.Fatalf("Getrlimit: get failed: %v", err)
}
set = rlimit
set.Cur = set.Max - 1
if runtime.GOOS == "darwin" && set.Cur > 10240 {
set.Cur = 10240
}
if set != get {
t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get)
}
err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit)
if err != nil {
t.Fatalf("Setrlimit: restore failed: %#v %v", rlimit, err)
}
}
func TestSeekFailure(t *testing.T) {
_, err := syscall.Seek(-1, 0, io.SeekStart)
if err == nil {
t.Fatalf("Seek(-1, 0, 0) did not fail")
}
str := err.Error() // used to crash on Linux
t.Logf("Seek: %v", str)
if str == "" {
t.Fatalf("Seek(-1, 0, 0) return error with empty message")
}
}
| [
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_WANT_HELPER_PROCESS\""
] | [] | [
"GO_WANT_HELPER_PROCESS"
] | [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
alttprbot/alttprgen/randomizer/ootr.py | import aiohttp
import os
OOTR_BASE_URL = os.environ.get('OOTR_BASE_URL', 'https://ootrandomizer.com')
OOTR_API_KEY = os.environ.get('OOTR_API_KEY')
async def roll_ootr(settings, version='6.1.0', encrypt=True):
async with aiohttp.request(
method='post',
url=f"{OOTR_BASE_URL}/api/sglive/seed/create",
raise_for_status=True,
json=settings,
params={
"key": OOTR_API_KEY,
"version": version,
"encrypt": str(encrypt).lower()
}
) as resp:
result = await resp.json()
return result
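# Minimal usage sketch; the settings payload here is a placeholder, not a real OOTR preset,
# and OOTR_API_KEY must be set in the environment:
#   result = await roll_ootr({"world_count": 1}, version="6.1.0", encrypt=True)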
| [] | [] | [
"OOTR_BASE_URL",
"OOTR_API_KEY"
] | [] | ["OOTR_BASE_URL", "OOTR_API_KEY"] | python | 2 | 0 | |
internal/apiserver/root.go | package apiserver
/*
Copyright 2017 - 2021 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"context"
"crypto/ecdsa"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"github.com/percona/percona-postgresql-operator/internal/config"
"github.com/percona/percona-postgresql-operator/internal/kubeapi"
"github.com/percona/percona-postgresql-operator/internal/ns"
"github.com/percona/percona-postgresql-operator/internal/tlsutil"
"github.com/percona/percona-postgresql-operator/internal/util"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const PGOSecretName = "pgo.tls"
const VERSION_MISMATCH_ERROR = "pgo client and server version mismatch"
var (
// Clientset is a client for Kubernetes resources
Clientset kubeapi.Interface
// RESTConfig holds the REST configuration for a Kube client
RESTConfig *rest.Config
)
// MetricsFlag if set to true will cause crunchy-postgres-exporter to be added into new clusters
var MetricsFlag, BadgerFlag bool
// AuditFlag if set to true will cause auditing to occur in the logs
var AuditFlag bool
// DebugFlag is the debug flag value
var DebugFlag bool
// BasicAuth comes from the apiserver config
var BasicAuth bool
// Namespace comes from the apiserver config in this version
var (
PgoNamespace string
InstallationName string
)
var CRUNCHY_DEBUG bool
// TreeTrunk is for debugging only in this context
const TreeTrunk = "└── "
// TreeBranch is for debugging only in this context
const TreeBranch = "├── "
type CredentialDetail struct {
Username string
Password string
Role string
Namespaces []string
}
var Pgo config.PgoConfig
// NamespaceOperatingMode defines the namespace operating mode for the cluster,
// e.g. "dynamic", "readonly" or "disabled". See type NamespaceOperatingMode
// for detailed explanations of each mode available.
var namespaceOperatingMode ns.NamespaceOperatingMode
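// Initialize reads the required environment variables, connects to Kubernetes,
// loads the Operator configuration and determines the namespace operating mode.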
func Initialize() {
PgoNamespace = os.Getenv("PGO_OPERATOR_NAMESPACE")
if PgoNamespace == "" {
log.Info("PGO_OPERATOR_NAMESPACE environment variable is not set and is required, this is the namespace that the Operator is to run within.")
os.Exit(2)
}
log.Info("Pgo Namespace is [" + PgoNamespace + "]")
InstallationName = os.Getenv("PGO_INSTALLATION_NAME")
if InstallationName == "" {
log.Error("PGO_INSTALLATION_NAME environment variable is missng")
os.Exit(2)
}
log.Info("InstallationName is [" + InstallationName + "]")
tmp := os.Getenv("CRUNCHY_DEBUG")
CRUNCHY_DEBUG = false
if tmp == "true" {
CRUNCHY_DEBUG = true
}
BasicAuth = true
MetricsFlag = false
BadgerFlag = false
AuditFlag = false
log.Infoln("apiserver starts")
connectToKube()
initializePerms()
err := Pgo.GetConfig(Clientset, PgoNamespace)
if err != nil {
log.Error(err)
log.Error("error in Pgo configuration")
os.Exit(2)
}
initConfig()
// look through all the pgouser secrets in the Operator's
// namespace and set a generated password for any that currently
// have an empty password set
setRandomPgouserPasswords()
if err := setNamespaceOperatingMode(); err != nil {
log.Error(err)
os.Exit(2)
}
_, err = ns.GetInitialNamespaceList(Clientset, NamespaceOperatingMode(),
InstallationName, PgoNamespace)
if err != nil {
log.Error(err)
os.Exit(2)
}
log.Infof("Namespace operating mode is '%s'", NamespaceOperatingMode())
}
func connectToKube() {
client, err := kubeapi.NewClient()
if err != nil {
panic(err)
}
Clientset = client
RESTConfig = client.Config
}
func initConfig() {
AuditFlag = Pgo.Pgo.Audit
if AuditFlag {
log.Info("audit flag is set to true")
}
MetricsFlag = Pgo.Cluster.Metrics
if MetricsFlag {
log.Info("metrics flag is set to true")
}
BadgerFlag = Pgo.Cluster.Badger
if BadgerFlag {
log.Info("badger flag is set to true")
}
tmp := Pgo.BasicAuth
if tmp == "" {
BasicAuth = true
} else {
var err error
BasicAuth, err = strconv.ParseBool(tmp)
if err != nil {
log.Error("BasicAuth config value is not valid")
os.Exit(2)
}
}
log.Infof("BasicAuth is %v", BasicAuth)
}
func BasicAuthCheck(username, password string) bool {
ctx := context.TODO()
if !BasicAuth {
return true
}
// see if there is a pgouser Secret for this username
secretName := "pgouser-" + username
secret, err := Clientset.CoreV1().Secrets(PgoNamespace).Get(ctx, secretName, metav1.GetOptions{})
if err != nil {
log.Errorf("could not get pgouser secret %s: %s", username, err.Error())
return false
}
return password == string(secret.Data["password"])
}
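// BasicAuthzCheck returns true if any of the user's pgorole permissions grant
// the requested permission (a solitary "*" grants every permission).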
func BasicAuthzCheck(username, perm string) bool {
ctx := context.TODO()
secretName := "pgouser-" + username
secret, err := Clientset.CoreV1().Secrets(PgoNamespace).Get(ctx, secretName, metav1.GetOptions{})
if err != nil {
log.Errorf("could not get pgouser secret %s: %s", username, err.Error())
return false
}
// get the roles for this user
rolesString := string(secret.Data["roles"])
roles := strings.Split(rolesString, ",")
if len(roles) == 0 {
log.Errorf("%s user has no roles ", username)
return false
}
// venture thru each role this user has looking for a perm match
for _, r := range roles {
// get the pgorole
roleSecretName := "pgorole-" + r
rolesecret, err := Clientset.CoreV1().Secrets(PgoNamespace).Get(ctx, roleSecretName, metav1.GetOptions{})
if err != nil {
log.Errorf("could not get pgorole secret %s: %s", r, err.Error())
return false
}
permsString := strings.TrimSpace(string(rolesecret.Data["permissions"]))
// first a special case. If this is a solitary "*" indicating that this
// encompasses every permission, then we can exit here as true
if permsString == "*" {
return true
}
// otherwise, blow up the permission string and see if the user has explicit
// permission (i.e. is authorized) to access this resource
perms := strings.Split(permsString, ",")
for _, p := range perms {
pp := strings.TrimSpace(p)
if pp == perm {
log.Debugf("%s perm found in role %s for username %s", pp, r, username)
return true
}
}
}
return false
}
// GetNamespace determines if a user has permission for
// a namespace they are requesting
// a valid requested namespace is required
func GetNamespace(clientset kubernetes.Interface, username, requestedNS string) (string, error) {
log.Debugf("GetNamespace username [%s] ns [%s]", username, requestedNS)
if requestedNS == "" {
return requestedNS, errors.New("empty namespace is not valid from pgo clients")
}
iAccess, uAccess, err := UserIsPermittedInNamespace(username, requestedNS)
if err != nil {
return requestedNS, fmt.Errorf("Error when determining whether user [%s] is allowed access to "+
"namespace [%s]: %s", username, requestedNS, err.Error())
}
if !iAccess {
errMsg := fmt.Sprintf("namespace [%s] is not part of the Operator installation", requestedNS)
return requestedNS, errors.New(errMsg)
}
if !uAccess {
errMsg := fmt.Sprintf("user [%s] is not allowed access to namespace [%s]", username, requestedNS)
return requestedNS, errors.New(errMsg)
}
return requestedNS, nil
}
// Authn performs HTTP Basic Authentication against a user if "BasicAuth" is set
// to "true" (which it is by default).
//
// ...it also performs Authorization (Authz) against the user that is attempting
// to authenticate, and as such, to truly "authenticate/authorize," one needs
// at least a valid Operator User account.
func Authn(perm string, w http.ResponseWriter, r *http.Request) (string, error) {
var err error
w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
// Need to run the HTTP library `BasicAuth` even if `BasicAuth == false`, as
// this function currently encapsulates authorization as well, and this is
// the call where we get the username to check the RBAC settings
username, password, authOK := r.BasicAuth()
if AuditFlag {
log.Infof("[audit] %s username=[%s] method=[%s] ip=[%s] ok=[%t] ", perm, username, r.Method, r.RemoteAddr, authOK)
}
// Check to see if this user is authenticated
// If BasicAuth is "disabled", skip the authentication; o/w/ check if the
// authentication passed
if !BasicAuth {
log.Debugf("BasicAuth disabled, Skipping Authentication %s username=[%s]", perm, username)
} else {
log.Debugf("Authentication Attempt %s username=[%s]", perm, username)
if !authOK {
http.Error(w, "Not Authorized. Basic Authentication credentials must be provided according to RFC 7617, Section 2.", 401)
return "", errors.New("Not Authorized: Credentials do not comply with RFC 7617")
}
}
if !BasicAuthCheck(username, password) {
log.Errorf("Authentication Failed %s username=[%s]", perm, username)
http.Error(w, "Not authenticated in apiserver", 401)
return "", errors.New("Not Authenticated")
}
if !BasicAuthzCheck(username, perm) {
log.Errorf("Authorization Failed %s username=[%s]", perm, username)
http.Error(w, "Not authorized for this apiserver action", 403)
return "", errors.New("Not authorized for this apiserver action")
}
log.Debug("Authentication Success")
return username, err
}
func IsValidStorageName(name string) bool {
_, ok := Pgo.Storage[name]
return ok
}
// ValidateNodeLabel
// returns error if node label is invalid based on format
func ValidateNodeLabel(nodeLabel string) error {
parts := strings.Split(nodeLabel, "=")
if len(parts) != 2 {
return errors.New(nodeLabel + " node label does not follow key=value format")
}
return nil
}
// UserIsPermittedInNamespace returns installation access and user access.
// Installation access means a namespace belongs to this Operator installation.
// User access means this user has access to a namespace.
func UserIsPermittedInNamespace(username, requestedNS string) (bool, bool, error) {
ctx := context.TODO()
var iAccess, uAccess bool
if err := ns.ValidateNamespacesWatched(Clientset, NamespaceOperatingMode(), InstallationName,
requestedNS); err != nil {
if !errors.Is(err, ns.ErrNamespaceNotWatched) {
return false, false, err
}
} else {
iAccess = true
}
if iAccess {
// get the pgouser Secret for this username
userSecretName := "pgouser-" + username
userSecret, err := Clientset.CoreV1().Secrets(PgoNamespace).Get(ctx, userSecretName, metav1.GetOptions{})
if err != nil {
log.Errorf("could not get pgouser secret %s: %s", username, err.Error())
return false, false, err
}
// handle the case of a user in pgouser with "" (all) namespaces, otherwise check the
// namespaces config in the user secret
nsstring := string(userSecret.Data["namespaces"])
if nsstring == "" {
uAccess = true
} else {
nsList := strings.Split(nsstring, ",")
for _, v := range nsList {
ns := strings.TrimSpace(v)
if ns == requestedNS {
uAccess = true
}
}
}
}
return iAccess, uAccess, nil
}
// WriteTLSCert is a legacy method that writes the server certificate and key to
// files from the PGOSecretName secret or generates a new key (writing to both
// the secret and the expected files)
func WriteTLSCert(certPath, keyPath string) error {
ctx := context.TODO()
pgoSecret, err := Clientset.CoreV1().Secrets(PgoNamespace).Get(ctx, PGOSecretName, metav1.GetOptions{})
// if the TLS certificate secret is not found, attempt to generate one
if err != nil {
log.Infof("%s Secret NOT found in namespace %s", PGOSecretName, PgoNamespace)
if err := generateTLSCert(certPath, keyPath); err != nil {
log.Error("error generating pgo.tls Secret")
return err
}
return nil
}
	// otherwise, write the TLS certificate to the certificate and key path
log.Infof("%s Secret found in namespace %s", PGOSecretName, PgoNamespace)
log.Infof("cert key data len is %d", len(pgoSecret.Data[corev1.TLSCertKey]))
if err := ioutil.WriteFile(certPath, pgoSecret.Data[corev1.TLSCertKey], 0o600); err != nil {
return err
}
log.Infof("private key data len is %d", len(pgoSecret.Data[corev1.TLSPrivateKeyKey]))
if err := ioutil.WriteFile(keyPath, pgoSecret.Data[corev1.TLSPrivateKeyKey], 0o600); err != nil {
return err
}
return nil
}
// generateTLSCert generates a self-signed cert and stores it in both
// the PGOSecretName Secret and certPath, keyPath files
func generateTLSCert(certPath, keyPath string) error {
ctx := context.TODO()
var err error
// generate private key
var privateKey *ecdsa.PrivateKey
privateKey, err = tlsutil.NewPrivateKey()
if err != nil {
fmt.Println(err.Error())
os.Exit(2)
}
privateKeyBytes := tlsutil.EncodePrivateKeyPEM(privateKey)
log.Debugf("generated privateKeyBytes len %d", len(privateKeyBytes))
var caCert *x509.Certificate
caCert, err = tlsutil.NewSelfSignedCACertificate(privateKey)
if err != nil {
fmt.Println(err.Error())
os.Exit(2)
}
caCertBytes := tlsutil.EncodeCertificatePEM(caCert)
log.Debugf("generated caCertBytes len %d", len(caCertBytes))
// CreateSecret
newSecret := corev1.Secret{}
newSecret.Name = PGOSecretName
newSecret.ObjectMeta.Labels = make(map[string]string)
newSecret.ObjectMeta.Labels[config.LABEL_VENDOR] = "crunchydata"
newSecret.Data = make(map[string][]byte)
newSecret.Data[corev1.TLSCertKey] = caCertBytes
newSecret.Data[corev1.TLSPrivateKeyKey] = privateKeyBytes
newSecret.Type = corev1.SecretTypeTLS
_, err = Clientset.CoreV1().Secrets(PgoNamespace).Create(ctx, &newSecret, metav1.CreateOptions{})
if err != nil {
fmt.Println(err.Error())
os.Exit(2)
}
if err := ioutil.WriteFile(certPath, newSecret.Data[corev1.TLSCertKey], 0o600); err != nil {
return err
}
if err := ioutil.WriteFile(keyPath, newSecret.Data[corev1.TLSPrivateKeyKey], 0o600); err != nil {
return err
}
return err
}
// setNamespaceOperatingMode set the namespace operating mode for the Operator by calling the
// proper utility function to determine which mode is applicable based on the current
// permissions assigned to the Operator Service Account.
func setNamespaceOperatingMode() error {
nsOpMode, err := ns.GetNamespaceOperatingMode(Clientset)
if err != nil {
return err
}
namespaceOperatingMode = nsOpMode
return nil
}
// setRandomPgouserPasswords looks through the pgouser secrets in the Operator's
// namespace. If any have an empty password, it generates a random password,
// Base64 encodes it, then stores it in the relevant PGO user's secret
func setRandomPgouserPasswords() {
ctx := context.TODO()
selector := "pgo-pgouser=true,vendor=crunchydata"
secrets, err := Clientset.CoreV1().Secrets(PgoNamespace).
List(ctx, metav1.ListOptions{LabelSelector: selector})
if err != nil {
log.Warnf("Could not get pgouser secrets in namespace: %s", PgoNamespace)
return
}
for _, secret := range secrets.Items {
// check if password is set. if it is, continue.
if len(secret.Data["password"]) > 0 {
continue
}
log.Infof("Password in pgouser secret %s for operator installation %s in namespace %s is empty. "+
"Setting a generated password.", secret.Name, InstallationName, PgoNamespace)
// generate the password using the default password length
generatedPassword, err := util.GeneratePassword(util.DefaultGeneratedPasswordLength)
if err != nil {
log.Errorf("Could not generate password for pgouser secret %s for operator installation %s in "+
"namespace %s", secret.Name, InstallationName, PgoNamespace)
continue
}
// create the password patch
patch, err := kubeapi.NewMergePatch().Add("stringData", "password")(generatedPassword).Bytes()
if err != nil {
log.Errorf("Could not generate password patch for pgouser secret %s for operator installation "+
"%s in namespace %s", secret.Name, InstallationName, PgoNamespace)
continue
}
// patch the pgouser secret with the new password
if _, err := Clientset.CoreV1().Secrets(PgoNamespace).Patch(ctx, secret.Name, types.MergePatchType,
patch, metav1.PatchOptions{}); err != nil {
log.Errorf("Could not patch pgouser secret %s with generated password for operator installation "+
"%s in namespace %s", secret.Name, InstallationName, PgoNamespace)
}
}
}
// NamespaceOperatingMode returns the namespace operating mode for the current Operator
// installation, which is stored in the "namespaceOperatingMode" variable
func NamespaceOperatingMode() ns.NamespaceOperatingMode {
return namespaceOperatingMode
}
| [
"\"PGO_OPERATOR_NAMESPACE\"",
"\"PGO_INSTALLATION_NAME\"",
"\"CRUNCHY_DEBUG\""
] | [] | [
"PGO_OPERATOR_NAMESPACE",
"PGO_INSTALLATION_NAME",
"CRUNCHY_DEBUG"
] | [] | ["PGO_OPERATOR_NAMESPACE", "PGO_INSTALLATION_NAME", "CRUNCHY_DEBUG"] | go | 3 | 0 | |
ML/DT/python/DTree_sklearn_titanic.py |
# coding: utf-8
# In[13]:
import sklearn_dt_utils as utils
from sklearn.tree import export_graphviz
import os
# In[14]:
q_src_dir = os.getenv('Q_SRC_ROOT')
if not q_src_dir:
print("'Q_SRC_ROOT' is not set")
exit(-1)
csv_file_path = "%s/ML/KNN/data/titanic/titanic_train.csv" % q_src_dir
csv_file_path = "/root/WORK/Q/ML/KNN/data/titanic/titanic_train.csv"
graphviz_gini = "graphviz_gini.txt"
graphviz_entropy = "graphviz_entropy.txt"
goal_col_name = "Survived"
split_ratio = 0.5
# In[15]:
print("Dataset shape")
data = utils.import_data(csv_file_path)
# In[16]:
X, Y, X_train, X_test, y_train, y_test = utils.split_dataset(data, goal_col_name, split_ratio)
# In[17]:
#print(len(X.columns))
# In[18]:
#print(len(data.columns))
# In[19]:
# cross validation
utils.cross_validate_dt_new(X, Y)
# In[20]:
# cross validation
# utils.cross_validate_dt(X, Y)
# In[21]:
# Train using gini
clf_gini = utils.train_using_gini(X_train, y_train)
# print(X_train[1])
export_graphviz(clf_gini, out_file=graphviz_gini, filled=True, rounded=True, special_characters=True, feature_names=X_train.columns)
# In[22]:
# Prediction using gini
y_pred_gini = utils.prediction(X_test, clf_gini)
print("Results for gini algo")
utils.cal_accuracy(y_test, y_pred_gini)
# In[23]:
# Train using entropy
clf_entropy = utils.tarin_using_entropy(X_train, y_train)
export_graphviz(clf_entropy, out_file=graphviz_entropy, filled=True, rounded=True, special_characters=True, feature_names=X_train.columns)
# In[24]:
# Prediction using entropy
y_pred_entropy = utils.prediction(X_test, clf_entropy)
print("Results for entropy algo")
utils.cal_accuracy(y_test, y_pred_entropy)
| [] | [] | [
"Q_SRC_ROOT"
] | [] | ["Q_SRC_ROOT"] | python | 1 | 0 | |
park_api/crossdomain.py | from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
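    """Decorator factory that adds CORS (Access-Control-*) headers to the responses of the wrapped Flask view."""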
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
| [] | [] | [] | [] | [] | python | null | null | null |
pygraphblas/demo/dnn.py | import os
from functools import wraps, partial
from time import time
from statistics import mean
from pathlib import Path
from pygraphblas import *
from multiprocessing.pool import ThreadPool
from multiprocessing import cpu_count
NFEATURES = 60000
BIAS = {1024: -0.3, 4096: -0.35, 16384: -0.4, 65536: -0.45}
def timing(f):
@wraps(f)
def wrap(*args, **kw):
ts = time()
result = f(*args, **kw)
te = time()
print('func:%r took: %2.4f' % (f.__name__, te-ts))
return result
return wrap
@timing
def dnn(W, B, Y):
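    """Sparse DNN forward pass: for each layer, Y @ w, bias via the plus_plus semiring, ReLU, values clipped at 32."""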
for w, b in zip(W, B):
Y = Y @ w
with plus_plus:
Y = Y @ b
Y = Y.select('>0')
M = Y.select('>', 32)
if len(M):
Y[M] = 32
return Y
@timing
def dnn2(W, B, Y):
for w, b in zip(W, B):
Y = Y.mxm(w, out=Y)
with plus_plus:
Y = Y.mxm(b, out=Y)
Y.select('>0', out=Y)
M = Y.select('>', 32)
if len(M):
Y[M] = 32
return Y
@timing
def load_images(neurons, dest):
fname = '{}/sparse-images-{}.{}'
binfile = fname.format(dest, neurons, 'ssb')
if Path(binfile).exists():
return Matrix.from_binfile(binfile.encode('ascii'))
images = Path(fname.format(dest, neurons, 'tsv'))
with images.open() as i:
m = Matrix.from_tsv(i, FP32, NFEATURES, neurons)
m.to_binfile(binfile.encode('ascii'))
return m
def load_categories(neurons, nlayers, dest):
fname = '{}/neuron{}-l{}-categories.tsv'
cats = Path(fname.format(dest, neurons, nlayers))
result = Vector.from_type(BOOL, NFEATURES)
with cats.open() as i:
for line in i.readlines():
result[int(line.strip())-1] = True
return result
def load_layer(i, dest):
fname = '{}/neuron{}/n{}-l{}.{}'
binfile = fname.format(dest, neurons, neurons, str(i+1), 'ssb')
if Path(binfile).exists():
return Matrix.from_binfile(binfile.encode('ascii'))
l = Path(fname.format(dest, neurons, neurons, str(i+1), 'tsv'))
with l.open() as f:
m = Matrix.from_tsv(f, FP32, neurons, neurons)
m.to_binfile(binfile.encode('ascii'))
return m
@timing
def generate_layers(neurons, nlayers, dest):
neurons = Path('{}/neuron{}'.format(dest, neurons))
with ThreadPool(cpu_count()) as pool:
return pool.map(partial(load_layer, dest=dest), range(nlayers))
@timing
def generate_bias(neurons, nlayers):
result = []
for i in range(nlayers):
bias = Matrix.from_type(FP32, neurons, neurons)
for i in range(neurons):
bias[i,i] = BIAS[neurons]
bias.nvals # causes async completion
result.append(bias)
return result
@timing
def run(neurons, images, layers, bias, dest):
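    # Run inference, reduce the result to per-image categories and check them
    # against the ground-truth categories file.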
result = dnn2(layers,
bias,
images)
r = result.reduce_vector()
cats = r.apply(lib.GxB_ONE_BOOL, out=Vector.from_type(BOOL, r.size))
truecats = load_categories(neurons, nlayers, dest)
assert cats == truecats
num_neurons = [1024, 4096, 16384, 65536]
num_layers = [120, 480, 1920]
if __name__ == '__main__':
dest = os.getenv('DEST')
neurons = os.getenv('NEURONS')
nlayers = os.getenv('NLAYERS')
if neurons and nlayers:
neurons = int(neurons)
nlayers = int(nlayers)
images = load_images(neurons, dest)
layers = generate_layers(neurons, nlayers, dest)
bias = generate_bias(neurons, nlayers)
run(neurons, images, layers, bias, dest)
else:
for neurons in num_neurons:
print('Building layers for %s neurons' % neurons)
layers = generate_layers(neurons, 1920, dest)
bias = generate_bias(neurons, 1920)
images = load_images(neurons, dest)
for nlayers in num_layers:
print('Benching %s neurons %s layers' % (neurons, nlayers))
run(neurons, images, layers[:nlayers], bias[:nlayers], dest)
| [] | [] | [
"NEURONS",
"NLAYERS",
"DEST"
] | [] | ["NEURONS", "NLAYERS", "DEST"] | python | 3 | 0 | |
pkg/operator/starter.go | package operator
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/eparis/bugzilla"
"github.com/openshift/library-go/pkg/controller/factory"
slackgo "github.com/slack-go/slack"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog"
"github.com/mfojtik/bugzilla-operator/pkg/cache"
"github.com/mfojtik/bugzilla-operator/pkg/operator/closecontroller"
"github.com/mfojtik/bugzilla-operator/pkg/operator/config"
"github.com/mfojtik/bugzilla-operator/pkg/operator/controller"
"github.com/mfojtik/bugzilla-operator/pkg/operator/firstteamcommentcontroller"
"github.com/mfojtik/bugzilla-operator/pkg/operator/newcontroller"
"github.com/mfojtik/bugzilla-operator/pkg/operator/reporters/blockers"
"github.com/mfojtik/bugzilla-operator/pkg/operator/reporters/closed"
"github.com/mfojtik/bugzilla-operator/pkg/operator/reporters/incoming"
"github.com/mfojtik/bugzilla-operator/pkg/operator/reporters/upcomingsprint"
"github.com/mfojtik/bugzilla-operator/pkg/operator/resetcontroller"
"github.com/mfojtik/bugzilla-operator/pkg/operator/stalecontroller"
"github.com/mfojtik/bugzilla-operator/pkg/slack"
"github.com/mfojtik/bugzilla-operator/pkg/slacker"
)
const bugzillaEndpoint = "https://bugzilla.redhat.com"
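// Run wires up the Slack and Bugzilla clients, registers the slacker commands,
// starts all enabled controllers and scheduled reports, and blocks until ctx is done.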
func Run(ctx context.Context, cfg config.OperatorConfig) error {
if len(cfg.CachePath) > 0 {
cache.Open(cfg.CachePath)
}
defer cache.Close()
slackClient := slackgo.New(cfg.Credentials.DecodedSlackToken(), slackgo.OptionDebug(true))
// This slack client is used for debugging
slackDebugClient := slack.NewChannelClient(slackClient, cfg.SlackAdminChannel, cfg.SlackAdminChannel, true)
// This slack client posts only to the admin channel
slackAdminClient := slack.NewChannelClient(slackClient, cfg.SlackAdminChannel, cfg.SlackAdminChannel, false)
recorder := slack.NewRecorder(slackAdminClient, "BugzillaOperator")
slackerInstance := slacker.NewSlacker(slackClient, slacker.Options{
ListenAddress: "0.0.0.0:3000",
VerificationToken: cfg.Credentials.DecodedSlackVerificationToken(),
})
slackerInstance.Command("say <message>", &slacker.CommandDefinition{
Description: "Say something.",
Handler: func(req slacker.Request, w slacker.ResponseWriter) {
msg := req.StringParam("message", "")
w.Reply(msg)
},
})
slackerInstance.DefaultCommand(func(req slacker.Request, w slacker.ResponseWriter) {
w.Reply("Unknown command")
})
recorder.Eventf("OperatorStarted", "Bugzilla Operator Started\n\n```\n%s\n```\n", spew.Sdump(cfg.Anonymize()))
kubeConfig, err := rest.InClusterConfig()
if err != nil {
return err
}
kubeClient, err := kubernetes.NewForConfig(kubeConfig)
if err != nil {
return err
}
cmClient := kubeClient.CoreV1().ConfigMaps(os.Getenv("POD_NAMESPACE"))
controllerContext := controller.NewControllerContext(newBugzillaClient(&cfg, slackDebugClient), slackAdminClient, slackDebugClient, cmClient)
controllers := map[string]factory.Controller{
"stale": stalecontroller.NewStaleController(controllerContext, cfg, recorder),
"stale-reset": resetcontroller.NewResetStaleController(controllerContext, cfg, recorder),
"close-stale": closecontroller.NewCloseStaleController(controllerContext, cfg, recorder),
"first-team-comment": firstteamcommentcontroller.NewFirstTeamCommentController(controllerContext, cfg, recorder),
"new": newcontroller.NewNewBugController(controllerContext, cfg, recorder),
}
// TODO: enable by default
cfg.DisabledControllers = append(cfg.DisabledControllers, "NewBugController")
var scheduledReports []factory.Controller
reportNames := sets.NewString()
newReport := func(name string, ctx controller.ControllerContext, components, when []string) factory.Controller {
switch name {
case "blocker-bugs":
return blockers.NewBlockersReporter(ctx, components, when, cfg, recorder)
case "incoming-bugs":
return incoming.NewIncomingReporter(ctx, when, cfg, recorder)
case "closed-bugs":
return closed.NewClosedReporter(ctx, components, when, cfg, recorder)
case "upcoming-sprint":
return upcomingsprint.NewUpcomingSprintReporter(controllerContext, components, when, cfg, recorder)
default:
return nil
}
}
for _, ar := range cfg.Schedules {
slackChannelClient := slack.NewChannelClient(slackClient, ar.SlackChannel, cfg.SlackAdminChannel, false)
reporterContext := controller.NewControllerContext(newBugzillaClient(&cfg, slackDebugClient), slackChannelClient, slackDebugClient, cmClient)
for _, r := range ar.Reports {
if c := newReport(r, reporterContext, ar.Components, ar.When); c != nil {
scheduledReports = append(scheduledReports, c)
reportNames.Insert(r)
}
}
}
debugReportControllers := map[string]factory.Controller{}
for _, r := range reportNames.List() {
debugReportControllers[r] = newReport(r, controllerContext, cfg.Components.List(), nil)
}
controllerNames := sets.NewString()
for n := range controllers {
controllerNames.Insert(n)
}
// allow to manually trigger a controller to run out of its normal schedule
runJob := func(debug bool) func(req slacker.Request, w slacker.ResponseWriter) {
return func(req slacker.Request, w slacker.ResponseWriter) {
job := req.StringParam("job", "")
c, ok := controllers[job]
if !ok {
if !debug {
w.Reply(fmt.Sprintf("Unknown job %q", job))
return
}
if c, ok = debugReportControllers[job]; !ok {
w.Reply(fmt.Sprintf("Unknown job %q", job))
return
}
}
ctx := ctx // shadow global ctx
if debug {
ctx = context.WithValue(ctx, "debug", debug)
}
startTime := time.Now()
_, _, _, err := w.Client().SendMessage(req.Event().Channel,
slackgo.MsgOptionPostEphemeral(req.Event().User),
slackgo.MsgOptionText(fmt.Sprintf("Triggering job %q", job), false))
if err != nil {
klog.Error(err)
}
if err := c.Sync(ctx, factory.NewSyncContext(job, recorder)); err != nil {
recorder.Warningf("ReportError", "Job reported error: %v", err)
return
}
_, _, _, err = w.Client().SendMessage(req.Event().Channel,
slackgo.MsgOptionPostEphemeral(req.Event().User),
slackgo.MsgOptionText(fmt.Sprintf("Finished job %q after %v", job, time.Since(startTime)), false))
if err != nil {
klog.Error(err)
}
}
}
slackerInstance.Command("admin trigger <job>", &slacker.CommandDefinition{
Description: fmt.Sprintf("Trigger a job to run: %s", strings.Join(controllerNames.List(), ", ")),
Handler: auth(cfg, runJob(false), "group:admins"),
})
slackerInstance.Command("admin debug <job>", &slacker.CommandDefinition{
Description: fmt.Sprintf("Trigger a job to run in debug mode: %s", strings.Join(append(controllerNames.List(), reportNames.List()...), ", ")),
Handler: auth(cfg, runJob(true), "group:admins"),
})
slackerInstance.Command("report <job>", &slacker.CommandDefinition{
Description: fmt.Sprintf("Run a report and print result here: %s", strings.Join(reportNames.List(), ", ")),
Handler: func(req slacker.Request, w slacker.ResponseWriter) {
job := req.StringParam("job", "")
reports := map[string]func(ctx context.Context, client cache.BugzillaClient) (string, error){
"blocker-bugs": func(ctx context.Context, client cache.BugzillaClient) (string, error) {
// TODO: restrict components to one team
report, _, err := blockers.Report(ctx, client, recorder, &cfg, cfg.Components.List())
return report, err
},
"closed-bugs": func(ctx context.Context, client cache.BugzillaClient) (string, error) {
// TODO: restrict components to one team
return closed.Report(ctx, client, recorder, &cfg, cfg.Components.List())
},
"incoming-bugs": func(ctx context.Context, client cache.BugzillaClient) (string, error) {
// TODO: restrict components to one team
report, _, err := incoming.Report(ctx, client, recorder, &cfg)
return report, err
},
"upcoming-sprint": func(ctx context.Context, client cache.BugzillaClient) (string, error) {
// TODO: restrict components to one team
return upcomingsprint.Report(ctx, client, recorder, &cfg, cfg.Components.List())
},
// don't forget to also add new reports above in the trigger command
}
report, ok := reports[job]
if !ok {
w.Reply(fmt.Sprintf("Unknown report %q", job))
return
}
_, _, _, err := w.Client().SendMessage(req.Event().Channel,
slackgo.MsgOptionPostEphemeral(req.Event().User),
slackgo.MsgOptionText(fmt.Sprintf("Running job %q. This might take some seconds.", job), false))
if err != nil {
klog.Error(err)
}
reply, err := report(context.TODO(), newBugzillaClient(&cfg, slackDebugClient)(true)) // report should never write anything to BZ
if err != nil {
_, _, _, err := w.Client().SendMessage(req.Event().Channel,
slackgo.MsgOptionPostEphemeral(req.Event().User),
slackgo.MsgOptionText(fmt.Sprintf("Error running report %v: %v", job, err), false))
if err != nil {
klog.Error(err)
}
} else {
w.Reply(reply)
}
},
})
seen := []string{}
disabled := sets.NewString(cfg.DisabledControllers...)
var all []factory.Controller
for _, c := range controllers {
all = append(all, c)
}
for _, c := range append(all, scheduledReports...) {
seen = append(seen, c.Name())
if disabled.Has(c.Name()) {
continue
}
go c.Run(ctx, 1)
}
go slackerInstance.Run(ctx)
// sanity check list of disabled controllers
unknown := disabled.Difference(sets.NewString(seen...))
if unknown.Len() > 0 {
msg := fmt.Sprintf("Unknown disabled controllers in config: %v", unknown.List())
klog.Warning(msg)
slackAdminClient.MessageAdminChannel(msg)
}
<-ctx.Done()
return nil
}
func newBugzillaClient(cfg *config.OperatorConfig, slackDebugClient slack.ChannelClient) func(debug bool) cache.BugzillaClient {
return func(debug bool) cache.BugzillaClient {
c := cache.NewCachedBugzillaClient(bugzilla.NewClient(func() []byte {
return []byte(cfg.Credentials.DecodedAPIKey())
}, bugzillaEndpoint).WithCGIClient(cfg.Credentials.DecodedUsername(), cfg.Credentials.DecodedPassword()))
if debug {
return &loggingReadOnlyClient{delegate: c, slackLoggingClient: slackDebugClient}
}
return c
}
}
| [
"\"POD_NAMESPACE\""
] | [] | [
"POD_NAMESPACE"
] | [] | ["POD_NAMESPACE"] | go | 1 | 0 | |
silent.go | // +build silent
package main
import (
"io/ioutil"
"log"
"os"
)
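// init discards all log output unless GOCHATBOT_VERBOSE is set to "1".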
func init() {
verbose := os.Getenv("GOCHATBOT_VERBOSE")
if verbose != "1" {
log.SetOutput(ioutil.Discard)
}
}
| [
"\"GOCHATBOT_VERBOSE\""
] | [] | [
"GOCHATBOT_VERBOSE"
] | [] | ["GOCHATBOT_VERBOSE"] | go | 1 | 0 | |
server.go | package main
import (
"log"
"os"
"github.com/ansrivas/fiberprometheus/v2"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/limiter"
"github.com/gofiber/fiber/v2/middleware/logger"
"github.com/gofiber/fiber/v2/middleware/requestid"
"github.com/gofiber/template/html"
)
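// initServer builds the Fiber app with the HTML view engine, Prometheus metrics,
// static file serving and the default middleware and routes.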
func initServer() *fiber.App {
engine := html.New("./views", ".html")
app := fiber.New(fiber.Config{
Views: engine,
})
// This here will appear as a label, one can also use
// fiberprometheus.NewWith(servicename, namespace, subsystem )
prometheus := fiberprometheus.New("hello-world-service")
prometheus.RegisterAt(app, "/metrics")
app.Use(prometheus.Middleware)
app.Static("/static", "./public")
app.Use(logger.New())
app.Use(limiter.New())
app.Use(requestid.New())
app.Get("/text", func(c *fiber.Ctx) error {
return c.SendString("Hello, World!")
})
app.Get("/", func(c *fiber.Ctx) error {
return c.Render("index", fiber.Map{
"Title": "Hello World",
})
})
app.Use(func(c *fiber.Ctx) error {
return c.Status(fiber.StatusNotFound).SendString("Sorry can't find that!")
})
return app
}
func main() {
port := os.Getenv("PORT")
if os.Getenv("PORT") == "" {
port = "3000"
}
app := initServer()
log.Fatal(app.Listen(":" + port))
}
| [
"\"PORT\"",
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
testing/scripts/run_android_wpt.py | #!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Web Platform Tests (WPT) on Android browsers.
This script supports running tests on the Chromium Waterfall by mapping isolated
script flags to WPT flags.
It is also useful for local reproduction by performing APK installation and
configuring the browser to resolve test hosts. Be sure to invoke this
executable directly rather than using python run_android_wpt.py so that
WPT dependencies in Chromium vpython are found.
If you need more advanced test control, please use the runner located at
//third_party/blink/web_tests/external/wpt/wpt.
Here's the mapping [isolate script flag] : [wpt flag]
--isolated-script-test-output : --log-chromium
--total-shards : --total-chunks
--shard-index : --this-chunk
"""
# TODO(aluo): Combine or factor out commons parts with run_wpt_tests.py script.
import argparse
import contextlib
import json
import logging
import os
import shutil
import sys
import common
import wpt_common
logger = logging.getLogger(__name__)
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
BUILD_ANDROID = os.path.join(SRC_DIR, 'build', 'android')
BLINK_TOOLS_DIR = os.path.join(
SRC_DIR, 'third_party', 'blink', 'tools')
CATAPULT_DIR = os.path.join(SRC_DIR, 'third_party', 'catapult')
DEFAULT_WPT = os.path.join(wpt_common.WEB_TESTS_DIR, 'external', 'wpt', 'wpt')
PYUTILS = os.path.join(CATAPULT_DIR, 'common', 'py_utils')
if PYUTILS not in sys.path:
sys.path.append(PYUTILS)
if BLINK_TOOLS_DIR not in sys.path:
sys.path.append(BLINK_TOOLS_DIR)
if BUILD_ANDROID not in sys.path:
sys.path.append(BUILD_ANDROID)
import devil_chromium
from blinkpy.web_tests.port.android import (
PRODUCTS, PRODUCTS_TO_EXPECTATION_FILE_PATHS, ANDROID_WEBLAYER,
ANDROID_WEBVIEW, CHROME_ANDROID, ANDROID_DISABLED_TESTS)
from devil import devil_env
from devil.android import apk_helper
from devil.android import device_utils
from devil.android.tools import system_app
from devil.android.tools import webview_app
from py_utils.tempfile_ext import NamedTemporaryDirectory
class PassThroughArgs(argparse.Action):
pass_through_args = []
def __call__(self, parser, namespace, values, option_string=None):
if option_string:
if self.nargs == 0:
self.add_unique_pass_through_arg(option_string)
elif self.nargs is None:
self.add_unique_pass_through_arg('{}={}'.format(option_string, values))
else:
raise ValueError("nargs {} not supported: {} {}".format(
self.nargs, option_string, values))
@classmethod
def add_unique_pass_through_arg(cls, arg):
if arg not in cls.pass_through_args:
cls.pass_through_args.append(arg)
def _get_adapter(device):
usage = '%(prog)s --product={' + ','.join(PRODUCTS) + '} ...'
product_parser = argparse.ArgumentParser(
add_help=False, prog='run_android_wpt.py', usage=usage)
product_parser.add_argument(
'--product', action='store', required=True, choices=PRODUCTS)
options, _ = product_parser.parse_known_args()
product = options.product
if product == ANDROID_WEBLAYER:
return WPTWeblayerAdapter(device)
elif product == ANDROID_WEBVIEW:
return WPTWebviewAdapter(device)
else:
return WPTClankAdapter(device)
class WPTAndroidAdapter(wpt_common.BaseWptScriptAdapter):
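  """Shared adapter for running WPT against the Android WebLayer, WebView and Chrome (Clank) products."""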
def __init__(self, device):
self.pass_through_wpt_args = []
self.pass_through_binary_args = []
self._metadata_dir = None
self._device = device
super(WPTAndroidAdapter, self).__init__()
    # Arguments from add_extra_arguments were added, so
    # it's safe to parse the arguments and set self._options
self.parse_args()
@property
def rest_args(self):
rest_args = super(WPTAndroidAdapter, self).rest_args
# Here we add all of the arguments required to run WPT tests on Android.
rest_args.extend([self.options.wpt_path])
# vpython has packages needed by wpt, so force it to skip the setup
rest_args.extend(["--venv=../../", "--skip-venv-setup"])
rest_args.extend(["run",
"--test-type=" + self.options.test_type,
"--webdriver-binary",
self.options.webdriver_binary,
"--headless",
"--no-pause-after-test",
"--no-capture-stdio",
"--no-manifest-download",
])
# if metadata was created then add the metadata directory
# to the list of wpt arguments
if self._metadata_dir:
rest_args.extend(['--metadata', self._metadata_dir])
if self.options.verbose >= 3:
rest_args.extend(["--log-mach=-", "--log-mach-level=debug",
"--log-mach-verbose"])
if self.options.verbose >= 4:
rest_args.extend(['--webdriver-arg=--verbose',
'--webdriver-arg="--log-path=-"'])
rest_args.extend(self.pass_through_wpt_args)
return rest_args
def _extra_metadata_builder_args(self):
raise NotImplementedError
def _maybe_build_metadata(self):
metadata_builder_cmd = [
sys.executable,
os.path.join(wpt_common.BLINK_TOOLS_DIR, 'build_wpt_metadata.py'),
'--android-product',
self.options.product,
'--ignore-default-expectations',
'--metadata-output-dir',
self._metadata_dir,
'--additional-expectations',
ANDROID_DISABLED_TESTS,
]
metadata_builder_cmd.extend(self._extra_metadata_builder_args())
return common.run_command(metadata_builder_cmd)
def run_test(self):
with NamedTemporaryDirectory() as self._metadata_dir, self._install_apks():
metadata_command_ret = self._maybe_build_metadata()
if metadata_command_ret != 0:
return metadata_command_ret
return super(WPTAndroidAdapter, self).run_test()
def _install_apks(self):
raise NotImplementedError
def clean_up_after_test_run(self):
# Avoid having a dangling reference to the temp directory
# which was deleted
self._metadata_dir = None
def add_extra_arguments(self, parser):
    # TODO: |pass_through_args| are broken and need to be supplied by way of
    # "--binary-arg".
class BinaryPassThroughArgs(PassThroughArgs):
pass_through_args = self.pass_through_binary_args
class WPTPassThroughArgs(PassThroughArgs):
pass_through_args = self.pass_through_wpt_args
# Add this so that product argument does not go in self._rest_args
# when self.parse_args() is called
parser.add_argument('--product', help=argparse.SUPPRESS)
parser.add_argument('--webdriver-binary', required=True,
help='Path of the webdriver binary. It needs to have'
' the same major version as the apk.')
parser.add_argument('--wpt-path', default=DEFAULT_WPT,
help='Controls the path of the WPT runner to use'
                        ' (therefore tests). Defaults to the revision rolled into'
' Chromium.')
parser.add_argument('--test-type', default='testharness',
help='Specify to experiment with other test types.'
' Currently only the default is expected to work.')
parser.add_argument('--verbose', '-v', action='count',
help='Verbosity level.')
parser.add_argument('--include', metavar='TEST_OR_DIR',
action=WPTPassThroughArgs,
help='Test(s) to run, defaults to run all tests.')
parser.add_argument('--list-tests', action=WPTPassThroughArgs, nargs=0,
help="Don't run any tests, just print out a list of"
' tests that would be run.')
parser.add_argument('--webdriver-arg', action=WPTPassThroughArgs,
help='WebDriver args.')
parser.add_argument('--log-wptreport', metavar='WPT_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log wptreport with subtest details.")
parser.add_argument('--log-raw', metavar='RAW_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log raw report.")
parser.add_argument('--log-html', metavar='HTML_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log html report.")
parser.add_argument('--log-xunit', metavar='XUNIT_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log xunit report.")
parser.add_argument('--enable-features', action=BinaryPassThroughArgs,
help='Chromium features to enable during testing.')
parser.add_argument('--disable-features', action=BinaryPassThroughArgs,
help='Chromium features to disable during testing.')
parser.add_argument('--disable-field-trial-config',
action=BinaryPassThroughArgs,
help='Disable test trials for Chromium features.')
parser.add_argument('--force-fieldtrials', action=BinaryPassThroughArgs,
help='Force trials for Chromium features.')
parser.add_argument('--force-fieldtrial-params',
action=BinaryPassThroughArgs,
help='Force trial params for Chromium features.')
class WPTWeblayerAdapter(WPTAndroidAdapter):
WEBLAYER_SHELL_PKG = 'org.chromium.weblayer.shell'
WEBLAYER_SUPPORT_PKG = 'org.chromium.weblayer.support'
@contextlib.contextmanager
def _install_apks(self):
install_weblayer_shell_as_needed = maybe_install_user_apk(
self._device, self.options.weblayer_shell, self.WEBLAYER_SHELL_PKG)
install_weblayer_support_as_needed = maybe_install_user_apk(
self._device, self.options.weblayer_support, self.WEBLAYER_SUPPORT_PKG)
install_webview_provider_as_needed = maybe_install_webview_provider(
self._device, self.options.webview_provider)
with install_weblayer_shell_as_needed, \
install_weblayer_support_as_needed, \
install_webview_provider_as_needed:
yield
def _extra_metadata_builder_args(self):
return [
'--additional-expectations',
PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBLAYER]]
def add_extra_arguments(self, parser):
super(WPTWeblayerAdapter, self).add_extra_arguments(parser)
parser.add_argument('--weblayer-shell',
help='WebLayer Shell apk to install.')
parser.add_argument('--weblayer-support',
help='WebLayer Support apk to install.')
parser.add_argument('--webview-provider',
help='Webview provider apk to install.')
@property
def rest_args(self):
args = super(WPTWeblayerAdapter, self).rest_args
args.append(ANDROID_WEBLAYER)
return args
class WPTWebviewAdapter(WPTAndroidAdapter):
SYSTEM_WEBVIEW_SHELL_PKG = 'org.chromium.webview_shell'
@contextlib.contextmanager
def _install_apks(self):
install_shell_as_needed = maybe_install_user_apk(
self._device, self.options.system_webview_shell,
self.SYSTEM_WEBVIEW_SHELL_PKG)
install_webview_provider_as_needed = maybe_install_webview_provider(
self._device, self.options.webview_provider)
with install_shell_as_needed, install_webview_provider_as_needed:
yield
def _extra_metadata_builder_args(self):
return [
'--additional-expectations',
PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBVIEW]]
def add_extra_arguments(self, parser):
super(WPTWebviewAdapter, self).add_extra_arguments(parser)
parser.add_argument('--system-webview-shell',
help=('System WebView Shell apk to install. If not '
'specified then the on-device WebView apk '
'will be used.'))
parser.add_argument('--webview-provider',
help='Webview provider APK to install.')
@property
def rest_args(self):
args = super(WPTWebviewAdapter, self).rest_args
args.append(ANDROID_WEBVIEW)
return args
class WPTClankAdapter(WPTAndroidAdapter):
@contextlib.contextmanager
def _install_apks(self):
install_clank_as_needed = maybe_install_user_apk(
self._device, self.options.chrome_apk)
with install_clank_as_needed:
yield
def _extra_metadata_builder_args(self):
return [
'--additional-expectations',
PRODUCTS_TO_EXPECTATION_FILE_PATHS[CHROME_ANDROID]]
def add_extra_arguments(self, parser):
super(WPTClankAdapter, self).add_extra_arguments(parser)
parser.add_argument(
'--chrome-apk', help='Chrome apk to install.')
parser.add_argument(
'--chrome-package-name',
help=('The package name of Chrome to test,'
' defaults to that of the compiled Chrome apk.'))
@property
def rest_args(self):
args = super(WPTClankAdapter, self).rest_args
if not self.options.chrome_package_name and not self.options.chrome_apk:
raise Exception('Either the --chrome-package-name or --chrome-apk '
'command line arguments must be used.')
if not self.options.chrome_package_name:
self.options.chrome_package_name = apk_helper.GetPackageName(
self.options.chrome_apk)
logger.info("Using Chrome apk's default package %s." %
self.options.chrome_package_name)
args.extend(['--package-name', self.options.chrome_package_name])
    # add the product positional argument
args.append(CHROME_ANDROID)
return args
def maybe_install_webview_provider(device, apk):
if apk:
logger.info('Will install WebView apk at ' + apk)
return webview_app.UseWebViewProvider(device, apk)
else:
return no_op()
def maybe_install_user_apk(device, apk, expected_pkg=None):
"""contextmanager to install apk on device.
Args:
device: DeviceUtils instance on which to install the apk.
apk: Apk file path on host.
expected_pkg: Optional, check that apk's package name matches.
Returns:
If apk evaluates to false, returns a do-nothing contextmanager.
Otherwise, returns a contextmanager to install apk on device.
"""
if apk:
pkg = apk_helper.GetPackageName(apk)
if expected_pkg and pkg != expected_pkg:
raise ValueError('{} has incorrect package name: {}, expected {}.'.format(
apk, pkg, expected_pkg))
install_as_needed = app_installed(device, apk)
logger.info('Will install ' + pkg + ' at ' + apk)
else:
install_as_needed = no_op()
return install_as_needed
@contextlib.contextmanager
def app_installed(device, apk):
pkg = apk_helper.GetPackageName(apk)
device.Install(apk)
try:
yield
finally:
device.Uninstall(pkg)
# Dummy contextmanager to simplify multiple optional managers.
@contextlib.contextmanager
def no_op():
yield
# This is not really a "script test" so does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
json.dump([], args.output)
def main():
devil_chromium.Initialize()
devices = device_utils.DeviceUtils.HealthyDevices()
if not devices:
logger.error('There are no devices attached to this host. Exiting script.')
return 1
# Only 1 device is supported for Android locally, this will work well with
# sharding support via swarming infra.
device = devices[0]
adapter = _get_adapter(device)
if adapter.options.verbose:
if adapter.options.verbose == 1:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
# WPT setup for chrome and webview requires that PATH contains adb.
platform_tools_path = os.path.dirname(devil_env.config.FetchPath('adb'))
os.environ['PATH'] = ':'.join([platform_tools_path] +
os.environ['PATH'].split(':'))
return adapter.run_test()
if __name__ == '__main__':
# Conform minimally to the protocol defined by ScriptTest.
if 'compile_targets' in sys.argv:
funcs = {
'run': None,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger()
sys.exit(main())
| [] | [] | [
"PATH"
] | [] | ["PATH"] | python | 1 | 0 | |
algo/02-compare-the-triplets/main.go | // https://www.hackerrank.com/challenges/compare-the-triplets/problem
package main
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// Complete the compareTriplets function below.
func compareTriplets(a []int32, b []int32) []int32 {
var res = []int32{0, 0}
for i := 0; i < 3; i++ {
switch {
case a[i] > b[i]:
res[0]++
case a[i] < b[i]:
res[1]++
default:
// do nothing
}
}
return res
}
func main() {
reader := bufio.NewReaderSize(os.Stdin, 16*1024*1024)
stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
checkError(err)
defer stdout.Close()
writer := bufio.NewWriterSize(stdout, 16*1024*1024)
aTemp := strings.Split(strings.TrimSpace(readLine(reader)), " ")
var a []int32
for i := 0; i < 3; i++ {
aItemTemp, err := strconv.ParseInt(aTemp[i], 10, 64)
checkError(err)
aItem := int32(aItemTemp)
a = append(a, aItem)
}
bTemp := strings.Split(strings.TrimSpace(readLine(reader)), " ")
var b []int32
for i := 0; i < 3; i++ {
bItemTemp, err := strconv.ParseInt(bTemp[i], 10, 64)
checkError(err)
bItem := int32(bItemTemp)
b = append(b, bItem)
}
result := compareTriplets(a, b)
for i, resultItem := range result {
fmt.Fprintf(writer, "%d", resultItem)
if i != len(result)-1 {
fmt.Fprintf(writer, " ")
}
}
fmt.Fprintf(writer, "\n")
writer.Flush()
}
func readLine(reader *bufio.Reader) string {
str, _, err := reader.ReadLine()
if err == io.EOF {
return ""
}
return strings.TrimRight(string(str), "\r\n")
}
func checkError(err error) {
if err != nil {
panic(err)
}
}
| [
"\"OUTPUT_PATH\""
] | [] | [
"OUTPUT_PATH"
] | [] | ["OUTPUT_PATH"] | go | 1 | 0 | |
pkg/joinserver/errors.go | // Copyright © 2019 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package joinserver
import "go.thethings.network/lorawan-stack/pkg/errors"
var (
errCheckMIC = errors.Define("check_mic", "MIC check failed")
errComputeMIC = errors.DefineInvalidArgument("compute_mic", "failed to compute MIC")
errDecodePayload = errors.DefineInvalidArgument("decode_payload", "failed to decode payload")
errDeriveAppSKey = errors.Define("derive_app_s_key", "failed to derive application session key")
errDeriveNwkSKeys = errors.Define("derive_nwk_s_keys", "failed to derive network session keys")
errDevNonceTooHigh = errors.DefineInvalidArgument("dev_nonce_too_high", "DevNonce is too high")
errDevNonceTooSmall = errors.DefineInvalidArgument("dev_nonce_too_small", "DevNonce is too small")
errDuplicateIdentifiers = errors.DefineAlreadyExists("duplicate_identifiers", "a device identified by the identifiers already exists")
errEncodePayload = errors.DefineInvalidArgument("encode_payload", "failed to encode payload")
errEncryptPayload = errors.Define("encrypt_payload", "failed to encrypt JoinAccept")
errEndDeviceRequest = errors.DefineInvalidArgument("end_device_request", "GetEndDeviceRequest is invalid")
errForwardJoinRequest = errors.Define("forward_join_request", "failed to forward JoinRequest")
errGenerateSessionKeyID = errors.Define("generate_session_key_id", "failed to generate session key ID")
errDeviceNotFound = errors.DefineNotFound("device_not_found", "device not found")
errInvalidIdentifiers = errors.DefineInvalidArgument("invalid_identifiers", "invalid identifiers")
errJoinNonceTooHigh = errors.Define("join_nonce_too_high", "JoinNonce is too high")
errMICMismatch = errors.DefineInvalidArgument("mic_mismatch", "MIC mismatch")
errNetIDMismatch = errors.DefineInvalidArgument("net_id_mismatch", "NetID `{net_id}` does not match")
errNoAppKey = errors.DefineCorruption("no_app_key", "no AppKey specified")
errNoAppSKey = errors.DefineCorruption("no_app_s_key", "no AppSKey specified")
errNoDevAddr = errors.DefineCorruption("no_dev_addr", "no DevAddr specified")
errNoDevEUI = errors.DefineInvalidArgument("no_dev_eui", "no DevEUI specified")
errNoFNwkSIntKey = errors.DefineCorruption("no_f_nwk_s_int_key", "no FNwkSIntKey specified")
errNoJoinEUI = errors.DefineInvalidArgument("no_join_eui", "no JoinEUI specified")
errNoJoinRequest = errors.DefineInvalidArgument("no_join_request", "no JoinRequest specified")
errNoNwkKey = errors.DefineCorruption("no_nwk_key", "no NwkKey specified")
errNoNwkSEncKey = errors.DefineCorruption("no_nwk_s_enc_key", "no NwkSEncKey specified")
errNoNetID = errors.DefineFailedPrecondition("no_net_id", "no NetID specified")
errNoPayload = errors.DefineInvalidArgument("no_payload", "no message payload specified")
errNoRootKeys = errors.DefineCorruption("no_root_keys", "no root keys specified")
errNoSNwkSIntKey = errors.DefineCorruption("no_s_nwk_s_int_key", "no SNwkSIntKey specified")
errPayloadLengthMismatch = errors.DefineInvalidArgument("payload_length", "expected length of payload to be equal to 23 got {length}")
errProvisionerNotFound = errors.DefineNotFound("provisioner_not_found", "provisioner `{id}` not found")
errProvisionerDecode = errors.Define("provisioner_decode", "failed to decode provisioning data")
errProvisionEntryCount = errors.DefineInvalidArgument("provision_entry_count", "expected `{expected}` but have `{actual}` entries to provision")
errProvisioning = errors.DefineAborted("provisioning", "provisioning failed")
errRegistryOperation = errors.DefineInternal("registry_operation", "registry operation failed")
errReuseDevNonce = errors.DefineInvalidArgument("reuse_dev_nonce", "DevNonce has already been used")
errUnauthenticated = errors.DefineUnauthenticated("unauthenticated", "unauthenticated")
errAddressNotAuthorized = errors.DefinePermissionDenied("address_not_authorized", "address `{address}` is not authorized for the entity")
errUnknownAppEUI = errors.Define("unknown_app_eui", "AppEUI specified is not known")
errUnsupportedLoRaWANVersion = errors.DefineInvalidArgument("lorawan_version", "unsupported LoRaWAN version: {version}", "version")
errWrongPayloadType = errors.DefineInvalidArgument("payload_type", "wrong payload type: {type}")
)
| [] | [] | [] | [] | [] | go | null | null | null |
cmd/minikube/cmd/root.go | /*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"flag"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"k8s.io/klog/v2"
"k8s.io/kubectl/pkg/util/templates"
configCmd "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/audit"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/detect"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/minikube/translate"
)
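// dirs are the local state directories that are created before any command runs.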
var dirs = [...]string{
localpath.MiniPath(),
localpath.MakeMiniPath("certs"),
localpath.MakeMiniPath("machines"),
localpath.MakeMiniPath("cache"),
localpath.MakeMiniPath("cache", "iso"),
localpath.MakeMiniPath("config"),
localpath.MakeMiniPath("addons"),
localpath.MakeMiniPath("files"),
localpath.MakeMiniPath("logs"),
}
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "minikube",
Short: "minikube quickly sets up a local Kubernetes cluster",
Long: `minikube provisions and manages local Kubernetes clusters optimized for development workflows.`,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
for _, path := range dirs {
if err := os.MkdirAll(path, 0o777); err != nil {
exit.Error(reason.HostHomeMkdir, "Error creating minikube directory", err)
}
}
userName := viper.GetString(config.UserFlag)
if !validateUsername(userName) {
out.WarningT("User name '{{.username}}' is not valid", out.V{"username": userName})
exit.Message(reason.Usage, "User name must be 60 chars or less.")
}
},
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
defer audit.Log(time.Now())
	// Check whether this is a windows binary (.exe) running inside WSL.
if runtime.GOOS == "windows" && detect.IsMicrosoftWSL() {
var found = false
for _, a := range os.Args {
if a == "--force" {
found = true
break
}
}
if !found {
exit.Message(reason.WrongBinaryWSL, "You are trying to run windows .exe binary inside WSL, for better integration please use Linux binary instead (Download at https://minikube.sigs.k8s.io/docs/start/.). Otherwise if you still want to do this, you can do it using --force")
}
}
_, callingCmd := filepath.Split(os.Args[0])
if callingCmd == "kubectl" {
// If the user is using the minikube binary as kubectl, allow them to specify the kubectl context without also specifying minikube profile
profile := ""
for i, a := range os.Args {
if a == "--context" {
profile = fmt.Sprintf("--profile=%s", os.Args[i+1])
break
} else if strings.HasPrefix(a, "--context=") {
context := strings.Split(a, "=")[1]
profile = fmt.Sprintf("--profile=%s", context)
break
}
}
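		// Rewrite os.Args so cobra dispatches to the embedded kubectl subcommand,
		// forwarding the original arguments after "--" (plus the profile flag derived from --context above).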
if profile != "" {
os.Args = append([]string{RootCmd.Use, callingCmd, profile, "--"}, os.Args[1:]...)
} else {
os.Args = append([]string{RootCmd.Use, callingCmd, "--"}, os.Args[1:]...)
}
}
for _, c := range RootCmd.Commands() {
c.Short = translate.T(c.Short)
c.Long = translate.T(c.Long)
c.Flags().VisitAll(func(f *pflag.Flag) {
f.Usage = translate.T(f.Usage)
})
c.SetUsageTemplate(usageTemplate())
}
RootCmd.Short = translate.T(RootCmd.Short)
RootCmd.Long = translate.T(RootCmd.Long)
RootCmd.Flags().VisitAll(func(f *pflag.Flag) {
f.Usage = translate.T(f.Usage)
})
if runtime.GOOS != "windows" {
// add minikube binaries to the path
targetDir := localpath.MakeMiniPath("bin")
addToPath(targetDir)
}
// Universally ensure that we never speak to the wrong DOCKER_HOST
if err := oci.PointToHostDockerDaemon(); err != nil {
klog.Errorf("oci env: %v", err)
}
if err := oci.PointToHostPodman(); err != nil {
klog.Errorf("oci env: %v", err)
}
if err := RootCmd.Execute(); err != nil {
// Cobra already outputs the error, typically because the user provided an unknown command.
os.Exit(reason.ExProgramUsage)
}
}
// usageTemplate just calls translate.T on the default usage template
// explicitly using the raw string instead of calling c.UsageTemplate()
// so the extractor can find this monstrosity of a string
func usageTemplate() string {
return fmt.Sprintf(`%s:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
%s:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
%s:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
%s:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
%s:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
%s:
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
%s:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
%s{{end}}
`, translate.T("Usage"), translate.T("Aliases"), translate.T("Examples"), translate.T("Available Commands"), translate.T("Flags"), translate.T("Global Flags"), translate.T("Additional help topics"), translate.T(`Use "{{.CommandPath}} [command] --help" for more information about a command.`))
}
func init() {
klog.InitFlags(nil)
// preset logtostderr and alsologtostderr only for test runs, for normal runs consider flags in main()
if strings.HasPrefix(filepath.Base(os.Args[0]), "e2e-") || strings.HasSuffix(os.Args[0], "test") {
if err := flag.Set("logtostderr", "false"); err != nil {
klog.Warningf("Unable to set default flag value for logtostderr: %v", err)
}
if err := flag.Set("alsologtostderr", "false"); err != nil {
klog.Warningf("Unable to set default flag value for alsologtostderr: %v", err)
}
}
pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // avoid `generate-docs_test.go` complaining about "Docs are not updated"
RootCmd.PersistentFlags().StringP(config.ProfileName, "p", constants.DefaultClusterName, `The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently.`)
RootCmd.PersistentFlags().StringP(configCmd.Bootstrapper, "b", "kubeadm", "The name of the cluster bootstrapper that will set up the Kubernetes cluster.")
RootCmd.PersistentFlags().String(config.UserFlag, "", "Specifies the user executing the operation. Useful for auditing operations executed by 3rd party tools. Defaults to the operating system username.")
groups := templates.CommandGroups{
{
Message: translate.T("Basic Commands:"),
Commands: []*cobra.Command{
startCmd,
statusCmd,
stopCmd,
deleteCmd,
dashboardCmd,
pauseCmd,
unpauseCmd,
},
},
{
Message: translate.T("Images Commands:"),
Commands: []*cobra.Command{
dockerEnvCmd,
podmanEnvCmd,
cacheCmd,
imageCmd,
},
},
{
Message: translate.T("Configuration and Management Commands:"),
Commands: []*cobra.Command{
configCmd.AddonsCmd,
configCmd.ConfigCmd,
configCmd.ProfileCmd,
updateContextCmd,
},
},
{
Message: translate.T("Networking and Connectivity Commands:"),
Commands: []*cobra.Command{
serviceCmd,
tunnelCmd,
},
},
{
Message: translate.T("Advanced Commands:"),
Commands: []*cobra.Command{
mountCmd,
sshCmd,
kubectlCmd,
nodeCmd,
},
},
{
Message: translate.T("Troubleshooting Commands:"),
Commands: []*cobra.Command{
sshKeyCmd,
sshHostCmd,
ipCmd,
logsCmd,
updateCheckCmd,
versionCmd,
optionsCmd,
},
},
}
groups.Add(RootCmd)
// Ungrouped commands will show up in the "Other Commands" section
RootCmd.AddCommand(completionCmd)
templates.ActsAsRootCommand(RootCmd, []string{"options"}, groups...)
if err := viper.BindPFlags(RootCmd.PersistentFlags()); err != nil {
exit.Error(reason.InternalBindFlags, "Unable to bind flags", err)
}
translate.DetermineLocale()
cobra.OnInitialize(initConfig)
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
configPath := localpath.ConfigFile()
viper.SetConfigFile(configPath)
viper.SetConfigType("json")
if err := viper.ReadInConfig(); err != nil {
// This config file is optional, so don't emit errors if missing
if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
klog.Warningf("Error reading config file at %s: %v", configPath, err)
}
}
setupViper()
}
func setupViper() {
viper.SetEnvPrefix(minikubeEnvPrefix)
// Replaces '-' in flags with '_' in env variables
// e.g. iso-url => $ENVPREFIX_ISO_URL
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
viper.AutomaticEnv()
viper.SetDefault(config.WantUpdateNotification, true)
viper.SetDefault(config.ReminderWaitPeriodInHours, 24)
viper.SetDefault(config.WantReportError, false)
viper.SetDefault(config.WantReportErrorPrompt, true)
viper.SetDefault(config.WantKubectlDownloadMsg, true)
viper.SetDefault(config.WantNoneDriverWarning, true)
viper.SetDefault(config.ShowDriverDeprecationNotification, true)
viper.SetDefault(config.ShowBootstrapperDeprecationNotification, true)
}
func addToPath(dir string) {
new := fmt.Sprintf("%s:%s", dir, os.Getenv("PATH"))
klog.Infof("Updating PATH: %s", dir)
os.Setenv("PATH", new)
}
func validateUsername(name string) bool {
return len(name) <= 60
}
| [
"\"PATH\""
] | [] | [
"PATH"
] | [] | ["PATH"] | go | 1 | 0 | |
Bot/__init__.py | #!/usr/bin/env python3
# This is a bot coded by FoxmanTech and used for educational purposes only
# https://github.com/samebanezar001
# Copyright FOXMANTECH
# Thank you https://github.com/pyrogram/pyrogram
import os
import logging
logging.basicConfig(level=logging.INFO,
handlers=[logging.FileHandler('log.txt'), logging.StreamHandler()],
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
LOGGER = logging.getLogger(__name__)
logging.getLogger("pyrogram").setLevel(logging.WARNING)
ENV = bool(os.environ.get('ENV', False))
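# When ENV is truthy, configuration is read from environment variables (e.g. on Heroku-style deployments);
# otherwise it falls back to the local sample_config.Config.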
try:
if ENV:
AUTH_USER = []
BOT_TOKEN = os.environ.get('BOT_TOKEN')
APP_ID = os.environ.get('APP_ID')
API_HASH = os.environ.get('API_HASH')
API_KEY = os.environ.get('API_KEY')
API_EMAIL = os.environ.get('API_EMAIL')
GET_AUTH_USER = os.environ.get('AUTH_USER')
for i in GET_AUTH_USER.split(','):
AUTH_USER.append(int(i))
print(AUTH_USER)
else:
from sample_config import Config
BOT_TOKEN = Config.BOT_TOKEN
APP_ID = Config.APP_ID
API_HASH = Config.API_HASH
API_KEY = Config.API_KEY
API_EMAIL = Config.API_EMAIL
AUTH_USER = Config.AUTH_USERS
print(AUTH_USER)
except KeyError:
    LOGGER.error('One or more configuration values are missing, exiting now.')
exit(1)
class Msg:
source = "\nsource:https://github.com/samebanezar001/File-Link-Bot"
start = "\n<b>This bot uploads telegram files to MixDrop.co,File.io.\nAdmin: @Foxmantech002</b>"
error = "something is went wrong\n{error} \ncontact admin @Foxmantech002"
help = "Usage: <b>Send any file and the bot will upload it to MixDrop.co,File.io</b>"
| [] | [] | [
"AUTH_USER",
"API_KEY",
"ENV",
"BOT_TOKEN",
"APP_ID",
"API_EMAIL",
"API_HASH"
] | [] | ["AUTH_USER", "API_KEY", "ENV", "BOT_TOKEN", "APP_ID", "API_EMAIL", "API_HASH"] | python | 7 | 0 | |
Source/Git/wb_git_callback_client_win32.py | '''
====================================================================
Copyright (c) 2016-2017 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_git_callback_client_win32.py
    Called with argv[1:] as the prompt.
    Expects a single line of output as the response.
'''
import sys
import os
import ctypes
class WbGitcallback:
def __init__( self ):
self.pipe_name = r'\\.\pipe\SCM Workbench GIT callback'
def callback( self, argv ):
if argv[1] == 'askpass':
return self.askPass( argv[2] )
elif argv[1] == 'editor':
return self.editor( argv[2] )
elif argv[1] == 'sequence-editor':
return self.sequenceEditor( argv[2] )
else:
print( 'Error: Unknown callback command: %r' % (argv[1:],) )
return 1
def askPass( self, prompt ):
rc, reply = self.sendRequest( 'askpass', prompt )
if reply is not None:
print( reply )
return rc
def editor( self, filename ):
rc, reply = self.sendRequest( 'editor', filename )
if reply is not None and reply != '':
print( reply )
return rc
def sequenceEditor( self, filename ):
rc, reply = self.sendRequest( 'sequence-editor', filename )
if reply is not None and reply != '':
print( reply )
return rc
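    # Sends "facility\0param" over the named pipe; the first character of the
    # reply is the exit status and the remainder is the text to print.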
def sendRequest( self, facility, param ):
request = '%s\0%s' % (facility, param)
request = request.encode( 'utf-8' )
buf_size = ctypes.c_int( 256 )
buf_result = ctypes.create_string_buffer( buf_size.value )
rc = ctypes.windll.kernel32.CallNamedPipeW(
self.pipe_name,
request,
len(request),
buf_result,
buf_size,
ctypes.byref( buf_size ),
0
)
if rc == 0:
err = ctypes.windll.kernel32.GetLastError()
if err == 2:
return 1, None
errmsg = self.__getErrorMessage( err )
with open( os.path.join( os.environ['USERPROFILE'], 'wb_scm_git_callback.log' ), 'a' ) as f:
f.write( 'Error: CallNamedPipeA rc=%d err=%d errmsg=%r\n' % (rc, err, errmsg) )
return 1, None
else:
reply = buf_result.raw[:buf_size.value].decode( 'utf-8' )
return int(reply[0]), reply[1:]
def __getErrorMessage( self, err ):
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
errmsg_size = ctypes.c_int( 256 )
errmsg = ctypes.create_string_buffer( errmsg_size.value + 1 )
rc = ctypes.windll.kernel32.FormatMessageA(
FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, # __in DWORD dwFlags,
None, # __in_opt LPCVOID lpSource,
err, # __in DWORD dwMessageId,
0, # __in DWORD dwLanguageId,
errmsg, # __out LPTSTR lpBuffer,
errmsg_size, # __in DWORD nSize,
None # __in_opt va_list *Arguments
)
if rc == 0:
return 'error 0x%8.8x' % (err,)
return errmsg.value
if __name__ == '__main__':
sys.exit( WbGitcallback().callback( sys.argv ) )
| [] | [] | [
"USERPROFILE"
] | [] | ["USERPROFILE"] | python | 1 | 0 | |
examples/sendListMessage/main.go | package main
import (
"encoding/gob"
"fmt"
"os"
"time"
"github.com/Rhymen/go-whatsapp/binary/proto"
qrcodeTerminal "github.com/Baozisoftware/qrcode-terminal-go"
"github.com/Rhymen/go-whatsapp"
)
func main() {
//create new WhatsApp connection
wac, err := whatsapp.NewConn(5 * time.Second)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating connection: %v\n", err)
return
}
err = login(wac)
if err != nil {
fmt.Fprintf(os.Stderr, "error logging in: %v\n", err)
return
}
<-time.After(6 * time.Second)
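	// Build a quoted-message context so the list message is sent as a reply to the previous text.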
previousMessage := "How about u"
quotedMessage := proto.Message{
Conversation: &previousMessage,
}
ContextInfo := whatsapp.ContextInfo{
		QuotedMessage:   &quotedMessage,
QuotedMessageID: "", //Original message ID
Participant: "", //Who sent the original message
}
var Section = []whatsapp.Section{
{
Title: "Section title 1",
Rows: []whatsapp.Row{{
Title: "Row title 1",
Description: "Row description 1",
RowId: "rowid1", // no white space in rowid
},
{
Title: "Row title 2",
Description: "Row description 2",
RowId: "rowId2",
},
},
},
}
Section = append(Section, whatsapp.Section{
Title: "Section title 2",
Rows: []whatsapp.Row{
{
Title: "Row title 3",
Description: "Row description 3",
RowId: "rowId3",
},
},
},
)
msg := whatsapp.ListMessage{
Info: whatsapp.MessageInfo{
RemoteJid: "[email protected]",
},
ContextInfo: ContextInfo,
Title: "This is List *title*",
Description: "This is List _description_",
ButtonText: "This is List buttonText", // ButtonText dosn't support html tag
FooterText: "This is List footerText", // this isn't actually showing in whatsapp web
ListType: proto.ListMessage_SINGLE_SELECT,
Sections: Section,
}
msgId, err := wac.Send(msg)
if err != nil {
fmt.Fprintf(os.Stderr, "error sending message: %v", err)
os.Exit(1)
} else {
fmt.Println("Message Sent -> ID : " + msgId)
<-time.After(3 * time.Second)
}
}
func login(wac *whatsapp.Conn) error {
//load saved session
session, err := readSession()
if err == nil {
//restore session
session, err = wac.RestoreWithSession(session)
if err != nil {
return fmt.Errorf("restoring failed: %v\n", err)
}
} else {
//no saved session -> regular login
qr := make(chan string)
go func() {
terminal := qrcodeTerminal.New()
terminal.Get(<-qr).Print()
}()
session, err = wac.Login(qr)
if err != nil {
return fmt.Errorf("error during login: %v\n", err)
}
}
//save session
err = writeSession(session)
if err != nil {
return fmt.Errorf("error saving session: %v\n", err)
}
return nil
}
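// readSession restores a previously saved session from the temp directory so the QR scan can be skipped.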
func readSession() (whatsapp.Session, error) {
session := whatsapp.Session{}
	file, err := os.Open(os.TempDir() + "/whatsappSession.gob")
if err != nil {
return session, err
}
defer file.Close()
decoder := gob.NewDecoder(file)
err = decoder.Decode(&session)
if err != nil {
return session, err
}
return session, nil
}
func writeSession(session whatsapp.Session) error {
file, err := os.Create(os.TempDir() + "/whatsappSession.gob")
if err != nil {
return err
}
defer file.Close()
encoder := gob.NewEncoder(file)
err = encoder.Encode(session)
if err != nil {
return err
}
return nil
}
| [] | [] | [] | [] | [] | go | null | null | null |
vendor/github.com/lucaslorentz/caddy-docker-proxy/plugin/loader.go | package plugin
import (
"bytes"
"context"
"flag"
"log"
"os"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/mholt/caddy"
)
var pollingInterval = 30 * time.Second
func init() {
flag.DurationVar(&pollingInterval, "docker-polling-interval", 30*time.Second, "Interval caddy should manually check docker for a new caddyfile")
}
// DockerLoader generates caddy files from docker swarm information
type DockerLoader struct {
initialized bool
dockerClient *client.Client
generator *CaddyfileGenerator
timer *time.Timer
skipEvents bool
input caddy.CaddyfileInput
}
// CreateDockerLoader creates a docker loader
func CreateDockerLoader() *DockerLoader {
return &DockerLoader{
input: caddy.CaddyfileInput{
ServerTypeName: "http",
},
}
}
// Load returns the current caddy file input
func (dockerLoader *DockerLoader) Load(serverType string) (caddy.Input, error) {
if serverType != "http" {
return nil, nil
}
if !dockerLoader.initialized {
dockerLoader.initialized = true
dockerClient, err := client.NewEnvClient()
if err != nil {
log.Printf("Docker connection failed: %v", err)
return nil, nil
}
dockerPing, err := dockerClient.Ping(context.Background())
if err != nil {
log.Printf("Docker ping failed: %v", err)
return nil, nil
}
dockerClient.NegotiateAPIVersionPing(dockerPing)
dockerLoader.dockerClient = dockerClient
dockerLoader.generator = CreateGenerator(
WrapDockerClient(dockerClient),
CreateDockerUtils(),
GetGeneratorOptions(),
)
if pollingIntervalEnv := os.Getenv("CADDY_DOCKER_POLLING_INTERVAL"); pollingIntervalEnv != "" {
if p, err := time.ParseDuration(pollingIntervalEnv); err != nil {
log.Printf("Failed to parse CADDY_DOCKER_POLLING_INTERVAL: %v", err)
} else {
pollingInterval = p
}
}
log.Printf("[INFO] Docker polling interval: %v", pollingInterval)
dockerLoader.timer = time.AfterFunc(pollingInterval, func() {
dockerLoader.update(true)
})
dockerLoader.update(false)
go dockerLoader.monitorEvents()
}
return dockerLoader.input, nil
}
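// monitorEvents listens for Docker container, service and config events and
// debounces them into a caddyfile regeneration by resetting the update timer.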
func (dockerLoader *DockerLoader) monitorEvents() {
args := filters.NewArgs()
args.Add("scope", "swarm")
args.Add("scope", "local")
args.Add("type", "service")
args.Add("type", "container")
args.Add("type", "config")
eventsChan, errorChan := dockerLoader.dockerClient.Events(context.Background(), types.EventsOptions{
Filters: args,
})
for {
select {
case event := <-eventsChan:
if dockerLoader.skipEvents {
continue
}
update := (event.Type == "container" && event.Action == "start") ||
(event.Type == "container" && event.Action == "stop") ||
(event.Type == "container" && event.Action == "die") ||
(event.Type == "service" && event.Action == "create") ||
(event.Type == "service" && event.Action == "update") ||
(event.Type == "service" && event.Action == "remove") ||
(event.Type == "config" && event.Action == "create") ||
(event.Type == "config" && event.Action == "remove")
if update {
dockerLoader.skipEvents = true
dockerLoader.timer.Reset(100 * time.Millisecond)
}
case err := <-errorChan:
log.Println(err)
}
}
}
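// update regenerates the caddyfile; if the contents changed it validates the new
// input and optionally reloads Caddy, returning whether a change was detected.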
func (dockerLoader *DockerLoader) update(reloadIfChanged bool) bool {
dockerLoader.timer.Reset(pollingInterval)
dockerLoader.skipEvents = false
newContents := dockerLoader.generator.GenerateCaddyFile()
if bytes.Equal(dockerLoader.input.Contents, newContents) {
return false
}
newInput := caddy.CaddyfileInput{
ServerTypeName: "http",
Contents: newContents,
}
if err := caddy.ValidateAndExecuteDirectives(newInput, nil, true); err != nil {
log.Printf("[ERROR] CaddyFile error: %s", err)
log.Printf("[INFO] Wrong CaddyFile:\n%s", newContents)
} else {
log.Printf("[INFO] New CaddyFile:\n%s", newInput.Contents)
dockerLoader.input = newInput
if reloadIfChanged {
ReloadCaddy(dockerLoader)
}
}
return true
}
| [
"\"CADDY_DOCKER_POLLING_INTERVAL\""
] | [] | [
"CADDY_DOCKER_POLLING_INTERVAL"
] | [] | ["CADDY_DOCKER_POLLING_INTERVAL"] | go | 1 | 0 | |
src/werkzeug/_reloader.py | import os
import subprocess
import sys
import threading
import time
from itertools import chain
from typing import Any
from typing import Optional
from ._internal import _log
def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list is in case sys.modules is modified during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, "__file__", None)
if filename:
if os.path.isdir(filename) and os.path.exists(
os.path.join(filename, "__init__.py")
):
filename = os.path.join(filename, "__init__.py")
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
yield filename
def _find_observable_paths(extra_files=None):
"""Finds all paths that should be observed."""
rv = {
os.path.dirname(os.path.abspath(x)) if os.path.isfile(x) else os.path.abspath(x)
for x in sys.path
}
for filename in extra_files or ():
rv.add(os.path.dirname(os.path.abspath(filename)))
for module in list(sys.modules.values()):
fn = getattr(module, "__file__", None)
if fn is None:
continue
fn = os.path.abspath(fn)
rv.add(os.path.dirname(fn))
return _find_common_roots(rv)
def _get_args_for_reloading():
"""Determine how the script was executed, and return the args needed
to execute it again in a new process.
"""
rv = [sys.executable]
py_script = sys.argv[0]
args = sys.argv[1:]
# Need to look at main module to determine how it was executed.
__main__ = sys.modules["__main__"]
# The value of __package__ indicates how Python was called. It may
# not exist if a setuptools script is installed as an egg. It may be
# set incorrectly for entry points created with pip on Windows.
if getattr(__main__, "__package__", None) is None or (
os.name == "nt"
and __main__.__package__ == ""
and not os.path.exists(py_script)
and os.path.exists(f"{py_script}.exe")
):
# Executed a file, like "python app.py".
py_script = os.path.abspath(py_script)
if os.name == "nt":
# Windows entry points have ".exe" extension and should be
# called directly.
if not os.path.exists(py_script) and os.path.exists(f"{py_script}.exe"):
py_script += ".exe"
if (
os.path.splitext(sys.executable)[1] == ".exe"
and os.path.splitext(py_script)[1] == ".exe"
):
rv.pop(0)
rv.append(py_script)
else:
# Executed a module, like "python -m werkzeug.serving".
if sys.argv[0] == "-m":
# Flask works around previous behavior by putting
# "-m flask" in sys.argv.
# TODO remove this once Flask no longer misbehaves
args = sys.argv
else:
if os.path.isfile(py_script):
# Rewritten by Python from "-m script" to "/path/to/script.py".
py_module = __main__.__package__
name = os.path.splitext(os.path.basename(py_script))[0]
if name != "__main__":
py_module += f".{name}"
else:
# Incorrectly rewritten by pydevd debugger from "-m script" to "script".
py_module = py_script
rv.extend(("-m", py_module.lstrip(".")))
rv.extend(args)
return rv
def _find_common_roots(paths):
"""Out of some paths it finds the common roots that need monitoring."""
paths = [x.split(os.path.sep) for x in paths]
root = {}
for chunks in sorted(paths, key=len, reverse=True):
node = root
for chunk in chunks:
node = node.setdefault(chunk, {})
node.clear()
rv = set()
def _walk(node, path):
for prefix, child in node.items():
_walk(child, path + (prefix,))
if not node:
rv.add("/".join(path))
_walk(root, ())
return rv
class ReloaderLoop:
extra_files: Any
interval: float
name: Optional[str] = None
# Patched during tests. Wrapping with `staticmethod` is required in
# case `time.sleep` has been replaced by a non-c function (e.g. by
# `eventlet.monkey_patch`) before we get here
_sleep = staticmethod(time.sleep)
def __init__(self, extra_files: Optional[Any] = None, interval: float = 1):
self.extra_files = {os.path.abspath(x) for x in extra_files or ()}
self.interval = interval
def run(self):
pass
def restart_with_reloader(self):
"""Spawn a new Python interpreter with the same arguments as the
current one, but running the reloader thread.
"""
while 1:
_log("info", f" * Restarting with {self.name}")
args = _get_args_for_reloading()
new_environ = os.environ.copy()
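            # WERKZEUG_RUN_MAIN tells the child process to run the application;
            # the parent process only watches files and restarts the child.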
new_environ["WERKZEUG_RUN_MAIN"] = "true"
exit_code = subprocess.call(args, env=new_environ, close_fds=False)
if exit_code != 3:
return exit_code
def trigger_reload(self, filename):
self.log_reload(filename)
sys.exit(3)
def log_reload(self, filename):
filename = os.path.abspath(filename)
_log("info", f" * Detected change in {filename!r}, reloading")
class StatReloaderLoop(ReloaderLoop):
name: Any = "stat"
def run(self):
mtimes = {}
while 1:
for filename in chain(_iter_module_files(), self.extra_files):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
self.trigger_reload(filename)
self._sleep(self.interval)
class WatchdogReloaderLoop(ReloaderLoop):
observable_paths: Any
name: Any
observer_class: Any
event_handler: Any
should_reload: Any
def __init__(self, *args, **kwargs):
ReloaderLoop.__init__(self, *args, **kwargs)
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
self.observable_paths = set()
def _check_modification(filename):
if filename in self.extra_files:
self.trigger_reload(filename)
dirname = os.path.dirname(filename)
if dirname.startswith(tuple(self.observable_paths)):
if filename.endswith((".pyc", ".pyo", ".py")):
self.trigger_reload(filename)
class _CustomHandler(FileSystemEventHandler):
def on_created(self, event):
_check_modification(event.src_path)
def on_modified(self, event):
_check_modification(event.src_path)
def on_moved(self, event):
_check_modification(event.src_path)
_check_modification(event.dest_path)
def on_deleted(self, event):
_check_modification(event.src_path)
reloader_name = Observer.__name__.lower()
if reloader_name.endswith("observer"):
reloader_name = reloader_name[:-8]
reloader_name += " reloader"
self.name = reloader_name
self.observer_class = Observer
self.event_handler = _CustomHandler()
self.should_reload = False
def trigger_reload(self, filename):
# This is called inside an event handler, which means throwing
# SystemExit has no effect.
# https://github.com/gorakhargosh/watchdog/issues/294
self.should_reload = True
self.log_reload(filename)
def run(self):
watches = {}
observer = self.observer_class()
observer.start()
try:
while not self.should_reload:
to_delete = set(watches)
paths = _find_observable_paths(self.extra_files)
for path in paths:
if path not in watches:
try:
watches[path] = observer.schedule(
self.event_handler, path, recursive=True
)
except OSError:
                            # Clear this path from the list of watches. We don't
                            # want the same error message showing again in the
                            # next iteration.
watches[path] = None
to_delete.discard(path)
for path in to_delete:
watch = watches.pop(path, None)
if watch is not None:
observer.unschedule(watch)
self.observable_paths = paths
self._sleep(self.interval)
finally:
observer.stop()
observer.join()
sys.exit(3)
reloader_loops: Any = {"stat": StatReloaderLoop, "watchdog": WatchdogReloaderLoop}
try:
__import__("watchdog.observers")
except ImportError:
reloader_loops["auto"] = reloader_loops["stat"]
else:
reloader_loops["auto"] = reloader_loops["watchdog"]
def ensure_echo_on():
"""Ensure that echo mode is enabled. Some tools such as PDB disable
it which causes usability issues after a reload."""
# tcgetattr will fail if stdin isn't a tty
if not sys.stdin.isatty():
return
try:
import termios
except ImportError:
return
attributes = termios.tcgetattr(sys.stdin)
if not attributes[3] & termios.ECHO:
attributes[3] |= termios.ECHO
termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)
def run_with_reloader(
main_func,
extra_files: Optional[Any] = None,
interval: float = 1,
reloader_type: str = "auto",
):
"""Run the given function in an independent Python interpreter."""
import signal
reloader = reloader_loops[reloader_type](extra_files, interval)
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
ensure_echo_on()
t = threading.Thread(target=main_func, args=())
t.setDaemon(True)
t.start()
reloader.run()
else:
sys.exit(reloader.restart_with_reloader())
except KeyboardInterrupt:
pass
| [] | [] | [
"WERKZEUG_RUN_MAIN"
] | [] | ["WERKZEUG_RUN_MAIN"] | python | 1 | 0 | |
web/tests/utils/test_pq.py | import pytest
import sys
import pdb
class TestPriorityQueue:
def test_add(self, pq):
test_val = '123456'
pq.add(test_val)
val = pq.pop()
pq.add(test_val)
assert val == test_val
def test_max_length(self, pq):
test_vals = range(pq.MAX_QUEUE_LENGTH + 1)
for t in test_vals:
pq.add(t)
assert len(pq) == pq.MAX_QUEUE_LENGTH
def test_priority_order(self, pq):
pq.add('a', priority=0)
pq.add('b', priority=1)
pq.add('c', priority=2)
pq.add('d', priority=3)
assert pq.pop() == 'd'
def test_priority_order_remove(self, pq):
pq.add('a', priority=0)
pq.add('b', priority=1)
pq.add('c', priority=2)
pq.add('d', priority=3)
val1 = pq.pop(remove=True)
val2 = pq.pop()
assert val1 == 'd'
assert val2 == 'c'
def test_score(self, pq):
pq.add('a', priority=0)
pq.add('b', priority=1)
assert pq.get_score('b') == 1
assert len(pq) == 2
def test_remove_lowest(self, pq):
pq.add('a', priority=0)
pq.add('b', priority=1)
pq.remove_lowest_priority()
assert len(pq) == 1
assert pq.pop() == 'b'
def test_update_priority(self, pq):
pq.add('a', priority=0)
pq.add('b', priority=1)
pq.increment_priority('b')
assert pq.get_score('b') == 2
pq.increment_priority('b', incr=10) # increment by 10
assert pq.get_score('b') == 12
def test_key_removal(self, pq):
pq.add('a', priority=0)
pq.add('b', priority=1)
pq.remove('b')
assert len(pq) == 1
assert pq.pop() == 'a'
def test_ordering_in_same_priority_values(self, pq):
pq.add('a', priority=0)
pq.add('b', priority=0)
pq.add('c', priority=0)
pq.add('d', priority=0)
assert pq.pop() == 'd'
def test_multi_pop(self, pq):
pq.add('e', priority=3)
pq.add('a', priority=0)
pq.add('b', priority=1)
pq.add('c', priority=2)
pq.add('d', priority=0)
items = pq.multi_pop(3)
assert items == ['e', 'c', 'b']
def test_rank(self, pq):
pq.add('a', priority=3)
pq.add('b', priority=0)
assert pq.get_rank('a') == 0
assert pq.get_rank('b') == 1
def test_multi_pop_with_scores(self, pq):
pq.add('e', priority=3)
pq.add('a', priority=0)
pq.add('b', priority=1)
pq.add('c', priority=2)
pq.add('d', priority=0)
items = pq.multi_pop(3, with_scores=True)
keys = [k for k, v in items]
values = [v for k, v in items]
assert keys == ['e', 'c', 'b']
assert values == [3, 2, 1]
def test_multi_pop_with_sampling(self, pq):
pq.add('e', priority=3)
pq.add('a', priority=0)
pq.add('b', priority=1)
pq.add('c', priority=2)
pq.add('d', priority=0)
items = pq.multi_pop(3, sample_from=5, with_scores=True)
keys = [k for k, v in items]
values = [v for k, v in items]
assert len(keys) == 3
assert len(values) == 3
        assert set(keys) == set(['e', 'b', 'c']) # true because probabilities for others are zero
if __name__ == "__main__":
# if running outside of docker, make sure redis is running on localhost
import os; os.environ["REDIS_HOST"] = "localhost"
pytest.main(['-s', '-m', 'focus'])
# pytest.main(['-s'])
| [] | [] | [
"REDIS_HOST"
] | [] | ["REDIS_HOST"] | python | 1 | 0 | |
src/recommendationservice/recommendation_server.py | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import time
import traceback
from concurrent import futures
import googleclouddebugger
import googlecloudprofiler
from google.auth.exceptions import DefaultCredentialsError
import grpc
from opencensus.ext.stackdriver import trace_exporter as stackdriver_exporter
from opencensus.ext.grpc import server_interceptor
from opencensus.trace import samplers
from opencensus.common.transports.async_ import AsyncTransport
import demo_pb2
import demo_pb2_grpc
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
from logger import getJSONLogger
logger = getJSONLogger('recommendationservice-server')
def initStackdriverProfiling():
project_id = None
try:
project_id = os.environ["GCP_PROJECT_ID"]
except KeyError:
# Environment variable not set
pass
for retry in range(1, 4):
try:
if project_id:
googlecloudprofiler.start(
service='recommendation_server', service_version='1.0.0', verbose=0, project_id=project_id)
else:
googlecloudprofiler.start(
service='recommendation_server', service_version='1.0.0', verbose=0)
logger.info("Successfully started Stackdriver Profiler.")
return
except (BaseException) as exc:
logger.info(
"Unable to start Stackdriver Profiler Python agent. " + str(exc))
if (retry < 4):
logger.info(
"Sleeping %d seconds to retry Stackdriver Profiler agent initialization" % (retry*10))
time.sleep(1)
else:
logger.warning(
"Could not initialize Stackdriver Profiler after retrying, giving up")
return
class RecommendationService(demo_pb2_grpc.RecommendationServiceServicer):
def ListRecommendations(self, request, context):
max_responses = 5
# fetch list of products from product catalog stub
cat_response = product_catalog_stub.ListProducts(demo_pb2.Empty())
product_ids = [x.id for x in cat_response.products]
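        # exclude products already referenced in the request so they are not recommended back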
filtered_products = list(set(product_ids)-set(request.product_ids))
num_products = len(filtered_products)
num_return = min(max_responses, num_products)
        # sample list of indices to return
indices = random.sample(range(num_products), num_return)
# fetch product ids from indices
prod_list = [filtered_products[i] for i in indices]
logger.info(
"[Recv ListRecommendations] product_ids={}".format(prod_list))
# build and return response
response = demo_pb2.ListRecommendationsResponse()
response.product_ids.extend(prod_list)
return response
def Check(self, request, context):
return health_pb2.HealthCheckResponse(
status=health_pb2.HealthCheckResponse.SERVING)
def Watch(self, request, context):
return health_pb2.HealthCheckResponse(
status=health_pb2.HealthCheckResponse.UNIMPLEMENTED)
if __name__ == "__main__":
logger.info("initializing recommendationservice")
try:
if "DISABLE_PROFILER" in os.environ:
raise KeyError()
else:
logger.info("Profiler enabled.")
initStackdriverProfiling()
except KeyError:
logger.info("Profiler disabled.")
try:
if "DISABLE_TRACING" in os.environ:
raise KeyError()
else:
logger.info("Tracing enabled.")
sampler = samplers.AlwaysOnSampler()
exporter = stackdriver_exporter.StackdriverExporter(
project_id=os.environ.get('GCP_PROJECT_ID'),
transport=AsyncTransport)
tracer_interceptor = server_interceptor.OpenCensusServerInterceptor(
sampler, exporter)
except (KeyError, DefaultCredentialsError):
logger.info("Tracing disabled.")
tracer_interceptor = server_interceptor.OpenCensusServerInterceptor()
try:
if "DISABLE_DEBUGGER" in os.environ:
raise KeyError()
else:
logger.info("Debugger enabled.")
try:
googleclouddebugger.enable(
module='recommendationserver',
version='1.0.0'
)
except (Exception, DefaultCredentialsError):
logger.error("Could not enable debugger")
logger.error(traceback.print_exc())
pass
except (Exception, DefaultCredentialsError):
logger.info("Debugger disabled.")
port = os.environ.get('PORT', "8080")
catalog_addr = os.environ.get('PRODUCT_CATALOG_SERVICE_ADDR', '')
if catalog_addr == "":
raise Exception(
'PRODUCT_CATALOG_SERVICE_ADDR environment variable not set')
logger.info("product catalog address: " + catalog_addr)
channel = grpc.insecure_channel(catalog_addr)
product_catalog_stub = demo_pb2_grpc.ProductCatalogServiceStub(channel)
# create gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
interceptors=(tracer_interceptor,))
# add class to gRPC server
service = RecommendationService()
demo_pb2_grpc.add_RecommendationServiceServicer_to_server(service, server)
health_pb2_grpc.add_HealthServicer_to_server(service, server)
# start server
logger.info("listening on port: " + port)
server.add_insecure_port('[::]:'+port)
server.start()
# keep alive
try:
while True:
time.sleep(10000)
except KeyboardInterrupt:
server.stop(0)
| [] | [] | [
"PORT",
"GCP_PROJECT_ID",
"PRODUCT_CATALOG_SERVICE_ADDR"
] | [] | ["PORT", "GCP_PROJECT_ID", "PRODUCT_CATALOG_SERVICE_ADDR"] | python | 3 | 0 | |
plugin.go | package main
import (
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
)
type (
// Config holds input parameters for the plugin
Config struct {
Actions []string
Vars map[string]string
Secrets map[string]string
InitOptions InitOptions
FmtOptions FmtOptions
Assertions Assertions
Cacert string
Sensitive bool
RoleARN string
RootDir string
Parallelism int
Targets []string
VarFiles []string
TerraformDataDir string
}
// Netrc is credentials for cloning
Netrc struct {
Machine string
Login string
Password string
}
// InitOptions include options for the Terraform's init command
InitOptions struct {
BackendConfig []string `json:"backend-config"`
Lock *bool `json:"lock"`
LockTimeout string `json:"lock-timeout"`
}
// FmtOptions fmt options for the Terraform's fmt command
FmtOptions struct {
List *bool `json:"list"`
Write *bool `json:"write"`
Diff *bool `json:"diff"`
Check *bool `json:"check"`
}
// Assertions will check number of expected additions, changes and deletions after terraform plan
Assertions struct {
AdditionsExact int `json:"additions_exact"`
ChangesExact int `json:"changes_exact"`
DeletionsExact int `json:"deletions_exact"`
}
// Plugin represents the plugin instance to be executed
Plugin struct {
Config Config
Netrc Netrc
Terraform Terraform
}
)
// Exec executes the plugin
func (p Plugin) Exec() error {
// Install specified version of terraform
if p.Terraform.Version != "" {
err := installTerraform(p.Terraform.Version)
if err != nil {
return err
}
}
if p.Config.RoleARN != "" {
assumeRole(p.Config.RoleARN)
}
// writing the .netrc file with Github credentials in it.
err := writeNetrc(p.Netrc.Machine, p.Netrc.Login, p.Netrc.Password)
if err != nil {
return err
}
var terraformDataDir string = ".terraform"
if p.Config.TerraformDataDir != "" {
terraformDataDir = p.Config.TerraformDataDir
os.Setenv("TF_DATA_DIR", p.Config.TerraformDataDir)
}
var commands []*exec.Cmd
commands = append(commands, exec.Command("terraform", "version"))
CopyTfEnv()
if p.Config.Cacert != "" {
commands = append(commands, installCaCert(p.Config.Cacert))
}
commands = append(commands, deleteCache(terraformDataDir))
commands = append(commands, initCommand(p.Config.InitOptions))
commands = append(commands, getModules())
// Add commands listed from Actions
for _, action := range p.Config.Actions {
switch action {
case "fmt":
commands = append(commands, tfFmt(p.Config))
case "validate":
commands = append(commands, tfValidate(p.Config))
case "plan":
commands = append(commands, tfPlan(p.Config, false))
case "plan-destroy":
commands = append(commands, tfPlan(p.Config, true))
case "apply":
commands = append(commands, tfApply(p.Config))
case "destroy":
commands = append(commands, tfDestroy(p.Config))
default:
return fmt.Errorf("valid actions are: fmt, validate, plan, apply, plan-destroy, destroy. You provided %s", action)
}
}
commands = append(commands, deleteCache(terraformDataDir))
for _, c := range commands {
if c.Dir == "" {
wd, err := os.Getwd()
if err == nil {
c.Dir = wd
}
}
if p.Config.RootDir != "" {
c.Dir = c.Dir + "/" + p.Config.RootDir
}
if !p.Config.Sensitive {
trace(c)
}
// Directly pass strerr to standard
c.Stderr = os.Stderr
// Capture stdout to use for assertions
var stdout []byte
var errStdout error
stdoutIn, _ := c.StdoutPipe()
err := c.Start()
if err != nil {
logrus.WithFields(logrus.Fields{
"error": err,
}).Fatal("Failed to execute a command")
}
// cmd.Wait() should be called only after we finish reading
// from stdoutIn and stderrIn.
// wg ensures that we finish
var wg sync.WaitGroup
wg.Add(1)
go func() {
stdout, errStdout = copyAndCapture(os.Stdout, stdoutIn)
wg.Done()
}()
wg.Wait()
err = c.Wait()
if err != nil {
logrus.WithFields(logrus.Fields{
"error": err,
}).Fatal("Failed to run a command")
}
if errStdout != nil {
logrus.WithFields(logrus.Fields{
"error": errStdout,
}).Fatal("Failed to capture stdout or stderr")
}
// Evaluate assertions only when running terraform plan
if c.Args[1] == "plan" {
p.evaluateAssertions(string(stdout))
}
logrus.Debug("Command completed successfully")
}
return nil
}
func (p Plugin) evaluateAssertions(planOutput string) {
var additions = 0
var changes = 0
var deletions = 0
updateToDateRe := regexp.MustCompile(`No changes\. Infrastructure is up-to-date\.`)
if !updateToDateRe.MatchString(planOutput) {
// Check if assertions are met based on "Plan: X to add, X to change, X to destroy." in output
planRe := regexp.MustCompile(`(?P<Additions>[0-9]+) to add, (?P<Changes>[0-9]+) to change, (?P<Deletions>[0-9]+) to destroy\.`)
matches := planRe.FindStringSubmatch(planOutput)
if len(matches) != 4 {
logrus.Fatal("Unexpected number of matches in terraform output when evaluating assertions")
}
additions, _ = strconv.Atoi(matches[1])
changes, _ = strconv.Atoi(matches[2])
deletions, _ = strconv.Atoi(matches[3])
}
if p.Config.Assertions.AdditionsExact > -1 {
if additions != p.Config.Assertions.AdditionsExact {
logrus.Fatal(fmt.Sprintf("FATAL: Expected %d additions but saw %d additions on terraform plan", p.Config.Assertions.AdditionsExact, additions))
} else {
fmt.Println(fmt.Sprintf("INFO: As expected saw %d additions on terraform plan.", additions))
}
}
if p.Config.Assertions.ChangesExact > -1 {
if changes != p.Config.Assertions.ChangesExact {
logrus.Fatal(fmt.Sprintf("FATAL: Expected %d changes but saw %d changes on terraform plan", p.Config.Assertions.ChangesExact, changes))
} else {
fmt.Println(fmt.Sprintf("INFO: As expected saw %d changes on terraform plan.", changes))
}
}
if p.Config.Assertions.DeletionsExact > -1 {
if deletions != p.Config.Assertions.DeletionsExact {
logrus.Fatal(fmt.Sprintf("FATAL: Expected %d deletions but saw %d deletions on terraform plan", p.Config.Assertions.DeletionsExact, deletions))
} else {
fmt.Println(fmt.Sprintf("INFO: As expected saw %d deletions on terraform plan.", deletions))
}
}
}
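// copyAndCapture mirrors everything read from r to w while also buffering it,
// so terraform output is streamed live and still available for plan assertions.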
func copyAndCapture(w io.Writer, r io.Reader) ([]byte, error) {
var out []byte
buf := make([]byte, 1024, 1024)
for {
n, err := r.Read(buf[:])
if n > 0 {
d := buf[:n]
out = append(out, d...)
_, err := w.Write(d)
if err != nil {
return out, err
}
}
if err != nil {
// Read returns io.EOF at the end of file, which is not an error for us
if err == io.EOF {
err = nil
}
return out, err
}
}
}
// CopyTfEnv creates lowercase copies of TF_VAR_ environment variables
func CopyTfEnv() {
tfVar := regexp.MustCompile(`^TF_VAR_.*$`)
for _, e := range os.Environ() {
pair := strings.SplitN(e, "=", 2)
if tfVar.MatchString(pair[0]) {
name := strings.Split(pair[0], "TF_VAR_")
os.Setenv(fmt.Sprintf("TF_VAR_%s", strings.ToLower(name[1])), pair[1])
}
}
}
func assumeRole(roleArn string) {
client := sts.New(session.New())
duration := time.Hour * 1
stsProvider := &stscreds.AssumeRoleProvider{
Client: client,
Duration: duration,
RoleARN: roleArn,
RoleSessionName: "drone",
}
value, err := credentials.NewCredentials(stsProvider).Get()
if err != nil {
logrus.WithFields(logrus.Fields{
"error": err,
}).Fatal("Error assuming role!")
}
os.Setenv("AWS_ACCESS_KEY_ID", value.AccessKeyID)
os.Setenv("AWS_SECRET_ACCESS_KEY", value.SecretAccessKey)
os.Setenv("AWS_SESSION_TOKEN", value.SessionToken)
}
func deleteCache(terraformDataDir string) *exec.Cmd {
return exec.Command(
"rm",
"-rf",
terraformDataDir,
)
}
func getModules() *exec.Cmd {
return exec.Command(
"terraform",
"get",
)
}
func initCommand(config InitOptions) *exec.Cmd {
args := []string{
"init",
}
for _, v := range config.BackendConfig {
args = append(args, fmt.Sprintf("-backend-config=%s", v))
}
// True is default in TF
if config.Lock != nil {
args = append(args, fmt.Sprintf("-lock=%t", *config.Lock))
}
// "0s" is default in TF
if config.LockTimeout != "" {
args = append(args, fmt.Sprintf("-lock-timeout=%s", config.LockTimeout))
}
// Fail Terraform execution on prompt
args = append(args, "-input=false")
return exec.Command(
"terraform",
args...,
)
}
func installCaCert(cacert string) *exec.Cmd {
ioutil.WriteFile("/usr/local/share/ca-certificates/ca_cert.crt", []byte(cacert), 0644)
return exec.Command(
"update-ca-certificates",
)
}
func trace(cmd *exec.Cmd) {
fmt.Println("$", strings.Join(cmd.Args, " "))
}
func tfApply(config Config) *exec.Cmd {
args := []string{
"apply",
}
for _, v := range config.Targets {
args = append(args, "--target", fmt.Sprintf("%s", v))
}
if config.Parallelism > 0 {
args = append(args, fmt.Sprintf("-parallelism=%d", config.Parallelism))
}
if config.InitOptions.Lock != nil {
args = append(args, fmt.Sprintf("-lock=%t", *config.InitOptions.Lock))
}
if config.InitOptions.LockTimeout != "" {
args = append(args, fmt.Sprintf("-lock-timeout=%s", config.InitOptions.LockTimeout))
}
args = append(args, getTfoutPath())
return exec.Command(
"terraform",
args...,
)
}
func tfDestroy(config Config) *exec.Cmd {
args := []string{
"destroy",
}
for _, v := range config.Targets {
args = append(args, fmt.Sprintf("-target=%s", v))
}
args = append(args, varFiles(config.VarFiles)...)
args = append(args, vars(config.Vars)...)
if config.Parallelism > 0 {
args = append(args, fmt.Sprintf("-parallelism=%d", config.Parallelism))
}
if config.InitOptions.Lock != nil {
args = append(args, fmt.Sprintf("-lock=%t", *config.InitOptions.Lock))
}
if config.InitOptions.LockTimeout != "" {
args = append(args, fmt.Sprintf("-lock-timeout=%s", config.InitOptions.LockTimeout))
}
args = append(args, "-force")
return exec.Command(
"terraform",
args...,
)
}
func tfPlan(config Config, destroy bool) *exec.Cmd {
args := []string{
"plan",
}
if destroy {
args = append(args, "-destroy")
} else {
args = append(args, fmt.Sprintf("-out=%s", getTfoutPath()))
}
for _, v := range config.Targets {
args = append(args, "--target", fmt.Sprintf("%s", v))
}
args = append(args, varFiles(config.VarFiles)...)
args = append(args, vars(config.Vars)...)
if config.Parallelism > 0 {
args = append(args, fmt.Sprintf("-parallelism=%d", config.Parallelism))
}
if config.InitOptions.Lock != nil {
args = append(args, fmt.Sprintf("-lock=%t", *config.InitOptions.Lock))
}
if config.InitOptions.LockTimeout != "" {
args = append(args, fmt.Sprintf("-lock-timeout=%s", config.InitOptions.LockTimeout))
}
return exec.Command(
"terraform",
args...,
)
}
func tfValidate(config Config) *exec.Cmd {
args := []string{
"validate",
}
for _, v := range config.VarFiles {
args = append(args, fmt.Sprintf("-var-file=%s", v))
}
for k, v := range config.Vars {
args = append(args, "-var", fmt.Sprintf("%s=%s", k, v))
}
return exec.Command(
"terraform",
args...,
)
}
func tfFmt(config Config) *exec.Cmd {
args := []string{
"fmt",
}
if config.FmtOptions.List != nil {
args = append(args, fmt.Sprintf("-list=%t", *config.FmtOptions.List))
}
if config.FmtOptions.Write != nil {
args = append(args, fmt.Sprintf("-write=%t", *config.FmtOptions.Write))
}
if config.FmtOptions.Diff != nil {
args = append(args, fmt.Sprintf("-diff=%t", *config.FmtOptions.Diff))
}
if config.FmtOptions.Check != nil {
args = append(args, fmt.Sprintf("-check=%t", *config.FmtOptions.Check))
}
return exec.Command(
"terraform",
args...,
)
}
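// getTfoutPath returns the filename used for the saved plan, prefixed with
// TF_DATA_DIR when a custom terraform data directory is configured.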
func getTfoutPath() string {
terraformDataDir := os.Getenv("TF_DATA_DIR")
if terraformDataDir == ".terraform" || terraformDataDir == "" {
return "plan.tfout"
} else {
return fmt.Sprintf("%s.plan.tfout", terraformDataDir)
}
}
func vars(vs map[string]string) []string {
var args []string
for k, v := range vs {
args = append(args, "-var", fmt.Sprintf("%s=%s", k, v))
}
return args
}
func varFiles(vfs []string) []string {
var args []string
for _, v := range vfs {
args = append(args, fmt.Sprintf("-var-file=%s", v))
}
return args
}
// helper function to write a netrc file.
// The following code comes from the official Git plugin for Drone:
// https://github.com/drone-plugins/drone-git/blob/8386effd2fe8c8695cf979427f8e1762bd805192/utils.go#L43-L68
func writeNetrc(machine, login, password string) error {
if machine == "" {
return nil
}
out := fmt.Sprintf(
netrcFile,
machine,
login,
password,
)
home := "/root"
u, err := user.Current()
if err == nil {
home = u.HomeDir
}
path := filepath.Join(home, ".netrc")
return ioutil.WriteFile(path, []byte(out), 0600)
}
const netrcFile = `
machine %s
login %s
password %s
`
| [
"\"TF_DATA_DIR\""
] | [] | [
"TF_DATA_DIR"
] | [] | ["TF_DATA_DIR"] | go | 1 | 0 | |
depend/zcash/qa/zcash/smoke_tests.py | #!/usr/bin/env python3
#
# Execute the standard smoke tests for Zcash releases.
#
import argparse
import datetime
import os
import requests
import subprocess
import sys
import time
import traceback
from decimal import Decimal
from slickrpc import Proxy
from slickrpc.exc import RpcException
DEFAULT_FEE = Decimal('0.0001')
URL_FAUCET_DONATION = 'https://faucet.testnet.z.cash/donations'
URL_FAUCET_TAP = 'https://faucet.testnet.z.cash/'
#
# Smoke test definitions
#
# (case, expected_mainnet, expected_testnet)
SMOKE_TESTS = [
# zcashd start/stop/restart flows
('1a', True, True), # zcashd start
('1b', True, True), # Graceful zcashd stop
('1c', True, True), # Ungraceful zcashd stop
('1d', True, True), # zcashd start; graceful zcashd stop; zcashd start
('1e', True, True), # zcashd start; ungraceful zcashd stop; zcashd start
# Control
('2a', True, True), # Run getinfo
('2b', True, True), # Run help
# Address generation
('3a', True, True), # Generate a Sprout z-addr
('3b', True, True), # Generate multiple Sprout z-addrs
('3c', True, True), # Generate a t-addr
('3d', True, True), # Generate multiple t-addrs
('3e', True, True), # Generate a Sapling z-addr
('3f', True, True), # Generate multiple Sapling z-addrs
# Transactions
('4a', True, True ), # Send funds from Sprout z-addr to same Sprout z-addr
('4b', True, True ), # Send funds from Sprout z-addr to a different Sprout z-addr
('4c', True, True ), # Send funds from Sprout z-addr to a t-addr
('4d', True, True ), # Send funds from t-addr to Sprout z-addr
('4e', True, True ), # Send funds from t-addr to t-addr
('4f', True, True ), # Send funds from t-addr to Sapling z-addr
('4g', True, True ), # Send funds from Sapling z-addr to same Sapling z-addr
('4h', True, True ), # Send funds from Sapling z-addr to a different Sapling z-addr
('4i', True, True ), # Send funds from Sapling z-addr to a t-addr
('4j', False, False), # Send funds from Sprout z-addr to Sapling z-addr
('4k', True, True ), # Send funds from Sprout z-addr to multiple Sprout z-addrs
('4l', True, True ), # Send funds from Sprout z-addr to multiple t-addrs
('4m', True, True ), # Send funds from Sprout z-addr to t-addr and Sprout z-addrs
('4n', False, False), # Send funds from Sprout z-addr to t-addr and Sapling z-addr
('4o', False, False), # Send funds from Sprout z-addr to multiple Sapling z-addrs
('4p', True, True ), # Send funds from t-addr to multiple t-addrs
('4q', True, True ), # Send funds from t-addr to multiple Sprout z-addrs
('4r', True, True ), # Send funds from t-addr to multiple Sapling z-addrs
('4s', False, False), # Send funds from t-addr to Sprout z-addr and Sapling z-addr
('4t', True, True ), # Send funds from Sapling z-addr to multiple Sapling z-addrs
('4u', False, False), # Send funds from Sapling z-addr to multiple Sprout z-addrs
('4v', True, True ), # Send funds from Sapling z-addr to multiple t-addrs
('4w', True, True ), # Send funds from Sapling z-addr to t-addr and Sapling z-addr
('4x', False, False), # Send funds from Sapling z-addr to Sapling z-addr and Sprout z-addr
('4y', True, True ), # Send funds from t-addr to Sprout z-addr using z_mergetoaddress
('4z', True, True ), # Send funds from 2 different t-addrs to Sprout z-addr using z_mergetoaddress
('4aa', False, False), # Send funds from the same 2 t-addrs to Sprout z-addr using z_mergetoaddress
('4bb', True, True ), # Send funds from 2 different t-addrs to Sapling z-addr using z_mergetoaddress
('4cc', True, True ), # Send funds from t-addr to Sapling z-addr using z_mergetoaddress
('4dd', True, True ), # Send funds from t-addr and Sprout z-addr to Sprout z-addr using z_mergetoaddress
('4ee', True, True ), # Send funds from t-addr and Sapling z-addr to Sapling z-addr using z_mergetoaddress
('4ff', True, True ), # Send funds from Sprout z-addr and Sprout z-addr to Sprout z-addr using z_mergetoaddress
('4gg', True, True ), # Send funds from Sapling z-addr and Sapling z-addr to Sapling z-addr using z_mergetoaddress
# Wallet
('5a', True, True), # After generating multiple z-addrs, run z_listaddresses
('5b', True, True), # Run z_validateaddress with a Sprout z-addr
('5c', True, True), # Run z_validateaddress with a Sapling z-addr
('5d', True, True), # After a transaction, run z_listunspent
('5e', True, True), # After a transaction, run z_listreceivedbyaddress
('5f', True, True), # After a transaction, run z_getbalance
('5g', True, True), # After a transaction, run z_gettotalbalance
('5h', True, True), # Run z_exportkey using a Sprout z-addr
('5i', True, True), # Run z_importkey using the zkey from a Sprout z-addr
('5j', True, True), # Run z_exportkey using a Sapling z-addr
('5k', True, True), # Run z_importkey using the zkey from a Sapling z-addr
('5l', True, True), # Run z_exportwallet
('5m', True, True), # Run z_importwallet
('5n', True, True), # Run z_shieldcoinbase
('5o', True, True), # Run getwalletinfo
# Network
('6a', True, True), # Run getpeerinfo
('6b', True, True), # Run getnetworkinfo
('6c', True, False), # Run getdeprecationinfo
('6d', True, True), # Run getconnectioncount
('6e', True, True), # Run getaddednodeinfo
# Mining
('7a', True, True), # Run getblocksubsidy
('7b', True, True), # Run getblocktemplate
('7c', True, True), # Run getmininginfo
('7d', True, True), # Run getnetworkhashps
('7e', True, True), # Run getnetworksolps
]
TIME_STARTED = datetime.datetime.now()
#
# Test helpers
#
def run_cmd(results, case, zcash, name, args=[]):
print('----- %s -----' % (datetime.datetime.now() - TIME_STARTED))
print('%s $ zcash-cli %s %s' % (
case.ljust(3),
name,
' '.join([str(arg) for arg in args],
)))
try:
res = zcash.__getattr__(name)(*args)
print(res)
print()
if results is not None and len(case) > 0:
results[case] = True
return res
except RpcException as e:
print('ERROR: %s' % str(e))
if results is not None and len(case) > 0:
results[case] = False
return None
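# Polls z_getbalance until the confirmed balance matches the unconfirmed (or any funds arrive),
# optionally prompting the operator whether to keep waiting after the timeout.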
def wait_for_balance(zcash, zaddr, expected=None):
print('Waiting for funds to %s...' % zaddr)
unconfirmed_balance = Decimal(zcash.z_getbalance(zaddr, 0)).quantize(Decimal('1.00000000'))
print('Expecting: %s; Unconfirmed Balance: %s' % (expected, unconfirmed_balance))
if expected is not None and unconfirmed_balance != expected:
print('WARNING: Unconfirmed balance does not match expected balance')
# Default timeout is 15 minutes
ttl = 900
while True:
balance = Decimal(zcash.z_getbalance(zaddr)).quantize(Decimal('1.00000000'))
if (expected is not None and balance == unconfirmed_balance) or (expected is None and balance > 0):
print('Received %s' % balance)
return balance
time.sleep(1)
ttl -= 1
if ttl == 0:
if zcash.automated:
# Reset timeout
ttl = 300
else:
# Ask user if they want to keep waiting
print()
print('Balance: %s Expected: %s' % (balance, expected))
ret = input('Do you wish to continue waiting? (Y/n) ')
if ret.lower() == 'n':
print('Address contained %s at timeout' % balance)
return balance
else:
# Wait another 5 minutes before asking again
ttl = 300
def wait_and_check_balance(results, case, zcash, addr, expected):
balance = wait_for_balance(zcash, addr, expected)
if balance != expected and results is not None and len(case) > 0:
results[case] = False
return balance
def wait_for_txid_operation(zcash, opid, timeout=300):
print('Waiting for async operation %s' % opid)
result = None
for _ in iter(range(timeout)):
results = zcash.z_getoperationresult([opid])
if len(results) > 0:
result = results[0]
break
time.sleep(1)
status = result['status']
if status == 'failed':
print('Operation failed')
print(result['error']['message'])
return None
elif status == 'success':
txid = result['result']['txid']
print('txid: %s' % txid)
return txid
def async_txid_cmd(results, case, zcash, name, args=[]):
opid = run_cmd(results, case, zcash, name, args)
# Some async commands return a dictionary containing the opid
if isinstance(opid, dict):
opid = opid['opid']
if opid is None:
if results is not None and len(case) > 0:
results[case] = False
return None
txid = wait_for_txid_operation(zcash, opid)
if txid is None:
if results is not None and len(case) > 0:
results[case] = False
return txid
def z_sendmany(results, case, zcash, from_addr, recipients):
return async_txid_cmd(results, case, zcash, 'z_sendmany', [
from_addr,
[{
'address': to_addr,
'amount': amount,
} for (to_addr, amount) in recipients]
])
def check_z_sendmany(results, case, zcash, from_addr, recipients):
txid = z_sendmany(results, case, zcash, from_addr, recipients)
if txid is None:
return [Decimal('0')]
return [wait_and_check_balance(results, case, zcash, to_addr, amount) for (to_addr, amount) in recipients]
def check_z_sendmany_parallel(results, zcash, runs):
# First attempt to create all the transactions
txids = [(run, z_sendmany(results, run[0], zcash, run[1], run[2])) for run in runs]
# Then wait for balance updates caused by successful transactions
return [
wait_and_check_balance(results, run[0], zcash, to_addr, amount) if txid is not None else Decimal('0')
for (run, txid) in txids
for (to_addr, amount) in run[2]]
def z_mergetoaddress(results, case, zcash, from_addrs, to_addr):
return async_txid_cmd(results, case, zcash, 'z_mergetoaddress', [from_addrs, to_addr])
def check_z_mergetoaddress(results, case, zcash, from_addrs, to_addr, amount):
txid = z_mergetoaddress(results, case, zcash, from_addrs, to_addr)
if txid is None:
return Decimal('0')
return wait_and_check_balance(results, case, zcash, to_addr, amount)
def check_z_mergetoaddress_parallel(results, zcash, runs):
# First attempt to create all the transactions
txids = [(run, z_mergetoaddress(results, run[0], zcash, run[1], run[2])) for run in runs]
# Then wait for balance updates caused by successful transactions
return [
wait_and_check_balance(results, run[0], zcash, run[2], run[3]) if txid is not None else Decimal('0')
for (run, txid) in txids]
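# Testnet faucet helpers: request TAZ for an address and look up the faucet's own return addresses.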
def tap_zfaucet(addr):
with requests.Session() as session:
# Get token to request TAZ from faucet with a given zcash address
response = session.get(URL_FAUCET_TAP)
if response.status_code != 200:
print("Error establishing session at:", URL_FAUCET_TAP)
os.sys.exit(1)
csrftoken = response.cookies['csrftoken']
# Request TAZ from the faucet
data_params = dict(csrfmiddlewaretoken=csrftoken, address=addr)
response2 = session.post(URL_FAUCET_TAP, data=data_params, headers=dict(Referer=URL_FAUCET_TAP))
if response2.status_code != 200:
print("Error tapping faucet at:", URL_FAUCET_TAP)
os.sys.exit(1)
def get_zfaucet_addrs():
with requests.Session() as session:
response = session.get(URL_FAUCET_DONATION)
if response.status_code != 200:
print("Error establishing session at:", URL_FAUCET_DONATION)
os.sys.exit(1)
data = response.json()
return data
def get_zfaucet_taddr():
return get_zfaucet_addrs()["t_address"]
def get_zfaucet_zsapaddr():
    # At the time of writing this, it appears these keys are backwards
return get_zfaucet_addrs()["z_address_legacy"]
def get_zfaucet_zsproutaddr():
    # At the time of writing this, it appears these keys are backwards
return get_zfaucet_addrs()["z_address_sapling"]
#
# Test runners
#
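# Each runner returns a dict mapping smoke-test case IDs to pass/fail results, compared against SMOKE_TESTS in main().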
def simple_commands(zcash):
results = {}
run_cmd(results, '2a', zcash, 'getinfo'),
run_cmd(results, '2b', zcash, 'help'),
run_cmd(results, '5o', zcash, 'getwalletinfo'),
run_cmd(results, '6a', zcash, 'getpeerinfo'),
run_cmd(results, '6b', zcash, 'getnetworkinfo'),
run_cmd(results, '6c', zcash, 'getdeprecationinfo'),
run_cmd(results, '6d', zcash, 'getconnectioncount'),
run_cmd(results, '6e', zcash, 'getaddednodeinfo', [False]),
run_cmd(results, '7a', zcash, 'getblocksubsidy'),
run_cmd(results, '7c', zcash, 'getmininginfo'),
run_cmd(results, '7d', zcash, 'getnetworkhashps'),
run_cmd(results, '7e', zcash, 'getnetworksolps'),
return results
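# Walks a chain of transfers across the transparent, Sprout and Sapling pools, checking balances and expected
# failures along the way, then returns any leftover funds to chain_end.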
def transaction_chain(zcash):
results = {}
# Generate the various addresses we will use
sprout_zaddr_1 = run_cmd(results, '3a', zcash, 'z_getnewaddress', ['sprout'])
sprout_zaddr_2 = run_cmd(results, '3b', zcash, 'z_getnewaddress', ['sprout'])
sprout_zaddr_3 = run_cmd(results, '3b', zcash, 'z_getnewaddress', ['sprout'])
taddr_1 = run_cmd(results, '3c', zcash, 'getnewaddress')
taddr_2 = run_cmd(results, '3d', zcash, 'getnewaddress')
taddr_3 = run_cmd(results, '3d', zcash, 'getnewaddress')
taddr_4 = run_cmd(results, '3d', zcash, 'getnewaddress')
taddr_5 = run_cmd(results, '3d', zcash, 'getnewaddress')
sapling_zaddr_1 = run_cmd(results, '3e', zcash, 'z_getnewaddress', ['sapling'])
sapling_zaddr_2 = run_cmd(results, '3f', zcash, 'z_getnewaddress', ['sapling'])
sapling_zaddr_3 = run_cmd(results, '3f', zcash, 'z_getnewaddress', ['sapling'])
# Check that the zaddrs are all listed
zaddrs = run_cmd(results, '5a', zcash, 'z_listaddresses')
if (sprout_zaddr_1 not in zaddrs or
sprout_zaddr_2 not in zaddrs or
sapling_zaddr_1 not in zaddrs or
sapling_zaddr_2 not in zaddrs):
results['5a'] = False
# Validate the addresses
ret = run_cmd(results, '5b', zcash, 'z_validateaddress', [sprout_zaddr_1])
if not ret['isvalid'] or ret['type'] != 'sprout':
results['5b'] = False
ret = run_cmd(results, '5c', zcash, 'z_validateaddress', [sapling_zaddr_1])
if not ret['isvalid'] or ret['type'] != 'sapling':
results['5c'] = False
# Set up beginning and end of the chain
print('#')
print('# Initialising transaction chain')
print('#')
print()
if zcash.use_faucet:
print('Tapping the testnet faucet for testing funds...')
chain_end = get_zfaucet_taddr()
tap_zfaucet(sprout_zaddr_1)
print('Done! Leftover funds will be sent back to the faucet.')
else:
chain_end = input('Type or paste transparent address where leftover funds should be sent: ')
if not zcash.validateaddress(chain_end)['isvalid']:
print('Invalid transparent address')
return results
print()
print('Please send at least 0.01 ZEC/TAZ to the following address:')
print(sprout_zaddr_1)
print()
input('Press Enter once the funds have been sent.')
print()
# Wait to receive starting balance
sprout_balance = wait_for_balance(zcash, sprout_zaddr_1)
starting_balance = sprout_balance
#
# Start the transaction chain!
#
print()
print('#')
print('# Starting transaction chain')
print('#')
print()
try:
#
# First, split the funds across all three pools
#
# Sprout -> taddr
taddr_balance = check_z_sendmany(
results, '4c', zcash, sprout_zaddr_1, [(taddr_1, (starting_balance / Decimal('10')) * Decimal('6'))])[0]
sprout_balance -= taddr_balance + DEFAULT_FEE
balance = Decimal(run_cmd(results, '5f', zcash, 'z_getbalance', [sprout_zaddr_1])).quantize(Decimal('1.00000000'))
if balance != sprout_balance:
results['5f'] = False
# taddr -> Sapling
        # Send it all here because z_sendmany picks a new t-addr for change
sapling_balance = check_z_sendmany(
results, '4f', zcash, taddr_1, [(sapling_zaddr_1, taddr_balance - DEFAULT_FEE)])[0]
taddr_balance = Decimal('0')
# Sapling -> taddr
taddr_balance = check_z_sendmany(
results, '4i', zcash, sapling_zaddr_1, [(taddr_1, (starting_balance / Decimal('10')) * Decimal('3'))])[0]
sapling_balance -= taddr_balance + DEFAULT_FEE
#
# Intra-pool tests
#
# Sprout -> same Sprout
# Sapling -> same Sapling
(sprout_balance, sapling_balance) = check_z_sendmany_parallel(results, zcash, [
('4a', sprout_zaddr_1, [(sprout_zaddr_1, sprout_balance - DEFAULT_FEE)]),
('4g', sapling_zaddr_1, [(sapling_zaddr_1, sapling_balance - DEFAULT_FEE)]),
])
# Sprout -> different Sprout
# taddr -> different taddr
# Sapling -> different Sapling
(sprout_balance, taddr_balance, sapling_balance) = check_z_sendmany_parallel(results, zcash, [
('4b', sprout_zaddr_1, [(sprout_zaddr_2, sprout_balance - DEFAULT_FEE)]),
('4e', taddr_1, [(taddr_2, taddr_balance - DEFAULT_FEE)]),
('4h', sapling_zaddr_1, [(sapling_zaddr_2, sapling_balance - DEFAULT_FEE)]),
])
# Sprout -> multiple Sprout
# taddr -> multiple taddr
# Sapling -> multiple Sapling
check_z_sendmany_parallel(results, zcash, [
('4k', sprout_zaddr_2, [
(sprout_zaddr_1, starting_balance / Decimal('10')),
(sprout_zaddr_3, starting_balance / Decimal('10')),
]),
('4p', taddr_2, [
(taddr_1, starting_balance / Decimal('10')),
(taddr_3, taddr_balance - (starting_balance / Decimal('10')) - DEFAULT_FEE),
]),
('4t', sapling_zaddr_2, [
(sapling_zaddr_1, starting_balance / Decimal('10')),
(sapling_zaddr_3, starting_balance / Decimal('10')),
]),
])
sprout_balance -= DEFAULT_FEE
taddr_balance -= DEFAULT_FEE
sapling_balance -= DEFAULT_FEE
# multiple Sprout -> Sprout
# multiple Sapling -> Sapling
# multiple taddr -> taddr
check_z_mergetoaddress_parallel(results, zcash, [
('4ff', [sprout_zaddr_1, sprout_zaddr_3], sprout_zaddr_2, sprout_balance - DEFAULT_FEE),
('4gg', [sapling_zaddr_1, sapling_zaddr_3], sapling_zaddr_2, sapling_balance - DEFAULT_FEE),
('', [taddr_1, taddr_3], taddr_2, taddr_balance - DEFAULT_FEE),
])
sprout_balance -= DEFAULT_FEE
sapling_balance -= DEFAULT_FEE
taddr_balance -= DEFAULT_FEE
#
# Now test a bunch of failing cases
#
# Sprout -> Sapling
txid = z_sendmany(results, '4j', zcash, sprout_zaddr_2, [(sapling_zaddr_1, sprout_balance - DEFAULT_FEE)])
if txid is not None:
print('Should have failed')
return results
# Sprout -> taddr and Sapling
txid = z_sendmany(results, '4n', zcash, sprout_zaddr_2, [
(taddr_2, starting_balance / Decimal('10')),
(sapling_zaddr_1, starting_balance / Decimal('10')),
])
if txid is not None:
print('Should have failed')
return results
# Sprout -> multiple Sapling
txid = z_sendmany(results, '4o', zcash, sprout_zaddr_2, [
(sapling_zaddr_1, starting_balance / Decimal('10')),
(sapling_zaddr_2, starting_balance / Decimal('10')),
])
if txid is not None:
print('Should have failed')
return results
# taddr -> Sprout and Sapling
txid = z_sendmany(results, '4s', zcash, taddr_2, [
(sprout_zaddr_1, starting_balance / Decimal('10')),
(sapling_zaddr_1, starting_balance / Decimal('10')),
])
if txid is not None:
print('Should have failed')
return results
# Sapling -> multiple Sprout
txid = z_sendmany(results, '4u', zcash, sapling_zaddr_2, [
(sprout_zaddr_1, starting_balance / Decimal('10')),
(sprout_zaddr_2, starting_balance / Decimal('10')),
])
if txid is not None:
print('Should have failed')
return results
# Sapling -> Sapling and Sprout
txid = z_sendmany(results, '4x', zcash, sapling_zaddr_2, [
(sapling_zaddr_1, starting_balance / Decimal('10')),
(sprout_zaddr_1, starting_balance / Decimal('10')),
])
if txid is not None:
print('Should have failed')
return results
# multiple same taddr -> Sprout
txid = z_mergetoaddress(results, '4aa', zcash, [taddr_2, taddr_2], sprout_zaddr_2)
if txid is not None:
print('Should have failed')
return results
#
# Inter-pool tests
#
# Sprout -> taddr and Sprout
# Sapling -> taddr and Sapling
check_z_sendmany_parallel(results, zcash, [
('4m', sprout_zaddr_2, [
(taddr_1, starting_balance / Decimal('10')),
(sprout_zaddr_1, starting_balance / Decimal('10')),
]),
('4w', sapling_zaddr_2, [
(taddr_3, starting_balance / Decimal('10')),
(sapling_zaddr_1, starting_balance / Decimal('10')),
]),
])
sprout_balance -= (starting_balance / Decimal('10')) + DEFAULT_FEE
sapling_balance -= (starting_balance / Decimal('10')) + DEFAULT_FEE
taddr_balance += (starting_balance / Decimal('10')) * Decimal('2')
# taddr and Sprout -> Sprout
# taddr and Sapling -> Sapling
check_z_mergetoaddress_parallel(results, zcash, [
('4dd', [taddr_1, sprout_zaddr_1], sprout_zaddr_2, sprout_balance + (starting_balance / Decimal('10')) - DEFAULT_FEE),
('4ee', [taddr_3, sapling_zaddr_1], sapling_zaddr_2, sapling_balance + (starting_balance / Decimal('10')) - DEFAULT_FEE),
])
sprout_balance += (starting_balance / Decimal('10')) - DEFAULT_FEE
sapling_balance += (starting_balance / Decimal('10')) - DEFAULT_FEE
taddr_balance -= (starting_balance / Decimal('10')) * Decimal('2')
# Sprout -> multiple taddr
# Sapling -> multiple taddr
check_z_sendmany_parallel(results, zcash, [
('4l', sprout_zaddr_2, [
(taddr_1, (starting_balance / Decimal('10'))),
(taddr_3, (starting_balance / Decimal('10'))),
]),
('4v', sapling_zaddr_2, [
(taddr_4, (starting_balance / Decimal('10'))),
(taddr_5, (starting_balance / Decimal('10'))),
]),
])
sprout_balance -= ((starting_balance / Decimal('10')) * Decimal('2')) + DEFAULT_FEE
sapling_balance -= ((starting_balance / Decimal('10')) * Decimal('2')) + DEFAULT_FEE
taddr_balance += (starting_balance / Decimal('10')) * Decimal('4')
# multiple taddr -> Sprout
# multiple taddr -> Sapling
check_z_mergetoaddress_parallel(results, zcash, [
('4z', [taddr_1, taddr_3], sprout_zaddr_2, sprout_balance + ((starting_balance / Decimal('10')) * Decimal('2')) - DEFAULT_FEE),
('4bb', [taddr_4, taddr_5], sapling_zaddr_2, sapling_balance + ((starting_balance / Decimal('10')) * Decimal('2')) - DEFAULT_FEE),
])
sprout_balance += ((starting_balance / Decimal('10')) * Decimal('2')) - DEFAULT_FEE
sapling_balance += ((starting_balance / Decimal('10')) * Decimal('2')) - DEFAULT_FEE
taddr_balance -= (starting_balance / Decimal('10')) * Decimal('4')
# taddr -> Sprout
check_z_sendmany_parallel(results, zcash, [
('4d', taddr_2, [(sprout_zaddr_3, taddr_balance - DEFAULT_FEE)]),
])
sprout_balance += taddr_balance - DEFAULT_FEE
taddr_balance = Decimal('0')
# multiple Sprout -> taddr
# multiple Sapling -> taddr
check_z_mergetoaddress_parallel(None, zcash, [
('', [sprout_zaddr_1, sprout_zaddr_2, sprout_zaddr_3], taddr_1, sprout_balance - DEFAULT_FEE),
('', [sapling_zaddr_1, sapling_zaddr_2, sapling_zaddr_3], taddr_2, sapling_balance - DEFAULT_FEE),
])
taddr_balance = sprout_balance + sapling_balance - (2 * DEFAULT_FEE)
sprout_balance = Decimal('0')
sapling_balance = Decimal('0')
# taddr -> multiple Sprout
# taddr -> multiple Sapling
taddr_1_balance = Decimal(zcash.z_getbalance(taddr_1)).quantize(Decimal('1.00000000'))
taddr_2_balance = Decimal(zcash.z_getbalance(taddr_2)).quantize(Decimal('1.00000000'))
check_z_sendmany_parallel(results, zcash, [
('4q', taddr_1, [
(sprout_zaddr_1, (starting_balance / Decimal('10'))),
(sprout_zaddr_2, taddr_1_balance - (starting_balance / Decimal('10')) - DEFAULT_FEE),
]),
('4r', taddr_2, [
(sapling_zaddr_1, (starting_balance / Decimal('10'))),
(sapling_zaddr_2, taddr_2_balance - (starting_balance / Decimal('10')) - DEFAULT_FEE),
]),
])
sprout_balance = taddr_1_balance - DEFAULT_FEE
sapling_balance = taddr_2_balance - DEFAULT_FEE
taddr_balance = Decimal('0')
# multiple Sprout -> taddr
# multiple Sapling -> taddr
check_z_mergetoaddress_parallel(None, zcash, [
('', [sprout_zaddr_1, sprout_zaddr_2], taddr_1, sprout_balance - DEFAULT_FEE),
('', [sapling_zaddr_1, sapling_zaddr_2], taddr_2, sapling_balance - DEFAULT_FEE),
])
taddr_balance = sprout_balance + sapling_balance - (Decimal('2') * DEFAULT_FEE)
sprout_balance = Decimal('0')
sapling_balance = Decimal('0')
# z_mergetoaddress taddr -> Sprout
# z_mergetoaddress taddr -> Sapling
taddr_1_balance = Decimal(zcash.z_getbalance(taddr_1)).quantize(Decimal('1.00000000'))
taddr_2_balance = Decimal(zcash.z_getbalance(taddr_2)).quantize(Decimal('1.00000000'))
check_z_mergetoaddress_parallel(results, zcash, [
('4y', [taddr_1], sprout_zaddr_1, taddr_1_balance - DEFAULT_FEE),
('4cc', [taddr_2], sapling_zaddr_1, taddr_2_balance - DEFAULT_FEE),
])
sprout_balance = taddr_1_balance - DEFAULT_FEE
sapling_balance = taddr_2_balance - DEFAULT_FEE
taddr_balance = Decimal('0')
except Exception as e:
print('Error: %s' % e)
traceback.print_exc()
finally:
#
# End the chain by returning the remaining funds
#
print()
print('#')
print('# Finishing transaction chain')
print('#')
all_addrs = [
sprout_zaddr_1, sprout_zaddr_2, sprout_zaddr_3,
taddr_1, taddr_2, taddr_3, taddr_4, taddr_5,
sapling_zaddr_1, sapling_zaddr_2, sapling_zaddr_3,
]
print()
print('Waiting for all transactions to be mined')
for addr in all_addrs:
balance = Decimal(zcash.z_getbalance(addr, 0)).quantize(Decimal('1.00000000'))
if balance > 0:
wait_for_balance(zcash, addr, balance)
print()
print('Returning remaining balance minus fees')
for addr in all_addrs:
balance = Decimal(zcash.z_getbalance(addr)).quantize(Decimal('1.00000000'))
if balance > 0:
z_sendmany(None, '', zcash, addr, [(chain_end, balance - DEFAULT_FEE)])
return results
#
# Test stages
#
STAGES = [
'simple-commands',
'transaction-chain'
]
STAGE_COMMANDS = {
'simple-commands': simple_commands,
'transaction-chain': transaction_chain,
}
def run_stage(stage, zcash):
print('Running stage %s' % stage)
print('=' * (len(stage) + 14))
print()
cmd = STAGE_COMMANDS[stage]
if cmd is not None:
ret = cmd(zcash)
else:
print('WARNING: stage not yet implemented, skipping')
ret = {}
print()
print('-' * (len(stage) + 15))
print('Finished stage %s' % stage)
print()
return ret
#
# Zcash wrapper
#
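# Thin wrapper that launches a zcashd process with RPC credentials, waits for the RPC interface to come up,
# and forwards RPC calls through the Proxy object.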
class ZcashNode(object):
def __init__(self, args, zcashd=None, zcash_cli=None):
if zcashd is None:
zcashd = os.getenv('ZCASHD', 'zcashd')
if zcash_cli is None:
zcash_cli = os.getenv('ZCASHCLI', 'zcash-cli')
self.__datadir = args.datadir
self.__wallet = args.wallet
self.__testnet = not args.mainnet
self.__zcashd = zcashd
self.__zcash_cli = zcash_cli
self.__process = None
self.__proxy = None
self.automated = args.automate
self.use_faucet = args.faucet
def start(self, extra_args=None, timewait=None):
if self.__proxy is not None:
raise RuntimeError('Already started')
rpcuser = 'st'
rpcpassword = 'st'
args = [
self.__zcashd,
'-datadir=%s' % self.__datadir,
'-wallet=%s' % self.__wallet,
'-rpcuser=%s' % rpcuser,
'-rpcpassword=%s' % rpcpassword,
'-showmetrics=0',
'-experimentalfeatures',
'-zmergetoaddress',
]
if self.__testnet:
args.append('-testnet=1')
if extra_args is not None:
args.extend(extra_args)
self.__process = subprocess.Popen(args)
cli_args = [
self.__zcash_cli,
'-datadir=%s' % self.__datadir,
'-rpcuser=%s' % rpcuser,
'-rpcpassword=%s' % rpcpassword,
'-rpcwait',
]
if self.__testnet:
cli_args.append('-testnet=1')
cli_args.append('getblockcount')
devnull = open('/dev/null', 'w+')
if os.getenv('PYTHON_DEBUG', ''):
print('start_node: zcashd started, calling zcash-cli -rpcwait getblockcount')
subprocess.check_call(cli_args, stdout=devnull)
if os.getenv('PYTHON_DEBUG', ''):
print('start_node: calling zcash-cli -rpcwait getblockcount returned')
devnull.close()
rpcuserpass = '%s:%s' % (rpcuser, rpcpassword)
rpchost = '127.0.0.1'
rpcport = 18232 if self.__testnet else 8232
url = 'http://%s@%s:%d' % (rpcuserpass, rpchost, rpcport)
if timewait is not None:
self.__proxy = Proxy(url, timeout=timewait)
else:
self.__proxy = Proxy(url)
def stop(self):
if self.__proxy is None:
raise RuntimeError('Not running')
self.__proxy.stop()
self.__process.wait()
self.__proxy = None
self.__process = None
def __getattr__(self, name):
if self.__proxy is None:
raise RuntimeError('Not running')
return self.__proxy.__getattr__(name)
#
# Test driver
#
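# Parse CLI arguments, start zcashd, run the requested stages and print a pass/fail table against SMOKE_TESTS.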
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--automate', action='store_true', help='Run the smoke tests without a user present')
parser.add_argument('--list-stages', dest='list', action='store_true')
parser.add_argument('--mainnet', action='store_true', help='Use mainnet instead of testnet')
parser.add_argument('--use-faucet', dest='faucet', action='store_true', help='Use testnet faucet as source of funds')
parser.add_argument('--wallet', default='wallet.dat', help='Wallet file to use (within data directory)')
parser.add_argument('datadir', help='Data directory to use for smoke testing', default=None)
parser.add_argument('stage', nargs='*', default=STAGES, help='One of %s' % STAGES)
args = parser.parse_args()
# Check for list
if args.list:
for s in STAGES:
print(s)
sys.exit(0)
# Check validity of stages
for s in args.stage:
if s not in STAGES:
print("Invalid stage '%s' (choose from %s)" % (s, STAGES))
sys.exit(1)
# Don't allow using the default wallet.dat in mainnet mode
if args.mainnet and args.wallet == 'wallet.dat':
print('Cannot use wallet.dat as wallet file when running mainnet tests. Keep your funds safe!')
sys.exit(1)
# Testnet faucet cannot be used in mainnet mode
if args.mainnet and args.faucet:
print('Cannot use testnet faucet when running mainnet tests.')
sys.exit(1)
# Enforce correctly-configured automatic mode
if args.automate:
if args.mainnet:
print('Cannot yet automate mainnet tests.')
sys.exit(1)
if not args.faucet:
print('--automate requires --use-faucet')
sys.exit(1)
# Start zcashd
zcash = ZcashNode(args)
print('Start time: %s' % TIME_STARTED)
print('Starting zcashd...')
zcash.start()
print()
# Run the stages
results = {}
for s in args.stage:
results.update(run_stage(s, zcash))
# Stop zcashd
print('Stopping zcashd...')
zcash.stop()
passed = True
print()
print('========================')
print(' Results')
print('========================')
print('Case | Expected | Actual')
print('========================')
for test_case in SMOKE_TESTS:
case = test_case[0]
expected = test_case[1 if args.mainnet else 2]
if case in results:
actual = results[case]
actual_str = '%s%s' % (
'Passed' if actual else 'Failed',
'' if expected == actual else '!!!'
)
passed &= (expected == actual)
else:
actual_str = ' N/A'
print('%s | %s | %s' % (
case.ljust(4),
' Passed ' if expected else ' Failed ',
actual_str
))
if not passed:
print()
print("!!! One or more smoke test stages failed !!!")
sys.exit(1)
if __name__ == '__main__':
main()
| [] | [] | ["ZCASHD", "PYTHON_DEBUG", "ZCASHCLI"] | [] | ["ZCASHD", "PYTHON_DEBUG", "ZCASHCLI"] | python | 3 | 0 |
pytorch_lightning/accelerators/gpu.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any
import torch
import pytorch_lightning as pl
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.plugins import DataParallelPlugin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
_log = logging.getLogger(__name__)
class GPUAccelerator(Accelerator):
""" Accelerator for GPU devices. """
def setup(self, trainer: 'pl.Trainer', model: 'pl.LightningModule') -> None:
"""
Raises:
MisconfigurationException:
If the selected device is not GPU.
"""
if "cuda" not in str(self.root_device):
raise MisconfigurationException(f"Device should be GPU, got {self.root_device} instead")
self.set_nvidia_flags(trainer.local_rank)
torch.cuda.set_device(self.root_device)
return super().setup(trainer, model)
def on_train_start(self) -> None:
# clear cache before training
# use context because of:
# https://discuss.pytorch.org/t/out-of-memory-when-i-use-torch-cuda-empty-cache/57898
with torch.cuda.device(self.root_device):
torch.cuda.empty_cache()
def teardown(self) -> None:
self.lightning_module.cpu()
# clean up memory
with torch.cuda.device(self.root_device):
torch.cuda.empty_cache()
@staticmethod
def set_nvidia_flags(local_rank: int) -> None:
# set the correct cuda visible devices (using pci order)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
all_gpu_ids = ",".join([str(x) for x in range(torch.cuda.device_count())])
devices = os.getenv("CUDA_VISIBLE_DEVICES", all_gpu_ids)
_log.info(f"LOCAL_RANK: {local_rank} - CUDA_VISIBLE_DEVICES: [{devices}]")
def to_device(self, batch: Any) -> Any:
# no need to transfer batch to device in DP mode
# TODO: Add support to allow batch transfer to device in Lightning for DP mode.
if not isinstance(self.training_type_plugin, DataParallelPlugin):
batch = super().to_device(batch)
return batch
| [] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 |
machine-learning-scripts/examples/tf2-gtsrb-cnn-simple.py | # # Traffic sign classification with CNNs
#
# In this notebook, we'll train a convolutional neural network (CNN,
# ConvNet) to classify images of traffic signs from [The German
# Traffic Sign Recognition
# Benchmark](http://benchmark.ini.rub.de/?section=gtsrb&subsection=news)
# using TensorFlow 2.0 / Keras. This notebook is largely based on the
# blog post [Building powerful image classification models using very
# little
# data](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html) by François Chollet.
#
# **Note that using a GPU with this notebook is highly recommended.**
#
# First, the needed imports.
import os
import datetime
import pathlib
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.callbacks import TensorBoard
import numpy as np
from PIL import Image
print('Using Tensorflow version: {}, and Keras version: {}.'.format(
tf.__version__, keras.__version__))
# # Data
#
# The training dataset consists of 5535 images of traffic signs of
# varying size. There are 43 different types of traffic signs. In
# addition, the validation set consists of 999 images.
if 'DATADIR' in os.environ:
DATADIR = os.environ['DATADIR']
else:
DATADIR = "/scratch/project_2003747/data/"
datapath = os.path.join(DATADIR, "gtsrb/train-5535/")
assert os.path.exists(datapath), "Data not found at "+datapath
nimages = dict()
(nimages['train'], nimages['validation']) = (5535, 999)
# ### Parameters
INPUT_IMAGE_SIZE = [75, 75, 3]
BATCH_SIZE = 50
NUM_CLASSES = 43
# ### Image paths and labels
def get_paths(dataset):
data_root = pathlib.Path(datapath+dataset)
image_paths = list(data_root.glob('*/*'))
image_paths = [str(path) for path in image_paths]
image_count = len(image_paths)
assert image_count == nimages[dataset], \
"Found {} images, expected {}".format(image_count, nimages[dataset])
return image_paths
image_paths = dict()
image_paths['train'] = get_paths('train')
image_paths['validation'] = get_paths('validation')
label_names = sorted(item.name for item in
pathlib.Path(datapath+'train').glob('*/') if
item.is_dir())
label_to_index = dict((name, index) for index, name in enumerate(label_names))
def get_labels(dataset):
return [label_to_index[pathlib.Path(path).parent.name]
for path in image_paths[dataset]]
image_labels = dict()
image_labels['train'] = get_labels('train')
image_labels['validation'] = get_labels('validation')
# ### Data augmentation
#
# We need to resize all training and validation images to a fixed
# size.
#
# Then, to make the most of our limited number of training examples,
# we'll apply random transformations (random crop and brightness changes) to
# them each time we are looping over them. This way, we "augment" our
# training dataset to contain more data. There are various
# transformations readily available in TensorFlow, see
# [tf.image](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/image)
# for more information.
def _load_image(path, label):
image = Image.open(path.numpy())
return np.array(image), label
def load_image(path, label):
return tf.py_function(_load_image, (path, label), (tf.float32, tf.int32))
def preprocess_image(image, augment):
image.set_shape([None, None, None])
if augment:
image = tf.image.resize(image, [80, 80])
image = tf.image.random_crop(image, INPUT_IMAGE_SIZE)
#image = tf.image.random_flip_left_right(image)
image = tf.image.random_brightness(image, 0.1)
image = tf.clip_by_value(image, 0.0, 255.0)
else:
image = tf.image.resize(image, INPUT_IMAGE_SIZE[:2])
image /= 255.0 # normalize to [0,1] range
image.set_shape(INPUT_IMAGE_SIZE)
return image
def process_and_augment_image(image, label):
label.set_shape([])
return preprocess_image(image, True), label
def process_and_not_augment_image(image, label):
label.set_shape([])
return preprocess_image(image, False), label
# ### TF Datasets
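# Build the tf.data input pipelines: load images lazily, preprocess (with augmentation for training only),
# then shuffle and batch.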
train_dataset = tf.data.Dataset.from_tensor_slices(
(image_paths['train'], image_labels['train']))
train_dataset = train_dataset.map(load_image,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
train_dataset = train_dataset.map(process_and_augment_image,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
train_dataset = train_dataset.shuffle(500).batch(BATCH_SIZE,
drop_remainder=True)
validation_dataset = tf.data.Dataset.from_tensor_slices(
(image_paths['validation'], image_labels['validation']))
validation_dataset = validation_dataset.map(load_image,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
validation_dataset = validation_dataset.map(process_and_not_augment_image,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=True)
# ## Train a small CNN from scratch
#
# Similarly as with MNIST digits, we can start from scratch and train
# a CNN for the classification task.
#
# ### Initialization
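# Three Conv2D+MaxPooling blocks, then Flatten -> Dense(128) -> Dropout(0.5) -> 43-way softmax.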
inputs = keras.Input(shape=INPUT_IMAGE_SIZE)
x = layers.Conv2D(32, (3, 3), activation='relu')(inputs)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Conv2D(32, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Conv2D(64, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Flatten()(x)
x = layers.Dense(128, activation='relu')(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(43, activation='softmax')(x)
model = keras.Model(inputs=inputs, outputs=outputs,
name="gtsrb-cnn-simple")
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
print(model.summary())
# ### Learning
# We'll use TensorBoard to visualize our progress during training.
logdir = os.path.join(os.getcwd(), "logs", "gtsrb-cnn-simple-" +
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
print('TensorBoard log directory:', logdir)
os.makedirs(logdir)
callbacks = [TensorBoard(log_dir=logdir)]
epochs = 20
history = model.fit(train_dataset, epochs=epochs,
validation_data=validation_dataset,
callbacks=callbacks, verbose=2)
fname = "gtsrb-cnn-simple.h5"
print('Saving model to', fname)
model.save(fname)
| [] | [] | ["DATADIR"] | [] | ["DATADIR"] | python | 1 | 0 |
inference_speed.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from dataset.dataloader import load_tfds
from time import time
import argparse
from nets.simple_basline import SimpleBaseline
from nets.evopose2d import EvoPose
from nets.hrnet import HRNet
from utils import detect_hardware
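# Builds the selected model (simple_baseline / hrnet / evopose), streams the validation split through it
# and reports inference throughput in frames per second.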
def speed_test(strategy, cfg, split='val', n=1000):
with strategy.scope():
if cfg.MODEL.TYPE == 'simple_baseline':
model = SimpleBaseline(cfg)
elif cfg.MODEL.TYPE == 'hrnet':
model = HRNet(cfg)
elif cfg.MODEL.TYPE == 'evopose':
model = EvoPose(cfg)
cfg.DATASET.OUTPUT_SHAPE = model.output_shape[1:]
ds = load_tfds(cfg, split, det=cfg.VAL.DET,
predict_kp=True, drop_remainder=cfg.VAL.DROP_REMAINDER)
ds = strategy.experimental_distribute_dataset(ds)
@tf.function
def predict(imgs, flip=False):
if flip:
imgs = imgs[:, :, ::-1, :]
return model(imgs, training=False)
for count, batch in enumerate(ds):
if count == 1: # skip first pass
ti = time()
_, imgs, _, _, scores = batch
hms = strategy.run(predict, args=(imgs,)).numpy()
if cfg.VAL.FLIP:
flip_hms = strategy.run(predict, args=(imgs, True,)).numpy()
flip_hms = flip_hms[:, :, ::-1, :]
tmp = flip_hms.copy()
for i in range(len(cfg.DATASET.KP_FLIP)):
flip_hms[:, :, :, i] = tmp[:, :, :, cfg.DATASET.KP_FLIP[i]]
# shift to align features
flip_hms[:, :, 1:, :] = flip_hms[:, :, 0:-1, :].copy()
hms = (hms + flip_hms) / 2.
if count == n:
break
print('FPS: {:.5f}'.format((n * cfg.VAL.BATCH_SIZE) / (time() - ti)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--tpu', default='')
parser.add_argument('-c', '--cfg', required=True) # yaml
parser.add_argument('-bs', '--batch-size', type=int, default=1)
parser.add_argument('-n', type=int, default=1000)
args = parser.parse_args()
from dataset.coco import cn as cfg
cfg.merge_from_file('configs/' + args.cfg)
cfg.MODEL.NAME = args.cfg.split('.')[0]
cfg.VAL.BATCH_SIZE = args.batch_size
if args.cpu:
strategy = tf.distribute.OneDeviceStrategy('/CPU:0')
elif args.gpu:
strategy = tf.distribute.OneDeviceStrategy('/GPU:0')
else:
tpu, strategy = detect_hardware(args.tpu)
tf.config.optimizer.set_experimental_options({'disable_meta_optimizer': True})
speed_test(strategy, cfg, split='val', n=args.n)
| [] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 |
analyze.py | # BirdWeather edits by @timsterc
# Other edits by @CaiusX and @mcguirepr89
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = ''
try:
import tflite_runtime.interpreter as tflite
except:
from tensorflow import lite as tflite
import argparse
import operator
import librosa
import numpy as np
import math
import time
from decimal import Decimal
import json
###############################################################################
import requests
import mysql.connector
###############################################################################
import datetime
import pytz
from tzlocal import get_localzone
from pathlib import Path
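# Overall flow: load the BirdNET TFLite model, split the recording into 3-second chunks, predict species per
# chunk, then write results to file, MySQL and (optionally) BirdWeather.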
def loadModel():
global INPUT_LAYER_INDEX
global OUTPUT_LAYER_INDEX
global MDATA_INPUT_INDEX
global CLASSES
print('LOADING TF LITE MODEL...', end=' ')
# Load TFLite model and allocate tensors.
interpreter = tflite.Interpreter(model_path='model/BirdNET_6K_GLOBAL_MODEL.tflite',num_threads=2)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Get input tensor index
INPUT_LAYER_INDEX = input_details[0]['index']
MDATA_INPUT_INDEX = input_details[1]['index']
OUTPUT_LAYER_INDEX = output_details[0]['index']
# Load labels
CLASSES = []
with open('model/labels.txt', 'r') as lfile:
for line in lfile.readlines():
CLASSES.append(line.replace('\n', ''))
print('DONE!')
return interpreter
def loadCustomSpeciesList(path):
slist = []
if os.path.isfile(path):
with open(path, 'r') as csfile:
for line in csfile.readlines():
slist.append(line.replace('\r', '').replace('\n', ''))
return slist
def splitSignal(sig, rate, overlap, seconds=3.0, minlen=1.5):
# Split signal with overlap
sig_splits = []
for i in range(0, len(sig), int((seconds - overlap) * rate)):
split = sig[i:i + int(seconds * rate)]
# End of signal?
if len(split) < int(minlen * rate):
break
# Signal chunk too short? Fill with zeros.
if len(split) < int(rate * seconds):
temp = np.zeros((int(rate * seconds)))
temp[:len(split)] = split
split = temp
sig_splits.append(split)
return sig_splits
def readAudioData(path, overlap, sample_rate=48000):
print('READING AUDIO DATA...', end=' ', flush=True)
# Open file with librosa (uses ffmpeg or libav)
sig, rate = librosa.load(path, sr=sample_rate, mono=True, res_type='kaiser_fast')
# Split audio into 3-second chunks
chunks = splitSignal(sig, rate, overlap)
print('DONE! READ', str(len(chunks)), 'CHUNKS.')
return chunks
def convertMetadata(m):
# Convert week to cosine
if m[2] >= 1 and m[2] <= 48:
m[2] = math.cos(math.radians(m[2] * 7.5)) + 1
else:
m[2] = -1
# Add binary mask
mask = np.ones((3,))
if m[0] == -1 or m[1] == -1:
mask = np.zeros((3,))
if m[2] == -1:
mask[2] = 0.0
return np.concatenate([m, mask])
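# Sigmoid with adjustable steepness: the sensitivity argument scales the logits before squashing to [0, 1].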
def custom_sigmoid(x, sensitivity=1.0):
return 1 / (1.0 + np.exp(-sensitivity * x))
def predict(sample, interpreter, sensitivity):
# Make a prediction
interpreter.set_tensor(INPUT_LAYER_INDEX, np.array(sample[0], dtype='float32'))
interpreter.set_tensor(MDATA_INPUT_INDEX, np.array(sample[1], dtype='float32'))
interpreter.invoke()
prediction = interpreter.get_tensor(OUTPUT_LAYER_INDEX)[0]
# Apply custom sigmoid
p_sigmoid = custom_sigmoid(prediction, sensitivity)
# Get label and scores for pooled predictions
p_labels = dict(zip(CLASSES, p_sigmoid))
# Sort by score
p_sorted = sorted(p_labels.items(), key=operator.itemgetter(1), reverse=True)
# Remove species that are on blacklist
for i in range(min(10, len(p_sorted))):
if p_sorted[i][0] in ['Human_Human', 'Non-bird_Non-bird', 'Noise_Noise']:
p_sorted[i] = (p_sorted[i][0], 0.0)
    # Only return the top ten results
return p_sorted[:10]
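# Run the model over every chunk (with the configured overlap) and key each prediction list by its
# start/end time in the recording.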
def analyzeAudioData(chunks, lat, lon, week, sensitivity, overlap, interpreter):
detections = {}
start = time.time()
print('ANALYZING AUDIO...', end=' ', flush=True)
# Convert and prepare metadata
mdata = convertMetadata(np.array([lat, lon, week]))
mdata = np.expand_dims(mdata, 0)
# Parse every chunk
pred_start = 0.0
for c in chunks:
# Prepare as input signal
sig = np.expand_dims(c, 0)
# Make prediction
p = predict([sig, mdata], interpreter, sensitivity)
# Save result and timestamp
pred_end = pred_start + 3.0
detections[str(pred_start) + ';' + str(pred_end)] = p
pred_start = pred_end - overlap
print('DONE! Time', int((time.time() - start) * 10) / 10.0, 'SECONDS')
return detections
def writeResultsToFile(detections, min_conf, path):
print('WRITING RESULTS TO', path, '...', end=' ')
rcnt = 0
with open(path, 'w') as rfile:
rfile.write('Start (s);End (s);Scientific name;Common name;Confidence\n')
for d in detections:
for entry in detections[d]:
if entry[1] >= min_conf and (entry[0] in WHITE_LIST or len(WHITE_LIST) == 0):
rfile.write(d + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]) + '\n')
rcnt += 1
print('DONE! WROTE', rcnt, 'RESULTS.')
def main():
global WHITE_LIST
# Parse passed arguments
parser = argparse.ArgumentParser()
parser.add_argument('--i', help='Path to input file.')
parser.add_argument('--o', default='result.csv', help='Path to output file. Defaults to result.csv.')
parser.add_argument('--lat', type=float, default=-1, help='Recording location latitude. Set -1 to ignore.')
parser.add_argument('--lon', type=float, default=-1, help='Recording location longitude. Set -1 to ignore.')
parser.add_argument('--week', type=int, default=-1, help='Week of the year when the recording was made. Values in [1, 48] (4 weeks per month). Set -1 to ignore.')
    parser.add_argument('--overlap', type=float, default=0.0, help='Overlap in seconds between extracted spectrograms. Values in [0.0, 2.9]. Defaults to 0.0.')
parser.add_argument('--sensitivity', type=float, default=1.0, help='Detection sensitivity; Higher values result in higher sensitivity. Values in [0.5, 1.5]. Defaults to 1.0.')
parser.add_argument('--min_conf', type=float, default=0.1, help='Minimum confidence threshold. Values in [0.01, 0.99]. Defaults to 0.1.')
parser.add_argument('--custom_list', default='', help='Path to text file containing a list of species. Not used if not provided.')
parser.add_argument('--birdweather_id', default='99999', help='Private Station ID for BirdWeather.')
args = parser.parse_args()
# Load model
interpreter = loadModel()
# Load custom species list
if not args.custom_list == '':
WHITE_LIST = loadCustomSpeciesList(args.custom_list)
else:
WHITE_LIST = []
birdweather_id = args.birdweather_id
# Read audio data
audioData = readAudioData(args.i, args.overlap)
# Get Date/Time from filename in case Pi gets behind
#now = datetime.now()
full_file_name = args.i
file_name = Path(full_file_name).stem
file_date = file_name.split('-birdnet-')[0]
file_time = file_name.split('-birdnet-')[1]
date_time_str = file_date + ' ' + file_time
date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
#print('Date:', date_time_obj.date())
#print('Time:', date_time_obj.time())
print('Date-time:', date_time_obj)
now = date_time_obj
current_date = now.strftime("%Y/%m/%d")
current_time = now.strftime("%H:%M:%S")
current_iso8601 = now.astimezone(get_localzone()).isoformat()
week_number = int(now.strftime("%V"))
week = max(1, min(week_number, 48))
sensitivity = max(0.5, min(1.0 - (args.sensitivity - 1.0), 1.5))
# Process audio data and get detections
detections = analyzeAudioData(audioData, args.lat, args.lon, week, sensitivity, args.overlap, interpreter)
# Write detections to output file
min_conf = max(0.01, min(args.min_conf, 0.99))
writeResultsToFile(detections, min_conf, args.o)
###############################################################################
###############################################################################
soundscape_uploaded = False
# Write detections to Database
for i in detections:
print("\n", detections[i][0],"\n")
with open('BirdDB.txt', 'a') as rfile:
for d in detections:
print("\n", "Database Entry", "\n")
for entry in detections[d]:
if entry[1] >= min_conf and (entry[0] in WHITE_LIST or len(WHITE_LIST) == 0):
rfile.write(str(current_date) + ';' + str(current_time) + ';' + entry[0].replace('_', ';') + ';' \
+ str(entry[1]) +";" + str(args.lat) + ';' + str(args.lon) + ';' + str(min_conf) + ';' + str(week) + ';' \
+ str(sensitivity) +';' + str(args.overlap) + '\n')
def insert_variables_into_table(Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap):
try:
connection = mysql.connector.connect(host='localhost',
database='birds',
user='birder',
password='birdnet')
cursor = connection.cursor()
mySql_insert_query = """INSERT INTO detections (Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """
record = (Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap)
cursor.execute(mySql_insert_query, record)
connection.commit()
print("Record inserted successfully into detections table")
except mysql.connector.Error as error:
print("Failed to insert record into detections table {}".format(error))
finally:
if connection.is_connected():
connection.close()
print("MySQL connection is closed")
species = entry[0]
sci_name,com_name = species.split('_')
insert_variables_into_table(str(current_date), str(current_time), sci_name, com_name, \
str(entry[1]), str(args.lat), str(args.lon), str(min_conf), str(week), \
str(args.sensitivity), str(args.overlap))
print(str(current_date) + ';' + str(current_time) + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]) +";" + str(args.lat) + ';' + str(args.lon) + ';' + str(min_conf) + ';' + str(week) + ';' + str(args.sensitivity) +';' + str(args.overlap) + '\n')
if birdweather_id != "99999":
if soundscape_uploaded is False:
# POST soundscape to server
soundscape_url = "https://app.birdweather.com/api/v1/stations/" + birdweather_id + "/soundscapes" + "?timestamp=" + current_iso8601
with open(args.i, 'rb') as f:
wav_data = f.read()
response = requests.post(url=soundscape_url, data=wav_data, headers={'Content-Type': 'application/octet-stream'})
print("Soundscape POST Response Status - ", response.status_code)
sdata = response.json()
soundscape_id = sdata['soundscape']['id']
soundscape_uploaded = True
# POST detection to server
detection_url = "https://app.birdweather.com/api/v1/stations/" + birdweather_id + "/detections"
start_time = d.split(';')[0]
end_time = d.split(';')[1]
post_begin = "{ "
now_p_start = now + datetime.timedelta(seconds=float(start_time))
current_iso8601 = now_p_start.astimezone(get_localzone()).isoformat()
post_timestamp = "\"timestamp\": \"" + current_iso8601 + "\","
post_lat = "\"lat\": " + str(args.lat) + ","
post_lon = "\"lon\": " + str(args.lon) + ","
post_soundscape_id = "\"soundscapeId\": " + str(soundscape_id) + ","
post_soundscape_start_time = "\"soundscapeStartTime\": " + start_time + ","
post_soundscape_end_time = "\"soundscapeEndTime\": " + end_time + ","
post_commonName = "\"commonName\": \"" + entry[0].split('_')[1] + "\","
post_scientificName = "\"scientificName\": \"" + entry[0].split('_')[0] + "\","
post_algorithm = "\"algorithm\": " + "\"alpha\"" + ","
post_confidence = "\"confidence\": " + str(entry[1])
post_end = " }"
post_json = post_begin + post_timestamp + post_lat + post_lon + post_soundscape_id + post_soundscape_start_time + post_soundscape_end_time + post_commonName + post_scientificName + post_algorithm + post_confidence + post_end
print(post_json)
response = requests.post(detection_url, json=json.loads(post_json))
print("Detection POST Response Status - ", response.status_code)
#time.sleep(3)
###############################################################################
###############################################################################
if __name__ == '__main__':
main()
# Example calls
# python3 analyze.py --i 'example/XC558716 - Soundscape.mp3' --lat 35.4244 --lon -120.7463 --week 18
# python3 analyze.py --i 'example/XC563936 - Soundscape.mp3' --lat 47.6766 --lon -122.294 --week 11 --overlap 1.5 --min_conf 0.25 --sensitivity 1.25 --custom_list 'example/custom_species_list.txt'
| [] | [] | ["CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"] | [] | ["CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"] | python | 2 | 0 |
utils/dependency_management.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Utility CLI to manage dependencies for aiida-core."""
import os
import sys
import re
import json
import subprocess
from pathlib import Path
from collections import OrderedDict, defaultdict
from pkg_resources import Requirement, parse_requirements
from packaging.utils import canonicalize_name
import click
import yaml
import tomlkit as toml
ROOT = Path(__file__).resolve().parent.parent # repository root
SETUPTOOLS_CONDA_MAPPINGS = {
'psycopg2-binary': 'psycopg2',
'graphviz': 'python-graphviz',
}
CONDA_IGNORE = []
GITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS') == 'true'
class DependencySpecificationError(click.ClickException):
"""Indicates an issue in a dependency specification."""
def _load_setup_cfg():
"""Load the setup configuration from the 'setup.json' file."""
try:
with open(ROOT / 'setup.json') as setup_json_file:
return json.load(setup_json_file)
except json.decoder.JSONDecodeError as error: # pylint: disable=no-member
raise DependencySpecificationError(f"Error while parsing 'setup.json' file: {error}")
except FileNotFoundError:
raise DependencySpecificationError("The 'setup.json' file is missing!")
def _load_environment_yml():
"""Load the conda environment specification from the 'environment.yml' file."""
try:
with open(ROOT / 'environment.yml') as file:
return yaml.load(file, Loader=yaml.SafeLoader)
except yaml.error.YAMLError as error:
raise DependencySpecificationError(f"Error while parsing 'environment.yml':\n{error}")
except FileNotFoundError as error:
raise DependencySpecificationError(str(error))
def _setuptools_to_conda(req):
"""Map package names from setuptools to conda where necessary.
In case that the same underlying dependency is listed under different names
on PyPI and conda-forge.
"""
for pattern, replacement in SETUPTOOLS_CONDA_MAPPINGS.items():
if re.match(pattern, str(req)):
req = Requirement.parse(re.sub(pattern, replacement, str(req)))
break
# markers are not supported by conda
req.marker = None
# We need to parse the modified required again, to ensure consistency.
return Requirement.parse(str(req))
def _find_linenos_of_requirements_in_setup_json(requirements):
"""Determine the line numbers of requirements specified in 'setup.json'.
Returns a dict that maps a requirement, e.g., `numpy~=1.15.0` to the
line numbers at which said requirement is defined within the 'setup.json'
file.
"""
linenos = defaultdict(list)
with open(ROOT / 'setup.json') as setup_json_file:
lines = list(setup_json_file)
# Determine the lines that correspond to affected requirements in setup.json.
for requirement in requirements:
for lineno, line in enumerate(lines):
if str(requirement) in line:
linenos[requirement].append(lineno)
return linenos
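# Wraps one pinned line from a requirements file; e.g. an entry for 'numpy==1.17.4' fulfills the
# abstract requirement 'numpy~=1.17' from setup.json.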
class _Entry:
"""Helper class to check whether a given distribution fulfills a requirement."""
def __init__(self, requirement):
self._req = requirement
def fulfills(self, requirement):
"""Returns True if this entry fullfills the requirement."""
return canonicalize_name(self._req.name) == canonicalize_name(requirement.name) \
and self._req.specs[0][1] in requirement.specifier
def _parse_working_set(entries):
for req in parse_requirements(entries):
yield _Entry(req)
@click.group()
def cli():
"""Manage dependencies of the aiida-core package."""
@cli.command('generate-environment-yml')
def generate_environment_yml():
"""Generate 'environment.yml' file."""
# needed for ordered dict, see https://stackoverflow.com/a/52621703
yaml.add_representer(
OrderedDict,
lambda self, data: yaml.representer.SafeRepresenter.represent_dict(self, data.items()),
Dumper=yaml.SafeDumper
)
# Read the requirements from 'setup.json'
setup_cfg = _load_setup_cfg()
install_requirements = [Requirement.parse(r) for r in setup_cfg['install_requires']]
# python version cannot be overriden from outside environment.yml
# (even if it is not specified at all in environment.yml)
# https://github.com/conda/conda/issues/9506
conda_requires = ['python~=3.7']
for req in install_requirements:
if req.name == 'python' or any(re.match(ignore, str(req)) for ignore in CONDA_IGNORE):
continue
conda_requires.append(str(_setuptools_to_conda(req)))
environment = OrderedDict([
('name', 'aiida'),
('channels', ['conda-forge', 'defaults']),
('dependencies', conda_requires),
])
with open(ROOT / 'environment.yml', 'w') as env_file:
env_file.write('# Usage: conda env create -n myenvname -f environment.yml\n')
yaml.safe_dump(
environment, env_file, explicit_start=True, default_flow_style=False, encoding='utf-8', allow_unicode=True
)
@cli.command()
def update_pyproject_toml():
"""Generate a 'pyproject.toml' file, or update an existing one.
This function generates/updates the ``build-system`` section,
to be consistent with the 'setup.json' file.
"""
# read the current file
toml_path = ROOT / 'pyproject.toml'
if toml_path.exists():
pyproject = toml.loads(toml_path.read_text(encoding='utf8'))
else:
pyproject = {}
# Read the requirements from 'setup.json'
setup_cfg = _load_setup_cfg()
install_requirements = [Requirement.parse(r) for r in setup_cfg['install_requires']]
for requirement in install_requirements:
if requirement.name == 'reentry':
reentry_requirement = requirement
break
else:
raise DependencySpecificationError("Failed to find reentry requirement in 'setup.json'.")
# update the build-system key
pyproject.setdefault('build-system', {})
pyproject['build-system'].update({
'requires': ['setuptools>=40.8.0,<50', 'wheel',
str(reentry_requirement), 'fastentrypoints~=0.12'],
'build-backend':
'setuptools.build_meta:__legacy__',
})
# write the new file
toml_path.write_text(toml.dumps(pyproject), encoding='utf8')
@cli.command()
@click.pass_context
def generate_all(ctx):
"""Generate all dependent requirement files."""
ctx.invoke(generate_environment_yml)
ctx.invoke(update_pyproject_toml)
@cli.command('validate-environment-yml', help="Validate 'environment.yml'.")
def validate_environment_yml(): # pylint: disable=too-many-branches
"""Validate that 'environment.yml' is consistent with 'setup.json'."""
# Read the requirements from 'setup.json' and 'environment.yml'.
setup_cfg = _load_setup_cfg()
install_requirements = [Requirement.parse(r) for r in setup_cfg['install_requires']]
python_requires = Requirement.parse('python' + setup_cfg['python_requires'])
environment_yml = _load_environment_yml()
try:
assert environment_yml['name'] == 'aiida', "environment name should be 'aiida'."
assert environment_yml['channels'] == [
'conda-forge', 'defaults'
], "channels should be 'conda-forge', 'defaults'."
except AssertionError as error:
raise DependencySpecificationError(f"Error in 'environment.yml': {error}")
try:
conda_dependencies = {Requirement.parse(d) for d in environment_yml['dependencies']}
except TypeError as error:
raise DependencySpecificationError(f"Error while parsing requirements from 'environment_yml': {error}")
# Attempt to find the specification of Python among the 'environment.yml' dependencies.
for dependency in conda_dependencies:
if dependency.name == 'python': # Found the Python dependency specification
conda_python_dependency = dependency
conda_dependencies.remove(dependency)
break
else: # Failed to find Python dependency specification
raise DependencySpecificationError("Did not find specification of Python version in 'environment.yml'.")
# The Python version specified in 'setup.json' should be listed as trove classifiers.
for spec in conda_python_dependency.specifier:
expected_classifier = 'Programming Language :: Python :: ' + spec.version
if expected_classifier not in setup_cfg['classifiers']:
raise DependencySpecificationError(f"Trove classifier '{expected_classifier}' missing from 'setup.json'.")
# The Python version should be specified as supported in 'setup.json'.
if not any(spec.version >= other_spec.version for other_spec in python_requires.specifier):
raise DependencySpecificationError(
f"Required Python version {spec.version} from 'environment.yaml' is not consistent with " +
"required version in 'setup.json'."
)
break
else:
raise DependencySpecificationError(f"Missing specifier: '{conda_python_dependency}'.")
# Check that all requirements specified in the setup.json file are found in the
# conda environment specification.
for req in install_requirements:
if any(re.match(ignore, str(req)) for ignore in CONDA_IGNORE):
continue # skip explicitly ignored packages
try:
conda_dependencies.remove(_setuptools_to_conda(req))
except KeyError:
raise DependencySpecificationError(f"Requirement '{req}' not specified in 'environment.yml'.")
# The only dependency left should be the one for Python itself, which is not part of
# the install_requirements for setuptools.
if conda_dependencies:
raise DependencySpecificationError(
"The 'environment.yml' file contains dependencies that are missing "
"in 'setup.json':\n- {}".format('\n- '.join(map(str, conda_dependencies)))
)
click.secho('Conda dependency specification is consistent.', fg='green')
@cli.command('validate-pyproject-toml', help="Validate 'pyproject.toml'.")
def validate_pyproject_toml():
"""Validate that 'pyproject.toml' is consistent with 'setup.json'."""
# Read the requirements from 'setup.json'
setup_cfg = _load_setup_cfg()
install_requirements = [Requirement.parse(r) for r in setup_cfg['install_requires']]
for requirement in install_requirements:
if requirement.name == 'reentry':
reentry_requirement = requirement
break
else:
raise DependencySpecificationError("Failed to find reentry requirement in 'setup.json'.")
pyproject_file = ROOT / 'pyproject.toml'
if not pyproject_file.exists():
raise DependencySpecificationError("The 'pyproject.toml' file is missing!")
pyproject = toml.loads(pyproject_file.read_text(encoding='utf8'))
pyproject_requires = [Requirement.parse(r) for r in pyproject['build-system']['requires']]
if reentry_requirement not in pyproject_requires:
raise DependencySpecificationError(f"Missing requirement '{reentry_requirement}' in 'pyproject.toml'.")
click.secho('Pyproject.toml dependency specification is consistent.', fg='green')
@cli.command('validate-all', help='Validate consistency of all requirements.')
@click.pass_context
def validate_all(ctx):
"""Validate consistency of all requirement specifications of the package.
Validates that the specification of requirements/dependencies is consistent across
the following files:
- setup.py
- setup.json
- environment.yml
- pyproject.toml
"""
ctx.invoke(validate_environment_yml)
ctx.invoke(validate_pyproject_toml)
@cli.command()
@click.argument('extras', nargs=-1)
@click.option(
'--github-annotate/--no-github-annotate',
default=True,
hidden=True,
help='Control whether to annotate files with context-specific warnings '
'as part of a GitHub actions workflow. Note: Requires environment '
'variable GITHUB_ACTIONS=true .'
)
def check_requirements(extras, github_annotate):  # pylint: disable=too-many-locals,too-many-branches
"""Check the 'requirements/*.txt' files.
Checks that the environments specified in the requirements files
    match all the dependencies specified in 'setup.json'.
The arguments allow to specify which 'extra' requirements to expect.
Use 'DEFAULT' to select 'atomic_tools', 'docs', 'notebook', 'rest', and 'tests'.
"""
if len(extras) == 1 and extras[0] == 'DEFAULT':
extras = ['atomic_tools', 'docs', 'notebook', 'rest', 'tests']
# Read the requirements from 'setup.json'
setup_cfg = _load_setup_cfg()
install_requires = setup_cfg['install_requires']
for extra in extras:
install_requires.extend(setup_cfg['extras_require'][extra])
install_requires = set(parse_requirements(install_requires))
not_installed = defaultdict(list)
for fn_req in (ROOT / 'requirements').iterdir():
match = re.match(r'.*-py-(.*)\.txt', str(fn_req))
if not match:
continue
env = {'python_version': match.groups()[0]}
required = {r for r in install_requires if r.marker is None or r.marker.evaluate(env)}
with open(fn_req) as req_file:
working_set = list(_parse_working_set(req_file))
installed = {req for req in required for entry in working_set if entry.fulfills(req)}
for dependency in required.difference(installed):
not_installed[dependency].append(fn_req)
if any(not_installed.values()):
setup_json_linenos = _find_linenos_of_requirements_in_setup_json(not_installed)
# Format error message to be presented to user.
error_msg = ["The requirements/ files are missing dependencies specified in the 'setup.json' file.", '']
for dependency, fn_reqs in not_installed.items():
src = 'setup.json:' + ','.join(str(lineno + 1) for lineno in setup_json_linenos[dependency])
error_msg.append(f'{src}: No match for dependency `{dependency}` in:')
for fn_req in sorted(fn_reqs):
error_msg.append(f' - {fn_req.relative_to(ROOT)}')
if GITHUB_ACTIONS:
            # Set the step output error message which can be used, e.g., for display as part of an issue comment.
print('::set-output name=error::' + '%0A'.join(error_msg))
if GITHUB_ACTIONS and github_annotate:
# Annotate the setup.json file with specific warnings.
for dependency, fn_reqs in not_installed.items():
for lineno in setup_json_linenos[dependency]:
print(
f'::warning file=setup.json,line={lineno+1}::'
f"No match for dependency '{dependency}' in: " +
','.join(str(fn_req.relative_to(ROOT)) for fn_req in fn_reqs)
)
raise DependencySpecificationError('\n'.join(error_msg))
click.secho("Requirements files appear to be in sync with specifications in 'setup.json'.", fg='green')
@cli.command()
@click.argument('extras', nargs=-1)
def pip_install_extras(extras):
"""Install extra requirements.
For example:
pip-install-extras docs
    This will install *only* the extra requirements for docs, without triggering
    the installation of the main requirements of the aiida-core package.
"""
# Read the requirements from 'setup.json'
setup_cfg = _load_setup_cfg()
to_install = set()
for key in extras:
to_install.update(Requirement.parse(r) for r in setup_cfg['extras_require'][key])
cmd = [sys.executable, '-m', 'pip', 'install'] + [str(r) for r in to_install]
subprocess.run(cmd, check=True)
if __name__ == '__main__':
cli() # pylint: disable=no-value-for-parameter
| [] | [] | [
"GITHUB_ACTIONS"
] | [] | ["GITHUB_ACTIONS"] | python | 1 | 0 | |
modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.testframework;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.MulticastSocket;
import java.net.ServerSocket;
import java.nio.file.attribute.PosixFilePermission;
import java.security.AccessController;
import java.security.GeneralSecurityException;
import java.security.KeyStore;
import java.security.PrivilegedAction;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Queue;
import java.util.Random;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.cache.CacheException;
import javax.cache.configuration.Factory;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.annotations.QuerySqlFunction;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.IgniteFutureCancelledCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.IgniteInterruptedCheckedException;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.client.ssl.GridSslBasicContextFactory;
import org.apache.ignite.internal.client.ssl.GridSslContextFactory;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;
import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter;
import org.apache.ignite.internal.processors.cache.verify.IdleVerifyResultV2;
import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor;
import org.apache.ignite.internal.processors.port.GridPortRecord;
import org.apache.ignite.internal.util.GridBusyLock;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.lang.GridAbsClosure;
import org.apache.ignite.internal.util.lang.GridAbsPredicate;
import org.apache.ignite.internal.util.lang.IgnitePair;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.G;
import org.apache.ignite.internal.util.typedef.T2;
import org.apache.ignite.internal.util.typedef.X;
import org.apache.ignite.internal.util.typedef.internal.LT;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgniteInClosure;
import org.apache.ignite.lang.IgnitePredicate;
import org.apache.ignite.lang.IgniteRunnable;
import org.apache.ignite.plugin.extensions.communication.Message;
import org.apache.ignite.spi.discovery.DiscoverySpiCustomMessage;
import org.apache.ignite.spi.discovery.DiscoverySpiListener;
import org.apache.ignite.ssl.SslContextFactory;
import org.apache.ignite.testframework.config.GridTestProperties;
import org.apache.ignite.testframework.junits.GridAbstractTest;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
* Utility class for tests.
*/
public final class GridTestUtils {
/** Default busy wait sleep interval in milliseconds. */
public static final long DFLT_BUSYWAIT_SLEEP_INTERVAL = 200;
/** */
public static final long DFLT_TEST_TIMEOUT = 5 * 60 * 1000;
/** */
static final String ALPHABETH = "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890_";
/**
     * Hook object that intervenes in discovery message handling
     * and thus allows making assertions or performing other actions, such as skipping certain discovery messages.
*/
public static class DiscoveryHook {
/**
* @param msg Message.
*/
public void handleDiscoveryMessage(DiscoverySpiCustomMessage msg) {
}
/**
* @param ignite Ignite.
*/
public void ignite(IgniteEx ignite) {
// No-op.
}
}
/**
* Injects {@link DiscoveryHook} into handling logic.
*/
public static final class DiscoverySpiListenerWrapper implements DiscoverySpiListener {
/** */
private final DiscoverySpiListener delegate;
/** */
private final DiscoveryHook hook;
/**
* @param delegate Delegate.
* @param hook Hook.
*/
private DiscoverySpiListenerWrapper(DiscoverySpiListener delegate, DiscoveryHook hook) {
this.hook = hook;
this.delegate = delegate;
}
/** {@inheritDoc} */
@Override public IgniteFuture<?> onDiscovery(int type, long topVer, ClusterNode node, Collection<ClusterNode> topSnapshot, @Nullable Map<Long, Collection<ClusterNode>> topHist, @Nullable DiscoverySpiCustomMessage spiCustomMsg) {
hook.handleDiscoveryMessage(spiCustomMsg);
return delegate.onDiscovery(type, topVer, node, topSnapshot, topHist, spiCustomMsg);
}
/** {@inheritDoc} */
@Override public void onLocalNodeInitialized(ClusterNode locNode) {
delegate.onLocalNodeInitialized(locNode);
}
/**
* @param delegate Delegate.
* @param discoveryHook Discovery hook.
*/
public static DiscoverySpiListener wrap(DiscoverySpiListener delegate, DiscoveryHook discoveryHook) {
return new DiscoverySpiListenerWrapper(delegate, discoveryHook);
}
}
/** Test parameters scale factor util. */
public static final class SF extends ScaleFactorUtil {
}
/** */
private static final Map<Class<?>, String> addrs = new HashMap<>();
/** */
private static final Map<Class<? extends GridAbstractTest>, Integer> mcastPorts = new HashMap<>();
/** */
private static final Map<Class<? extends GridAbstractTest>, Integer> discoPorts = new HashMap<>();
/** */
private static final Map<Class<? extends GridAbstractTest>, Integer> commPorts = new HashMap<>();
/** */
private static int[] addr;
/** */
private static final int default_mcast_port = 50000;
/** */
private static final int max_mcast_port = 54999;
/** */
private static final int default_comm_port = 45000;
/** */
private static final int max_comm_port = 49999;
/** */
private static final int default_disco_port = 55000;
/** */
private static final int max_disco_port = 59999;
/** */
private static int mcastPort = default_mcast_port;
/** */
private static int discoPort = default_disco_port;
/** */
private static int commPort = default_comm_port;
/** */
private static final GridBusyLock busyLock = new GridBusyLock();
/** */
public static final ConcurrentMap<IgnitePair<UUID>, IgnitePair<Queue<Message>>> msgMap = new ConcurrentHashMap<>();
/**
* Ensure singleton.
*/
private GridTestUtils() {
// No-op.
}
/**
* @param from From node ID.
* @param to To node ID.
* @param msg Message.
* @param sent Sent or received.
*/
public static void addMessage(UUID from, UUID to, Message msg, boolean sent) {
IgnitePair<UUID> key = new IgnitePair<>(from, to);
IgnitePair<Queue<Message>> val = msgMap.get(key);
if (val == null) {
IgnitePair<Queue<Message>> old = msgMap.putIfAbsent(key,
val = new IgnitePair<Queue<Message>>(
new ConcurrentLinkedQueue<Message>(), new ConcurrentLinkedQueue<Message>()));
if (old != null)
val = old;
}
(sent ? val.get1() : val.get2()).add(msg);
}
/**
* Dumps all messages tracked with {@link #addMessage(UUID, UUID, Message, boolean)} to std out.
*/
public static void dumpMessages() {
for (Map.Entry<IgnitePair<UUID>, IgnitePair<Queue<Message>>> entry : msgMap.entrySet()) {
U.debug("\n" + entry.getKey().get1() + " [sent to] " + entry.getKey().get2());
for (Message message : entry.getValue().get1())
U.debug("\t" + message);
U.debug(entry.getKey().get2() + " [received from] " + entry.getKey().get1());
for (Message message : entry.getValue().get2())
U.debug("\t" + message);
}
}
// static {
// new Thread(new Runnable() {
// @Override public void run() {
// JOptionPane.showMessageDialog(null, "Close this to dump messages.");
//
// dumpMessages();
// }
// }).start();
// }
/**
     * Checks that string {@param str} doesn't contain substring {@param substr}. Logs both strings
* and throws {@link java.lang.AssertionError}, if contains.
*
* @param log Logger (optional).
* @param str String.
* @param substr Substring.
*/
public static void assertNotContains(@Nullable IgniteLogger log, String str, String substr) {
try {
assertFalse(str.contains(substr));
} catch (AssertionError e) {
U.warn(log, String.format("String contain substring: '%s', but shouldn't:", substr));
U.warn(log, "String:");
U.warn(log, str);
throw e;
}
}
/**
* Checks that string {@param str} contains substring {@param substr}. Logs both strings
* and throws {@link java.lang.AssertionError}, if not.
*
* @param log Logger (optional).
* @param str String.
* @param substr Substring.
*/
public static void assertContains(@Nullable IgniteLogger log, String str, String substr) {
try {
assertTrue(str != null && str.contains(substr));
} catch (AssertionError e) {
U.warn(log, String.format("String does not contain substring: '%s':", substr));
U.warn(log, "String:");
U.warn(log, str);
throw e;
}
}
/**
* Checks that collection {@param col} contains string {@param str}. Logs collection, string
* and throws {@link java.lang.AssertionError}, if not.
*
* @param log Logger (optional).
* @param col Collection.
* @param str String.
*/
public static <C extends Collection<String>> void assertContains(@Nullable IgniteLogger log, C col, String str) {
try {
assertTrue(col.contains(str));
} catch (AssertionError e) {
U.warn(log, String.format("Collection does not contain string: '%s':", str));
U.warn(log, "Collection:");
U.warn(log, col);
throw e;
}
}
/**
     * Checks that collection {@param col} doesn't contain string {@param str}. Logs collection, string
* and throws {@link java.lang.AssertionError}, if contains.
*
* @param log Logger (optional).
* @param col Collection.
* @param str String.
*/
public static <C extends Collection<String>> void assertNotContains(@Nullable IgniteLogger log, C col, String str) {
try {
assertFalse(col.contains(str));
} catch (AssertionError e) {
U.warn(log, String.format("Collection contain string: '%s' but shouldn't:", str));
U.warn(log, "Collection:");
U.warn(log, col);
throw e;
}
}
/**
* Checks whether callable throws expected exception or not.
*
* @param log Logger (optional).
* @param call Callable.
* @param cls Exception class.
     * @param msg Exception message (optional). If provided, the thrown exception's
     * message must contain this text.
* @return Thrown throwable.
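     * <p>
     * Usage sketch (illustrative values only):
     * <pre>{@code
     * GridTestUtils.assertThrows(null, () -> {
     *     throw new IllegalStateException("boom");
     * }, IllegalStateException.class, "boom");
     * }</pre>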
*/
public static Throwable assertThrows(@Nullable IgniteLogger log, Callable<?> call,
Class<? extends Throwable> cls, @Nullable String msg) {
assert call != null;
assert cls != null;
try {
call.call();
}
catch (Throwable e) {
if (cls != e.getClass() && !cls.isAssignableFrom(e.getClass())) {
if (e.getClass() == CacheException.class && e.getCause() != null && e.getCause().getClass() == cls)
e = e.getCause();
else {
U.error(log, "Unexpected exception.", e);
fail("Exception class is not as expected [expected=" + cls + ", actual=" + e.getClass() + ']', e);
}
}
if (msg != null && (e.getMessage() == null || !e.getMessage().contains(msg))) {
U.error(log, "Unexpected exception message.", e);
fail("Exception message is not as expected [expected=" + msg + ", actual=" + e.getMessage() + ']', e);
}
if (log != null) {
if (log.isInfoEnabled())
log.info("Caught expected exception: " + e.getMessage());
}
else
X.println("Caught expected exception: " + e.getMessage());
return e;
}
throw new AssertionError("Exception has not been thrown.");
}
/**
* Checks whether callable throws an exception with specified cause.
*
* @param log Logger (optional).
* @param call Callable.
* @param cls Exception class.
     * @param msg Exception message (optional). If provided, the thrown exception's
     * message must contain this text.
* @return Thrown throwable.
*/
public static Throwable assertThrowsAnyCause(@Nullable IgniteLogger log, Callable<?> call,
Class<? extends Throwable> cls, @Nullable String msg) {
assert call != null;
assert cls != null;
try {
call.call();
}
catch (Throwable e) {
Throwable t = e;
while (t != null) {
if (cls == t.getClass() && (msg == null || (t.getMessage() != null && t.getMessage().contains(msg)))) {
if (log != null && log.isInfoEnabled())
log.info("Caught expected exception: " + t.getMessage());
return t;
}
t = t.getCause();
}
fail("Unexpected exception", e);
}
throw new AssertionError("Exception has not been thrown.");
}
/**
* Checks whether callable throws expected exception or its child or not.
*
* @param log Logger (optional).
* @param call Callable.
* @param cls Exception class.
     * @param msg Exception message (optional). If provided, the thrown exception's
     * message must start with this text.
* @return Thrown throwable.
*/
@Nullable public static Throwable assertThrowsInherited(@Nullable IgniteLogger log, Callable<?> call,
Class<? extends Throwable> cls, @Nullable String msg) {
assert call != null;
assert cls != null;
try {
call.call();
}
catch (Throwable e) {
if (!cls.isAssignableFrom(e.getClass()))
fail("Exception class is not as expected [expected=" + cls + ", actual=" + e.getClass() + ']', e);
if (msg != null && (e.getMessage() == null || !e.getMessage().startsWith(msg)))
fail("Exception message is not as expected [expected=" + msg + ", actual=" + e.getMessage() + ']', e);
if (log != null) {
if (log.isDebugEnabled())
log.debug("Caught expected exception: " + e.getMessage());
}
else
X.println("Caught expected exception: " + e.getMessage());
return e;
}
throw new AssertionError("Exception has not been thrown.");
}
/**
* Checks whether callable throws exception, which is itself of a specified
* class, or has a cause of the specified class.
*
* @param runnable Runnable.
* @param cls Expected class.
* @return Thrown throwable.
*/
@Nullable public static Throwable assertThrowsWithCause(Runnable runnable, Class<? extends Throwable> cls) {
return assertThrowsWithCause(new Callable<Integer>() {
@Override public Integer call() throws Exception {
runnable.run();
return 0;
}
}, cls);
}
/**
* Checks whether callable throws exception, which is itself of a specified
* class, or has a cause of the specified class.
*
* @param call Callable.
* @param cls Expected class.
* @return Thrown throwable.
*/
@Nullable public static Throwable assertThrowsWithCause(Callable<?> call, Class<? extends Throwable> cls) {
assert call != null;
assert cls != null;
try {
call.call();
}
catch (Throwable e) {
if (!X.hasCause(e, cls))
fail("Exception is neither of a specified class, nor has a cause of the specified class: " + cls, e);
return e;
}
throw new AssertionError("Exception has not been thrown.");
}
/**
* Checks whether closure throws exception, which is itself of a specified
* class, or has a cause of the specified class.
*
* @param call Closure.
* @param p Parameter passed to closure.
* @param cls Expected class.
* @return Thrown throwable.
*/
public static <P> Throwable assertThrowsWithCause(IgniteInClosure<P> call, P p, Class<? extends Throwable> cls) {
assert call != null;
assert cls != null;
try {
call.apply(p);
}
catch (Throwable e) {
if (!X.hasCause(e, cls))
fail("Exception is neither of a specified class, nor has a cause of the specified class: " + cls, e);
return e;
}
throw new AssertionError("Exception has not been thrown.");
}
/**
* Asserts that the specified runnable completes within the specified timeout.
*
* @param msg Assertion message in case of timeout.
* @param timeout Timeout.
* @param timeUnit Timeout {@link TimeUnit}.
* @param runnable {@link Runnable} to check.
* @throws Exception In case of any exception distinct from {@link TimeoutException}.
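     * <p>
     * Example (illustrative; {@code doWork()} is a hypothetical operation):
     * <pre>{@code
     * GridTestUtils.assertTimeout("Operation took too long.", 2, TimeUnit.SECONDS, () -> doWork());
     * }</pre>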
*/
public static void assertTimeout(String msg, long timeout, TimeUnit timeUnit, Runnable runnable) throws Exception {
ExecutorService executorSvc = Executors.newSingleThreadExecutor();
Future<?> fut = executorSvc.submit(runnable);
try {
fut.get(timeout, timeUnit);
}
catch (TimeoutException ignored) {
fail(msg, null);
}
finally {
executorSvc.shutdownNow();
}
}
/**
* Asserts that the specified runnable completes within the specified timeout.
*
* @param timeout Timeout.
* @param timeUnit Timeout {@link TimeUnit}.
* @param runnable {@link Runnable} to check.
* @throws Exception In case of any exception distinct from {@link TimeoutException}.
*/
public static void assertTimeout(long timeout, TimeUnit timeUnit, Runnable runnable) throws Exception {
assertTimeout("Timeout occurred.", timeout, timeUnit, runnable);
}
/**
* Throw assertion error with specified error message and initialized cause.
*
* @param msg Error message.
* @param cause Error cause.
* @return Assertion error.
*/
private static AssertionError fail(String msg, @Nullable Throwable cause) {
AssertionError e = new AssertionError(msg);
if (cause != null)
e.initCause(cause);
throw e;
}
/**
* Checks whether object's method call throws expected exception or not.
*
* @param log Logger (optional).
* @param cls Exception class.
     * @param msg Exception message (optional). If provided, the thrown exception's
     * message must contain this text.
* @param obj Object to invoke method for.
* @param mtd Object's method to invoke.
* @param params Method parameters.
* @return Thrown throwable.
*/
@Nullable public static Throwable assertThrows(@Nullable IgniteLogger log, Class<? extends Throwable> cls,
@Nullable String msg, final Object obj, final String mtd, final Object... params) {
return assertThrows(log, new Callable() {
@Override public Object call() throws Exception {
return invoke(obj, mtd, params);
}
}, cls, msg);
}
/**
* Asserts that each element in iterable has one-to-one correspondence with a
* predicate from list.
*
* @param it Input iterable of elements.
* @param ps Array of predicates (by number of elements in iterable).
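     * <p>
     * Example (illustrative):
     * <pre>{@code
     * GridTestUtils.assertOneToOne(Arrays.asList(1, 2), x -> x == 1, x -> x == 2);
     * }</pre>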
*/
public static <T> void assertOneToOne(Iterable<T> it, IgnitePredicate<T>... ps) {
Collection<IgnitePredicate<T>> ps0 = new ArrayList<>(Arrays.asList(ps));
Collection<T2<IgnitePredicate<T>, T>> passed = new ArrayList<>();
for (T elem : it) {
for (T2<IgnitePredicate<T>, T> p : passed) {
if (p.get1().apply(elem))
throw new AssertionError("Two elements match one predicate [elem1=" + p.get2() +
", elem2=" + elem + ", pred=" + p.get1() + ']');
}
IgnitePredicate<T> matched = null;
for (IgnitePredicate<T> p : ps0) {
if (p.apply(elem)) {
if (matched != null)
throw new AssertionError("Element matches more than one predicate [elem=" + elem +
", pred1=" + p + ", pred2=" + matched + ']');
matched = p;
}
}
if (matched == null) // None matched.
throw new AssertionError("The element does not match [elem=" + elem +
", numRemainingPreds=" + ps0.size() + ']');
ps0.remove(matched);
passed.add(new T2<>(matched, elem));
}
}
/**
     * Every invocation of this method returns a multicast port that is never
     * reused for a different test case.
*
* @param cls Class.
* @return Next multicast port.
*/
public static synchronized int getNextMulticastPort(Class<? extends GridAbstractTest> cls) {
Integer portRet = mcastPorts.get(cls);
if (portRet != null)
return portRet;
int startPort = mcastPort;
while (true) {
if (mcastPort >= max_mcast_port)
mcastPort = default_mcast_port;
else
mcastPort++;
if (startPort == mcastPort)
break;
portRet = mcastPort;
MulticastSocket sock = null;
try {
sock = new MulticastSocket(portRet);
break;
}
catch (IOException ignored) {
// No-op.
}
finally {
U.closeQuiet(sock);
}
}
// Cache port to be reused by the same test.
mcastPorts.put(cls, portRet);
return portRet;
}
/**
     * Every invocation of this method returns a communication port that is never
     * reused for a different test case.
*
* @param cls Class.
* @return Next communication port.
*/
public static synchronized int getNextCommPort(Class<? extends GridAbstractTest> cls) {
Integer portRet = commPorts.get(cls);
if (portRet != null)
return portRet;
if (commPort >= max_comm_port)
commPort = default_comm_port;
else
// Reserve 10 ports per test.
commPort += 10;
portRet = commPort;
// Cache port to be reused by the same test.
commPorts.put(cls, portRet);
return portRet;
}
/**
     * Every invocation of this method returns a discovery port that is never
     * reused for a different test case.
*
* @param cls Class.
* @return Next discovery port.
*/
public static synchronized int getNextDiscoPort(Class<? extends GridAbstractTest> cls) {
Integer portRet = discoPorts.get(cls);
if (portRet != null)
return portRet;
if (discoPort >= max_disco_port)
discoPort = default_disco_port;
else
discoPort += 10;
portRet = discoPort;
// Cache port to be reused by the same test.
discoPorts.put(cls, portRet);
return portRet;
}
/**
* @return Free communication port number on localhost.
* @throws IOException If unable to find a free port.
*/
public static int getFreeCommPort() throws IOException {
for (int port = default_comm_port; port < max_comm_port; port++) {
try (ServerSocket sock = new ServerSocket(port)) {
return sock.getLocalPort();
}
catch (IOException ignored) {
// No-op.
}
}
throw new IOException("Unable to find a free communication port.");
}
/**
* Every invocation of this method will never return a
* repeating multicast group for a different test case.
*
* @param cls Class.
* @return Next multicast group.
*/
public static synchronized String getNextMulticastGroup(Class<?> cls) {
String addrStr = addrs.get(cls);
if (addrStr != null)
return addrStr;
// Increment address.
if (addr[3] == 255) {
if (addr[2] == 255)
assert false;
else {
addr[2] += 1;
addr[3] = 1;
}
}
else
addr[3] += 1;
// Convert address to string.
StringBuilder b = new StringBuilder(15);
for (int i = 0; i < addr.length; i++) {
b.append(addr[i]);
if (i < addr.length - 1)
b.append('.');
}
addrStr = b.toString();
// Cache address to be reused by the same test.
addrs.put(cls, addrStr);
return addrStr;
}
/**
* Runs runnable object in specified number of threads.
*
* @param run Target runnable.
* @param threadNum Number of threads.
* @param threadName Thread name.
* @return Execution time in milliseconds.
* @throws Exception Thrown if at least one runnable execution failed.
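     * <p>
     * Example (illustrative):
     * <pre>{@code
     * AtomicInteger cnt = new AtomicInteger();
     * GridTestUtils.runMultiThreaded(cnt::incrementAndGet, 4, "test-worker");
     * assert cnt.get() == 4;
     * }</pre>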
*/
public static long runMultiThreaded(Runnable run, int threadNum, String threadName) throws Exception {
return runMultiThreaded(makeCallable(run, null), threadNum, threadName);
}
/**
* Runs runnable object in specified number of threads.
*
* @param run Target runnable.
* @param threadNum Number of threads.
* @param threadName Thread name.
* @return Future for the run. Future returns execution time in milliseconds.
*/
public static IgniteInternalFuture<Long> runMultiThreadedAsync(Runnable run, int threadNum, String threadName) {
return runMultiThreadedAsync(makeCallable(run, null), threadNum, threadName);
}
/**
* Runs callable object in specified number of threads.
*
* @param call Callable.
* @param threadNum Number of threads.
* @param threadName Thread names.
* @return Execution time in milliseconds.
* @throws Exception If failed.
*/
public static long runMultiThreaded(Callable<?> call, int threadNum, String threadName) throws Exception {
List<Callable<?>> calls = Collections.<Callable<?>>nCopies(threadNum, call);
return runMultiThreaded(calls, threadName);
}
/**
* @param call Closure that receives thread index.
* @param threadNum Number of threads.
* @param threadName Thread names.
* @return Execution time in milliseconds.
* @throws Exception If failed.
*/
public static long runMultiThreaded(final IgniteInClosure<Integer> call, int threadNum, String threadName)
throws Exception {
List<Callable<?>> calls = new ArrayList<>(threadNum);
for (int i = 0; i < threadNum; i++) {
final int idx = i;
calls.add(new Callable<Void>() {
@Override public Void call() throws Exception {
call.apply(idx);
return null;
}
});
}
return runMultiThreaded(calls, threadName);
}
/**
* Runs callable object in specified number of threads.
*
* @param call Callable.
* @param threadNum Number of threads.
* @param threadName Thread names.
* @return Future for the run. Future returns execution time in milliseconds.
*/
public static IgniteInternalFuture<Long> runMultiThreadedAsync(Callable<?> call, int threadNum, final String threadName) {
final List<Callable<?>> calls = Collections.<Callable<?>>nCopies(threadNum, call);
final GridTestSafeThreadFactory threadFactory = new GridTestSafeThreadFactory(threadName);
IgniteInternalFuture<Long> runFut = runAsync(() -> runMultiThreaded(calls, threadFactory));
GridFutureAdapter<Long> resFut = new GridFutureAdapter<Long>() {
@Override public boolean cancel() throws IgniteCheckedException {
super.cancel();
if (isDone())
return false;
runFut.cancel();
threadFactory.interruptAllThreads();
return onCancelled();
}
};
runFut.listen(fut -> {
try {
resFut.onDone(fut.get());
}
catch (IgniteFutureCancelledCheckedException e) {
resFut.onCancelled();
}
catch (Throwable e) {
resFut.onDone(e);
}
});
return resFut;
}
/**
* Runs callable tasks each in separate threads.
*
* @param calls Callable tasks.
* @param threadName Thread name.
* @return Execution time in milliseconds.
* @throws Exception If failed.
*/
public static long runMultiThreaded(Iterable<Callable<?>> calls, String threadName) throws Exception {
return runMultiThreaded(calls, new GridTestSafeThreadFactory(threadName));
}
/**
* Runs callable tasks each in separate threads.
*
* @param calls Callable tasks.
* @param threadFactory Thread factory.
* @return Execution time in milliseconds.
* @throws Exception If failed.
*/
public static long runMultiThreaded(Iterable<Callable<?>> calls, GridTestSafeThreadFactory threadFactory)
throws Exception {
if (!busyLock.enterBusy())
throw new IllegalStateException("Failed to start new threads (test is being stopped).");
Collection<Thread> threads = new ArrayList<>();
long time;
try {
for (Callable<?> call : calls)
threads.add(threadFactory.newThread(call));
time = System.currentTimeMillis();
for (Thread t : threads)
t.start();
}
finally {
busyLock.leaveBusy();
}
        // Wait for threads to finish their job.
try {
for (Thread t : threads)
t.join();
} catch (InterruptedException e) {
for (Thread t : threads)
t.interrupt();
throw e;
}
time = System.currentTimeMillis() - time;
        // Check for errors that occurred in the threads.
threadFactory.checkError();
return time;
}
/**
     * Runs runnable task asynchronously.
*
* @param task Runnable.
* @return Future with task result.
*/
public static IgniteInternalFuture runAsync(final Runnable task) {
return runAsync(task,"async-runnable-runner");
}
/**
     * Runs runnable task asynchronously.
*
* @param task Runnable.
* @return Future with task result.
*/
public static IgniteInternalFuture runAsync(final Runnable task, String threadName) {
return runAsync(() -> {
task.run();
return null;
}, threadName);
}
/**
     * Runs callable task asynchronously.
*
* @param task Callable.
* @return Future with task result.
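     * <p>
     * Example (illustrative):
     * <pre>{@code
     * IgniteInternalFuture<Integer> fut = GridTestUtils.runAsync(() -> 40 + 2);
     * assert fut.get() == 42;
     * }</pre>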
*/
public static <T> IgniteInternalFuture<T> runAsync(final Callable<T> task) {
return runAsync(task, "async-callable-runner");
}
/**
     * Runs callable task asynchronously.
*
* @param task Callable.
* @param threadName Thread name.
* @return Future with task result.
*/
public static <T> IgniteInternalFuture<T> runAsync(final Callable<T> task, String threadName) {
if (!busyLock.enterBusy())
throw new IllegalStateException("Failed to start new threads (test is being stopped).");
try {
final GridTestSafeThreadFactory thrFactory = new GridTestSafeThreadFactory(threadName);
final GridFutureAdapter<T> fut = new GridFutureAdapter<T>() {
@Override public boolean cancel() throws IgniteCheckedException {
super.cancel();
if (isDone())
return false;
thrFactory.interruptAllThreads();
try {
get();
return false;
}
catch (IgniteFutureCancelledCheckedException e) {
return true;
}
catch (IgniteCheckedException e) {
return false;
}
}
};
thrFactory.newThread(() -> {
try {
// Execute task.
T res = task.call();
fut.onDone(res);
}
catch (InterruptedException e) {
fut.onCancelled();
}
catch (Throwable e) {
fut.onDone(e);
}
}).start();
return fut;
}
finally {
busyLock.leaveBusy();
}
}
/**
* Wait for all passed futures to complete even if they fail.
*
* @param futs Futures.
* @throws AssertionError Suppresses underlying exceptions if some futures failed.
*/
public static void waitForAllFutures(IgniteInternalFuture<?>... futs) {
AssertionError err = null;
for (IgniteInternalFuture<?> fut : futs) {
try {
fut.get();
}
catch (Throwable t) {
if (err == null)
err = new AssertionError("One or several futures threw the exception.");
err.addSuppressed(t);
}
}
if (err != null)
throw err;
}
/**
* Interrupts and waits for termination of all the threads started
* so far by current test.
*
* @param log Logger.
*/
public static void stopThreads(IgniteLogger log) {
busyLock.block();
try {
GridTestSafeThreadFactory.stopAllThreads(log);
}
finally {
busyLock.unblock();
}
}
/**
* @return Ignite home.
* @throws Exception If failed.
*/
@SuppressWarnings({"ProhibitedExceptionThrown"})
public static String getIgniteHome() throws Exception {
String ggHome = System.getProperty("IGNITE_HOME");
if (ggHome == null)
ggHome = System.getenv("IGNITE_HOME");
if (ggHome == null)
throw new Exception("IGNITE_HOME parameter must be set either as system or environment variable.");
File dir = new File(ggHome);
if (!dir.exists())
throw new Exception("Ignite home does not exist [ignite-home=" + dir.getAbsolutePath() + ']');
if (!dir.isDirectory())
throw new Exception("Ignite home is not a directory [ignite-home=" + dir.getAbsolutePath() + ']');
return ggHome;
}
/**
* @param <T> Type.
* @param cls Class.
* @param annCls Annotation class.
* @return Annotation.
*/
@Nullable public static <T extends Annotation> T getAnnotation(Class<?> cls, Class<T> annCls) {
for (Class<?> cls0 = cls; cls0 != null; cls0 = cls0.getSuperclass()) {
T ann = cls0.getAnnotation(annCls);
if (ann != null)
return ann;
}
return null;
}
/**
* Initializes address.
*/
static {
InetAddress locHost = null;
try {
locHost = U.getLocalHost();
}
catch (IOException e) {
assert false : "Unable to get local address. This leads to the same multicast addresses " +
"in the local network.";
}
if (locHost != null) {
int thirdByte = locHost.getAddress()[3];
if (thirdByte < 0)
thirdByte += 256;
// To get different addresses for different machines.
addr = new int[] {229, thirdByte, 1, 1};
}
else
addr = new int[] {229, 1, 1, 1};
}
/**
* @param path Path.
* @param startFilter Start filter.
* @param endFilter End filter.
* @return List of JARs that corresponds to the filters.
* @throws IOException If failed.
*/
private static Collection<String> getFiles(String path, @Nullable final String startFilter,
@Nullable final String endFilter) throws IOException {
Collection<String> res = new ArrayList<>();
File file = new File(path);
assert file.isDirectory();
File[] jars = file.listFiles(new FilenameFilter() {
/**
* @see FilenameFilter#accept(File, String)
*/
@SuppressWarnings({"UnnecessaryJavaDocLink"})
@Override public boolean accept(File dir, String name) {
// Exclude spring.jar because it tries to load META-INF/spring-handlers.xml from
// all available JARs and create instances of classes from there for example.
// Exclude logging as it is used by spring and casted to Log interface.
// Exclude log4j because of the design - 1 per VM.
if (name.startsWith("spring") || name.startsWith("log4j") ||
name.startsWith("commons-logging") || name.startsWith("junit") ||
name.startsWith("ignite-tests"))
return false;
boolean ret = true;
if (startFilter != null)
ret = name.startsWith(startFilter);
if (ret && endFilter != null)
ret = name.endsWith(endFilter);
return ret;
}
});
for (File jar : jars)
res.add(jar.getCanonicalPath());
return res;
}
/**
* Silent stop grid.
* Method doesn't throw any exception.
*
* @param ignite Grid to stop.
* @param log Logger.
*/
public static void close(Ignite ignite, IgniteLogger log) {
if (ignite != null)
try {
G.stop(ignite.name(), false);
}
catch (Throwable e) {
U.error(log, "Failed to stop grid: " + ignite.name(), e);
}
}
/**
* Silent stop grid.
* Method doesn't throw any exception.
*
* @param igniteInstanceName Ignite instance name.
* @param log Logger.
*/
public static void stopGrid(String igniteInstanceName, IgniteLogger log) {
try {
G.stop(igniteInstanceName, false);
}
catch (Throwable e) {
U.error(log, "Failed to stop grid: " + igniteInstanceName, e);
}
}
/**
* Gets file representing the path passed in. First the check is made if path is absolute.
* If not, then the check is made if path is relative to ${IGNITE_HOME}. If both checks fail,
* then {@code null} is returned, otherwise file representing path is returned.
* <p>
* See {@link #getIgniteHome()} for information on how {@code IGNITE_HOME} is retrieved.
*
* @param path Path to resolve.
* @return Resolved path, or {@code null} if file cannot be resolved.
* @see #getIgniteHome()
*/
@Nullable public static File resolveIgnitePath(String path) {
return resolvePath(null, path);
}
/**
* @param igniteHome Optional ignite home path.
* @param path Path to resolve.
* @return Resolved path, or {@code null} if file cannot be resolved.
*/
@Nullable private static File resolvePath(@Nullable String igniteHome, String path) {
File file = new File(path).getAbsoluteFile();
if (!file.exists()) {
String home = igniteHome != null ? igniteHome : U.getIgniteHome();
if (home == null)
return null;
file = new File(home, path);
return file.exists() ? file : null;
}
return file;
}
/**
* @param cache Cache.
* @return Cache context.
*/
public static <K, V> GridCacheContext<K, V> cacheContext(IgniteCache<K, V> cache) {
return ((IgniteKernal)cache.unwrap(Ignite.class)).<K, V>internalCache(cache.getName()).context();
}
/**
* @param cache Cache.
* @return Near cache.
*/
public static <K, V> GridNearCacheAdapter<K, V> near(IgniteCache<K, V> cache) {
return cacheContext(cache).near();
}
/**
* @param cache Cache.
* @return DHT cache.
*/
public static <K, V> GridDhtCacheAdapter<K, V> dht(IgniteCache<K, V> cache) {
return near(cache).dht();
}
/**
* @param cacheName Cache name.
* @param backups Number of backups.
* @param log Logger.
* @throws Exception If failed.
*/
@SuppressWarnings("BusyWait")
public static <K, V> void waitTopologyUpdate(@Nullable String cacheName, int backups, IgniteLogger log)
throws Exception {
for (Ignite g : Ignition.allGrids()) {
IgniteCache<K, V> cache = ((IgniteEx)g).cache(cacheName);
GridDhtPartitionTopology top = dht(cache).topology();
while (true) {
boolean wait = false;
for (int p = 0; p < g.affinity(cacheName).partitions(); p++) {
Collection<ClusterNode> nodes = top.nodes(p, AffinityTopologyVersion.NONE);
if (nodes.size() > backups + 1) {
LT.warn(log, "Partition map was not updated yet (will wait) [igniteInstanceName=" + g.name() +
", p=" + p + ", nodes=" + F.nodeIds(nodes) + ']');
wait = true;
break;
}
}
if (wait)
Thread.sleep(20);
else
break; // While.
}
}
}
/**
     * Converts a runnable task into a callable one.
*
* @param run Runnable task to convert into callable one.
* @param res Callable result.
* @param <T> The result type of method <tt>call</tt>, always {@code null}.
* @return Callable task around the specified runnable one.
*/
public static <T> Callable<T> makeCallable(final Runnable run, @Nullable final T res) {
return new Callable<T>() {
@Override public T call() throws Exception {
run.run();
return res;
}
};
}
/**
* Get object field value via reflection.
*
* @param obj Object or class to get field value from.
* @param cls Class.
* @param fieldName Field names to get value for.
* @param <T> Expected field class.
* @return Field value.
* @throws IgniteException In case of error.
*/
public static <T> T getFieldValue(Object obj, Class cls, String fieldName) throws IgniteException {
assert obj != null;
assert fieldName != null;
try {
return (T)findField(cls, obj, fieldName);
}
catch (NoSuchFieldException | IllegalAccessException e) {
throw new IgniteException("Failed to get object field [obj=" + obj +
", fieldName=" + fieldName + ']', e);
}
}
/**
* Get object field value via reflection.
*
* @param obj Object or class to get field value from.
* @param fieldNames Field names to get value for: obj->field1->field2->...->fieldN.
* @param <T> Expected field class.
* @return Field value.
* @throws IgniteException In case of error.
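     * <p>
     * Example (illustrative; the field name is hypothetical and depends on the target class):
     * <pre>{@code
     * Object ctx = GridTestUtils.getFieldValue(ignite, "ctx");
     * }</pre>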
*/
public static <T> T getFieldValue(Object obj, String... fieldNames) throws IgniteException {
assert obj != null;
assert fieldNames != null;
assert fieldNames.length >= 1;
try {
for (String fieldName : fieldNames) {
Class<?> cls = obj instanceof Class ? (Class)obj : obj.getClass();
try {
obj = findField(cls, obj, fieldName);
}
catch (NoSuchFieldException e) {
// Resolve inner class, if not an inner field.
Class<?> innerCls = getInnerClass(cls, fieldName);
if (innerCls == null)
throw new IgniteException("Failed to get object field [obj=" + obj +
", fieldNames=" + Arrays.toString(fieldNames) + ']', e);
obj = innerCls;
}
}
return (T)obj;
}
catch (IllegalAccessException e) {
throw new IgniteException("Failed to get object field [obj=" + obj +
", fieldNames=" + Arrays.toString(fieldNames) + ']', e);
}
}
/**
* Get object field value via reflection(including superclass).
*
* @param obj Object or class to get field value from.
* @param fieldNames Field names to get value for: obj->field1->field2->...->fieldN.
* @param <T> Expected field class.
* @return Field value.
* @throws IgniteException In case of error.
*/
public static <T> T getFieldValueHierarchy(Object obj, String... fieldNames) throws IgniteException {
assert obj != null;
assert fieldNames != null;
assert fieldNames.length >= 1;
try {
for (String fieldName : fieldNames) {
Class<?> cls = obj instanceof Class ? (Class)obj : obj.getClass();
while (cls != null) {
try {
obj = findField(cls, obj, fieldName);
break;
}
catch (NoSuchFieldException e) {
cls = cls.getSuperclass();
}
}
}
return (T)obj;
}
catch (IllegalAccessException e) {
throw new IgniteException("Failed to get object field [obj=" + obj +
", fieldNames=" + Arrays.toString(fieldNames) + ']', e);
}
}
/**
* @param cls Class for searching.
* @param obj Target object.
* @param fieldName Field name for search.
* @return Field from object if it was found.
*/
private static Object findField(Class<?> cls, Object obj,
String fieldName) throws NoSuchFieldException, IllegalAccessException {
// Resolve inner field.
Field field = cls.getDeclaredField(fieldName);
boolean accessible = field.isAccessible();
if (!accessible)
field.setAccessible(true);
return field.get(obj);
}
/**
* Change static final fields.
     * @param field Field to be changed.
* @param newVal New value.
* @throws Exception If failed.
*/
public static void setFieldValue(Field field, Object newVal) throws Exception {
field.setAccessible(true);
Field modifiersField = Field.class.getDeclaredField("modifiers");
AccessController.doPrivileged(new PrivilegedAction() {
@Override
public Object run() {
modifiersField.setAccessible(true);
return null;
}
});
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
field.set(null, newVal);
}
/**
* Get inner class by its name from the enclosing class.
*
* @param parentCls Parent class to resolve inner class for.
* @param innerClsName Name of the inner class.
* @return Inner class.
*/
@Nullable public static <T> Class<T> getInnerClass(Class<?> parentCls, String innerClsName) {
for (Class<?> cls : parentCls.getDeclaredClasses())
if (innerClsName.equals(cls.getSimpleName()))
return (Class<T>)cls;
return null;
}
/**
* Set object field value via reflection.
*
* @param obj Object to set field value to.
* @param fieldName Field name to set value for.
* @param val New field value.
* @throws IgniteException In case of error.
*/
public static void setFieldValue(Object obj, String fieldName, Object val) throws IgniteException {
assert obj != null;
assert fieldName != null;
try {
Class<?> cls = obj instanceof Class ? (Class)obj : obj.getClass();
Field field = cls.getDeclaredField(fieldName);
boolean accessible = field.isAccessible();
if (!accessible)
field.setAccessible(true);
field.set(obj, val);
}
catch (NoSuchFieldException | IllegalAccessException e) {
throw new IgniteException("Failed to set object field [obj=" + obj + ", field=" + fieldName + ']', e);
}
}
/**
* Set object field value via reflection.
*
* @param obj Object to set field value to.
* @param cls Class to get field from.
* @param fieldName Field name to set value for.
* @param val New field value.
* @throws IgniteException In case of error.
*/
public static void setFieldValue(Object obj, Class cls, String fieldName, Object val) throws IgniteException {
assert fieldName != null;
try {
Field field = cls.getDeclaredField(fieldName);
boolean accessible = field.isAccessible();
if (!accessible)
field.setAccessible(true);
boolean isFinal = (field.getModifiers() & Modifier.FINAL) != 0;
if (isFinal) {
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
}
field.set(obj, val);
}
catch (NoSuchFieldException | IllegalAccessException e) {
throw new IgniteException("Failed to set object field [obj=" + obj + ", field=" + fieldName + ']', e);
}
}
/**
* Invoke method on an object.
*
* @param obj Object to call method on.
* @param mtd Method to invoke.
* @param params Parameters of the method.
* @return Method invocation result.
* @throws Exception If failed.
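     * <p>
     * Example (illustrative; the target object and method name are hypothetical):
     * <pre>{@code
     * Integer size = GridTestUtils.invoke(someCollection, "size");
     * }</pre>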
*/
@Nullable public static <T> T invoke(Object obj, String mtd, Object... params) throws Exception {
Class<?> cls = obj.getClass();
do {
            // We cannot resolve the method by parameter classes because some of the parameters can be null.
            // Search for the correct method among all declared methods.
for (Method m : cls.getDeclaredMethods()) {
// Filter methods by name.
if (!m.getName().equals(mtd))
continue;
if (!areCompatible(params, m.getParameterTypes()))
continue;
try {
boolean accessible = m.isAccessible();
if (!accessible)
m.setAccessible(true);
return (T)m.invoke(obj, params);
}
catch (IllegalAccessException e) {
throw new RuntimeException("Failed to access method" +
" [obj=" + obj + ", mtd=" + mtd + ", params=" + Arrays.toString(params) + ']', e);
}
catch (InvocationTargetException e) {
Throwable cause = e.getCause();
if (cause instanceof Error)
throw (Error) cause;
if (cause instanceof Exception)
throw (Exception) cause;
throw new RuntimeException("Failed to invoke method)" +
" [obj=" + obj + ", mtd=" + mtd + ", params=" + Arrays.toString(params) + ']', e);
}
}
cls = cls.getSuperclass();
} while (cls != Object.class);
throw new RuntimeException("Failed to find method" +
" [obj=" + obj + ", mtd=" + mtd + ", params=" + Arrays.toString(params) + ']');
}
/**
* Check objects and corresponding types are compatible.
*
* @param objs Objects array.
* @param types Classes array.
     * @return {@code true} if objects in the array can be cast to the corresponding types.
*/
private static boolean areCompatible(Object[] objs, Class[] types) {
if (objs.length != types.length)
return false;
for (int i = 0, size = objs.length; i < size; i++) {
Object o = objs[i];
if (o != null && !types[i].isInstance(o))
return false;
}
return true;
}
/**
     * Tries a few times to perform some assertion. In the worst case the
* {@code assertion} closure will be executed {@code retries} + 1 times and
* thread will spend approximately {@code retries} * {@code retryInterval} sleeping.
*
* @param log Log.
* @param retries Number of retries.
* @param retryInterval Interval between retries in milliseconds.
* @param c Closure with assertion. All {@link AssertionError}s thrown
* from this closure will be ignored {@code retries} times.
* @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If interrupted.
*/
@SuppressWarnings("ErrorNotRethrown")
public static void retryAssert(@Nullable IgniteLogger log, int retries, long retryInterval, GridAbsClosure c)
throws IgniteInterruptedCheckedException {
for (int i = 0; i < retries; i++) {
try {
c.apply();
return;
}
catch (AssertionError e) {
U.warn(log, "Check failed (will retry in " + retryInterval + "ms).", e);
U.sleep(retryInterval);
}
}
// Apply the last time without guarding try.
c.apply();
}
/**
* Reads entire file into byte array.
*
* @param file File to read.
* @return Content of file in byte array.
* @throws IOException If failed.
*/
public static byte[] readFile(File file) throws IOException {
assert file.exists();
assert file.length() < Integer.MAX_VALUE;
byte[] bytes = new byte[(int) file.length()];
try (FileInputStream fis = new FileInputStream(file)) {
int readBytesCnt = fis.read(bytes);
assert readBytesCnt == bytes.length;
}
return bytes;
}
/**
* Reads resource into byte array.
*
* @param classLoader Classloader.
* @param resourceName Resource name.
     * @return Content of resource in byte array.
* @throws IOException If failed.
*/
public static byte[] readResource(ClassLoader classLoader, String resourceName) throws IOException {
try (InputStream is = classLoader.getResourceAsStream(resourceName)) {
assertNotNull("Resource is missing: " + resourceName , is);
try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
U.copy(is, baos);
return baos.toByteArray();
}
}
}
/**
* Sleeps and increments an integer.
* <p>
* Allows for loops like the following:
* <pre>{@code
* for (int i = 0; i < 20 && !condition; i = sleepAndIncrement(200, i)) {
* ...
* }
* }</pre>
* for busy-waiting limited number of iterations.
*
* @param sleepDur Sleep duration in milliseconds.
* @param i Integer to increment.
* @return Incremented value.
* @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If sleep was interrupted.
*/
public static int sleepAndIncrement(int sleepDur, int i) throws IgniteInterruptedCheckedException {
U.sleep(sleepDur);
return i + 1;
}
/**
* Waits for condition, polling in busy wait loop.
*
* @param cond Condition to wait for.
* @param timeout Max time to wait in milliseconds.
* @return {@code true} if condition was achieved, {@code false} otherwise.
* @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If interrupted.
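     * <p>
     * Typical use (illustrative; {@code ignite} is a hypothetical node reference):
     * <pre>{@code
     * assert GridTestUtils.waitForCondition(() -> ignite.cluster().nodes().size() == 2, 5_000);
     * }</pre>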
*/
public static boolean waitForCondition(GridAbsPredicate cond, long timeout) throws IgniteInterruptedCheckedException {
long curTime = U.currentTimeMillis();
long endTime = curTime + timeout;
if (endTime < 0)
endTime = Long.MAX_VALUE;
while (curTime < endTime) {
if (cond.apply())
return true;
U.sleep(DFLT_BUSYWAIT_SLEEP_INTERVAL);
curTime = U.currentTimeMillis();
}
return false;
}
/**
* Creates an SSL context from test key store with disabled trust manager.
*
* @return Initialized context.
* @throws GeneralSecurityException In case if context could not be initialized.
* @throws IOException If keystore cannot be accessed.
*/
public static SSLContext sslContext() throws GeneralSecurityException, IOException {
SSLContext ctx = SSLContext.getInstance("TLS");
char[] storePass = keyStorePassword().toCharArray();
KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance("SunX509");
KeyStore keyStore = KeyStore.getInstance("JKS");
keyStore.load(new FileInputStream(U.resolveIgnitePath(GridTestProperties.getProperty("ssl.keystore.path"))),
storePass);
keyMgrFactory.init(keyStore, storePass);
ctx.init(keyMgrFactory.getKeyManagers(),
new TrustManager[]{GridSslBasicContextFactory.getDisabledTrustManager()}, null);
return ctx;
}
/**
* Creates test-purposed SSL context factory from test key store with disabled trust manager.
*
* @return SSL context factory used in test.
*/
public static GridSslContextFactory sslContextFactory() {
GridSslBasicContextFactory factory = new GridSslBasicContextFactory();
factory.setKeyStoreFilePath(
U.resolveIgnitePath(GridTestProperties.getProperty("ssl.keystore.path")).getAbsolutePath());
factory.setKeyStorePassword(keyStorePassword().toCharArray());
factory.setTrustManagers(GridSslBasicContextFactory.getDisabledTrustManager());
return factory;
}
/**
* Creates test-purposed SSL context factory from test key store with disabled trust manager.
*
* @return SSL context factory used in test.
*/
public static Factory<SSLContext> sslFactory() {
SslContextFactory factory = new SslContextFactory();
factory.setKeyStoreFilePath(
U.resolveIgnitePath(GridTestProperties.getProperty("ssl.keystore.path")).getAbsolutePath());
factory.setKeyStorePassword(keyStorePassword().toCharArray());
factory.setTrustManagers(SslContextFactory.getDisabledTrustManager());
return factory;
}
/**
* Creates test-purposed SSL context factory from specified key store and trust store.
*
* @param keyStore Key store name.
* @param trustStore Trust store name.
* @return SSL context factory used in test.
*/
public static Factory<SSLContext> sslTrustedFactory(String keyStore, String trustStore) {
SslContextFactory factory = new SslContextFactory();
factory.setKeyStoreFilePath(keyStorePath(keyStore));
factory.setKeyStorePassword(keyStorePassword().toCharArray());
factory.setTrustStoreFilePath(keyStorePath(trustStore));
factory.setTrustStorePassword(keyStorePassword().toCharArray());
return factory;
}
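    /**
     * @return Password for the test SSL key stores, taken from the {@code ssl.keystore.password} test property.
     */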
public static String keyStorePassword() {
return GridTestProperties.getProperty("ssl.keystore.password");
}
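    /**
     * @param keyStore Key store name.
     * @return Absolute path to the named test key store, resolved from the {@code ssl.keystore.*.path} test property.
     */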
@NotNull public static String keyStorePath(String keyStore) {
return U.resolveIgnitePath(GridTestProperties.getProperty(
"ssl.keystore." + keyStore + ".path")).getAbsolutePath();
}
/**
* @param o1 Object 1.
* @param o2 Object 2.
* @return Equals or not.
*/
public static boolean deepEquals(@Nullable Object o1, @Nullable Object o2) {
if (o1 == o2)
return true;
else if (o1 == null || o2 == null)
return false;
else if (o1.getClass() != o2.getClass())
return false;
else {
Class<?> cls = o1.getClass();
assert o2.getClass() == cls;
for (Field f : cls.getDeclaredFields()) {
f.setAccessible(true);
Object v1;
Object v2;
try {
v1 = f.get(o1);
v2 = f.get(o2);
}
catch (IllegalAccessException e) {
throw new AssertionError(e);
}
if (!Objects.deepEquals(v1, v2))
return false;
}
return true;
}
}
/**
* Converts integer permission mode into set of {@link PosixFilePermission}.
*
* @param mode File mode.
* @return Set of {@link PosixFilePermission}.
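     * <p>
     * For example, {@code modeToPermissionSet(0644)} yields owner read/write plus group and others read.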
*/
public static Set<PosixFilePermission> modeToPermissionSet(int mode) {
Set<PosixFilePermission> res = EnumSet.noneOf(PosixFilePermission.class);
if ((mode & 0400) > 0)
res.add(PosixFilePermission.OWNER_READ);
if ((mode & 0200) > 0)
res.add(PosixFilePermission.OWNER_WRITE);
if ((mode & 0100) > 0)
res.add(PosixFilePermission.OWNER_EXECUTE);
if ((mode & 040) > 0)
res.add(PosixFilePermission.GROUP_READ);
if ((mode & 020) > 0)
res.add(PosixFilePermission.GROUP_WRITE);
if ((mode & 010) > 0)
res.add(PosixFilePermission.GROUP_EXECUTE);
if ((mode & 04) > 0)
res.add(PosixFilePermission.OTHERS_READ);
if ((mode & 02) > 0)
res.add(PosixFilePermission.OTHERS_WRITE);
if ((mode & 01) > 0)
res.add(PosixFilePermission.OTHERS_EXECUTE);
return res;
}
/**
* @param name Name.
* @param run Run.
*/
public static void benchmark(@Nullable String name, @NotNull Runnable run) {
benchmark(name, 8000, 10000, run);
}
/**
* @param name Name.
* @param warmup Warmup.
* @param executionTime Time.
* @param run Run.
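     * <p>
     * Example (illustrative; the measured action is hypothetical):
     * <pre>{@code
     * GridTestUtils.benchmark("map-put", 1000, 3000, () -> map.put(key, key));
     * }</pre>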
*/
public static void benchmark(@Nullable String name, long warmup, long executionTime, @NotNull Runnable run) {
final AtomicBoolean stop = new AtomicBoolean();
class Stopper extends TimerTask {
@Override public void run() {
stop.set(true);
}
}
new Timer(true).schedule(new Stopper(), warmup);
while (!stop.get())
run.run();
stop.set(false);
new Timer(true).schedule(new Stopper(), executionTime);
long startTime = System.currentTimeMillis();
int cnt = 0;
do {
run.run();
cnt++;
}
while (!stop.get());
double dur = (System.currentTimeMillis() - startTime) / 1000d;
System.out.printf("%s:\n operations:%d, duration=%fs, op/s=%d, latency=%fms\n", name, cnt, dur,
(long)(cnt / dur), dur / cnt);
}
/**
     * Prompts the garbage collector to run.
     * {@code System.gc();} is not guaranteed to trigger garbage collection, so this method tries to fill memory
     * to crowd out dead objects.
*/
public static void runGC() {
System.gc();
ReferenceQueue<byte[]> queue = new ReferenceQueue<>();
Collection<SoftReference<byte[]>> refs = new ArrayList<>();
while (true) {
byte[] bytes = new byte[128 * 1024];
refs.add(new SoftReference<>(bytes, queue));
if (queue.poll() != null)
break;
}
System.gc();
}
/**
* @return Path to apache ignite.
*/
public static String apacheIgniteTestPath() {
return System.getProperty("IGNITE_TEST_PATH", U.getIgniteHome() + "/target/ignite");
}
/**
* {@link Class#getSimpleName()} does not return outer class name prefix for inner classes, for example,
* getSimpleName() returns "RegularDiscovery" instead of "GridDiscoveryManagerSelfTest$RegularDiscovery"
     * This method returns the correct simple name for inner classes.
*
* @param cls Class
* @return Simple name with outer class prefix.
*/
public static String fullSimpleName(@NotNull Class cls) {
if (cls.getEnclosingClass() != null)
return cls.getEnclosingClass().getSimpleName() + "." + cls.getSimpleName();
else
return cls.getSimpleName();
}
/**
* Adds test class to the list only if it's not in {@code ignoredTests} set.
*
* @param suite List where to place the test class.
* @param test Test.
     * @param ignoredTests Tests to ignore. If a test is contained in the collection, it is not included in the suite.
*/
public static void addTestIfNeeded(@NotNull final List<Class<?>> suite, @NotNull final Class<?> test,
@Nullable final Collection<Class> ignoredTests) {
if (ignoredTests != null && ignoredTests.contains(test))
return;
suite.add(test);
}
/**
* Generate random alphabetical string.
*
* @param rnd Random object.
* @param maxLen Maximal length of string
* @return Random string object.
*/
public static String randomString(Random rnd, int maxLen) {
int len = rnd.nextInt(maxLen);
StringBuilder b = new StringBuilder(len);
for (int i = 0; i < len; i++)
b.append(ALPHABETH.charAt(rnd.nextInt(ALPHABETH.length())));
return b.toString();
}
/**
* @param node Node.
* @param topVer Ready exchange version to wait for before trying to merge exchanges.
*/
public static void mergeExchangeWaitVersion(Ignite node, long topVer) {
((IgniteEx)node).context().cache().context().exchange().mergeExchangesTestWaitVersion(
new AffinityTopologyVersion(topVer, 0), null);
}
/**
* @param node Node.
* @param topVer Ready exchange version to wait for before trying to merge exchanges.
*/
public static void mergeExchangeWaitVersion(Ignite node, long topVer, List mergedEvts) {
((IgniteEx)node).context().cache().context().exchange().mergeExchangesTestWaitVersion(
new AffinityTopologyVersion(topVer, 0), mergedEvts);
}
/** Test parameters scale factor util. */
private static class ScaleFactorUtil {
/** Test speed scale factor property name. */
private static final String TEST_SCALE_FACTOR_PROPERTY = "TEST_SCALE_FACTOR";
/** Min test scale factor value. */
private static final double MIN_TEST_SCALE_FACTOR_VALUE = 0.1;
/** Max test scale factor value. */
private static final double MAX_TEST_SCALE_FACTOR_VALUE = 1.0;
/** Test speed scale factor. */
private static final double TEST_SCALE_FACTOR_VALUE = readScaleFactor();
/** */
private static double readScaleFactor() {
double scaleFactor = Double.parseDouble(System.getProperty(TEST_SCALE_FACTOR_PROPERTY, "1.0"));
scaleFactor = Math.max(scaleFactor, MIN_TEST_SCALE_FACTOR_VALUE);
scaleFactor = Math.min(scaleFactor, MAX_TEST_SCALE_FACTOR_VALUE);
return scaleFactor;
}
/** */
public static int apply(int val) {
return (int)Math.round(TEST_SCALE_FACTOR_VALUE * val);
}
/** */
public static int apply(int val, int lowerBound, int upperBound) {
return applyUB(applyLB(val, lowerBound), upperBound);
}
/** Apply scale factor with lower bound */
public static int applyLB(int val, int lowerBound) {
return Math.max(apply(val), lowerBound);
}
/** Apply scale factor with upper bound */
public static int applyUB(int val, int upperBound) {
return Math.min(apply(val), upperBound);
}
}
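// Illustrative note (not part of the original utilities): with -DTEST_SCALE_FACTOR=0.5 on the JVM command line,
// ScaleFactorUtil.apply(1000) returns 500, and ScaleFactorUtil.apply(1000, 600, 2000) is clamped up to the lower
// bound 600; readScaleFactor() itself clamps the configured factor to the [0.1, 1.0] range.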
/**
* @param node Node to connect to.
* @param params Connection parameters.
* @return Thin JDBC connection to specified node.
*/
public static Connection connect(IgniteEx node, String params) throws SQLException {
Collection<GridPortRecord> recs = node.context().ports().records();
GridPortRecord cliLsnrRec = null;
for (GridPortRecord rec : recs) {
if (rec.clazz() == ClientListenerProcessor.class) {
cliLsnrRec = rec;
break;
}
}
assertNotNull(cliLsnrRec);
String connStr = "jdbc:ignite:thin://127.0.0.1:" + cliLsnrRec.port();
if (!F.isEmpty(params))
connStr += "/?" + params;
return DriverManager.getConnection(connStr);
}
/**
* Removes idle_verify log files created in tests.
*/
public static void cleanIdleVerifyLogFiles() {
File dir = new File(".");
for (File f : dir.listFiles(n -> n.getName().startsWith(IdleVerifyResultV2.IDLE_VERIFY_FILE_PREFIX)))
f.delete();
}
public static class SqlTestFunctions {
/** Sleep milliseconds. */
public static volatile long sleepMs;
/** Fail flag. */
public static volatile boolean fail;
/**
* Sleeps for {@code sleepMs} milliseconds.
*
* @return Amount of milliseconds slept ({@code sleepMs}).
*/
@QuerySqlFunction
@SuppressWarnings("BusyWait")
public static long sleep() {
long end = System.currentTimeMillis() + sleepMs;
long remainTime = sleepMs;
do {
try {
Thread.sleep(remainTime);
}
catch (InterruptedException ignored) {
// No-op
}
}
while ((remainTime = end - System.currentTimeMillis()) > 0);
return sleepMs;
}
/**
* Fails (throws) if {@code fail} is true, returns 0 otherwise.
*
* @return 0 if {@code fail} is false; fails otherwise.
*/
@QuerySqlFunction
public static int can_fail() {
if (fail)
throw new IllegalArgumentException();
else
return 0;
}
/**
* Sleeps for {@code sleepMs} milliseconds and then fails if {@code fail} is true; otherwise returns the sleep time.
*
* @return Amount of milliseconds slept if {@code fail} is false; fails otherwise.
*/
@QuerySqlFunction
public static long sleep_and_can_fail() {
long sleep = sleep();
can_fail();
return sleep;
}
}
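// Illustrative usage sketch (assumption, not part of the original file): when SqlTestFunctions is registered as a
// SQL function holder (e.g. via CacheConfiguration#setSqlFunctionClasses), a query such as
// "SELECT sleep_and_can_fail()" blocks for sleepMs milliseconds and then throws if the fail flag is set.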
/**
* Runnable that can throw exceptions.
*/
@FunctionalInterface
public interface RunnableX extends Runnable {
/**
* Runnable body.
*
* @throws Exception If failed.
*/
void runx() throws Exception;
/** {@inheritDoc} */
@Override default void run() {
try {
runx();
}
catch (Exception e) {
throw new IgniteException(e);
}
}
}
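// Illustrative note (not part of the original file): RunnableX lets tests pass a throwing lambda where a plain
// Runnable is expected, e.g. Runnable r = (RunnableX)() -> Files.delete(path); any checked exception thrown by
// runx() is rethrown from run() wrapped in an IgniteException.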
/**
* IgniteRunnable that can throw exceptions.
*/
@FunctionalInterface
public interface IgniteRunnableX extends IgniteRunnable {
/**
* Runnable body.
*
* @throws Exception If failed.
*/
void runx() throws Exception;
/** {@inheritDoc} */
@Override default void run() {
try {
runx();
}
catch (Exception e) {
throw new IgniteException(e);
}
}
}
}
| [
"\"IGNITE_HOME\""
] | [] | [
"IGNITE_HOME"
] | [] | ["IGNITE_HOME"] | java | 1 | 0 | |
network/proxy/mucp/mucp.go | // Package mucp transparently forwards the incoming request using a go-micro client.
package mucp
import (
"context"
"io"
"os"
"strings"
"sync"
"github.com/micro/go-micro/client"
"github.com/micro/go-micro/client/selector"
"github.com/micro/go-micro/codec"
"github.com/micro/go-micro/codec/bytes"
"github.com/micro/go-micro/config/options"
"github.com/micro/go-micro/network/proxy"
"github.com/micro/go-micro/network/router"
"github.com/micro/go-micro/server"
pb "github.com/micro/go-micro/network/router/proto"
)
// Proxy will transparently proxy requests to an endpoint.
// If no endpoint is specified it will call a service using the client.
type Proxy struct {
// embed options
options.Options
// Endpoint specified the fixed service endpoint to call.
Endpoint string
// The client to use for outbound requests
Client client.Client
// The router for routes
Router router.Router
// The router service client
RouterService pb.RouterService
// A FIB (forwarding table) of routes, keyed by service name
sync.RWMutex
Routes map[string][]router.Route
}
// read client request and write to server
func readLoop(r server.Request, s client.Stream) error {
// request to backend server
req := s.Request()
for {
// get data from client
// no need to decode it
body, err := r.Read()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
// get the header from client
hdr := r.Header()
msg := &codec.Message{
Type: codec.Request,
Header: hdr,
Body: body,
}
// write the raw request
err = req.Codec().Write(msg, nil)
if err == io.EOF {
return nil
} else if err != nil {
return err
}
}
}
func (p *Proxy) getRoute(service string) ([]string, error) {
// converts routes to just addresses
toNodes := func(routes []router.Route) []string {
var nodes []string
for _, node := range routes {
nodes = append(nodes, node.Gateway)
}
return nodes
}
// lookup the route cache first
p.RLock()
routes, ok := p.Routes[service]
// got it!
if ok {
p.RUnlock()
return toNodes(routes), nil
}
p.RUnlock()
// route cache miss, now lookup the router
// if it does not exist, don't error out
// the proxy will just hand off to the client
// and try the registry
// in future we might set a default gateway
if p.Router != nil {
// lookup the router
routes, err := p.Router.Table().Lookup(
router.NewQuery(router.QueryDestination(service)),
)
if err != nil {
return nil, err
}
p.Lock()
if p.Routes == nil {
p.Routes = make(map[string][]router.Route)
}
p.Routes[service] = routes
p.Unlock()
return toNodes(routes), nil
}
// we've tried getting cached routes
// we've tried using the router
addr := os.Getenv("MICRO_ROUTER_ADDRESS")
name := os.Getenv("MICRO_ROUTER")
// if no router is specified we're going to set the default
if len(name) == 0 && len(addr) == 0 {
p.Router = router.DefaultRouter
go p.Router.Advertise()
// recursively execute getRoute
return p.getRoute(service)
}
if len(name) == 0 {
name = "go.micro.router"
}
// lookup the remote router
var addrs []string
// set the remote address if specified
if len(addr) > 0 {
addrs = append(addrs, addr)
} else {
// we have a name so we need to check the registry
services, err := p.Client.Options().Registry.GetService(name)
if err != nil {
return nil, err
}
for _, service := range services {
for _, node := range service.Nodes {
addrs = append(addrs, node.Address)
}
}
}
// no router addresses available
if len(addrs) == 0 {
return nil, selector.ErrNoneAvailable
}
var pbRoutes *pb.LookupResponse
var gerr error
// set default client
if p.RouterService == nil {
p.RouterService = pb.NewRouterService(name, p.Client)
}
// TODO: implement backoff and retries
for _, addr := range addrs {
// call the router
proutes, err := p.RouterService.Lookup(context.Background(), &pb.LookupRequest{
Query: &pb.Query{
Destination: service,
},
}, client.WithAddress(addr))
if err != nil {
gerr = err
continue
}
// set routes
pbRoutes = proutes
break
}
// errored out
if gerr != nil {
return nil, gerr
}
// no routes
if pbRoutes == nil {
return nil, selector.ErrNoneAvailable
}
// convert from pb to []*router.Route
for _, r := range pbRoutes.Routes {
routes = append(routes, router.Route{
Destination: r.Destination,
Gateway: r.Gateway,
Router: r.Router,
Network: r.Network,
Metric: int(r.Metric),
})
}
return toNodes(routes), nil
}
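// Illustrative note (not part of the original file): route lookup can be steered through the environment, e.g.
//
//	MICRO_ROUTER_ADDRESS=10.0.0.10:8084 ./service   (query a router at a fixed address)
//	MICRO_ROUTER=go.micro.router ./service          (resolve the router service via the registry)
//
// With neither variable set, getRoute falls back to router.DefaultRouter and starts advertising it.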
// ServeRequest honours the server.Router interface
func (p *Proxy) ServeRequest(ctx context.Context, req server.Request, rsp server.Response) error {
// set default client
if p.Client == nil {
p.Client = client.DefaultClient
}
// service name
service := req.Service()
endpoint := req.Endpoint()
var addresses []string
// call a specific backend endpoint either by name or address
if len(p.Endpoint) > 0 {
// address:port
if parts := strings.Split(p.Endpoint, ":"); len(parts) > 1 {
addresses = []string{p.Endpoint}
} else {
// get route for endpoint from router
addr, err := p.getRoute(p.Endpoint)
if err != nil {
return err
}
// set the address
addresses = addr
// set the name
service = p.Endpoint
}
} else {
// no endpoint was specified just lookup the route
// get route for endpoint from router
addr, err := p.getRoute(service)
if err != nil {
return err
}
addresses = addr
}
var opts []client.CallOption
// set address if available
if len(addresses) > 0 {
opts = append(opts, client.WithAddress(addresses...))
}
// read initial request
body, err := req.Read()
if err != nil {
return err
}
// create new request with raw bytes body
creq := p.Client.NewRequest(service, endpoint, &bytes.Frame{body}, client.WithContentType(req.ContentType()))
// create new stream
stream, err := p.Client.Stream(ctx, creq, opts...)
if err != nil {
return err
}
defer stream.Close()
// create client request read loop
go readLoop(req, stream)
// get raw response
resp := stream.Response()
// create server response write loop
for {
// read backend response body
body, err := resp.Read()
if err == io.EOF {
return nil
} else if err != nil {
return err
}
// read backend response header
hdr := resp.Header()
// write raw response header to client
rsp.WriteHeader(hdr)
// write raw response body to client
err = rsp.Write(body)
if err == io.EOF {
return nil
} else if err != nil {
return err
}
}
return nil
}
// NewSingleHostProxy returns a proxy which sends requests to a single backend
func NewSingleHostProxy(endpoint string) *Proxy {
return &Proxy{
Options: options.NewOptions(),
Endpoint: endpoint,
}
}
// NewProxy returns a new proxy which will route based on mucp headers
func NewProxy(opts ...options.Option) proxy.Proxy {
p := new(Proxy)
p.Options = options.NewOptions(opts...)
p.Options.Init(options.WithString("mucp"))
// get endpoint
ep, ok := p.Options.Values().Get("proxy.endpoint")
if ok {
p.Endpoint = ep.(string)
}
// get client
c, ok := p.Options.Values().Get("proxy.client")
if ok {
p.Client = c.(client.Client)
}
// get router
r, ok := p.Options.Values().Get("proxy.router")
if ok {
p.Router = r.(router.Router)
// TODO: should we advertise?
go p.Router.Advertise()
}
return p
}
| [
"\"MICRO_ROUTER_ADDRESS\"",
"\"MICRO_ROUTER\""
] | [] | [
"MICRO_ROUTER",
"MICRO_ROUTER_ADDRESS"
] | [] | ["MICRO_ROUTER", "MICRO_ROUTER_ADDRESS"] | go | 2 | 0 | |
pkg/server/web/config.go | // Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package web
import (
context2 "context"
"crypto/tls"
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"github.com/flycash/beego-web/pkg"
"github.com/flycash/beego-web/pkg/infrastructure/config"
"github.com/flycash/beego-web/pkg/infrastructure/logs"
"github.com/flycash/beego-web/pkg/infrastructure/session"
"github.com/flycash/beego-web/pkg/infrastructure/utils"
"github.com/flycash/beego-web/pkg/server/web/context"
)
// Config is the main struct for BConfig
// TODO after supporting multiple servers, remove common config to somewhere else
type Config struct {
AppName string // Application name
RunMode string // Running Mode: dev | prod
RouterCaseSensitive bool
ServerName string
RecoverPanic bool
RecoverFunc func(*context.Context, *Config)
CopyRequestBody bool
EnableGzip bool
MaxMemory int64
EnableErrorsShow bool
EnableErrorsRender bool
Listen Listen
WebConfig WebConfig
Log LogConfig
}
// Listen holds for http and https related config
type Listen struct {
Graceful bool // Graceful means use graceful module to start the server
ServerTimeOut int64
ListenTCP4 bool
EnableHTTP bool
HTTPAddr string
HTTPPort int
AutoTLS bool
Domains []string
TLSCacheDir string
EnableHTTPS bool
EnableMutualHTTPS bool
HTTPSAddr string
HTTPSPort int
HTTPSCertFile string
HTTPSKeyFile string
TrustCaFile string
EnableAdmin bool
AdminAddr string
AdminPort int
EnableFcgi bool
EnableStdIo bool // EnableStdIo works with EnableFcgi Use FCGI via standard I/O
ClientAuth int
}
// WebConfig holds web related config
type WebConfig struct {
AutoRender bool
EnableDocs bool
FlashName string
FlashSeparator string
DirectoryIndex bool
StaticDir map[string]string
StaticExtensionsToGzip []string
StaticCacheFileSize int
StaticCacheFileNum int
TemplateLeft string
TemplateRight string
ViewsPath string
CommentRouterPath string
EnableXSRF bool
XSRFKey string
XSRFExpire int
Session SessionConfig
}
// SessionConfig holds session related config
type SessionConfig struct {
SessionOn bool
SessionProvider string
SessionName string
SessionGCMaxLifetime int64
SessionProviderConfig string
SessionCookieLifeTime int
SessionAutoSetCookie bool
SessionDomain string
SessionDisableHTTPOnly bool // used to allow for cross domain cookies/javascript cookies.
SessionEnableSidInHTTPHeader bool // enable store/get the sessionId into/from http headers
SessionNameInHTTPHeader string
SessionEnableSidInURLQuery bool // enable get the sessionId from Url Query params
}
// LogConfig holds Log related config
type LogConfig struct {
AccessLogs bool
EnableStaticLogs bool // log static files requests default: false
AccessLogsFormat string // access log format: JSON_FORMAT, APACHE_FORMAT or empty string
FileLineNum bool
Outputs map[string]string // Store Adaptor : config
}
var (
// BConfig is the default config for Application
BConfig *Config
// AppConfig is the instance of Config, store the config information from file
AppConfig *beegoAppConfig
// AppPath is the absolute path to the app
AppPath string
// GlobalSessions is the instance for the session manager
GlobalSessions *session.Manager
// appConfigPath is the path to the config files
appConfigPath string
// appConfigProvider is the provider for the config, default is ini
appConfigProvider = "ini"
// WorkPath is the absolute path to project root directory
WorkPath string
)
func init() {
BConfig = newBConfig()
var err error
if AppPath, err = filepath.Abs(filepath.Dir(os.Args[0])); err != nil {
panic(err)
}
WorkPath, err = os.Getwd()
if err != nil {
panic(err)
}
var filename = "app.conf"
if os.Getenv("BEEGO_RUNMODE") != "" {
filename = os.Getenv("BEEGO_RUNMODE") + ".app.conf"
}
appConfigPath = filepath.Join(WorkPath, "conf", filename)
if !utils.FileExists(appConfigPath) {
appConfigPath = filepath.Join(AppPath, "conf", filename)
if !utils.FileExists(appConfigPath) {
AppConfig = &beegoAppConfig{innerConfig: config.NewFakeConfig()}
return
}
}
if err = parseConfig(appConfigPath); err != nil {
panic(err)
}
}
func defaultRecoverPanic(ctx *context.Context, cfg *Config) {
if err := recover(); err != nil {
if err == ErrAbort {
return
}
if !cfg.RecoverPanic {
panic(err)
}
if cfg.EnableErrorsShow {
if _, ok := ErrorMaps[fmt.Sprint(err)]; ok {
exception(fmt.Sprint(err), ctx)
return
}
}
var stack string
logs.Critical("the request url is ", ctx.Input.URL())
logs.Critical("Handler crashed with error", err)
for i := 1; ; i++ {
_, file, line, ok := runtime.Caller(i)
if !ok {
break
}
logs.Critical(fmt.Sprintf("%s:%d", file, line))
stack = stack + fmt.Sprintln(fmt.Sprintf("%s:%d", file, line))
}
if cfg.RunMode == DEV && cfg.EnableErrorsRender {
showErr(err, ctx, stack)
}
if ctx.Output.Status != 0 {
ctx.ResponseWriter.WriteHeader(ctx.Output.Status)
} else {
ctx.ResponseWriter.WriteHeader(500)
}
}
}
func newBConfig() *Config {
res := &Config{
AppName: "beego",
RunMode: PROD,
RouterCaseSensitive: true,
ServerName: "beegoServer:" + pkg.VERSION,
RecoverPanic: true,
CopyRequestBody: false,
EnableGzip: false,
MaxMemory: 1 << 26, // 64MB
EnableErrorsShow: true,
EnableErrorsRender: true,
Listen: Listen{
Graceful: false,
ServerTimeOut: 0,
ListenTCP4: false,
EnableHTTP: true,
AutoTLS: false,
Domains: []string{},
TLSCacheDir: ".",
HTTPAddr: "",
HTTPPort: 8080,
EnableHTTPS: false,
HTTPSAddr: "",
HTTPSPort: 10443,
HTTPSCertFile: "",
HTTPSKeyFile: "",
EnableAdmin: false,
AdminAddr: "",
AdminPort: 8088,
EnableFcgi: false,
EnableStdIo: false,
ClientAuth: int(tls.RequireAndVerifyClientCert),
},
WebConfig: WebConfig{
AutoRender: true,
EnableDocs: false,
FlashName: "BEEGO_FLASH",
FlashSeparator: "BEEGOFLASH",
DirectoryIndex: false,
StaticDir: map[string]string{"/static": "static"},
StaticExtensionsToGzip: []string{".css", ".js"},
StaticCacheFileSize: 1024 * 100,
StaticCacheFileNum: 1000,
TemplateLeft: "{{",
TemplateRight: "}}",
ViewsPath: "views",
CommentRouterPath: "controllers",
EnableXSRF: false,
XSRFKey: "beegoxsrf",
XSRFExpire: 0,
Session: SessionConfig{
SessionOn: false,
SessionProvider: "memory",
SessionName: "beegosessionID",
SessionGCMaxLifetime: 3600,
SessionProviderConfig: "",
SessionDisableHTTPOnly: false,
SessionCookieLifeTime: 0, // by default the cookie lives as long as the browser session
SessionAutoSetCookie: true,
SessionDomain: "",
SessionEnableSidInHTTPHeader: false, // enable store/get the sessionId into/from http headers
SessionNameInHTTPHeader: "Beegosessionid",
SessionEnableSidInURLQuery: false, // enable get the sessionId from Url Query params
},
},
Log: LogConfig{
AccessLogs: false,
EnableStaticLogs: false,
AccessLogsFormat: "APACHE_FORMAT",
FileLineNum: true,
Outputs: map[string]string{"console": ""},
},
}
res.RecoverFunc = defaultRecoverPanic
return res
}
// currently only ini is supported; json support will come next.
func parseConfig(appConfigPath string) (err error) {
AppConfig, err = newAppConfig(appConfigProvider, appConfigPath)
if err != nil {
return err
}
return assignConfig(AppConfig)
}
func assignConfig(ac config.Configer) error {
for _, i := range []interface{}{BConfig, &BConfig.Listen, &BConfig.WebConfig, &BConfig.Log, &BConfig.WebConfig.Session} {
assignSingleConfig(i, ac)
}
// set the run mode first
if envRunMode := os.Getenv("BEEGO_RUNMODE"); envRunMode != "" {
BConfig.RunMode = envRunMode
} else if runMode, err := ac.String(nil, "RunMode"); runMode != "" && err == nil {
BConfig.RunMode = runMode
}
if sd, err := ac.String(nil, "StaticDir"); sd != "" && err == nil {
BConfig.WebConfig.StaticDir = map[string]string{}
sds := strings.Fields(sd)
for _, v := range sds {
if url2fsmap := strings.SplitN(v, ":", 2); len(url2fsmap) == 2 {
BConfig.WebConfig.StaticDir["/"+strings.Trim(url2fsmap[0], "/")] = url2fsmap[1]
} else {
BConfig.WebConfig.StaticDir["/"+strings.Trim(url2fsmap[0], "/")] = url2fsmap[0]
}
}
}
if sgz, err := ac.String(nil, "StaticExtensionsToGzip"); sgz != "" && err == nil {
extensions := strings.Split(sgz, ",")
fileExts := []string{}
for _, ext := range extensions {
ext = strings.TrimSpace(ext)
if ext == "" {
continue
}
if !strings.HasPrefix(ext, ".") {
ext = "." + ext
}
fileExts = append(fileExts, ext)
}
if len(fileExts) > 0 {
BConfig.WebConfig.StaticExtensionsToGzip = fileExts
}
}
if sfs, err := ac.Int(nil, "StaticCacheFileSize"); err == nil {
BConfig.WebConfig.StaticCacheFileSize = sfs
}
if sfn, err := ac.Int(nil, "StaticCacheFileNum"); err == nil {
BConfig.WebConfig.StaticCacheFileNum = sfn
}
if lo, err := ac.String(nil, "LogOutputs"); lo != "" && err == nil {
// if lo is not nil or empty,
// the user has set their own LogOutputs,
// so clear the default setting of BConfig.Log.Outputs
BConfig.Log.Outputs = make(map[string]string)
los := strings.Split(lo, ";")
for _, v := range los {
if logType2Config := strings.SplitN(v, ",", 2); len(logType2Config) == 2 {
BConfig.Log.Outputs[logType2Config[0]] = logType2Config[1]
} else {
continue
}
}
}
// init log
logs.Reset()
for adaptor, config := range BConfig.Log.Outputs {
err := logs.SetLogger(adaptor, config)
if err != nil {
fmt.Fprintln(os.Stderr, fmt.Sprintf("%s with the config %q got err:%s", adaptor, config, err.Error()))
}
}
logs.SetLogFuncCall(BConfig.Log.FileLineNum)
return nil
}
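// Illustrative note (not in the original source): the string-valued settings parsed above follow these layouts
// in app.conf, e.g.
//
//	StaticDir = download:downloads static:static      (maps /download -> downloads and /static -> static)
//	StaticExtensionsToGzip = .css, .js
//	LogOutputs = console,;file,{"filename":"app.log"}  (adaptor,config pairs separated by ";")
//
// These examples are a sketch of the accepted syntax, not an exhaustive reference.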
func assignSingleConfig(p interface{}, ac config.Configer) {
pt := reflect.TypeOf(p)
if pt.Kind() != reflect.Ptr {
return
}
pt = pt.Elem()
if pt.Kind() != reflect.Struct {
return
}
pv := reflect.ValueOf(p).Elem()
for i := 0; i < pt.NumField(); i++ {
pf := pv.Field(i)
if !pf.CanSet() {
continue
}
name := pt.Field(i).Name
switch pf.Kind() {
case reflect.String:
pf.SetString(ac.DefaultString(nil, name, pf.String()))
case reflect.Int, reflect.Int64:
pf.SetInt(ac.DefaultInt64(nil, name, pf.Int()))
case reflect.Bool:
pf.SetBool(ac.DefaultBool(nil, name, pf.Bool()))
case reflect.Struct:
default:
// do nothing here
}
}
}
// LoadAppConfig allow developer to apply a config file
func LoadAppConfig(adapterName, configPath string) error {
absConfigPath, err := filepath.Abs(configPath)
if err != nil {
return err
}
if !utils.FileExists(absConfigPath) {
return fmt.Errorf("the target config file: %s don't exist", configPath)
}
appConfigPath = absConfigPath
appConfigProvider = adapterName
return parseConfig(appConfigPath)
}
type beegoAppConfig struct {
config.BaseConfiger
innerConfig config.Configer
}
func newAppConfig(appConfigProvider, appConfigPath string) (*beegoAppConfig, error) {
ac, err := config.NewConfig(appConfigProvider, appConfigPath)
if err != nil {
return nil, err
}
return &beegoAppConfig{innerConfig: ac}, nil
}
func (b *beegoAppConfig) Set(ctx context2.Context, key, val string) error {
if err := b.innerConfig.Set(nil, BConfig.RunMode+"::"+key, val); err != nil {
return b.innerConfig.Set(nil, key, val)
}
return nil
}
func (b *beegoAppConfig) String(ctx context2.Context, key string) (string, error) {
if v, err := b.innerConfig.String(nil, BConfig.RunMode+"::"+key); v != "" && err == nil {
return v, nil
}
return b.innerConfig.String(nil, key)
}
func (b *beegoAppConfig) Strings(ctx context2.Context, key string) ([]string, error) {
if v, err := b.innerConfig.Strings(nil, BConfig.RunMode+"::"+key); len(v) > 0 && err == nil {
return v, nil
}
return b.innerConfig.Strings(nil, key)
}
func (b *beegoAppConfig) Int(ctx context2.Context, key string) (int, error) {
if v, err := b.innerConfig.Int(nil, BConfig.RunMode+"::"+key); err == nil {
return v, nil
}
return b.innerConfig.Int(nil, key)
}
func (b *beegoAppConfig) Int64(ctx context2.Context, key string) (int64, error) {
if v, err := b.innerConfig.Int64(nil, BConfig.RunMode+"::"+key); err == nil {
return v, nil
}
return b.innerConfig.Int64(nil, key)
}
func (b *beegoAppConfig) Bool(ctx context2.Context, key string) (bool, error) {
if v, err := b.innerConfig.Bool(nil, BConfig.RunMode+"::"+key); err == nil {
return v, nil
}
return b.innerConfig.Bool(nil, key)
}
func (b *beegoAppConfig) Float(ctx context2.Context, key string) (float64, error) {
if v, err := b.innerConfig.Float(nil, BConfig.RunMode+"::"+key); err == nil {
return v, nil
}
return b.innerConfig.Float(nil, key)
}
func (b *beegoAppConfig) DefaultString(ctx context2.Context, key string, defaultVal string) string {
if v, err := b.String(nil, key); v != "" && err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultStrings(ctx context2.Context, key string, defaultVal []string) []string {
if v, err := b.Strings(ctx, key); len(v) != 0 && err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultInt(ctx context2.Context, key string, defaultVal int) int {
if v, err := b.Int(ctx, key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultInt64(ctx context2.Context, key string, defaultVal int64) int64 {
if v, err := b.Int64(ctx, key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultBool(ctx context2.Context, key string, defaultVal bool) bool {
if v, err := b.Bool(ctx, key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultFloat(ctx context2.Context, key string, defaultVal float64) float64 {
if v, err := b.Float(ctx, key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DIY(ctx context2.Context, key string) (interface{}, error) {
return b.innerConfig.DIY(nil, key)
}
func (b *beegoAppConfig) GetSection(ctx context2.Context, section string) (map[string]string, error) {
return b.innerConfig.GetSection(nil, section)
}
func (b *beegoAppConfig) SaveConfigFile(ctx context2.Context, filename string) error {
return b.innerConfig.SaveConfigFile(nil, filename)
}
| [
"\"BEEGO_RUNMODE\"",
"\"BEEGO_RUNMODE\"",
"\"BEEGO_RUNMODE\""
] | [] | [
"BEEGO_RUNMODE"
] | [] | ["BEEGO_RUNMODE"] | go | 1 | 0 | |
git-2.2.1/git-p4.py | #!/usr/bin/env python
#
# git-p4.py -- A tool for bidirectional operation between a Perforce depot and git.
#
# Author: Simon Hausmann <[email protected]>
# Copyright: 2007 Simon Hausmann <[email protected]>
# 2007 Trolltech ASA
# License: MIT <http://www.opensource.org/licenses/mit-license.php>
#
import sys
if sys.hexversion < 0x02040000:
# The limiter is the subprocess module
sys.stderr.write("git-p4: requires Python 2.4 or later.\n")
sys.exit(1)
import os
import optparse
import marshal
import subprocess
import tempfile
import time
import platform
import re
import shutil
import stat
try:
from subprocess import CalledProcessError
except ImportError:
# from python2.7:subprocess.py
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() returns
a non-zero exit status. The exit status will be stored in the
returncode attribute."""
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
verbose = False
# Only labels/tags matching this will be imported/exported
defaultLabelRegexp = r'[a-zA-Z0-9_\-.]+$'
def p4_build_cmd(cmd):
"""Build a suitable p4 command line.
This consolidates building and returning a p4 command line into one
location. It means that hooking into the environment, or other configuration
can be done more easily.
"""
real_cmd = ["p4"]
user = gitConfig("git-p4.user")
if len(user) > 0:
real_cmd += ["-u",user]
password = gitConfig("git-p4.password")
if len(password) > 0:
real_cmd += ["-P", password]
port = gitConfig("git-p4.port")
if len(port) > 0:
real_cmd += ["-p", port]
host = gitConfig("git-p4.host")
if len(host) > 0:
real_cmd += ["-H", host]
client = gitConfig("git-p4.client")
if len(client) > 0:
real_cmd += ["-c", client]
if isinstance(cmd,basestring):
real_cmd = ' '.join(real_cmd) + ' ' + cmd
else:
real_cmd += cmd
return real_cmd
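# Illustrative note (not part of the original script): with git config settings such as
# git-p4.user=alice and git-p4.port=perforce:1666, p4_build_cmd(["sync"]) would return
# ["p4", "-u", "alice", "-p", "perforce:1666", "sync"]; with a string argument the same
# options are joined into a single shell command line instead.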
def chdir(path, is_client_path=False):
"""Do chdir to the given path, and set the PWD environment
variable for use by P4. It does not look at getcwd() output.
Since we're not using the shell, it is necessary to set the
PWD environment variable explicitly.
Normally, expand the path to force it to be absolute. This
addresses the use of relative path names inside P4 settings,
e.g. P4CONFIG=.p4config. P4 does not simply open the filename
as given; it looks for .p4config using PWD.
If is_client_path, the path was handed to us directly by p4,
and may be a symbolic link. Do not call os.getcwd() in this
case, because it will cause p4 to think that PWD is not inside
the client path.
"""
os.chdir(path)
if not is_client_path:
path = os.getcwd()
os.environ['PWD'] = path
def die(msg):
if verbose:
raise Exception(msg)
else:
sys.stderr.write(msg + "\n")
sys.exit(1)
def write_pipe(c, stdin):
if verbose:
sys.stderr.write('Writing pipe: %s\n' % str(c))
expand = isinstance(c,basestring)
p = subprocess.Popen(c, stdin=subprocess.PIPE, shell=expand)
pipe = p.stdin
val = pipe.write(stdin)
pipe.close()
if p.wait():
die('Command failed: %s' % str(c))
return val
def p4_write_pipe(c, stdin):
real_cmd = p4_build_cmd(c)
return write_pipe(real_cmd, stdin)
def read_pipe(c, ignore_error=False):
if verbose:
sys.stderr.write('Reading pipe: %s\n' % str(c))
expand = isinstance(c,basestring)
p = subprocess.Popen(c, stdout=subprocess.PIPE, shell=expand)
pipe = p.stdout
val = pipe.read()
if p.wait() and not ignore_error:
die('Command failed: %s' % str(c))
return val
def p4_read_pipe(c, ignore_error=False):
real_cmd = p4_build_cmd(c)
return read_pipe(real_cmd, ignore_error)
def read_pipe_lines(c):
if verbose:
sys.stderr.write('Reading pipe: %s\n' % str(c))
expand = isinstance(c, basestring)
p = subprocess.Popen(c, stdout=subprocess.PIPE, shell=expand)
pipe = p.stdout
val = pipe.readlines()
if pipe.close() or p.wait():
die('Command failed: %s' % str(c))
return val
def p4_read_pipe_lines(c):
"""Specifically invoke p4 on the command supplied. """
real_cmd = p4_build_cmd(c)
return read_pipe_lines(real_cmd)
def p4_has_command(cmd):
"""Ask p4 for help on this command. If it returns an error, the
command does not exist in this version of p4."""
real_cmd = p4_build_cmd(["help", cmd])
p = subprocess.Popen(real_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def p4_has_move_command():
"""See if the move command exists, that it supports -k, and that
it has not been administratively disabled. The arguments
must be correct, but the filenames do not have to exist. Use
ones with wildcards so even if they exist, it will fail."""
if not p4_has_command("move"):
return False
cmd = p4_build_cmd(["move", "-k", "@from", "@to"])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
# return code will be 1 in either case
if err.find("Invalid option") >= 0:
return False
if err.find("disabled") >= 0:
return False
# assume it failed because @... was invalid changelist
return True
def system(cmd):
expand = isinstance(cmd,basestring)
if verbose:
sys.stderr.write("executing %s\n" % str(cmd))
retcode = subprocess.call(cmd, shell=expand)
if retcode:
raise CalledProcessError(retcode, cmd)
def p4_system(cmd):
"""Specifically invoke p4 as the system command. """
real_cmd = p4_build_cmd(cmd)
expand = isinstance(real_cmd, basestring)
retcode = subprocess.call(real_cmd, shell=expand)
if retcode:
raise CalledProcessError(retcode, real_cmd)
_p4_version_string = None
def p4_version_string():
"""Read the version string, showing just the last line, which
hopefully is the interesting version bit.
$ p4 -V
Perforce - The Fast Software Configuration Management System.
Copyright 1995-2011 Perforce Software. All rights reserved.
Rev. P4/NTX86/2011.1/393975 (2011/12/16).
"""
global _p4_version_string
if not _p4_version_string:
a = p4_read_pipe_lines(["-V"])
_p4_version_string = a[-1].rstrip()
return _p4_version_string
def p4_integrate(src, dest):
p4_system(["integrate", "-Dt", wildcard_encode(src), wildcard_encode(dest)])
def p4_sync(f, *options):
p4_system(["sync"] + list(options) + [wildcard_encode(f)])
def p4_add(f):
# forcibly add file names with wildcards
if wildcard_present(f):
p4_system(["add", "-f", f])
else:
p4_system(["add", f])
def p4_delete(f):
p4_system(["delete", wildcard_encode(f)])
def p4_edit(f):
p4_system(["edit", wildcard_encode(f)])
def p4_revert(f):
p4_system(["revert", wildcard_encode(f)])
def p4_reopen(type, f):
p4_system(["reopen", "-t", type, wildcard_encode(f)])
def p4_move(src, dest):
p4_system(["move", "-k", wildcard_encode(src), wildcard_encode(dest)])
def p4_describe(change):
"""Make sure it returns a valid result by checking for
the presence of field "time". Return a dict of the
results."""
ds = p4CmdList(["describe", "-s", str(change)])
if len(ds) != 1:
die("p4 describe -s %d did not return 1 result: %s" % (change, str(ds)))
d = ds[0]
if "p4ExitCode" in d:
die("p4 describe -s %d exited with %d: %s" % (change, d["p4ExitCode"],
str(d)))
if "code" in d:
if d["code"] == "error":
die("p4 describe -s %d returned error code: %s" % (change, str(d)))
if "time" not in d:
die("p4 describe -s %d returned no \"time\": %s" % (change, str(d)))
return d
#
# Canonicalize the p4 type and return a tuple of the
# base type, plus any modifiers. See "p4 help filetypes"
# for a list and explanation.
#
def split_p4_type(p4type):
p4_filetypes_historical = {
"ctempobj": "binary+Sw",
"ctext": "text+C",
"cxtext": "text+Cx",
"ktext": "text+k",
"kxtext": "text+kx",
"ltext": "text+F",
"tempobj": "binary+FSw",
"ubinary": "binary+F",
"uresource": "resource+F",
"uxbinary": "binary+Fx",
"xbinary": "binary+x",
"xltext": "text+Fx",
"xtempobj": "binary+Swx",
"xtext": "text+x",
"xunicode": "unicode+x",
"xutf16": "utf16+x",
}
if p4type in p4_filetypes_historical:
p4type = p4_filetypes_historical[p4type]
mods = ""
s = p4type.split("+")
base = s[0]
mods = ""
if len(s) > 1:
mods = s[1]
return (base, mods)
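# Illustrative examples (added for clarity, not in the original): split_p4_type("text+ko") returns
# ("text", "ko"), split_p4_type("ubinary") maps the historical alias first and returns ("binary", "F"),
# and split_p4_type("text") returns ("text", "").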
#
# return the raw p4 type of a file (text, text+ko, etc)
#
def p4_type(f):
results = p4CmdList(["fstat", "-T", "headType", wildcard_encode(f)])
return results[0]['headType']
#
# Given a type base and modifier, return a regexp matching
# the keywords that can be expanded in the file
#
def p4_keywords_regexp_for_type(base, type_mods):
if base in ("text", "unicode", "binary"):
kwords = None
if "ko" in type_mods:
kwords = 'Id|Header'
elif "k" in type_mods:
kwords = 'Id|Header|Author|Date|DateTime|Change|File|Revision'
else:
return None
pattern = r"""
\$ # Starts with a dollar, followed by...
(%s) # one of the keywords, followed by...
(:[^$\n]+)? # possibly an old expansion, followed by...
\$ # another dollar
""" % kwords
return pattern
else:
return None
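# Illustrative note (not in the original): for a "text+k" file the returned pattern matches expanded RCS
# keywords such as "$Id: //depot/path/file.c#3 $" or the bare "$Revision$", so they can be collapsed back
# to the unexpanded "$Id$" form before comparing file contents.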
#
# Given a file, return a regexp matching the possible
# RCS keywords that will be expanded, or None for files
# with kw expansion turned off.
#
def p4_keywords_regexp_for_file(file):
if not os.path.exists(file):
return None
else:
(type_base, type_mods) = split_p4_type(p4_type(file))
return p4_keywords_regexp_for_type(type_base, type_mods)
def setP4ExecBit(file, mode):
# Reopens an already open file and changes the execute bit to match
# the execute bit setting in the passed in mode.
p4Type = "+x"
if not isModeExec(mode):
p4Type = getP4OpenedType(file)
p4Type = re.sub('^([cku]?)x(.*)', '\\1\\2', p4Type)
p4Type = re.sub('(.*?\+.*?)x(.*?)', '\\1\\2', p4Type)
if p4Type[-1] == "+":
p4Type = p4Type[0:-1]
p4_reopen(p4Type, file)
def getP4OpenedType(file):
# Returns the perforce file type for the given file.
result = p4_read_pipe(["opened", wildcard_encode(file)])
match = re.match(".*\((.+)\)\r?$", result)
if match:
return match.group(1)
else:
die("Could not determine file type for %s (result: '%s')" % (file, result))
# Return the set of all p4 labels
def getP4Labels(depotPaths):
labels = set()
if isinstance(depotPaths,basestring):
depotPaths = [depotPaths]
for l in p4CmdList(["labels"] + ["%s..." % p for p in depotPaths]):
label = l['label']
labels.add(label)
return labels
# Return the set of all git tags
def getGitTags():
gitTags = set()
for line in read_pipe_lines(["git", "tag"]):
tag = line.strip()
gitTags.add(tag)
return gitTags
def diffTreePattern():
# This is a simple generator for the diff tree regex pattern. This could be
# a class variable if this and parseDiffTreeEntry were a part of a class.
pattern = re.compile(':(\d+) (\d+) (\w+) (\w+) ([A-Z])(\d+)?\t(.*?)((\t(.*))|$)')
while True:
yield pattern
def parseDiffTreeEntry(entry):
"""Parses a single diff tree entry into its component elements.
See git-diff-tree(1) manpage for details about the format of the diff
output. This method returns a dictionary with the following elements:
src_mode - The mode of the source file
dst_mode - The mode of the destination file
src_sha1 - The sha1 for the source file
dst_sha1 - The sha1 for the destination file
status - The one letter status of the diff (i.e. 'A', 'M', 'D', etc)
status_score - The score for the status (applicable for 'C' and 'R'
statuses). This is None if there is no score.
src - The path for the source file.
dst - The path for the destination file. This is only present for
copy or renames. If it is not present, this is None.
If the pattern is not matched, None is returned."""
match = diffTreePattern().next().match(entry)
if match:
return {
'src_mode': match.group(1),
'dst_mode': match.group(2),
'src_sha1': match.group(3),
'dst_sha1': match.group(4),
'status': match.group(5),
'status_score': match.group(6),
'src': match.group(7),
'dst': match.group(10)
}
return None
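# Illustrative example (not part of the original): a rename entry such as
#   ":100644 100644 a9c1e03 b73f8a2 R095\told.c\tnew.c"
# parses to src_mode='100644', status='R', status_score='095', src='old.c', dst='new.c'
# (real diff-tree output carries full 40-character SHAs; they are abbreviated here for readability).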
def isModeExec(mode):
# Returns True if the given git mode represents an executable file,
# otherwise False.
return mode[-3:] == "755"
def isModeExecChanged(src_mode, dst_mode):
return isModeExec(src_mode) != isModeExec(dst_mode)
def p4CmdList(cmd, stdin=None, stdin_mode='w+b', cb=None):
if isinstance(cmd,basestring):
cmd = "-G " + cmd
expand = True
else:
cmd = ["-G"] + cmd
expand = False
cmd = p4_build_cmd(cmd)
if verbose:
sys.stderr.write("Opening pipe: %s\n" % str(cmd))
# Use a temporary file to avoid deadlocks without
# subprocess.communicate(), which would put another copy
# of stdout into memory.
stdin_file = None
if stdin is not None:
stdin_file = tempfile.TemporaryFile(prefix='p4-stdin', mode=stdin_mode)
if isinstance(stdin,basestring):
stdin_file.write(stdin)
else:
for i in stdin:
stdin_file.write(i + '\n')
stdin_file.flush()
stdin_file.seek(0)
p4 = subprocess.Popen(cmd,
shell=expand,
stdin=stdin_file,
stdout=subprocess.PIPE)
result = []
try:
while True:
entry = marshal.load(p4.stdout)
if cb is not None:
cb(entry)
else:
result.append(entry)
except EOFError:
pass
exitCode = p4.wait()
if exitCode != 0:
entry = {}
entry["p4ExitCode"] = exitCode
result.append(entry)
return result
def p4Cmd(cmd):
list = p4CmdList(cmd)
result = {}
for entry in list:
result.update(entry)
return result
def p4Where(depotPath):
if not depotPath.endswith("/"):
depotPath += "/"
depotPath = depotPath + "..."
outputList = p4CmdList(["where", depotPath])
output = None
for entry in outputList:
if "depotFile" in entry:
if entry["depotFile"] == depotPath:
output = entry
break
elif "data" in entry:
data = entry.get("data")
space = data.find(" ")
if data[:space] == depotPath:
output = entry
break
if output is None:
return ""
if output["code"] == "error":
return ""
clientPath = ""
if "path" in output:
clientPath = output.get("path")
elif "data" in output:
data = output.get("data")
lastSpace = data.rfind(" ")
clientPath = data[lastSpace + 1:]
if clientPath.endswith("..."):
clientPath = clientPath[:-3]
return clientPath
def currentGitBranch():
return read_pipe("git name-rev HEAD").split(" ")[1].strip()
def isValidGitDir(path):
if (os.path.exists(path + "/HEAD")
and os.path.exists(path + "/refs") and os.path.exists(path + "/objects")):
return True
return False
def parseRevision(ref):
return read_pipe("git rev-parse %s" % ref).strip()
def branchExists(ref):
rev = read_pipe(["git", "rev-parse", "-q", "--verify", ref],
ignore_error=True)
return len(rev) > 0
def extractLogMessageFromGitCommit(commit):
logMessage = ""
## fixme: title is first line of commit, not 1st paragraph.
foundTitle = False
for log in read_pipe_lines("git cat-file commit %s" % commit):
if not foundTitle:
if len(log) == 1:
foundTitle = True
continue
logMessage += log
return logMessage
def extractSettingsGitLog(log):
values = {}
for line in log.split("\n"):
line = line.strip()
m = re.search (r"^ *\[git-p4: (.*)\]$", line)
if not m:
continue
assignments = m.group(1).split (':')
for a in assignments:
vals = a.split ('=')
key = vals[0].strip()
val = ('='.join (vals[1:])).strip()
if val.endswith ('\"') and val.startswith('"'):
val = val[1:-1]
values[key] = val
paths = values.get("depot-paths")
if not paths:
paths = values.get("depot-path")
if paths:
values['depot-paths'] = paths.split(',')
return values
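# Illustrative example (not in the original): a commit created by git p4 carries a trailer line such as
#   [git-p4: depot-paths = "//depot/main/": change = 12345]
# which this function turns into {'depot-paths': ['//depot/main/'], 'change': '12345'}.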
def gitBranchExists(branch):
proc = subprocess.Popen(["git", "rev-parse", branch],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
return proc.wait() == 0
_gitConfig = {}
def gitConfig(key):
if not _gitConfig.has_key(key):
cmd = [ "git", "config", key ]
s = read_pipe(cmd, ignore_error=True)
_gitConfig[key] = s.strip()
return _gitConfig[key]
def gitConfigBool(key):
"""Return a bool, using git config --bool. It is True only if the
variable is set to true, and False if set to false or not present
in the config."""
if not _gitConfig.has_key(key):
cmd = [ "git", "config", "--bool", key ]
s = read_pipe(cmd, ignore_error=True)
v = s.strip()
_gitConfig[key] = v == "true"
return _gitConfig[key]
def gitConfigList(key):
if not _gitConfig.has_key(key):
s = read_pipe(["git", "config", "--get-all", key], ignore_error=True)
_gitConfig[key] = s.strip().split(os.linesep)
return _gitConfig[key]
def p4BranchesInGit(branchesAreInRemotes=True):
"""Find all the branches whose names start with "p4/", looking
in remotes or heads as specified by the argument. Return
a dictionary of { branch: revision } for each one found.
The branch names are the short names, without any
"p4/" prefix."""
branches = {}
cmdline = "git rev-parse --symbolic "
if branchesAreInRemotes:
cmdline += "--remotes"
else:
cmdline += "--branches"
for line in read_pipe_lines(cmdline):
line = line.strip()
# only import to p4/
if not line.startswith('p4/'):
continue
# special symbolic ref to p4/master
if line == "p4/HEAD":
continue
# strip off p4/ prefix
branch = line[len("p4/"):]
branches[branch] = parseRevision(line)
return branches
def branch_exists(branch):
"""Make sure that the given ref name really exists."""
cmd = [ "git", "rev-parse", "--symbolic", "--verify", branch ]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, _ = p.communicate()
if p.returncode:
return False
# expect exactly one line of output: the branch name
return out.rstrip() == branch
def findUpstreamBranchPoint(head = "HEAD"):
branches = p4BranchesInGit()
# map from depot-path to branch name
branchByDepotPath = {}
for branch in branches.keys():
tip = branches[branch]
log = extractLogMessageFromGitCommit(tip)
settings = extractSettingsGitLog(log)
if settings.has_key("depot-paths"):
paths = ",".join(settings["depot-paths"])
branchByDepotPath[paths] = "remotes/p4/" + branch
settings = None
parent = 0
while parent < 65535:
commit = head + "~%s" % parent
log = extractLogMessageFromGitCommit(commit)
settings = extractSettingsGitLog(log)
if settings.has_key("depot-paths"):
paths = ",".join(settings["depot-paths"])
if branchByDepotPath.has_key(paths):
return [branchByDepotPath[paths], settings]
parent = parent + 1
return ["", settings]
def createOrUpdateBranchesFromOrigin(localRefPrefix = "refs/remotes/p4/", silent=True):
if not silent:
print ("Creating/updating branch(es) in %s based on origin branch(es)"
% localRefPrefix)
originPrefix = "origin/p4/"
for line in read_pipe_lines("git rev-parse --symbolic --remotes"):
line = line.strip()
if (not line.startswith(originPrefix)) or line.endswith("HEAD"):
continue
headName = line[len(originPrefix):]
remoteHead = localRefPrefix + headName
originHead = line
original = extractSettingsGitLog(extractLogMessageFromGitCommit(originHead))
if (not original.has_key('depot-paths')
or not original.has_key('change')):
continue
update = False
if not gitBranchExists(remoteHead):
if verbose:
print "creating %s" % remoteHead
update = True
else:
settings = extractSettingsGitLog(extractLogMessageFromGitCommit(remoteHead))
if settings.has_key('change') > 0:
if settings['depot-paths'] == original['depot-paths']:
originP4Change = int(original['change'])
p4Change = int(settings['change'])
if originP4Change > p4Change:
print ("%s (%s) is newer than %s (%s). "
"Updating p4 branch from origin."
% (originHead, originP4Change,
remoteHead, p4Change))
update = True
else:
print ("Ignoring: %s was imported from %s while "
"%s was imported from %s"
% (originHead, ','.join(original['depot-paths']),
remoteHead, ','.join(settings['depot-paths'])))
if update:
system("git update-ref %s %s" % (remoteHead, originHead))
def originP4BranchesExist():
return gitBranchExists("origin") or gitBranchExists("origin/p4") or gitBranchExists("origin/p4/master")
def p4ChangesForPaths(depotPaths, changeRange):
assert depotPaths
cmd = ['changes']
for p in depotPaths:
cmd += ["%s...%s" % (p, changeRange)]
output = p4_read_pipe_lines(cmd)
changes = {}
for line in output:
changeNum = int(line.split(" ")[1])
changes[changeNum] = True
changelist = changes.keys()
changelist.sort()
return changelist
def p4PathStartsWith(path, prefix):
# This method tries to remedy a potential mixed-case issue:
#
# If UserA adds //depot/DirA/file1
# and UserB adds //depot/dira/file2
#
# we may or may not have a problem. If you have core.ignorecase=true,
# we treat DirA and dira as the same directory
if gitConfigBool("core.ignorecase"):
return path.lower().startswith(prefix.lower())
return path.startswith(prefix)
def getClientSpec():
"""Look at the p4 client spec, create a View() object that contains
all the mappings, and return it."""
specList = p4CmdList("client -o")
if len(specList) != 1:
die('Output from "client -o" is %d lines, expecting 1' %
len(specList))
# dictionary of all client parameters
entry = specList[0]
# the //client/ name
client_name = entry["Client"]
# just the keys that start with "View"
view_keys = [ k for k in entry.keys() if k.startswith("View") ]
# hold this new View
view = View(client_name)
# append the lines, in order, to the view
for view_num in range(len(view_keys)):
k = "View%d" % view_num
if k not in view_keys:
die("Expected view key %s missing" % k)
view.append(entry[k])
return view
def getClientRoot():
"""Grab the client directory."""
output = p4CmdList("client -o")
if len(output) != 1:
die('Output from "client -o" is %d lines, expecting 1' % len(output))
entry = output[0]
if "Root" not in entry:
die('Client has no "Root"')
return entry["Root"]
#
# P4 wildcards are not allowed in filenames. P4 complains
# if you simply add them, but you can force it with "-f", in
# which case it translates them into %xx encoding internally.
#
def wildcard_decode(path):
# Search for and fix just these four characters. Do % last so
# that fixing it does not inadvertently create new %-escapes.
# Cannot have * in a filename in windows; untested as to
# what p4 would do in such a case.
if not platform.system() == "Windows":
path = path.replace("%2A", "*")
path = path.replace("%23", "#") \
.replace("%40", "@") \
.replace("%25", "%")
return path
def wildcard_encode(path):
# do % first to avoid double-encoding the %s introduced here
path = path.replace("%", "%25") \
.replace("*", "%2A") \
.replace("#", "%23") \
.replace("@", "%40")
return path
def wildcard_present(path):
m = re.search("[*#@%]", path)
return m is not None
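# Illustrative examples (not part of the original): wildcard_encode("status@2%.txt") returns
# "status%402%25.txt", and wildcard_decode() reverses the mapping, except that "%2A" is left
# encoded on Windows, where "*" cannot appear in a filename anyway.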
class Command:
def __init__(self):
self.usage = "usage: %prog [options]"
self.needsGit = True
self.verbose = False
class P4UserMap:
def __init__(self):
self.userMapFromPerforceServer = False
self.myP4UserId = None
def p4UserId(self):
if self.myP4UserId:
return self.myP4UserId
results = p4CmdList("user -o")
for r in results:
if r.has_key('User'):
self.myP4UserId = r['User']
return r['User']
die("Could not find your p4 user id")
def p4UserIsMe(self, p4User):
# return True if the given p4 user is actually me
me = self.p4UserId()
if not p4User or p4User != me:
return False
else:
return True
def getUserCacheFilename(self):
home = os.environ.get("HOME", os.environ.get("USERPROFILE"))
return home + "/.gitp4-usercache.txt"
def getUserMapFromPerforceServer(self):
if self.userMapFromPerforceServer:
return
self.users = {}
self.emails = {}
for output in p4CmdList("users"):
if not output.has_key("User"):
continue
self.users[output["User"]] = output["FullName"] + " <" + output["Email"] + ">"
self.emails[output["Email"]] = output["User"]
s = ''
for (key, val) in self.users.items():
s += "%s\t%s\n" % (key.expandtabs(1), val.expandtabs(1))
open(self.getUserCacheFilename(), "wb").write(s)
self.userMapFromPerforceServer = True
def loadUserMapFromCache(self):
self.users = {}
self.userMapFromPerforceServer = False
try:
cache = open(self.getUserCacheFilename(), "rb")
lines = cache.readlines()
cache.close()
for line in lines:
entry = line.strip().split("\t")
self.users[entry[0]] = entry[1]
except IOError:
self.getUserMapFromPerforceServer()
class P4Debug(Command):
def __init__(self):
Command.__init__(self)
self.options = []
self.description = "A tool to debug the output of p4 -G."
self.needsGit = False
def run(self, args):
j = 0
for output in p4CmdList(args):
print 'Element: %d' % j
j += 1
print output
return True
class P4RollBack(Command):
def __init__(self):
Command.__init__(self)
self.options = [
optparse.make_option("--local", dest="rollbackLocalBranches", action="store_true")
]
self.description = "A tool to debug the multi-branch import. Don't use :)"
self.rollbackLocalBranches = False
def run(self, args):
if len(args) != 1:
return False
maxChange = int(args[0])
if "p4ExitCode" in p4Cmd("changes -m 1"):
die("Problems executing p4");
if self.rollbackLocalBranches:
refPrefix = "refs/heads/"
lines = read_pipe_lines("git rev-parse --symbolic --branches")
else:
refPrefix = "refs/remotes/"
lines = read_pipe_lines("git rev-parse --symbolic --remotes")
for line in lines:
if self.rollbackLocalBranches or (line.startswith("p4/") and line != "p4/HEAD\n"):
line = line.strip()
ref = refPrefix + line
log = extractLogMessageFromGitCommit(ref)
settings = extractSettingsGitLog(log)
depotPaths = settings['depot-paths']
change = settings['change']
changed = False
if len(p4Cmd("changes -m 1 " + ' '.join (['%s...@%s' % (p, maxChange)
for p in depotPaths]))) == 0:
print "Branch %s did not exist at change %s, deleting." % (ref, maxChange)
system("git update-ref -d %s `git rev-parse %s`" % (ref, ref))
continue
while change and int(change) > maxChange:
changed = True
if self.verbose:
print "%s is at %s ; rewinding towards %s" % (ref, change, maxChange)
system("git update-ref %s \"%s^\"" % (ref, ref))
log = extractLogMessageFromGitCommit(ref)
settings = extractSettingsGitLog(log)
depotPaths = settings['depot-paths']
change = settings['change']
if changed:
print "%s rewound to %s" % (ref, change)
return True
class P4Submit(Command, P4UserMap):
conflict_behavior_choices = ("ask", "skip", "quit")
def __init__(self):
Command.__init__(self)
P4UserMap.__init__(self)
self.options = [
optparse.make_option("--origin", dest="origin"),
optparse.make_option("-M", dest="detectRenames", action="store_true"),
# preserve the user, requires relevant p4 permissions
optparse.make_option("--preserve-user", dest="preserveUser", action="store_true"),
optparse.make_option("--export-labels", dest="exportLabels", action="store_true"),
optparse.make_option("--dry-run", "-n", dest="dry_run", action="store_true"),
optparse.make_option("--prepare-p4-only", dest="prepare_p4_only", action="store_true"),
optparse.make_option("--conflict", dest="conflict_behavior",
choices=self.conflict_behavior_choices),
optparse.make_option("--branch", dest="branch"),
]
self.description = "Submit changes from git to the perforce depot."
self.usage += " [name of git branch to submit into perforce depot]"
self.origin = ""
self.detectRenames = False
self.preserveUser = gitConfigBool("git-p4.preserveUser")
self.dry_run = False
self.prepare_p4_only = False
self.conflict_behavior = None
self.isWindows = (platform.system() == "Windows")
self.exportLabels = False
self.p4HasMoveCommand = p4_has_move_command()
self.branch = None
def check(self):
if len(p4CmdList("opened ...")) > 0:
die("You have files opened with perforce! Close them before starting the sync.")
def separate_jobs_from_description(self, message):
"""Extract and return a possible Jobs field in the commit
message. It goes into a separate section in the p4 change
specification.
A jobs line starts with "Jobs:" and looks like a new field
in a form. Values are white-space separated on the same
line or on following lines that start with a tab.
This does not parse and extract the full git commit message
like a p4 form. It just sees the Jobs: line as a marker
to pass everything from then on directly into the p4 form,
but outside the description section.
Return a tuple (stripped log message, jobs string)."""
m = re.search(r'^Jobs:', message, re.MULTILINE)
if m is None:
return (message, None)
jobtext = message[m.start():]
stripped_message = message[:m.start()].rstrip()
return (stripped_message, jobtext)
def prepareLogMessage(self, template, message, jobs):
"""Edits the template returned from "p4 change -o" to insert
the message in the Description field, and the jobs text in
the Jobs field."""
result = ""
inDescriptionSection = False
for line in template.split("\n"):
if line.startswith("#"):
result += line + "\n"
continue
if inDescriptionSection:
if line.startswith("Files:") or line.startswith("Jobs:"):
inDescriptionSection = False
# insert Jobs section
if jobs:
result += jobs + "\n"
else:
continue
else:
if line.startswith("Description:"):
inDescriptionSection = True
line += "\n"
for messageLine in message.split("\n"):
line += "\t" + messageLine + "\n"
result += line + "\n"
return result
def patchRCSKeywords(self, file, pattern):
# Attempt to zap the RCS keywords in a p4 controlled file matching the given pattern
(handle, outFileName) = tempfile.mkstemp(dir='.')
try:
outFile = os.fdopen(handle, "w+")
inFile = open(file, "r")
regexp = re.compile(pattern, re.VERBOSE)
for line in inFile.readlines():
line = regexp.sub(r'$\1$', line)
outFile.write(line)
inFile.close()
outFile.close()
# Forcibly overwrite the original file
os.unlink(file)
shutil.move(outFileName, file)
except:
# cleanup our temporary file
os.unlink(outFileName)
print "Failed to strip RCS keywords in %s" % file
raise
print "Patched up RCS keywords in %s" % file
def p4UserForCommit(self,id):
# Return the tuple (perforce user,git email) for a given git commit id
self.getUserMapFromPerforceServer()
gitEmail = read_pipe(["git", "log", "--max-count=1",
"--format=%ae", id])
gitEmail = gitEmail.strip()
if not self.emails.has_key(gitEmail):
return (None,gitEmail)
else:
return (self.emails[gitEmail],gitEmail)
def checkValidP4Users(self,commits):
# check if any git authors cannot be mapped to p4 users
for id in commits:
(user,email) = self.p4UserForCommit(id)
if not user:
msg = "Cannot find p4 user for email %s in commit %s." % (email, id)
if gitConfigBool("git-p4.allowMissingP4Users"):
print "%s" % msg
else:
die("Error: %s\nSet git-p4.allowMissingP4Users to true to allow this." % msg)
def lastP4Changelist(self):
# Get back the last changelist number submitted in this client spec. This
# then gets used to patch up the username in the change. If the same
# client spec is being used by multiple processes then this might go
# wrong.
results = p4CmdList("client -o") # find the current client
client = None
for r in results:
if r.has_key('Client'):
client = r['Client']
break
if not client:
die("could not get client spec")
results = p4CmdList(["changes", "-c", client, "-m", "1"])
for r in results:
if r.has_key('change'):
return r['change']
die("Could not get changelist number for last submit - cannot patch up user details")
def modifyChangelistUser(self, changelist, newUser):
# fixup the user field of a changelist after it has been submitted.
changes = p4CmdList("change -o %s" % changelist)
if len(changes) != 1:
die("Bad output from p4 change modifying %s to user %s" %
(changelist, newUser))
c = changes[0]
if c['User'] == newUser: return # nothing to do
c['User'] = newUser
input = marshal.dumps(c)
result = p4CmdList("change -f -i", stdin=input)
for r in result:
if r.has_key('code'):
if r['code'] == 'error':
die("Could not modify user field of changelist %s to %s:%s" % (changelist, newUser, r['data']))
if r.has_key('data'):
print("Updated user field for changelist %s to %s" % (changelist, newUser))
return
die("Could not modify user field of changelist %s to %s" % (changelist, newUser))
def canChangeChangelists(self):
# check to see if we have p4 admin or super-user permissions, either of
# which are required to modify changelists.
results = p4CmdList(["protects", self.depotPath])
for r in results:
if r.has_key('perm'):
if r['perm'] == 'admin':
return 1
if r['perm'] == 'super':
return 1
return 0
def prepareSubmitTemplate(self):
"""Run "p4 change -o" to grab a change specification template.
This does not use "p4 -G", as it is nice to keep the submission
template in original order, since a human might edit it.
Remove lines in the Files section that show changes to files
outside the depot path we're committing into."""
template = ""
inFilesSection = False
for line in p4_read_pipe_lines(['change', '-o']):
if line.endswith("\r\n"):
line = line[:-2] + "\n"
if inFilesSection:
if line.startswith("\t"):
# path starts and ends with a tab
path = line[1:]
lastTab = path.rfind("\t")
if lastTab != -1:
path = path[:lastTab]
if not p4PathStartsWith(path, self.depotPath):
continue
else:
inFilesSection = False
else:
if line.startswith("Files:"):
inFilesSection = True
template += line
return template
def edit_template(self, template_file):
"""Invoke the editor to let the user change the submission
message. Return true if okay to continue with the submit."""
# if configured to skip the editing part, just submit
if gitConfigBool("git-p4.skipSubmitEdit"):
return True
# look at the modification time, to check later if the user saved
# the file
mtime = os.stat(template_file).st_mtime
# invoke the editor
if os.environ.has_key("P4EDITOR") and (os.environ.get("P4EDITOR") != ""):
editor = os.environ.get("P4EDITOR")
else:
editor = read_pipe("git var GIT_EDITOR").strip()
system([editor, template_file])
# If the file was not saved, prompt to see if this patch should
# be skipped. But skip this verification step if configured so.
if gitConfigBool("git-p4.skipSubmitEditCheck"):
return True
# modification time updated means user saved the file
if os.stat(template_file).st_mtime > mtime:
return True
while True:
response = raw_input("Submit template unchanged. Submit anyway? [y]es, [n]o (skip this patch) ")
if response == 'y':
return True
if response == 'n':
return False
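    # Build the diff text appended below the separator line of the submit template:
    # "p4 diff -du" output for edited files plus synthetic diffs for newly added files.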
def get_diff_description(self, editedFiles, filesToAdd):
# diff
if os.environ.has_key("P4DIFF"):
del(os.environ["P4DIFF"])
diff = ""
for editedFile in editedFiles:
diff += p4_read_pipe(['diff', '-du',
wildcard_encode(editedFile)])
# new file diff
newdiff = ""
for newFile in filesToAdd:
newdiff += "==== new file ====\n"
newdiff += "--- /dev/null\n"
newdiff += "+++ %s\n" % newFile
f = open(newFile, "r")
for line in f.readlines():
newdiff += "+" + line
f.close()
return (diff + newdiff).replace('\r\n', '\n')
def applyCommit(self, id):
"""Apply one commit, return True if it succeeded."""
print "Applying", read_pipe(["git", "show", "-s",
"--format=format:%h %s", id])
(p4User, gitEmail) = self.p4UserForCommit(id)
diff = read_pipe_lines("git diff-tree -r %s \"%s^\" \"%s\"" % (self.diffOpts, id, id))
filesToAdd = set()
filesToDelete = set()
editedFiles = set()
pureRenameCopy = set()
filesToChangeExecBit = {}
for line in diff:
diff = parseDiffTreeEntry(line)
modifier = diff['status']
path = diff['src']
if modifier == "M":
p4_edit(path)
if isModeExecChanged(diff['src_mode'], diff['dst_mode']):
filesToChangeExecBit[path] = diff['dst_mode']
editedFiles.add(path)
elif modifier == "A":
filesToAdd.add(path)
filesToChangeExecBit[path] = diff['dst_mode']
if path in filesToDelete:
filesToDelete.remove(path)
elif modifier == "D":
filesToDelete.add(path)
if path in filesToAdd:
filesToAdd.remove(path)
elif modifier == "C":
src, dest = diff['src'], diff['dst']
p4_integrate(src, dest)
pureRenameCopy.add(dest)
if diff['src_sha1'] != diff['dst_sha1']:
p4_edit(dest)
pureRenameCopy.discard(dest)
if isModeExecChanged(diff['src_mode'], diff['dst_mode']):
p4_edit(dest)
pureRenameCopy.discard(dest)
filesToChangeExecBit[dest] = diff['dst_mode']
if self.isWindows:
# turn off read-only attribute
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
editedFiles.add(dest)
elif modifier == "R":
src, dest = diff['src'], diff['dst']
if self.p4HasMoveCommand:
p4_edit(src) # src must be open before move
p4_move(src, dest) # opens for (move/delete, move/add)
else:
p4_integrate(src, dest)
if diff['src_sha1'] != diff['dst_sha1']:
p4_edit(dest)
else:
pureRenameCopy.add(dest)
if isModeExecChanged(diff['src_mode'], diff['dst_mode']):
if not self.p4HasMoveCommand:
p4_edit(dest) # with move: already open, writable
filesToChangeExecBit[dest] = diff['dst_mode']
if not self.p4HasMoveCommand:
if self.isWindows:
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
filesToDelete.add(src)
editedFiles.add(dest)
else:
die("unknown modifier %s for %s" % (modifier, path))
diffcmd = "git diff-tree --full-index -p \"%s\"" % (id)
patchcmd = diffcmd + " | git apply "
tryPatchCmd = patchcmd + "--check -"
applyPatchCmd = patchcmd + "--check --apply -"
patch_succeeded = True
if os.system(tryPatchCmd) != 0:
fixed_rcs_keywords = False
patch_succeeded = False
print "Unfortunately applying the change failed!"
# Patch failed, maybe it's just RCS keyword woes. Look through
# the patch to see if that's possible.
if gitConfigBool("git-p4.attemptRCSCleanup"):
file = None
pattern = None
kwfiles = {}
for file in editedFiles | filesToDelete:
# did this file's delta contain RCS keywords?
pattern = p4_keywords_regexp_for_file(file)
if pattern:
# this file is a possibility...look for RCS keywords.
regexp = re.compile(pattern, re.VERBOSE)
for line in read_pipe_lines(["git", "diff", "%s^..%s" % (id, id), file]):
if regexp.search(line):
if verbose:
print "got keyword match on %s in %s in %s" % (pattern, line, file)
kwfiles[file] = pattern
break
for file in kwfiles:
if verbose:
print "zapping %s with %s" % (line,pattern)
# File is being deleted, so not open in p4. Must
# disable the read-only bit on windows.
if self.isWindows and file not in editedFiles:
os.chmod(file, stat.S_IWRITE)
self.patchRCSKeywords(file, kwfiles[file])
fixed_rcs_keywords = True
if fixed_rcs_keywords:
print "Retrying the patch with RCS keywords cleaned up"
if os.system(tryPatchCmd) == 0:
patch_succeeded = True
if not patch_succeeded:
for f in editedFiles:
p4_revert(f)
return False
#
# Apply the patch for real, and do add/delete/+x handling.
#
system(applyPatchCmd)
for f in filesToAdd:
p4_add(f)
for f in filesToDelete:
p4_revert(f)
p4_delete(f)
# Set/clear executable bits
for f in filesToChangeExecBit.keys():
mode = filesToChangeExecBit[f]
setP4ExecBit(f, mode)
#
# Build p4 change description, starting with the contents
# of the git commit message.
#
logMessage = extractLogMessageFromGitCommit(id)
logMessage = logMessage.strip()
(logMessage, jobs) = self.separate_jobs_from_description(logMessage)
template = self.prepareSubmitTemplate()
submitTemplate = self.prepareLogMessage(template, logMessage, jobs)
if self.preserveUser:
submitTemplate += "\n######## Actual user %s, modified after commit\n" % p4User
if self.checkAuthorship and not self.p4UserIsMe(p4User):
submitTemplate += "######## git author %s does not match your p4 account.\n" % gitEmail
submitTemplate += "######## Use option --preserve-user to modify authorship.\n"
submitTemplate += "######## Variable git-p4.skipUserNameCheck hides this message.\n"
separatorLine = "######## everything below this line is just the diff #######\n"
if not self.prepare_p4_only:
submitTemplate += separatorLine
submitTemplate += self.get_diff_description(editedFiles, filesToAdd)
(handle, fileName) = tempfile.mkstemp()
tmpFile = os.fdopen(handle, "w+b")
if self.isWindows:
submitTemplate = submitTemplate.replace("\n", "\r\n")
tmpFile.write(submitTemplate)
tmpFile.close()
if self.prepare_p4_only:
#
# Leave the p4 tree prepared, and the submit template around
# and let the user decide what to do next
#
print
print "P4 workspace prepared for submission."
print "To submit or revert, go to client workspace"
print " " + self.clientPath
print
print "To submit, use \"p4 submit\" to write a new description,"
print "or \"p4 submit -i %s\" to use the one prepared by" \
" \"git p4\"." % fileName
print "You can delete the file \"%s\" when finished." % fileName
if self.preserveUser and p4User and not self.p4UserIsMe(p4User):
print "To preserve change ownership by user %s, you must\n" \
"do \"p4 change -f <change>\" after submitting and\n" \
"edit the User field."
if pureRenameCopy:
print "After submitting, renamed files must be re-synced."
print "Invoke \"p4 sync -f\" on each of these files:"
for f in pureRenameCopy:
print " " + f
print
print "To revert the changes, use \"p4 revert ...\", and delete"
print "the submit template file \"%s\"" % fileName
if filesToAdd:
print "Since the commit adds new files, they must be deleted:"
for f in filesToAdd:
print " " + f
print
return True
#
# Let the user edit the change description, then submit it.
#
if self.edit_template(fileName):
# read the edited message and submit
ret = True
tmpFile = open(fileName, "rb")
message = tmpFile.read()
tmpFile.close()
if self.isWindows:
message = message.replace("\r\n", "\n")
submitTemplate = message[:message.index(separatorLine)]
p4_write_pipe(['submit', '-i'], submitTemplate)
if self.preserveUser:
if p4User:
# Get last changelist number. Cannot easily get it from
# the submit command output as the output is
# unmarshalled.
changelist = self.lastP4Changelist()
self.modifyChangelistUser(changelist, p4User)
# The rename/copy happened by applying a patch that created a
# new file. This leaves it writable, which confuses p4.
for f in pureRenameCopy:
p4_sync(f, "-f")
else:
# skip this patch
ret = False
print "Submission cancelled, undoing p4 changes."
for f in editedFiles:
p4_revert(f)
for f in filesToAdd:
p4_revert(f)
os.remove(f)
for f in filesToDelete:
p4_revert(f)
os.remove(fileName)
return ret
# Export git tags as p4 labels. Create a p4 label and then tag
# with that.
def exportGitTags(self, gitTags):
validLabelRegexp = gitConfig("git-p4.labelExportRegexp")
if len(validLabelRegexp) == 0:
validLabelRegexp = defaultLabelRegexp
m = re.compile(validLabelRegexp)
for name in gitTags:
if not m.match(name):
if verbose:
print "tag %s does not match regexp %s" % (name, validLabelRegexp)
continue
# Get the p4 commit this corresponds to
logMessage = extractLogMessageFromGitCommit(name)
values = extractSettingsGitLog(logMessage)
if not values.has_key('change'):
# a tag pointing to something not sent to p4; ignore
if verbose:
print "git tag %s does not give a p4 commit" % name
continue
else:
changelist = values['change']
# Get the tag details.
inHeader = True
isAnnotated = False
body = []
for l in read_pipe_lines(["git", "cat-file", "-p", name]):
l = l.strip()
if inHeader:
if re.match(r'tag\s+', l):
isAnnotated = True
elif re.match(r'\s*$', l):
inHeader = False
continue
else:
body.append(l)
if not isAnnotated:
body = ["lightweight tag imported by git p4\n"]
# Create the label - use the same view as the client spec we are using
clientSpec = getClientSpec()
labelTemplate = "Label: %s\n" % name
labelTemplate += "Description:\n"
for b in body:
labelTemplate += "\t" + b + "\n"
labelTemplate += "View:\n"
for depot_side in clientSpec.mappings:
labelTemplate += "\t%s\n" % depot_side
if self.dry_run:
print "Would create p4 label %s for tag" % name
elif self.prepare_p4_only:
print "Not creating p4 label %s for tag due to option" \
" --prepare-p4-only" % name
else:
p4_write_pipe(["label", "-i"], labelTemplate)
# Use the label
p4_system(["tag", "-l", name] +
["%s@%s" % (depot_side, changelist) for depot_side in clientSpec.mappings])
if verbose:
print "created p4 label for tag %s" % name
def run(self, args):
if len(args) == 0:
self.master = currentGitBranch()
if len(self.master) == 0 or not gitBranchExists("refs/heads/%s" % self.master):
die("Detecting current git branch failed!")
elif len(args) == 1:
self.master = args[0]
if not branchExists(self.master):
die("Branch %s does not exist" % self.master)
else:
return False
allowSubmit = gitConfig("git-p4.allowSubmit")
if len(allowSubmit) > 0 and not self.master in allowSubmit.split(","):
die("%s is not in git-p4.allowSubmit" % self.master)
[upstream, settings] = findUpstreamBranchPoint()
self.depotPath = settings['depot-paths'][0]
if len(self.origin) == 0:
self.origin = upstream
if self.preserveUser:
if not self.canChangeChangelists():
die("Cannot preserve user names without p4 super-user or admin permissions")
# if not set from the command line, try the config file
if self.conflict_behavior is None:
val = gitConfig("git-p4.conflict")
if val:
if val not in self.conflict_behavior_choices:
die("Invalid value '%s' for config git-p4.conflict" % val)
else:
val = "ask"
self.conflict_behavior = val
if self.verbose:
print "Origin branch is " + self.origin
if len(self.depotPath) == 0:
print "Internal error: cannot locate perforce depot path from existing branches"
sys.exit(128)
self.useClientSpec = False
if gitConfigBool("git-p4.useclientspec"):
self.useClientSpec = True
if self.useClientSpec:
self.clientSpecDirs = getClientSpec()
if self.useClientSpec:
# all files are relative to the client spec
self.clientPath = getClientRoot()
else:
self.clientPath = p4Where(self.depotPath)
if self.clientPath == "":
die("Error: Cannot locate perforce checkout of %s in client view" % self.depotPath)
print "Perforce checkout for depot path %s located at %s" % (self.depotPath, self.clientPath)
self.oldWorkingDirectory = os.getcwd()
# ensure the clientPath exists
new_client_dir = False
if not os.path.exists(self.clientPath):
new_client_dir = True
os.makedirs(self.clientPath)
chdir(self.clientPath, is_client_path=True)
if self.dry_run:
print "Would synchronize p4 checkout in %s" % self.clientPath
else:
print "Synchronizing p4 checkout..."
if new_client_dir:
# old one was destroyed, and maybe nobody told p4
p4_sync("...", "-f")
else:
p4_sync("...")
self.check()
commits = []
for line in read_pipe_lines(["git", "rev-list", "--no-merges", "%s..%s" % (self.origin, self.master)]):
commits.append(line.strip())
commits.reverse()
if self.preserveUser or gitConfigBool("git-p4.skipUserNameCheck"):
self.checkAuthorship = False
else:
self.checkAuthorship = True
if self.preserveUser:
self.checkValidP4Users(commits)
#
# Build up a set of options to be passed to diff when
# submitting each commit to p4.
#
if self.detectRenames:
# command-line -M arg
self.diffOpts = "-M"
else:
# If not explicitly set check the config variable
detectRenames = gitConfig("git-p4.detectRenames")
if detectRenames.lower() == "false" or detectRenames == "":
self.diffOpts = ""
elif detectRenames.lower() == "true":
self.diffOpts = "-M"
else:
self.diffOpts = "-M%s" % detectRenames
# no command-line arg for -C or --find-copies-harder, just
# config variables
detectCopies = gitConfig("git-p4.detectCopies")
if detectCopies.lower() == "false" or detectCopies == "":
pass
elif detectCopies.lower() == "true":
self.diffOpts += " -C"
else:
self.diffOpts += " -C%s" % detectCopies
if gitConfigBool("git-p4.detectCopiesHarder"):
self.diffOpts += " --find-copies-harder"
#
# Apply the commits, one at a time. On failure, ask if should
# continue to try the rest of the patches, or quit.
#
if self.dry_run:
print "Would apply"
applied = []
last = len(commits) - 1
for i, commit in enumerate(commits):
if self.dry_run:
print " ", read_pipe(["git", "show", "-s",
"--format=format:%h %s", commit])
ok = True
else:
ok = self.applyCommit(commit)
if ok:
applied.append(commit)
else:
if self.prepare_p4_only and i < last:
print "Processing only the first commit due to option" \
" --prepare-p4-only"
break
if i < last:
quit = False
while True:
# prompt for what to do, or use the option/variable
if self.conflict_behavior == "ask":
print "What do you want to do?"
response = raw_input("[s]kip this commit but apply"
" the rest, or [q]uit? ")
if not response:
continue
elif self.conflict_behavior == "skip":
response = "s"
elif self.conflict_behavior == "quit":
response = "q"
else:
die("Unknown conflict_behavior '%s'" %
self.conflict_behavior)
if response[0] == "s":
print "Skipping this commit, but applying the rest"
break
if response[0] == "q":
print "Quitting"
quit = True
break
if quit:
break
chdir(self.oldWorkingDirectory)
if self.dry_run:
pass
elif self.prepare_p4_only:
pass
elif len(commits) == len(applied):
print "All commits applied!"
sync = P4Sync()
if self.branch:
sync.branch = self.branch
sync.run([])
rebase = P4Rebase()
rebase.rebase()
else:
if len(applied) == 0:
print "No commits applied."
else:
print "Applied only the commits marked with '*':"
for c in commits:
if c in applied:
star = "*"
else:
star = " "
print star, read_pipe(["git", "show", "-s",
"--format=format:%h %s", c])
print "You will have to do 'git p4 sync' and rebase."
if gitConfigBool("git-p4.exportLabels"):
self.exportLabels = True
if self.exportLabels:
p4Labels = getP4Labels(self.depotPath)
gitTags = getGitTags()
missingGitTags = gitTags - p4Labels
self.exportGitTags(missingGitTags)
# exit with error unless everything applied perfectly
if len(commits) != len(applied):
sys.exit(1)
return True
class View(object):
"""Represent a p4 view ("p4 help views"), and map files in a
repo according to the view."""
def __init__(self, client_name):
self.mappings = []
self.client_prefix = "//%s/" % client_name
# cache results of "p4 where" to lookup client file locations
self.client_spec_path_cache = {}
def append(self, view_line):
"""Parse a view line, splitting it into depot and client
sides. Append to self.mappings, preserving order. This
is only needed for tag creation."""
# Split the view line into exactly two words. P4 enforces
# structure on these lines that simplifies this quite a bit.
#
# Either or both words may be double-quoted.
# Single quotes do not matter.
# Double-quote marks cannot occur inside the words.
# A + or - prefix is also inside the quotes.
# There are no quotes unless they contain a space.
# The line is already white-space stripped.
# The two words are separated by a single space.
#
if view_line[0] == '"':
# First word is double quoted. Find its end.
close_quote_index = view_line.find('"', 1)
if close_quote_index <= 0:
die("No first-word closing quote found: %s" % view_line)
depot_side = view_line[1:close_quote_index]
# skip closing quote and space
rhs_index = close_quote_index + 1 + 1
else:
space_index = view_line.find(" ")
if space_index <= 0:
die("No word-splitting space found: %s" % view_line)
depot_side = view_line[0:space_index]
rhs_index = space_index + 1
# prefix + means overlay on previous mapping
if depot_side.startswith("+"):
depot_side = depot_side[1:]
# prefix - means exclude this path, leave out of mappings
exclude = False
if depot_side.startswith("-"):
exclude = True
depot_side = depot_side[1:]
if not exclude:
self.mappings.append(depot_side)
def convert_client_path(self, clientFile):
# chop off //client/ part to make it relative
if not clientFile.startswith(self.client_prefix):
die("No prefix '%s' on clientFile '%s'" %
(self.client_prefix, clientFile))
return clientFile[len(self.client_prefix):]
def update_client_spec_path_cache(self, files):
""" Caching file paths by "p4 where" batch query """
# List depot file paths exclude that already cached
fileArgs = [f['path'] for f in files if f['path'] not in self.client_spec_path_cache]
if len(fileArgs) == 0:
return # All files in cache
where_result = p4CmdList(["-x", "-", "where"], stdin=fileArgs)
for res in where_result:
if "code" in res and res["code"] == "error":
# assume error is "... file(s) not in client view"
continue
if "clientFile" not in res:
die("No clientFile in 'p4 where' output")
if "unmap" in res:
# it will list all of them, but only one not unmap-ped
continue
self.client_spec_path_cache[res['depotFile']] = self.convert_client_path(res["clientFile"])
        # files not found, or unmapped, are recorded with an empty client path
for depotFile in fileArgs:
if depotFile not in self.client_spec_path_cache:
self.client_spec_path_cache[depotFile] = ""
def map_in_client(self, depot_path):
"""Return the relative location in the client where this
depot file should live. Returns "" if the file should
not be mapped in the client."""
if depot_path in self.client_spec_path_cache:
return self.client_spec_path_cache[depot_path]
die( "Error: %s is not found in client spec path" % depot_path )
return ""
class P4Sync(Command, P4UserMap):
delete_actions = ( "delete", "move/delete", "purge" )
def __init__(self):
Command.__init__(self)
P4UserMap.__init__(self)
self.options = [
optparse.make_option("--branch", dest="branch"),
optparse.make_option("--detect-branches", dest="detectBranches", action="store_true"),
optparse.make_option("--changesfile", dest="changesFile"),
optparse.make_option("--silent", dest="silent", action="store_true"),
optparse.make_option("--detect-labels", dest="detectLabels", action="store_true"),
optparse.make_option("--import-labels", dest="importLabels", action="store_true"),
optparse.make_option("--import-local", dest="importIntoRemotes", action="store_false",
help="Import into refs/heads/ , not refs/remotes"),
optparse.make_option("--max-changes", dest="maxChanges"),
optparse.make_option("--keep-path", dest="keepRepoPath", action='store_true',
help="Keep entire BRANCH/DIR/SUBDIR prefix during import"),
optparse.make_option("--use-client-spec", dest="useClientSpec", action='store_true',
help="Only sync files that are included in the Perforce Client Spec")
]
self.description = """Imports from Perforce into a git repository.\n
example:
//depot/my/project/ -- to import the current head
//depot/my/project/@all -- to import everything
//depot/my/project/@1,6 -- to import only from revision 1 to 6
(a ... is not needed in the path p4 specification, it's added implicitly)"""
self.usage += " //depot/path[@revRange]"
self.silent = False
self.createdBranches = set()
self.committedChanges = set()
self.branch = ""
self.detectBranches = False
self.detectLabels = False
self.importLabels = False
self.changesFile = ""
self.syncWithOrigin = True
self.importIntoRemotes = True
self.maxChanges = ""
self.keepRepoPath = False
self.depotPaths = None
self.p4BranchesInGit = []
self.cloneExclude = []
self.useClientSpec = False
self.useClientSpec_from_options = False
self.clientSpecDirs = None
self.tempBranches = []
self.tempBranchLocation = "git-p4-tmp"
if gitConfig("git-p4.syncFromOrigin") == "false":
self.syncWithOrigin = False
# Force a checkpoint in fast-import and wait for it to finish
def checkpoint(self):
self.gitStream.write("checkpoint\n\n")
self.gitStream.write("progress checkpoint\n\n")
out = self.gitOutput.readline()
if self.verbose:
print "checkpoint finished: " + out
def extractFilesFromCommit(self, commit):
self.cloneExclude = [re.sub(r"\.\.\.$", "", path)
for path in self.cloneExclude]
files = []
fnum = 0
while commit.has_key("depotFile%s" % fnum):
path = commit["depotFile%s" % fnum]
if [p for p in self.cloneExclude
if p4PathStartsWith(path, p)]:
found = False
else:
found = [p for p in self.depotPaths
if p4PathStartsWith(path, p)]
if not found:
fnum = fnum + 1
continue
file = {}
file["path"] = path
file["rev"] = commit["rev%s" % fnum]
file["action"] = commit["action%s" % fnum]
file["type"] = commit["type%s" % fnum]
files.append(file)
fnum = fnum + 1
return files
def stripRepoPath(self, path, prefixes):
"""When streaming files, this is called to map a p4 depot path
to where it should go in git. The prefixes are either
self.depotPaths, or self.branchPrefixes in the case of
branch detection."""
if self.useClientSpec:
# branch detection moves files up a level (the branch name)
# from what client spec interpretation gives
path = self.clientSpecDirs.map_in_client(path)
if self.detectBranches:
for b in self.knownBranches:
if path.startswith(b + "/"):
path = path[len(b)+1:]
elif self.keepRepoPath:
# Preserve everything in relative path name except leading
# //depot/; just look at first prefix as they all should
# be in the same depot.
depot = re.sub("^(//[^/]+/).*", r'\1', prefixes[0])
if p4PathStartsWith(path, depot):
path = path[len(depot):]
else:
for p in prefixes:
if p4PathStartsWith(path, p):
path = path[len(p):]
break
path = wildcard_decode(path)
return path
def splitFilesIntoBranches(self, commit):
"""Look at each depotFile in the commit to figure out to what
branch it belongs."""
if self.clientSpecDirs:
files = self.extractFilesFromCommit(commit)
self.clientSpecDirs.update_client_spec_path_cache(files)
branches = {}
fnum = 0
while commit.has_key("depotFile%s" % fnum):
path = commit["depotFile%s" % fnum]
found = [p for p in self.depotPaths
if p4PathStartsWith(path, p)]
if not found:
fnum = fnum + 1
continue
file = {}
file["path"] = path
file["rev"] = commit["rev%s" % fnum]
file["action"] = commit["action%s" % fnum]
file["type"] = commit["type%s" % fnum]
fnum = fnum + 1
# start with the full relative path where this file would
# go in a p4 client
if self.useClientSpec:
relPath = self.clientSpecDirs.map_in_client(path)
else:
relPath = self.stripRepoPath(path, self.depotPaths)
for branch in self.knownBranches.keys():
# add a trailing slash so that a commit into qt/4.2foo
# doesn't end up in qt/4.2, e.g.
if relPath.startswith(branch + "/"):
if branch not in branches:
branches[branch] = []
branches[branch].append(file)
break
return branches
# output one file from the P4 stream
# - helper for streamP4Files
def streamOneP4File(self, file, contents):
relPath = self.stripRepoPath(file['depotFile'], self.branchPrefixes)
if verbose:
sys.stderr.write("%s\n" % relPath)
(type_base, type_mods) = split_p4_type(file["type"])
git_mode = "100644"
if "x" in type_mods:
git_mode = "100755"
if type_base == "symlink":
git_mode = "120000"
# p4 print on a symlink sometimes contains "target\n";
# if it does, remove the newline
data = ''.join(contents)
if not data:
# Some version of p4 allowed creating a symlink that pointed
# to nothing. This causes p4 errors when checking out such
# a change, and errors here too. Work around it by ignoring
# the bad symlink; hopefully a future change fixes it.
print "\nIgnoring empty symlink in %s" % file['depotFile']
return
elif data[-1] == '\n':
contents = [data[:-1]]
else:
contents = [data]
if type_base == "utf16":
# p4 delivers different text in the python output to -G
# than it does when using "print -o", or normal p4 client
# operations. utf16 is converted to ascii or utf8, perhaps.
# But ascii text saved as -t utf16 is completely mangled.
# Invoke print -o to get the real contents.
#
# On windows, the newlines will always be mangled by print, so put
            # them back too. This is not needed for the cygwin windows version,
# just the native "NT" type.
#
text = p4_read_pipe(['print', '-q', '-o', '-', file['depotFile']])
if p4_version_string().find("/NT") >= 0:
text = text.replace("\r\n", "\n")
contents = [ text ]
if type_base == "apple":
# Apple filetype files will be streamed as a concatenation of
# its appledouble header and the contents. This is useless
# on both macs and non-macs. If using "print -q -o xx", it
# will create "xx" with the data, and "%xx" with the header.
# This is also not very useful.
#
# Ideally, someday, this script can learn how to generate
# appledouble files directly and import those to git, but
# non-mac machines can never find a use for apple filetype.
print "\nIgnoring apple filetype file %s" % file['depotFile']
return
# Note that we do not try to de-mangle keywords on utf16 files,
# even though in theory somebody may want that.
pattern = p4_keywords_regexp_for_type(type_base, type_mods)
if pattern:
regexp = re.compile(pattern, re.VERBOSE)
text = ''.join(contents)
text = regexp.sub(r'$\1$', text)
contents = [ text ]
self.gitStream.write("M %s inline %s\n" % (git_mode, relPath))
# total length...
length = 0
for d in contents:
length = length + len(d)
self.gitStream.write("data %d\n" % length)
for d in contents:
self.gitStream.write(d)
self.gitStream.write("\n")
def streamOneP4Deletion(self, file):
relPath = self.stripRepoPath(file['path'], self.branchPrefixes)
if verbose:
sys.stderr.write("delete %s\n" % relPath)
self.gitStream.write("D %s\n" % relPath)
# handle another chunk of streaming data
def streamP4FilesCb(self, marshalled):
# catch p4 errors and complain
err = None
if "code" in marshalled:
if marshalled["code"] == "error":
if "data" in marshalled:
err = marshalled["data"].rstrip()
if err:
f = None
if self.stream_have_file_info:
if "depotFile" in self.stream_file:
f = self.stream_file["depotFile"]
# force a failure in fast-import, else an empty
# commit will be made
self.gitStream.write("\n")
self.gitStream.write("die-now\n")
self.gitStream.close()
# ignore errors, but make sure it exits first
self.importProcess.wait()
if f:
die("Error from p4 print for %s: %s" % (f, err))
else:
die("Error from p4 print: %s" % err)
if marshalled.has_key('depotFile') and self.stream_have_file_info:
# start of a new file - output the old one first
self.streamOneP4File(self.stream_file, self.stream_contents)
self.stream_file = {}
self.stream_contents = []
self.stream_have_file_info = False
# pick up the new file information... for the
# 'data' field we need to append to our array
for k in marshalled.keys():
if k == 'data':
self.stream_contents.append(marshalled['data'])
else:
self.stream_file[k] = marshalled[k]
self.stream_have_file_info = True
# Stream directly from "p4 files" into "git fast-import"
def streamP4Files(self, files):
filesForCommit = []
filesToRead = []
filesToDelete = []
for f in files:
# if using a client spec, only add the files that have
# a path in the client
if self.clientSpecDirs:
if self.clientSpecDirs.map_in_client(f['path']) == "":
continue
filesForCommit.append(f)
if f['action'] in self.delete_actions:
filesToDelete.append(f)
else:
filesToRead.append(f)
# deleted files...
for f in filesToDelete:
self.streamOneP4Deletion(f)
if len(filesToRead) > 0:
self.stream_file = {}
self.stream_contents = []
self.stream_have_file_info = False
# curry self argument
def streamP4FilesCbSelf(entry):
self.streamP4FilesCb(entry)
fileArgs = ['%s#%s' % (f['path'], f['rev']) for f in filesToRead]
p4CmdList(["-x", "-", "print"],
stdin=fileArgs,
cb=streamP4FilesCbSelf)
# do the last chunk
if self.stream_file.has_key('depotFile'):
self.streamOneP4File(self.stream_file, self.stream_contents)
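    # Turn a p4 user id into a git "Name <email>" author string, with a placeholder when unknown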
def make_email(self, userid):
if userid in self.users:
return self.users[userid]
else:
return "%s <a@b>" % userid
# Stream a p4 tag
def streamTag(self, gitStream, labelName, labelDetails, commit, epoch):
if verbose:
print "writing tag %s for commit %s" % (labelName, commit)
gitStream.write("tag %s\n" % labelName)
gitStream.write("from %s\n" % commit)
if labelDetails.has_key('Owner'):
owner = labelDetails["Owner"]
else:
owner = None
# Try to use the owner of the p4 label, or failing that,
# the current p4 user id.
if owner:
email = self.make_email(owner)
else:
email = self.make_email(self.p4UserId())
tagger = "%s %s %s" % (email, epoch, self.tz)
gitStream.write("tagger %s\n" % tagger)
print "labelDetails=",labelDetails
if labelDetails.has_key('Description'):
description = labelDetails['Description']
else:
description = 'Label from git p4'
gitStream.write("data %d\n" % len(description))
gitStream.write(description)
gitStream.write("\n")
def commit(self, details, files, branch, parent = ""):
epoch = details["time"]
author = details["user"]
if self.verbose:
print "commit into %s" % branch
# start with reading files; if that fails, we should not
# create a commit.
new_files = []
for f in files:
if [p for p in self.branchPrefixes if p4PathStartsWith(f['path'], p)]:
new_files.append (f)
else:
sys.stderr.write("Ignoring file outside of prefix: %s\n" % f['path'])
if self.clientSpecDirs:
self.clientSpecDirs.update_client_spec_path_cache(files)
self.gitStream.write("commit %s\n" % branch)
# gitStream.write("mark :%s\n" % details["change"])
self.committedChanges.add(int(details["change"]))
committer = ""
if author not in self.users:
self.getUserMapFromPerforceServer()
committer = "%s %s %s" % (self.make_email(author), epoch, self.tz)
self.gitStream.write("committer %s\n" % committer)
self.gitStream.write("data <<EOT\n")
self.gitStream.write(details["desc"])
self.gitStream.write("\n[git-p4: depot-paths = \"%s\": change = %s" %
(','.join(self.branchPrefixes), details["change"]))
if len(details['options']) > 0:
self.gitStream.write(": options = %s" % details['options'])
self.gitStream.write("]\nEOT\n\n")
if len(parent) > 0:
if self.verbose:
print "parent %s" % parent
self.gitStream.write("from %s\n" % parent)
self.streamP4Files(new_files)
self.gitStream.write("\n")
change = int(details["change"])
if self.labels.has_key(change):
label = self.labels[change]
labelDetails = label[0]
labelRevisions = label[1]
if self.verbose:
print "Change %s is labelled %s" % (change, labelDetails)
files = p4CmdList(["files"] + ["%s...@%s" % (p, change)
for p in self.branchPrefixes])
if len(files) == len(labelRevisions):
cleanedFiles = {}
for info in files:
if info["action"] in self.delete_actions:
continue
cleanedFiles[info["depotFile"]] = info["rev"]
if cleanedFiles == labelRevisions:
self.streamTag(self.gitStream, 'tag_%s' % labelDetails['label'], labelDetails, branch, epoch)
else:
if not self.silent:
print ("Tag %s does not match with change %s: files do not match."
% (labelDetails["label"], change))
else:
if not self.silent:
print ("Tag %s does not match with change %s: file count is different."
% (labelDetails["label"], change))
# Build a dictionary of changelists and labels, for "detect-labels" option.
def getLabels(self):
self.labels = {}
l = p4CmdList(["labels"] + ["%s..." % p for p in self.depotPaths])
if len(l) > 0 and not self.silent:
print "Finding files belonging to labels in %s" % `self.depotPaths`
for output in l:
label = output["label"]
revisions = {}
newestChange = 0
if self.verbose:
print "Querying files for label %s" % label
for file in p4CmdList(["files"] +
["%s...@%s" % (p, label)
for p in self.depotPaths]):
revisions[file["depotFile"]] = file["rev"]
change = int(file["change"])
if change > newestChange:
newestChange = change
self.labels[newestChange] = [output, revisions]
if self.verbose:
print "Label changes: %s" % self.labels.keys()
# Import p4 labels as git tags. A direct mapping does not
# exist, so assume that if all the files are at the same revision
# then we can use that, or it's something more complicated we should
# just ignore.
def importP4Labels(self, stream, p4Labels):
if verbose:
print "import p4 labels: " + ' '.join(p4Labels)
ignoredP4Labels = gitConfigList("git-p4.ignoredP4Labels")
validLabelRegexp = gitConfig("git-p4.labelImportRegexp")
if len(validLabelRegexp) == 0:
validLabelRegexp = defaultLabelRegexp
m = re.compile(validLabelRegexp)
for name in p4Labels:
commitFound = False
if not m.match(name):
if verbose:
print "label %s does not match regexp %s" % (name,validLabelRegexp)
continue
if name in ignoredP4Labels:
continue
labelDetails = p4CmdList(['label', "-o", name])[0]
# get the most recent changelist for each file in this label
change = p4Cmd(["changes", "-m", "1"] + ["%s...@%s" % (p, name)
for p in self.depotPaths])
if change.has_key('change'):
# find the corresponding git commit; take the oldest commit
changelist = int(change['change'])
gitCommit = read_pipe(["git", "rev-list", "--max-count=1",
"--reverse", ":/\[git-p4:.*change = %d\]" % changelist])
if len(gitCommit) == 0:
print "could not find git commit for changelist %d" % changelist
else:
gitCommit = gitCommit.strip()
commitFound = True
# Convert from p4 time format
try:
tmwhen = time.strptime(labelDetails['Update'], "%Y/%m/%d %H:%M:%S")
except ValueError:
print "Could not convert label time %s" % labelDetails['Update']
tmwhen = 1
when = int(time.mktime(tmwhen))
self.streamTag(stream, name, labelDetails, gitCommit, when)
if verbose:
print "p4 label %s mapped to git commit %s" % (name, gitCommit)
else:
if verbose:
print "Label %s has no changelists - possibly deleted?" % name
if not commitFound:
# We can't import this label; don't try again as it will get very
# expensive repeatedly fetching all the files for labels that will
# never be imported. If the label is moved in the future, the
# ignore will need to be removed manually.
system(["git", "config", "--add", "git-p4.ignoredP4Labels", name])
def guessProjectName(self):
for p in self.depotPaths:
if p.endswith("/"):
p = p[:-1]
p = p[p.strip().rfind("/") + 1:]
if not p.endswith("/"):
p += "/"
return p
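    # Populate knownBranches (destination -> source) from "p4 branch" specs,
    # then merge in git-p4.branchList entries from git config below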
def getBranchMapping(self):
lostAndFoundBranches = set()
user = gitConfig("git-p4.branchUser")
if len(user) > 0:
command = "branches -u %s" % user
else:
command = "branches"
for info in p4CmdList(command):
details = p4Cmd(["branch", "-o", info["branch"]])
viewIdx = 0
while details.has_key("View%s" % viewIdx):
paths = details["View%s" % viewIdx].split(" ")
viewIdx = viewIdx + 1
# require standard //depot/foo/... //depot/bar/... mapping
if len(paths) != 2 or not paths[0].endswith("/...") or not paths[1].endswith("/..."):
continue
source = paths[0]
destination = paths[1]
## HACK
if p4PathStartsWith(source, self.depotPaths[0]) and p4PathStartsWith(destination, self.depotPaths[0]):
source = source[len(self.depotPaths[0]):-4]
destination = destination[len(self.depotPaths[0]):-4]
if destination in self.knownBranches:
if not self.silent:
print "p4 branch %s defines a mapping from %s to %s" % (info["branch"], source, destination)
print "but there exists another mapping from %s to %s already!" % (self.knownBranches[destination], destination)
continue
self.knownBranches[destination] = source
lostAndFoundBranches.discard(destination)
if source not in self.knownBranches:
lostAndFoundBranches.add(source)
# Perforce does not strictly require branches to be defined, so we also
# check git config for a branch list.
#
# Example of branch definition in git config file:
# [git-p4]
# branchList=main:branchA
# branchList=main:branchB
# branchList=branchA:branchC
configBranches = gitConfigList("git-p4.branchList")
for branch in configBranches:
if branch:
(source, destination) = branch.split(":")
self.knownBranches[destination] = source
lostAndFoundBranches.discard(destination)
if source not in self.knownBranches:
lostAndFoundBranches.add(source)
for branch in lostAndFoundBranches:
self.knownBranches[branch] = branch
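    # Seed knownBranches from the p4 import branches that already exist in git (used when syncing from origin)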
def getBranchMappingFromGitBranches(self):
branches = p4BranchesInGit(self.importIntoRemotes)
for branch in branches.keys():
if branch == "master":
branch = "main"
else:
branch = branch[len(self.projectName):]
self.knownBranches[branch] = branch
def updateOptionDict(self, d):
option_keys = {}
if self.keepRepoPath:
option_keys['keepRepoPath'] = 1
d["options"] = ' '.join(sorted(option_keys.keys()))
def readOptions(self, d):
self.keepRepoPath = (d.has_key('options')
and ('keepRepoPath' in d['options']))
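    # Map a short p4 branch name to its git ref; "main" maps to the master ref under refPrefix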
def gitRefForBranch(self, branch):
if branch == "main":
return self.refPrefix + "master"
if len(branch) <= 0:
return branch
return self.refPrefix + self.projectName + branch
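    # Bisect the history of ref to find the git commit that imported exactly this p4 change; return "" if not found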
def gitCommitByP4Change(self, ref, change):
if self.verbose:
print "looking in ref " + ref + " for change %s using bisect..." % change
earliestCommit = ""
latestCommit = parseRevision(ref)
while True:
if self.verbose:
print "trying: earliest %s latest %s" % (earliestCommit, latestCommit)
next = read_pipe("git rev-list --bisect %s %s" % (latestCommit, earliestCommit)).strip()
if len(next) == 0:
if self.verbose:
print "argh"
return ""
log = extractLogMessageFromGitCommit(next)
settings = extractSettingsGitLog(log)
currentChange = int(settings['change'])
if self.verbose:
print "current change %s" % currentChange
if currentChange == change:
if self.verbose:
print "found %s" % next
return next
if currentChange < change:
earliestCommit = "^%s" % next
else:
latestCommit = "%s" % next
return ""
def importNewBranch(self, branch, maxChange):
# make fast-import flush all changes to disk and update the refs using the checkpoint
# command so that we can try to find the branch parent in the git history
self.gitStream.write("checkpoint\n\n");
self.gitStream.flush();
branchPrefix = self.depotPaths[0] + branch + "/"
range = "@1,%s" % maxChange
#print "prefix" + branchPrefix
changes = p4ChangesForPaths([branchPrefix], range)
if len(changes) <= 0:
return False
firstChange = changes[0]
#print "first change in branch: %s" % firstChange
sourceBranch = self.knownBranches[branch]
sourceDepotPath = self.depotPaths[0] + sourceBranch
sourceRef = self.gitRefForBranch(sourceBranch)
#print "source " + sourceBranch
branchParentChange = int(p4Cmd(["changes", "-m", "1", "%s...@1,%s" % (sourceDepotPath, firstChange)])["change"])
#print "branch parent: %s" % branchParentChange
gitParent = self.gitCommitByP4Change(sourceRef, branchParentChange)
if len(gitParent) > 0:
self.initialParents[self.gitRefForBranch(branch)] = gitParent
#print "parent git commit: %s" % gitParent
self.importChanges(changes)
return True
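    # Look for a commit on parent whose tree is identical to target; used to find the real branch point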
def searchParent(self, parent, branch, target):
parentFound = False
for blob in read_pipe_lines(["git", "rev-list", "--reverse",
"--no-merges", parent]):
blob = blob.strip()
if len(read_pipe(["git", "diff-tree", blob, target])) == 0:
parentFound = True
if self.verbose:
print "Found parent of %s in commit %s" % (branch, blob)
break
if parentFound:
return blob
else:
return None
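    # Import a list of p4 changes via fast-import, splitting them across branches when branch detection is on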
def importChanges(self, changes):
cnt = 1
for change in changes:
description = p4_describe(change)
self.updateOptionDict(description)
if not self.silent:
sys.stdout.write("\rImporting revision %s (%s%%)" % (change, cnt * 100 / len(changes)))
sys.stdout.flush()
cnt = cnt + 1
try:
if self.detectBranches:
branches = self.splitFilesIntoBranches(description)
for branch in branches.keys():
## HACK --hwn
branchPrefix = self.depotPaths[0] + branch + "/"
self.branchPrefixes = [ branchPrefix ]
parent = ""
filesForCommit = branches[branch]
if self.verbose:
print "branch is %s" % branch
self.updatedBranches.add(branch)
if branch not in self.createdBranches:
self.createdBranches.add(branch)
parent = self.knownBranches[branch]
if parent == branch:
parent = ""
else:
fullBranch = self.projectName + branch
if fullBranch not in self.p4BranchesInGit:
if not self.silent:
print("\n Importing new branch %s" % fullBranch);
if self.importNewBranch(branch, change - 1):
parent = ""
self.p4BranchesInGit.append(fullBranch)
if not self.silent:
print("\n Resuming with change %s" % change);
if self.verbose:
print "parent determined through known branches: %s" % parent
branch = self.gitRefForBranch(branch)
parent = self.gitRefForBranch(parent)
if self.verbose:
print "looking for initial parent for %s; current parent is %s" % (branch, parent)
if len(parent) == 0 and branch in self.initialParents:
parent = self.initialParents[branch]
del self.initialParents[branch]
blob = None
if len(parent) > 0:
tempBranch = "%s/%d" % (self.tempBranchLocation, change)
if self.verbose:
print "Creating temporary branch: " + tempBranch
self.commit(description, filesForCommit, tempBranch)
self.tempBranches.append(tempBranch)
self.checkpoint()
blob = self.searchParent(parent, branch, tempBranch)
if blob:
self.commit(description, filesForCommit, branch, blob)
else:
if self.verbose:
print "Parent of %s not found. Committing into head of %s" % (branch, parent)
self.commit(description, filesForCommit, branch, parent)
else:
files = self.extractFilesFromCommit(description)
self.commit(description, files, self.branch,
self.initialParent)
# only needed once, to connect to the previous commit
self.initialParent = ""
except IOError:
print self.gitError.read()
sys.exit(1)
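    # Import the given revision as a single synthetic commit built from "p4 files" output,
    # instead of replaying every changelist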
def importHeadRevision(self, revision):
print "Doing initial import of %s from revision %s into %s" % (' '.join(self.depotPaths), revision, self.branch)
details = {}
details["user"] = "git perforce import user"
details["desc"] = ("Initial import of %s from the state at revision %s\n"
% (' '.join(self.depotPaths), revision))
details["change"] = revision
newestRevision = 0
fileCnt = 0
fileArgs = ["%s...%s" % (p,revision) for p in self.depotPaths]
for info in p4CmdList(["files"] + fileArgs):
if 'code' in info and info['code'] == 'error':
sys.stderr.write("p4 returned an error: %s\n"
% info['data'])
if info['data'].find("must refer to client") >= 0:
sys.stderr.write("This particular p4 error is misleading.\n")
sys.stderr.write("Perhaps the depot path was misspelled.\n");
sys.stderr.write("Depot path: %s\n" % " ".join(self.depotPaths))
sys.exit(1)
if 'p4ExitCode' in info:
sys.stderr.write("p4 exitcode: %s\n" % info['p4ExitCode'])
sys.exit(1)
change = int(info["change"])
if change > newestRevision:
newestRevision = change
if info["action"] in self.delete_actions:
# don't increase the file cnt, otherwise details["depotFile123"] will have gaps!
#fileCnt = fileCnt + 1
continue
for prop in ["depotFile", "rev", "action", "type" ]:
details["%s%s" % (prop, fileCnt)] = info[prop]
fileCnt = fileCnt + 1
details["change"] = newestRevision
# Use time from top-most change so that all git p4 clones of
# the same p4 repo have the same commit SHA1s.
res = p4_describe(newestRevision)
details["time"] = res["time"]
self.updateOptionDict(details)
try:
self.commit(details, self.extractFilesFromCommit(details), self.branch)
except IOError:
print "IO error with git fast-import. Is your git version recent enough?"
print self.gitError.read()
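    # Main sync entry point: work out depot paths, branches and the change range,
    # then stream everything into "git fast-import"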
def run(self, args):
self.depotPaths = []
self.changeRange = ""
self.previousDepotPaths = []
self.hasOrigin = False
# map from branch depot path to parent branch
self.knownBranches = {}
self.initialParents = {}
if self.importIntoRemotes:
self.refPrefix = "refs/remotes/p4/"
else:
self.refPrefix = "refs/heads/p4/"
if self.syncWithOrigin:
self.hasOrigin = originP4BranchesExist()
if self.hasOrigin:
if not self.silent:
print 'Syncing with origin first, using "git fetch origin"'
system("git fetch origin")
branch_arg_given = bool(self.branch)
if len(self.branch) == 0:
self.branch = self.refPrefix + "master"
if gitBranchExists("refs/heads/p4") and self.importIntoRemotes:
system("git update-ref %s refs/heads/p4" % self.branch)
system("git branch -D p4")
# accept either the command-line option, or the configuration variable
if self.useClientSpec:
# will use this after clone to set the variable
self.useClientSpec_from_options = True
else:
if gitConfigBool("git-p4.useclientspec"):
self.useClientSpec = True
if self.useClientSpec:
self.clientSpecDirs = getClientSpec()
# TODO: should always look at previous commits,
# merge with previous imports, if possible.
if args == []:
if self.hasOrigin:
createOrUpdateBranchesFromOrigin(self.refPrefix, self.silent)
# branches holds mapping from branch name to sha1
branches = p4BranchesInGit(self.importIntoRemotes)
# restrict to just this one, disabling detect-branches
if branch_arg_given:
short = self.branch.split("/")[-1]
if short in branches:
self.p4BranchesInGit = [ short ]
else:
self.p4BranchesInGit = branches.keys()
if len(self.p4BranchesInGit) > 1:
if not self.silent:
print "Importing from/into multiple branches"
self.detectBranches = True
for branch in branches.keys():
self.initialParents[self.refPrefix + branch] = \
branches[branch]
if self.verbose:
print "branches: %s" % self.p4BranchesInGit
p4Change = 0
for branch in self.p4BranchesInGit:
logMsg = extractLogMessageFromGitCommit(self.refPrefix + branch)
settings = extractSettingsGitLog(logMsg)
self.readOptions(settings)
if (settings.has_key('depot-paths')
and settings.has_key ('change')):
change = int(settings['change']) + 1
p4Change = max(p4Change, change)
depotPaths = sorted(settings['depot-paths'])
if self.previousDepotPaths == []:
self.previousDepotPaths = depotPaths
else:
paths = []
for (prev, cur) in zip(self.previousDepotPaths, depotPaths):
prev_list = prev.split("/")
cur_list = cur.split("/")
for i in range(0, min(len(cur_list), len(prev_list))):
                            if cur_list[i] != prev_list[i]:
i = i - 1
break
paths.append ("/".join(cur_list[:i + 1]))
self.previousDepotPaths = paths
if p4Change > 0:
self.depotPaths = sorted(self.previousDepotPaths)
self.changeRange = "@%s,#head" % p4Change
if not self.silent and not self.detectBranches:
print "Performing incremental import into %s git branch" % self.branch
# accept multiple ref name abbreviations:
# refs/foo/bar/branch -> use it exactly
# p4/branch -> prepend refs/remotes/ or refs/heads/
# branch -> prepend refs/remotes/p4/ or refs/heads/p4/
if not self.branch.startswith("refs/"):
if self.importIntoRemotes:
prepend = "refs/remotes/"
else:
prepend = "refs/heads/"
if not self.branch.startswith("p4/"):
prepend += "p4/"
self.branch = prepend + self.branch
if len(args) == 0 and self.depotPaths:
if not self.silent:
print "Depot paths: %s" % ' '.join(self.depotPaths)
else:
if self.depotPaths and self.depotPaths != args:
print ("previous import used depot path %s and now %s was specified. "
"This doesn't work!" % (' '.join (self.depotPaths),
' '.join (args)))
sys.exit(1)
self.depotPaths = sorted(args)
revision = ""
self.users = {}
# Make sure no revision specifiers are used when --changesfile
# is specified.
bad_changesfile = False
if len(self.changesFile) > 0:
for p in self.depotPaths:
if p.find("@") >= 0 or p.find("#") >= 0:
bad_changesfile = True
break
if bad_changesfile:
die("Option --changesfile is incompatible with revision specifiers")
newPaths = []
for p in self.depotPaths:
if p.find("@") != -1:
atIdx = p.index("@")
self.changeRange = p[atIdx:]
if self.changeRange == "@all":
self.changeRange = ""
elif ',' not in self.changeRange:
revision = self.changeRange
self.changeRange = ""
p = p[:atIdx]
elif p.find("#") != -1:
hashIdx = p.index("#")
revision = p[hashIdx:]
p = p[:hashIdx]
elif self.previousDepotPaths == []:
# pay attention to changesfile, if given, else import
# the entire p4 tree at the head revision
if len(self.changesFile) == 0:
revision = "#head"
p = re.sub ("\.\.\.$", "", p)
if not p.endswith("/"):
p += "/"
newPaths.append(p)
self.depotPaths = newPaths
# --detect-branches may change this for each branch
self.branchPrefixes = self.depotPaths
self.loadUserMapFromCache()
self.labels = {}
if self.detectLabels:
self.getLabels();
if self.detectBranches:
## FIXME - what's a P4 projectName ?
self.projectName = self.guessProjectName()
if self.hasOrigin:
self.getBranchMappingFromGitBranches()
else:
self.getBranchMapping()
if self.verbose:
print "p4-git branches: %s" % self.p4BranchesInGit
print "initial parents: %s" % self.initialParents
for b in self.p4BranchesInGit:
if b != "master":
## FIXME
b = b[len(self.projectName):]
self.createdBranches.add(b)
self.tz = "%+03d%02d" % (- time.timezone / 3600, ((- time.timezone % 3600) / 60))
self.importProcess = subprocess.Popen(["git", "fast-import"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE);
self.gitOutput = self.importProcess.stdout
self.gitStream = self.importProcess.stdin
self.gitError = self.importProcess.stderr
if revision:
self.importHeadRevision(revision)
else:
changes = []
if len(self.changesFile) > 0:
output = open(self.changesFile).readlines()
changeSet = set()
for line in output:
changeSet.add(int(line))
for change in changeSet:
changes.append(change)
changes.sort()
else:
# catch "git p4 sync" with no new branches, in a repo that
# does not have any existing p4 branches
if len(args) == 0:
if not self.p4BranchesInGit:
die("No remote p4 branches. Perhaps you never did \"git p4 clone\" in here.")
# The default branch is master, unless --branch is used to
# specify something else. Make sure it exists, or complain
# nicely about how to use --branch.
if not self.detectBranches:
if not branch_exists(self.branch):
if branch_arg_given:
die("Error: branch %s does not exist." % self.branch)
else:
die("Error: no branch %s; perhaps specify one with --branch." %
self.branch)
if self.verbose:
print "Getting p4 changes for %s...%s" % (', '.join(self.depotPaths),
self.changeRange)
changes = p4ChangesForPaths(self.depotPaths, self.changeRange)
if len(self.maxChanges) > 0:
changes = changes[:min(int(self.maxChanges), len(changes))]
if len(changes) == 0:
if not self.silent:
print "No changes to import!"
else:
if not self.silent and not self.detectBranches:
print "Import destination: %s" % self.branch
self.updatedBranches = set()
if not self.detectBranches:
if args:
# start a new branch
self.initialParent = ""
else:
# build on a previous revision
self.initialParent = parseRevision(self.branch)
self.importChanges(changes)
if not self.silent:
print ""
if len(self.updatedBranches) > 0:
sys.stdout.write("Updated branches: ")
for b in self.updatedBranches:
sys.stdout.write("%s " % b)
sys.stdout.write("\n")
if gitConfigBool("git-p4.importLabels"):
self.importLabels = True
if self.importLabels:
p4Labels = getP4Labels(self.depotPaths)
gitTags = getGitTags()
missingP4Labels = p4Labels - gitTags
self.importP4Labels(self.gitStream, missingP4Labels)
self.gitStream.close()
if self.importProcess.wait() != 0:
die("fast-import failed: %s" % self.gitError.read())
self.gitOutput.close()
self.gitError.close()
# Cleanup temporary branches created during import
if self.tempBranches != []:
for branch in self.tempBranches:
read_pipe("git update-ref -d %s" % branch)
os.rmdir(os.path.join(os.environ.get("GIT_DIR", ".git"), self.tempBranchLocation))
# Create a symbolic ref p4/HEAD pointing to p4/<branch> to allow
# a convenient shortcut refname "p4".
if self.importIntoRemotes:
head_ref = self.refPrefix + "HEAD"
if not gitBranchExists(head_ref) and gitBranchExists(self.branch):
system(["git", "symbolic-ref", head_ref, self.branch])
return True
class P4Rebase(Command):
def __init__(self):
Command.__init__(self)
self.options = [
optparse.make_option("--import-labels", dest="importLabels", action="store_true"),
]
self.importLabels = False
self.description = ("Fetches the latest revision from perforce and "
+ "rebases the current work (branch) against it")
def run(self, args):
sync = P4Sync()
sync.importLabels = self.importLabels
sync.run([])
return self.rebase()
def rebase(self):
if os.system("git update-index --refresh") != 0:
die("Some files in your working directory are modified and different than what is in your index. You can use git update-index <filename> to bring the index up-to-date or stash away all your changes with git stash.");
if len(read_pipe("git diff-index HEAD --")) > 0:
die("You have uncommitted changes. Please commit them before rebasing or stash them away with git stash.");
[upstream, settings] = findUpstreamBranchPoint()
if len(upstream) == 0:
die("Cannot find upstream branchpoint for rebase")
# the branchpoint may be p4/foo~3, so strip off the parent
upstream = re.sub("~[0-9]+$", "", upstream)
print "Rebasing the current branch onto %s" % upstream
oldHead = read_pipe("git rev-parse HEAD").strip()
system("git rebase %s" % upstream)
system("git diff-tree --stat --summary -M %s HEAD --" % oldHead)
return True
class P4Clone(P4Sync):
def __init__(self):
P4Sync.__init__(self)
self.description = "Creates a new git repository and imports from Perforce into it"
self.usage = "usage: %prog [options] //depot/path[@revRange]"
self.options += [
optparse.make_option("--destination", dest="cloneDestination",
action='store', default=None,
help="where to leave result of the clone"),
optparse.make_option("-/", dest="cloneExclude",
action="append", type="string",
help="exclude depot path"),
optparse.make_option("--bare", dest="cloneBare",
action="store_true", default=False),
]
self.cloneDestination = None
self.needsGit = False
self.cloneBare = False
# This is required for the "append" cloneExclude action
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
def defaultDestination(self, args):
## TODO: use common prefix of args?
depotPath = args[0]
depotDir = re.sub("(@[^@]*)$", "", depotPath)
depotDir = re.sub("(#[^#]*)$", "", depotDir)
depotDir = re.sub(r"\.\.\.$", "", depotDir)
depotDir = re.sub(r"/$", "", depotDir)
return os.path.split(depotDir)[1]
def run(self, args):
if len(args) < 1:
return False
if self.keepRepoPath and not self.cloneDestination:
sys.stderr.write("Must specify destination for --keep-path\n")
sys.exit(1)
depotPaths = args
if not self.cloneDestination and len(depotPaths) > 1:
self.cloneDestination = depotPaths[-1]
depotPaths = depotPaths[:-1]
self.cloneExclude = ["/"+p for p in self.cloneExclude]
for p in depotPaths:
if not p.startswith("//"):
sys.stderr.write('Depot paths must start with "//": %s\n' % p)
return False
if not self.cloneDestination:
self.cloneDestination = self.defaultDestination(args)
print "Importing from %s into %s" % (', '.join(depotPaths), self.cloneDestination)
if not os.path.exists(self.cloneDestination):
os.makedirs(self.cloneDestination)
chdir(self.cloneDestination)
init_cmd = [ "git", "init" ]
if self.cloneBare:
init_cmd.append("--bare")
retcode = subprocess.call(init_cmd)
if retcode:
raise CalledProcessError(retcode, init_cmd)
if not P4Sync.run(self, depotPaths):
return False
# create a master branch and check out a work tree
if gitBranchExists(self.branch):
system([ "git", "branch", "master", self.branch ])
if not self.cloneBare:
system([ "git", "checkout", "-f" ])
else:
print 'Not checking out any branch, use ' \
'"git checkout -q -b master <branch>"'
# auto-set this variable if invoked with --use-client-spec
if self.useClientSpec_from_options:
system("git config --bool git-p4.useclientspec true")
return True
class P4Branches(Command):
def __init__(self):
Command.__init__(self)
self.options = [ ]
self.description = ("Shows the git branches that hold imports and their "
+ "corresponding perforce depot paths")
self.verbose = False
def run(self, args):
if originP4BranchesExist():
createOrUpdateBranchesFromOrigin()
cmdline = "git rev-parse --symbolic "
cmdline += " --remotes"
for line in read_pipe_lines(cmdline):
line = line.strip()
if not line.startswith('p4/') or line == "p4/HEAD":
continue
branch = line
log = extractLogMessageFromGitCommit("refs/remotes/%s" % branch)
settings = extractSettingsGitLog(log)
print "%s <= %s (%s)" % (branch, ",".join(settings["depot-paths"]), settings["change"])
return True
class HelpFormatter(optparse.IndentedHelpFormatter):
def __init__(self):
optparse.IndentedHelpFormatter.__init__(self)
def format_description(self, description):
if description:
return description + "\n"
else:
return ""
def printUsage(commands):
print "usage: %s <command> [options]" % sys.argv[0]
print ""
print "valid commands: %s" % ", ".join(commands)
print ""
print "Try %s <command> --help for command specific help." % sys.argv[0]
print ""
commands = {
"debug" : P4Debug,
"submit" : P4Submit,
"commit" : P4Submit,
"sync" : P4Sync,
"rebase" : P4Rebase,
"clone" : P4Clone,
"rollback" : P4RollBack,
"branches" : P4Branches
}
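# Illustrative invocations, assuming this script is installed as "git-p4"
# (the depot paths below are placeholders):
#   git-p4 clone //depot/project/main@all
#   git-p4 sync
#   git-p4 rebase
#   git-p4 submit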
def main():
if len(sys.argv[1:]) == 0:
printUsage(commands.keys())
sys.exit(2)
cmdName = sys.argv[1]
try:
klass = commands[cmdName]
cmd = klass()
except KeyError:
print "unknown command %s" % cmdName
print ""
printUsage(commands.keys())
sys.exit(2)
options = cmd.options
cmd.gitdir = os.environ.get("GIT_DIR", None)
args = sys.argv[2:]
options.append(optparse.make_option("--verbose", "-v", dest="verbose", action="store_true"))
if cmd.needsGit:
options.append(optparse.make_option("--git-dir", dest="gitdir"))
parser = optparse.OptionParser(cmd.usage.replace("%prog", "%prog " + cmdName),
options,
description = cmd.description,
formatter = HelpFormatter())
(cmd, args) = parser.parse_args(sys.argv[2:], cmd);
global verbose
verbose = cmd.verbose
if cmd.needsGit:
if cmd.gitdir == None:
cmd.gitdir = os.path.abspath(".git")
if not isValidGitDir(cmd.gitdir):
cmd.gitdir = read_pipe("git rev-parse --git-dir").strip()
if os.path.exists(cmd.gitdir):
cdup = read_pipe("git rev-parse --show-cdup").strip()
if len(cdup) > 0:
chdir(cdup);
if not isValidGitDir(cmd.gitdir):
if isValidGitDir(cmd.gitdir + "/.git"):
cmd.gitdir += "/.git"
else:
die("fatal: cannot locate git repository at %s" % cmd.gitdir)
os.environ["GIT_DIR"] = cmd.gitdir
if not cmd.run(args):
parser.print_help()
sys.exit(2)
if __name__ == '__main__':
main()
| [] | [] | [
"HOME",
"P4DIFF",
"P4EDITOR",
"USERPROFILE",
"PWD",
"GIT_DIR"
] | [] | ["HOME", "P4DIFF", "P4EDITOR", "USERPROFILE", "PWD", "GIT_DIR"] | python | 6 | 0 | |
main.go | package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
yaml "gopkg.in/yaml.v2"
)
type metricConfiguration struct {
Name string `yaml:"name"`
Help string `yaml:"help"`
JQL string `yaml:"jql"`
Interval string `yaml:"interval"`
Labels map[string]string `yaml:"labels"`
ParsedInterval time.Duration
Gauge prometheus.Gauge
}
type configuration struct {
BaseURL string `yaml:"baseURL"`
Login string `yaml:"login"`
Password string `yaml:"-"`
Metrics []metricConfiguration `yaml:"metrics"`
HTTPHeaders map[string]string `yaml:"httpHeaders"`
}
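// An illustrative config file for the structs above (all values are made-up
// placeholders, not a real deployment). The password field is excluded from
// YAML (yaml:"-") and is read from the JIRA_PASSWORD environment variable in
// main():
//
//   baseURL: https://jira.example.com
//   login: metrics-bot
//   httpHeaders:
//     X-Custom-Header: some-value
//   metrics:
//     - name: open_bugs
//       help: Number of open bugs
//       jql: "type = Bug AND status = Open"
//       interval: 5m
//       labels:
//         team: platform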
func loadConfiguration(path string) (*configuration, error) {
var data []byte
var err error
if path == "-" {
data, err = ioutil.ReadAll(os.Stdin)
} else {
data, err = ioutil.ReadFile(path)
}
if err != nil {
return nil, errors.Wrapf(err, "failed to read %s", path)
}
cfg := &configuration{}
if err := yaml.Unmarshal(data, cfg); err != nil {
return nil, errors.Wrap(err, "failed to parse config data")
}
for i := 0; i < len(cfg.Metrics); i++ {
// Set a default value of 5 minutes if none has been specified.
if cfg.Metrics[i].Interval == "" {
cfg.Metrics[i].Interval = "5m"
}
dur, err := time.ParseDuration(cfg.Metrics[i].Interval)
if err != nil {
return nil, errors.Wrapf(err, "invalid interval for metric %d", i)
}
cfg.Metrics[i].ParsedInterval = dur
}
return cfg, nil
}
type pagedResponse struct {
Total uint64 `json:"total"`
}
func addHeaders(r *http.Request, headers map[string]string) {
for k, v := range headers {
r.Header.Set(k, v)
}
}
func check(ctx context.Context, log *logrus.Logger, cfg *configuration, wg *sync.WaitGroup) {
for idx, m := range cfg.Metrics {
go func(idx int, m metricConfiguration) {
timer := time.NewTicker(m.ParsedInterval)
params := url.Values{}
params.Set("jql", m.JQL)
params.Set("maxResults", "0")
u := fmt.Sprintf("%s/rest/api/2/search?%s", cfg.BaseURL, params.Encode())
client := http.Client{}
defer timer.Stop()
loop:
for {
var resp *http.Response
pr := pagedResponse{}
log.Debugf("Checking %s", m.Name)
				r, err := http.NewRequest(http.MethodGet, u, nil)
				if err != nil {
					log.WithError(err).Errorf("Failed to create HTTP request with URL = %s", u)
					goto next
				}
				// Only add headers once the request has been created successfully.
				addHeaders(r, cfg.HTTPHeaders)
r.SetBasicAuth(cfg.Login, cfg.Password)
resp, err = client.Do(r)
if err != nil {
log.WithError(err).WithField("url", u).Errorf("Failed to execute HTTP request")
goto next
}
if resp.StatusCode != http.StatusOK {
resp.Body.Close()
log.WithField("url", u).Errorf("HTTP response had status %d instead of 200", resp.StatusCode)
goto next
}
if err := json.NewDecoder(resp.Body).Decode(&pr); err != nil {
resp.Body.Close()
log.WithError(err).WithField("url", u).Errorf("Failed to parse HTTP response")
goto next
}
resp.Body.Close()
cfg.Metrics[idx].Gauge.Set(float64(pr.Total))
log.Debugf("Completed %s: %v", m.Name, pr.Total)
next:
select {
case <-timer.C:
case <-ctx.Done():
break loop
}
}
log.Infof("Stopping worker for %s", m.Name)
defer wg.Done()
}(idx, m)
}
}
func setupGauges(metrics []metricConfiguration) error {
for i := 0; i < len(metrics); i++ {
metrics[i].Gauge = prometheus.NewGauge(prometheus.GaugeOpts{
Name: fmt.Sprintf("jira_%s", metrics[i].Name),
ConstLabels: metrics[i].Labels,
Help: metrics[i].Help,
})
if err := prometheus.Register(metrics[i].Gauge); err != nil {
return err
}
}
return nil
}
func main() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
log := logrus.New()
var configFile string
var addr string
var verbose bool
pflag.StringVar(&configFile, "config", "", "Path to a configuration file")
pflag.StringVar(&addr, "http-addr", "127.0.0.1:9300", "Address the HTTP server should be listening on")
pflag.BoolVar(&verbose, "verbose", false, "Verbose logging")
pflag.Parse()
if verbose {
log.SetLevel(logrus.DebugLevel)
} else {
log.SetLevel(logrus.InfoLevel)
}
if configFile == "" {
log.Fatal("Please specify a config file using --config CONFIG_FILE")
}
cfg, err := loadConfiguration(configFile)
if err != nil {
log.WithError(err).Fatalf("Failed to load config from %s", configFile)
}
cfg.Password = os.Getenv("JIRA_PASSWORD")
if cfg.Password == "" {
log.Fatal("Please specify a JIRA_PASSWORD via environment variable")
}
if err := setupGauges(cfg.Metrics); err != nil {
log.WithError(err).Fatal("Failed to setup gauges")
}
	// Buffer of 1 so signal.Notify never drops the signal.
	sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT)
httpServer := http.Server{}
wg := sync.WaitGroup{}
wg.Add(len(cfg.Metrics) + 2)
go func() {
<-sigChan
log.Info("Shutting down...")
httpServer.Close()
cancel()
defer wg.Done()
}()
go check(ctx, log, cfg, &wg)
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler())
httpServer.Handler = mux
httpServer.Addr = addr
go func() {
defer wg.Done()
log.Infof("Starting server on %s", addr)
if err := httpServer.ListenAndServe(); err != nil {
cancel()
log.WithError(err).Error("Server stopped")
}
}()
wg.Wait()
}
| [
"\"JIRA_PASSWORD\""
] | [] | [
"JIRA_PASSWORD"
] | [] | ["JIRA_PASSWORD"] | go | 1 | 0 | |
SIFID/sifid_score.py | #!/usr/bin/env python3
"""Calculates ***Single Image*** Frechet Inception Distance (SIFID) to evalulate Single-Image-GANs
Code was adapted from:
https://github.com/mseitzer/pytorch-fid.git
Which was adapted from the TensorFlow implementation of:
https://github.com/bioinf-jku/TTUR
The FID metric calculates the distance between two distributions of images.
The SIFID calculates the distance between the distribution of deep features of a single real image and a single fake image.
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pathlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from scipy import linalg
#from scipy.misc import imread
from matplotlib.pyplot import imread
from torch.nn.functional import adaptive_avg_pool2d
try:
from tqdm import tqdm
except ImportError:
    # If tqdm is not available, provide a mock version of it
def tqdm(x): return x
from inception import InceptionV3
import torchvision
import numpy
import scipy
import pickle
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--path2real', type=str, help=('Path to the real images'))
parser.add_argument('--path2fake', type=str, help=('Path to generated images'))
parser.add_argument('-c', '--gpu', default='', type=str, help='GPU to use (leave blank for CPU only)')
parser.add_argument('--images_suffix', default='jpg', type=str, help='image file suffix')
def get_activations(files, model, batch_size=1, dims=64,
cuda=False, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the number
of calculated batches is reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
model.eval()
if len(files) % batch_size != 0:
print(('Warning: number of images is not a multiple of the '
'batch size. Some samples are going to be ignored.'))
if batch_size > len(files):
print(('Warning: batch size is bigger than the data size. '
'Setting batch size to data size'))
batch_size = len(files)
n_batches = len(files) // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, dims))
for i in tqdm(range(n_batches)):
if verbose:
print('\rPropagating batch %d/%d' % (i + 1, n_batches),
end='', flush=True)
start = i * batch_size
end = start + batch_size
images = np.array([imread(str(f)).astype(np.float32)
for f in files[start:end]])
images = images[:,:,:,0:3]
# Reshape to (n_images, 3, height, width)
images = images.transpose((0, 3, 1, 2))
#images = images[0,:,:,:]
images /= 255
batch = torch.from_numpy(images).type(torch.FloatTensor)
if cuda:
batch = batch.cuda()
pred = model(batch)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
#if pred.shape[2] != 1 or pred.shape[3] != 1:
# pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr = pred.cpu().data.numpy().transpose(0, 2, 3, 1).reshape(batch_size*pred.shape[2]*pred.shape[3],-1)
if verbose:
print(' done')
return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean)
def calculate_activation_statistics(files, model, batch_size=1,
dims=64, cuda=False, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : The images numpy array is split into batches with
batch size batch_size. A reasonable batch size
depends on the hardware.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the
number of calculated batches is reported.
Returns:
-- mu : The mean over samples of the activations of the inception model.
-- sigma : The covariance matrix of the activations of the inception model.
"""
act = get_activations(files, model, batch_size, dims, cuda, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg'))+ list(path.glob('*.png'))
m, s = calculate_activation_statistics(files, model, batch_size,
dims, cuda)
return m, s
def calculate_sifid_given_paths(path1, path2, batch_size, cuda, dims, suffix):
"""Calculates the SIFID of two paths"""
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx])
if cuda:
model.cuda()
path1 = pathlib.Path(path1)
files1 = list(path1.glob('*.%s' %suffix))
path2 = pathlib.Path(path2)
files2 = list(path2.glob('*.%s' %suffix))
print(path1)
print(files1)
fid_values = []
Im_ind = []
for i in range(len(files2)):
m1, s1 = calculate_activation_statistics([files1[i]], model, batch_size, dims, cuda)
m2, s2 = calculate_activation_statistics([files2[i]], model, batch_size, dims, cuda)
fid_values.append(calculate_frechet_distance(m1, s1, m2, s2))
file_num1 = files1[i].name
file_num2 = files2[i].name
Im_ind.append(int(file_num1[:-4]))
Im_ind.append(int(file_num2[:-4]))
return fid_values
if __name__ == '__main__':
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
path1 = args.path2real
path2 = args.path2fake
suffix = args.images_suffix
sifid_values = calculate_sifid_given_paths(path1,path2,1,args.gpu!='',64,suffix)
sifid_values = np.asarray(sifid_values,dtype=np.float32)
numpy.save('SIFID', sifid_values)
print('SIFID: ', sifid_values.mean())
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
client/foundries.go | package client
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/sirupsen/logrus"
tuf "github.com/theupdateframework/notary/tuf/data"
)
type Config struct {
Factory string
Token string
ClientCredentials OAuthConfig
}
type Api struct {
serverUrl string
config Config
client http.Client
}
type NetInfo struct {
Hostname string `json:"hostname"`
Ipv4 string `json:"local_ipv4"`
MAC string `json:"mac"`
}
type Update struct {
CorrelationId string `json:"correlation-id"`
Target string `json:"target"`
Version string `json:"version"`
Time string `json:"time"`
}
type UpdateList struct {
Updates []Update `json:"updates"`
Total int `json:"total"`
Next *string `json:"next"`
}
type EventType struct {
Id string `json:"id"`
}
type EventDetail struct {
Version string `json:"version"`
TargetName string `json:"targetName"`
Success *bool `json:"success,omitempty"`
}
type UpdateEvent struct {
Time string `json:"deviceTime"`
Type EventType `json:"eventType"`
Detail EventDetail `json:"event"`
}
type Device struct {
Uuid string `json:"uuid"`
Name string `json:"name"`
Owner string `json:"owner"`
Factory string `json:"factory"`
CreatedAt string `json:"created-at"`
LastSeen string `json:"last-seen"`
OstreeHash string `json:"ostree-hash"`
DockerApps []string `json:"docker-apps,omitempty"`
Tags []string `json:"device-tags,omitempty"`
Network *NetInfo `json:"network-info,omitempty"`
Hardware *json.RawMessage `json:"hardware-info,omitempty"`
TargetName string `json:"target-name"`
Status string `json:"status"`
CurrentUpdate string `json:"current-update"`
UpToDate bool `json:"up-to-date"`
PublicKey string `json:"public-key"`
}
type DeviceList struct {
Devices []Device `json:"devices"`
Total int `json:"total"`
Next *string `json:"next"`
}
type DockerApp struct {
FileName string `json:"filename"`
Uri string `json:"uri"`
}
type FactoryUser struct {
PolisId string `json:"polis-id"`
Name string `json:"name"`
Role string `json:"role"`
}
type ProjectSecret struct {
Name string `json:"name"`
Value *string `json:"value"`
}
type ProjectTrigger struct {
Type string `json:"type"`
Id int `json:"id,omitempty"`
Secrets []ProjectSecret `json:"secrets"`
}
type TufCustom struct {
HardwareIds []string `json:"hardwareIds,omitempty"`
Tags []string `json:"tags,omitempty"`
TargetFormat string `json:"targetFormat,omitempty"`
Version string `json:"version,omitempty"`
DockerApps map[string]DockerApp `json:"docker_apps,omitempty"`
Name string `json:"name,omitempty"`
}
func NewApiClient(serverUrl string, config Config, caCertPath string) *Api {
var tlsConfig *tls.Config
if len(caCertPath) > 0 {
rootCAs, _ := x509.SystemCertPool()
if rootCAs == nil {
rootCAs = x509.NewCertPool()
}
certs, err := ioutil.ReadFile(caCertPath)
if err != nil {
logrus.Fatalf("Failed to append %q to RootCAs: %v", caCertPath, err)
}
if ok := rootCAs.AppendCertsFromPEM(certs); !ok {
logrus.Warning("No certs appended, using system certs only")
}
tlsConfig = &tls.Config{
RootCAs: rootCAs,
}
}
api := Api{
serverUrl: strings.TrimRight(serverUrl, "/"),
config: config,
client: http.Client{
Timeout: time.Second * 10,
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
},
},
}
return &api
}
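// Illustrative construction of the client (URL, factory name and token are
// placeholders):
//
//	api := NewApiClient("https://api.foundries.io", Config{Factory: "my-factory", Token: "<token>"}, "")
//	targets, err := api.TargetsList("my-factory")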
func (a *Api) setReqHeaders(req *http.Request, jsonContent bool) {
req.Header.Set("User-Agent", "fioctl")
if len(a.config.Token) > 0 {
logrus.Debug("Using API token for http request")
headerName := os.Getenv("TOKEN_HEADER")
if len(headerName) == 0 {
headerName = "OSF-TOKEN"
}
req.Header.Set(headerName, a.config.Token)
}
if len(a.config.ClientCredentials.AccessToken) > 0 {
logrus.Debug("Using oauth token for http request")
tok := base64.StdEncoding.EncodeToString([]byte(a.config.ClientCredentials.AccessToken))
req.Header.Set("Authorization", "Bearer "+tok)
}
if jsonContent {
req.Header.Set("Content-Type", "application/json")
}
}
func (a *Api) RawGet(url string, headers *map[string]string) (*http.Response, error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
a.setReqHeaders(req, false)
if headers != nil {
for key, val := range *headers {
req.Header.Set(key, val)
}
}
return a.client.Do(req)
}
func (a *Api) Get(url string) (*[]byte, error) {
res, err := a.RawGet(url, nil)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, fmt.Errorf("Unable to get '%s': HTTP_%d\n=%s", url, res.StatusCode, body)
}
return &body, nil
}
func (a *Api) Patch(url string, data []byte) (*[]byte, error) {
req, err := http.NewRequest(http.MethodPatch, url, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
a.setReqHeaders(req, true)
res, err := a.client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != 202 && res.StatusCode != 200 {
return nil, fmt.Errorf("Unable to PATCH '%s': HTTP_%d\n=%s", url, res.StatusCode, body)
}
return &body, nil
}
func (a *Api) Post(url string, data []byte) (*[]byte, error) {
req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
a.setReqHeaders(req, true)
res, err := a.client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != 201 {
return nil, fmt.Errorf("Unable to POST '%s': HTTP_%d\n=%s", url, res.StatusCode, body)
}
return &body, nil
}
func (a *Api) Put(url string, data []byte) (*[]byte, error) {
req, err := http.NewRequest(http.MethodPut, url, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
a.setReqHeaders(req, true)
res, err := a.client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != 202 {
return nil, fmt.Errorf("Unable to PUT '%s': HTTP_%d\n=%s", url, res.StatusCode, body)
}
return &body, nil
}
func (a *Api) Delete(url string, data []byte) (*[]byte, error) {
req, err := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
a.setReqHeaders(req, true)
res, err := a.client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != 202 && res.StatusCode != 200 {
return nil, fmt.Errorf("Unable to DELETE '%s': HTTP_%d\n=%s", url, res.StatusCode, body)
}
return &body, nil
}
func (a *Api) DeviceGet(device string) (*Device, error) {
body, err := a.Get(a.serverUrl + "/ota/devices/" + device + "/")
if err != nil {
return nil, err
}
d := Device{}
err = json.Unmarshal(*body, &d)
if err != nil {
return nil, err
}
return &d, nil
}
func (a *Api) DeviceList(shared bool, matchTag, byFactory, nameIlike string) (*DeviceList, error) {
sharedInt := 0
if shared {
sharedInt = 1
}
url := a.serverUrl + "/ota/devices/?"
url += fmt.Sprintf("shared=%d&match_tag=%s&name_ilike=%s&factory=%s", sharedInt, matchTag, nameIlike, byFactory)
logrus.Debugf("DeviceList with url: %s", url)
return a.DeviceListCont(url)
}
func (a *Api) DeviceListCont(url string) (*DeviceList, error) {
body, err := a.Get(url)
if err != nil {
return nil, err
}
devices := DeviceList{}
err = json.Unmarshal(*body, &devices)
if err != nil {
return nil, err
}
return &devices, nil
}
func (a *Api) DeviceDelete(device string) error {
	// Pass an empty body directly rather than shadowing the "bytes" package name.
	_, err := a.Delete(a.serverUrl+"/ota/devices/"+device+"/", []byte{})
return err
}
func (a *Api) DeviceListUpdates(device string) (*UpdateList, error) {
return a.DeviceListUpdatesCont(a.serverUrl + "/ota/devices/" + device + "/updates/")
}
func (a *Api) DeviceListUpdatesCont(url string) (*UpdateList, error) {
body, err := a.Get(url)
if err != nil {
return nil, err
}
updates := UpdateList{}
err = json.Unmarshal(*body, &updates)
if err != nil {
return nil, err
}
return &updates, nil
}
func (a *Api) DeviceUpdateEvents(device, correlationId string) ([]UpdateEvent, error) {
var events []UpdateEvent
body, err := a.Get(a.serverUrl + "/ota/devices/" + device + "/updates/" + correlationId + "/")
if err != nil {
return nil, err
}
err = json.Unmarshal(*body, &events)
if err != nil {
return events, err
}
return events, nil
}
func (a *Api) TargetsListRaw(factory string) (*[]byte, error) {
url := a.serverUrl + "/ota/repo/" + factory + "/api/v1/user_repo/targets.json"
return a.Get(url)
}
func (a *Api) TargetsList(factory string) (*tuf.SignedTargets, error) {
body, err := a.TargetsListRaw(factory)
if err != nil {
return nil, err
}
targets := tuf.SignedTargets{}
err = json.Unmarshal(*body, &targets)
if err != nil {
return nil, err
}
return &targets, nil
}
func (a *Api) TargetCustom(target tuf.FileMeta) (*TufCustom, error) {
custom := TufCustom{}
err := json.Unmarshal(*target.Custom, &custom)
if err != nil {
return nil, err
}
return &custom, nil
}
func (a *Api) TargetsPut(factory string, data []byte) (string, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/targets/"
resp, err := a.Put(url, data)
if err != nil {
return "", err
}
type PutResp struct {
JobServUrl string `json:"jobserv-url"`
}
pr := PutResp{}
if err := json.Unmarshal(*resp, &pr); err != nil {
return "", err
}
return pr.JobServUrl + "runs/UpdateTargets/console.log", nil
}
func (a *Api) TargetUpdateTags(factory string, target_names []string, tag_names []string) (string, error) {
type EmptyTarget struct {
Custom TufCustom `json:"custom"`
}
tags := EmptyTarget{TufCustom{Tags: tag_names}}
type Update struct {
Targets map[string]EmptyTarget `json:"targets"`
}
update := Update{map[string]EmptyTarget{}}
for idx := range target_names {
update.Targets[target_names[idx]] = tags
}
data, err := json.Marshal(update)
if err != nil {
return "", err
}
url := a.serverUrl + "/ota/factories/" + factory + "/targets/"
resp, err := a.Patch(url, data)
if err != nil {
return "", err
}
type PatchResp struct {
JobServUrl string `json:"jobserv-url"`
}
pr := PatchResp{}
if err := json.Unmarshal(*resp, &pr); err != nil {
return "", err
}
return pr.JobServUrl + "runs/UpdateTargets/console.log", nil
}
func (a *Api) TargetDeleteTargets(factory string, target_names []string) (string, error) {
type Update struct {
Targets []string `json:"targets"`
}
update := Update{}
update.Targets = target_names
data, err := json.Marshal(update)
if err != nil {
return "", err
}
url := a.serverUrl + "/ota/factories/" + factory + "/targets/"
resp, err := a.Delete(url, data)
if err != nil {
return "", err
}
type PatchResp struct {
JobServUrl string `json:"jobserv-url"`
}
pr := PatchResp{}
if err := json.Unmarshal(*resp, &pr); err != nil {
return "", err
}
return pr.JobServUrl + "runs/UpdateTargets/console.log", nil
}
func (a *Api) JobservTail(url string) {
offset := 0
status := ""
for {
headers := map[string]string{"X-OFFSET": strconv.Itoa(offset)}
resp, err := a.RawGet(url, &headers)
		if err != nil {
			fmt.Printf("TODO LOG ERROR OR SOMETHING: %s\n", err)
			// Retry instead of dereferencing the nil response below.
			time.Sleep(5 * time.Second)
			continue
		}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Printf("Unable to read body resp: %s", err)
}
if resp.StatusCode != 200 {
fmt.Printf("Unable to get '%s': HTTP_%d\n=%s", url, resp.StatusCode, body)
}
newstatus := resp.Header.Get("X-RUN-STATUS")
if newstatus == "QUEUED" {
if status == "" {
os.Stdout.Write(body)
} else {
os.Stdout.WriteString(".")
}
} else if len(newstatus) == 0 {
body = body[offset:]
os.Stdout.Write(body)
return
} else {
if newstatus != status {
fmt.Printf("\n--- Status change: %s -> %s\n", status, newstatus)
}
os.Stdout.Write(body)
offset += len(body)
}
status = newstatus
time.Sleep(5 * time.Second)
}
}
func (a *Api) FactoryTriggers(factory string) ([]ProjectTrigger, error) {
type Resp struct {
Data []ProjectTrigger `json:"data"`
}
body, err := a.Get(a.serverUrl + "/projects/" + factory + "/lmp/triggers/")
if err != nil {
return nil, err
}
r := Resp{}
err = json.Unmarshal(*body, &r)
return r.Data, err
}
func (a *Api) FactoryUpdateTrigger(factory string, t ProjectTrigger) error {
data, err := json.Marshal(t)
if err != nil {
return err
}
url := a.serverUrl + "/projects/" + factory + "/lmp/triggers/"
if t.Id == 0 {
logrus.Debugf("Creating new trigger")
_, err := a.Post(url, data)
return err
} else {
logrus.Debugf("Patching trigger %d", t.Id)
url += strconv.Itoa(t.Id) + "/"
_, err := a.Patch(url, data)
return err
}
}
func (a *Api) UsersList(factory string) ([]FactoryUser, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/users/"
body, err := a.Get(url)
if err != nil {
return nil, err
}
var users []FactoryUser
err = json.Unmarshal(*body, &users)
if err != nil {
return nil, err
}
return users, nil
}
| [
"\"TOKEN_HEADER\""
] | [] | [
"TOKEN_HEADER"
] | [] | ["TOKEN_HEADER"] | go | 1 | 0 | |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from datetime import datetime, timedelta
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd&q0zfz5j!sm!a4_krfuvp!7nu$r$f(1p$6^1vrwq5p9lx(*lw'
API_URL = os.environ.get('API_URL','http://localhost:8080')
MARVEL_PUBLIC_KEY = os.environ.get('MARVEL_PUBLIC_KEY','57422c4803fb8c8b6625b7f8b6151a6f')
MARVEL_PRIVATE_KEY = os.environ.get('MARVEL_PRIVATE_KEY','90c5754f8927c87bb7e1190fba1092978ed095dc')
# SECURITY WARNING: don't run with debug turned on in production!
ENV_DEBUG = os.environ.get('DEBUG', 'true')
DEBUG = ENV_DEBUG == 'true'
ALLOWED_HOSTS = ['*']
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'drf_yasg',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
HEROKU = bool(os.environ.get('DJANGO_HEROKU', False))
if HEROKU:
# Configure Django App for Heroku.
import django_heroku
django_heroku.settings(locals())
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'es-ES'
TIME_ZONE = 'America/Bogota'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# JWT BASIC CONFIGURATION
JWT_KEY = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9" # Change in production
JWT_ALGORITHM = 'HS512'
JWT_OPTIONS = {'verify_signature': True, 'verify_exp': True,
'verify_nbf': False, 'verify_iat': True, 'verify_aud': False}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=int(os.environ.get("ACCESS_TOKEN_LIFETIME_IN_MINUTES", 60))),
'REFRESH_TOKEN_LIFETIME': timedelta(days=int(os.environ.get("REFRESH_TOKEN_LIFETIME_IN_DAYS", 1))),
'ROTATE_REFRESH_TOKENS': False,
'BLACKLIST_AFTER_ROTATION': True,
'ALGORITHM': 'HS256',
'SIGNING_KEY': SECRET_KEY,
'VERIFYING_KEY': None,
'AUDIENCE': None,
'ISSUER': None,
'AUTH_HEADER_TYPES': ('Bearer',),
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'user_id',
'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
'TOKEN_TYPE_CLAIM': 'token_type',
'JTI_CLAIM': 'jti',
'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 100
} | [] | [] | [
"MARVEL_PRIVATE_KEY",
"MARVEL_PUBLIC_KEY",
"ACCESS_TOKEN_LIFETIME_IN_MINUTES",
"DJANGO_HEROKU",
"DEBUG",
"REFRESH_TOKEN_LIFETIME_IN_DAYS",
"API_URL"
] | [] | ["MARVEL_PRIVATE_KEY", "MARVEL_PUBLIC_KEY", "ACCESS_TOKEN_LIFETIME_IN_MINUTES", "DJANGO_HEROKU", "DEBUG", "REFRESH_TOKEN_LIFETIME_IN_DAYS", "API_URL"] | python | 7 | 0 | |
pgxpool/tx_test.go | package pgxpool_test
import (
"context"
"os"
"testing"
"github.com/khaibin/pgx/v4/pgxpool"
"github.com/stretchr/testify/require"
)
func TestTxExec(t *testing.T) {
t.Parallel()
pool, err := pgxpool.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer pool.Close()
tx, err := pool.Begin(context.Background())
require.NoError(t, err)
defer tx.Rollback(context.Background())
testExec(t, tx)
}
func TestTxQuery(t *testing.T) {
t.Parallel()
pool, err := pgxpool.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer pool.Close()
tx, err := pool.Begin(context.Background())
require.NoError(t, err)
defer tx.Rollback(context.Background())
testQuery(t, tx)
}
func TestTxQueryRow(t *testing.T) {
t.Parallel()
pool, err := pgxpool.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer pool.Close()
tx, err := pool.Begin(context.Background())
require.NoError(t, err)
defer tx.Rollback(context.Background())
testQueryRow(t, tx)
}
func TestTxSendBatch(t *testing.T) {
t.Parallel()
pool, err := pgxpool.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer pool.Close()
tx, err := pool.Begin(context.Background())
require.NoError(t, err)
defer tx.Rollback(context.Background())
testSendBatch(t, tx)
}
func TestTxCopyFrom(t *testing.T) {
t.Parallel()
pool, err := pgxpool.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer pool.Close()
tx, err := pool.Begin(context.Background())
require.NoError(t, err)
defer tx.Rollback(context.Background())
testCopyFrom(t, tx)
}
| [
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\""
] | [] | [
"PGX_TEST_DATABASE"
] | [] | ["PGX_TEST_DATABASE"] | go | 1 | 0 | |
integration/src/main/java/integration/client/RestClient.java | package integration.client;
import integration.server.soap.Endpoint;
import org.glassfish.jersey.client.ClientConfig;
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.WebTarget;
import javax.ws.rs.core.Response;
import java.io.IOException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
public class RestClient {
private static String dataApi = "";
private static WebTarget service;
static {
ClientConfig clientConfig = new ClientConfig();
Client client = ClientBuilder.newClient(clientConfig);
service = client.target(getDataApi());
}
public static String getDataApi() {
String env = System.getenv("ENV"); // This has to be configured in Heroku vars
if (dataApi == "") {
Properties props = new Properties();
URL propFileUrl = Endpoint.class.getClassLoader().getResource("project.properties");
try {
props.load(propFileUrl.openStream());
String environmentName = (env == "" | env == null) ? "local" : env;
dataApi = props.getProperty("project.data.api" + "." + environmentName);
} catch (IOException e) {
e.printStackTrace();
dataApi = "";
}
}
        dataApi = dataApi.endsWith("/") ? dataApi : dataApi + "/";
System.out.println("Data layer REST API URL set to " + dataApi);
return dataApi;
}
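    // Illustrative project.properties entries resolved by getDataApi() via the
    // "project.data.api.<env>" key (URLs are placeholders):
    //
    //   project.data.api.local=http://localhost:8081/
    //   project.data.api.production=https://data.example.com/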
public static WebTarget getService() {
return service;
}
public static Response get(String path, String mediaType, Map<String, Object> params) {
WebTarget target = service.path(path);
for (Map.Entry<String, Object> entry : params.entrySet()) {
target = target.queryParam(entry.getKey(), entry.getValue());
}
Response res = target.request(mediaType).accept(mediaType).get();
res.bufferEntity(); // To use readEntity multiple times
return res;
}
public static Response get(String path, String mediaType) {
return get(path, mediaType, new HashMap<String, Object>());
}
}
| [
"\"ENV\""
] | [] | [
"ENV"
] | [] | ["ENV"] | java | 1 | 0 | |
aws-inventory/lambda/inventory-cloudfront.py | import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import datetime
from dateutil import tz
from antiope.aws_account import *
from common import *
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
RESOURCE_PATH = "cloudfront/distribution"
RESOURCE_TYPE = "AWS::CloudFront::Distribution"
def lambda_handler(event, context):
set_debug(event, logger)
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
message = json.loads(event['Records'][0]['Sns']['Message'])
logger.info("Received message: " + json.dumps(message, sort_keys=True))
try:
target_account = AWSAccount(message['account_id'])
# Cloudfront is a global service
cf_client = target_account.get_client('cloudfront')
resource_item = {}
resource_item['awsAccountId'] = target_account.account_id
resource_item['awsAccountName'] = target_account.account_name
resource_item['resourceType'] = RESOURCE_TYPE
resource_item['source'] = "Antiope"
distributions = list_distributions(cf_client, target_account)
logger.debug(f"Found {len(distributions)} distributions for account {target_account.account_name}({target_account.account_id}")
for distribution in distributions:
resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
resource_item['configuration'] = distribution
resource_item['supplementaryConfiguration'] = {}
resource_item['resourceId'] = distribution['Id']
resource_item['resourceName'] = distribution['DomainName']
resource_item['ARN'] = distribution['ARN']
resource_item['errors'] = {}
save_resource_to_s3(RESOURCE_PATH, distribution['Id'], resource_item)
except AntiopeAssumeRoleError as e:
logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
return()
except ClientError as e:
logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e))
capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e))
raise
except Exception as e:
logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e))
raise
def list_distributions(cf_client, target_account):
distributions = []
response = cf_client.list_distributions()
while 'NextMarker' in response['DistributionList']:
for i in response['DistributionList']['Items']:
distributions.append(i)
response = cf_client.list_distributions(Marker=response['DistributionList']['NextMarker'])
if 'Items' not in response['DistributionList']:
return(distributions)
for i in response['DistributionList']['Items']:
distributions.append(i)
return(distributions)
| [] | [] | [
"LOG_LEVEL"
] | [] | ["LOG_LEVEL"] | python | 1 | 0 | |
java/device/iothub-java-client/src/test/java/tests/integration/com/microsoft/azure/iothub/transport/SendMessagesIT.java | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package tests.integration.com.microsoft.azure.iothub.transport;
import com.microsoft.azure.iot.service.exceptions.IotHubException;
import com.microsoft.azure.iot.service.sdk.Device;
import com.microsoft.azure.iot.service.sdk.RegistryManager;
import com.microsoft.azure.iothub.*;
import org.junit.*;
import tests.integration.com.microsoft.azure.iothub.DeviceConnectionString;
import tests.integration.com.microsoft.azure.iothub.EventCallback;
import tests.integration.com.microsoft.azure.iothub.Success;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.Map;
import java.util.UUID;
public class SendMessagesIT
{
private static String iotHubonnectionStringEnvVarName = "IOTHUB_CONNECTION_STRING";
private static String iotHubConnectionString = "";
private static RegistryManager registryManager;
private static Device deviceHttps;
private static Device deviceAmqps;
private static Device deviceMqtt;
// How much to wait until a message makes it to the server, in milliseconds
private Integer sendTimeout = 5000;
@BeforeClass
public static void setUp() throws Exception
{
Map<String, String> env = System.getenv();
for (String envName : env.keySet())
{
if (envName.equals(iotHubonnectionStringEnvVarName))
{
iotHubConnectionString = env.get(envName);
}
}
registryManager = RegistryManager.createFromConnectionString(iotHubConnectionString);
String uuid = UUID.randomUUID().toString();
String deviceIdHttps = "java-device-client-e2e-test-https".concat("-" + uuid);
String deviceIdAmqps = "java-device-client-e2e-test-amqps".concat("-" + uuid);
String deviceIdMqtt = "java-device-client-e2e-test-mqtt".concat("-" + uuid);
deviceHttps = Device.createFromId(deviceIdHttps, null, null);
deviceAmqps = Device.createFromId(deviceIdAmqps, null, null);
deviceMqtt = Device.createFromId(deviceIdMqtt, null, null);
registryManager.addDevice(deviceHttps);
registryManager.addDevice(deviceAmqps);
registryManager.addDevice(deviceMqtt);
}
@AfterClass
public static void TearDown() throws IOException, IotHubException
{
registryManager.removeDevice(deviceHttps.getDeviceId());
registryManager.removeDevice(deviceAmqps.getDeviceId());
registryManager.removeDevice(deviceMqtt.getDeviceId());
}
@Test
public void SendMessagesOverHttps() throws URISyntaxException, IOException
{
String messageString = "Java client e2e test message over Https protocol";
Message msg = new Message(messageString);
DeviceClient client = new DeviceClient(DeviceConnectionString.get(iotHubConnectionString, deviceHttps), IotHubClientProtocol.HTTPS);
client.open();
for (int i = 0; i < 3; ++i)
{
try
{
Success messageSent = new Success();
EventCallback callback = new EventCallback();
client.sendEventAsync(msg, callback, messageSent);
Integer waitDuration = 0;
while(true)
{
Thread.sleep(100);
waitDuration += 100;
if (messageSent.getResult() || waitDuration > sendTimeout)
{
break;
}
}
if (!messageSent.getResult())
{
Assert.fail("Sending message over HTTPS protocol failed");
}
}
catch (Exception e)
{
Assert.fail("Sending message over HTTPS protocol failed");
}
}
client.close();
}
@Test
public void SendMessagesOverAmqps() throws URISyntaxException, IOException, InterruptedException
{
String messageString = "Java client e2e test message over Amqps protocol";
Message msg = new Message(messageString);
DeviceClient client = new DeviceClient(DeviceConnectionString.get(iotHubConnectionString, deviceAmqps), IotHubClientProtocol.AMQPS);
client.open();
for (int i = 0; i < 3; ++i)
{
try
{
Success messageSent = new Success();
EventCallback callback = new EventCallback();
client.sendEventAsync(msg, callback, messageSent);
Integer waitDuration = 0;
while(true)
{
                    Thread.sleep(100);
                    waitDuration += 100;
if (messageSent.getResult() || waitDuration > sendTimeout)
{
break;
}
}
if (!messageSent.getResult())
{
Assert.fail("Sending message over AMQPS protocol failed");
}
}
catch (Exception e)
{
Assert.fail("Sending message over AMQPS protocol failed");
}
}
client.close();
}
@Test
public void SendMessagesOverMqtt() throws URISyntaxException, IOException
{
String messageString = "Java client e2e test message over Mqtt protocol";
Message msg = new Message(messageString);
DeviceClient client = new DeviceClient(DeviceConnectionString.get(iotHubConnectionString, deviceMqtt), IotHubClientProtocol.MQTT);
client.open();
for (int i = 0; i < 3; ++i)
{
try
{
Success messageSent = new Success();
EventCallback callback = new EventCallback();
client.sendEventAsync(msg, callback, messageSent);
Integer waitDuration = 0;
while(true)
{
                    Thread.sleep(100);
                    waitDuration += 100;
if (messageSent.getResult() || waitDuration > sendTimeout)
{
break;
}
}
if (!messageSent.getResult())
{
Assert.fail("Sending message over MQTT protocol failed");
}
}
catch (Exception e)
{
Assert.fail("Sending message over MQTT protocol failed");
}
}
client.close();
}
} | [] | [] | [] | [] | [] | java | 0 | 0 | |
database/database.go | package database
import (
"golang-products-api/database/migrations"
"log"
"os"
"time"
"github.com/joho/godotenv"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
var database *gorm.DB
func StartDatabase() {
database = createDatabaseInstance()
configureDatabase()
}
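// createDatabaseInstance reads the connection settings from environment
// variables (optionally loaded from a .env file). An illustrative .env with
// placeholder values:
//
//	DB_HOST=localhost
//	DB_PORT=5432
//	DB_USER=postgres
//	DB_PASSWORD=postgres
//	DB_DATABASE=products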
func createDatabaseInstance() *gorm.DB {
godotenv.Load(".env")
databaseHost := os.Getenv("DB_HOST")
databasePort := os.Getenv("DB_PORT")
databaseUser := os.Getenv("DB_USER")
databasePassword := os.Getenv("DB_PASSWORD")
databaseName := os.Getenv("DB_DATABASE")
databaseConnectionUrl :=
"host=" + databaseHost +
" port=" + databasePort +
" user=" + databaseUser +
" dbname=" + databaseName +
" password=" + databasePassword +
" sslmode=disable"
createdDatabase, errorFromDatabaseOpen := gorm.Open(postgres.Open(databaseConnectionUrl), &gorm.Config{})
if errorFromDatabaseOpen != nil {
log.Fatal("Failed to create the database: ", errorFromDatabaseOpen)
}
return createdDatabase
}
func configureDatabase() {
config, errorFromConfig := database.DB()
if errorFromConfig != nil {
log.Fatal("Failed to configure the database: ", errorFromConfig)
}
config.SetMaxIdleConns(10)
config.SetMaxOpenConns(100)
config.SetConnMaxLifetime(time.Hour)
migrations.RunMigrations(database)
}
func GetDatabase() *gorm.DB {
return database
} | [
"\"DB_HOST\"",
"\"DB_PORT\"",
"\"DB_USER\"",
"\"DB_PASSWORD\"",
"\"DB_DATABASE\""
] | [] | [
"DB_PASSWORD",
"DB_HOST",
"DB_PORT",
"DB_DATABASE",
"DB_USER"
] | [] | ["DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_DATABASE", "DB_USER"] | go | 5 | 0 | |
truenas/provider_test.go | package truenas
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"log"
"os"
"testing"
)
var testAccProviders map[string]*schema.Provider
var testAccProvider *schema.Provider
const testResourcePrefix = "tf-acc-test"
var testPoolName string
func init() {
testAccProvider = Provider()
testAccProviders = map[string]*schema.Provider{
"truenas": testAccProvider,
}
if v := os.Getenv("TRUENAS_POOL_NAME"); v != "" {
testPoolName = v
}
if testPoolName == "" {
log.Printf("[WARN] Env TRUENAS_POOL_NAME was not specified, using default 'Tank'")
testPoolName = "Tank"
}
}
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("TRUENAS_API_KEY"); v == "" {
t.Fatal("TRUENAS_API_KEY must be set for acceptance tests")
}
if v := os.Getenv("TRUENAS_BASE_URL"); v == "" {
t.Fatal("TRUENAS_BASE_URL must be set for acceptance tests")
}
}
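// Illustrative environment for running the acceptance tests (values are
// placeholders; Terraform SDK acceptance tests are typically also gated on
// TF_ACC=1):
//
//	export TRUENAS_BASE_URL=https://truenas.local/api/v2.0
//	export TRUENAS_API_KEY=<api-key>
//	export TRUENAS_POOL_NAME=Tank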
| [
"\"TRUENAS_POOL_NAME\"",
"\"TRUENAS_API_KEY\"",
"\"TRUENAS_BASE_URL\""
] | [] | [
"TRUENAS_BASE_URL",
"TRUENAS_POOL_NAME",
"TRUENAS_API_KEY"
] | [] | ["TRUENAS_BASE_URL", "TRUENAS_POOL_NAME", "TRUENAS_API_KEY"] | go | 3 | 0 | |
dbinflux/dbinflux.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide some extra functions to work with InfluxDB."""
import argparse
import copy
import csv
import os
import logging
import time
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError, InfluxDBServerError
def connect(config: dict) -> InfluxDBClient:
"""Connect to the InfluxDB with given config
:param config: Dictionary (or object with dictionary interface) in format:
{'host': 'localhost', 'port': 8086, 'timeout': 5, 'username': 'username', 'password': 'password',
'database': 'database'}
or in format:
{'INFLUXDB_HOST': 'localhost', 'INFLUXDB_PORT': 8086, 'INFLUXDB_TIMEOUT': 5, 'INFLUXDB_USERNAME': 'username',
'INFLUXDB_PASSWORD': 'password', 'INFLUXDB_DATABASE': 'database'}
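    Example (placeholder credentials):
        client = connect({'host': 'localhost', 'port': 8086, 'timeout': 5,
                          'username': 'user', 'password': 'secret', 'database': 'metrics'})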
"""
host = config['host'] if 'host' in config else \
config['INFLUXDB_HOST'] if 'INFLUXDB_HOST' in config else 'localhost'
port = int(config['port']) if 'port' in config else \
int(config['INFLUXDB_PORT']) if 'INFLUXDB_PORT' in config else 8086
timeout = int(config['timeout']) if 'timeout' in config else \
int(config['INFLUXDB_TIMEOUT']) if 'INFLUXDB_TIMEOUT' in config else 5
username = config['username'] if 'username' in config else config['INFLUXDB_USERNAME']
password = config['password'] if 'password' in config else config['INFLUXDB_PASSWORD']
database = config['database'] if 'database' in config else config['INFLUXDB_DATABASE']
return InfluxDBClient(host=host, port=port, username=username, password=password, database=database,
timeout=timeout)
def dump_measurement_csv(client, measurement, chunk_size=500, logger=None, show_cli_cmd=False):
"""Dump given measurement to csv file"""
if not logger:
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
query = "SELECT * FROM {}".format(measurement)
if show_cli_cmd:
logger.info("0. Stop inserting in measurement '%s'", measurement)
logger.info("1. Execute following command in InfluxDB CLI to get same output faster:")
logger.info(" influx -database '%s' -username '%s' -password '%s' -execute '%s LIMIT 2' -format csv > "
"/tmp/%s.csv", client._database, client._username, client._password, query, measurement)
logger.info("2. Execute 1 once again and check files hashes, to be sure no new data was saved during export")
logger.info(" Also, you may want count points number with 'wc -l /tmp/%s.csv'", measurement)
logger.info("3. Then transform csv file '%s.csv' -> '%s.txt' (line protocol file) with csv2lp() function",
measurement, measurement)
logger.info(" Also, do any data transformation you want, for example, type conversion, etc")
logger.info("4. Drop measurement: DROP MEASUREMENT %s", measurement)
logger.info("5. And last, import data back into InfluxDB:")
logger.info(" influx -username '%s' -password '%s' -import -pps 10000 -path=%s.txt",
client._username, client._password, measurement)
logger.info("6. Check new measurement schema with:")
logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW FIELD KEYS FROM %s'",
client._database, client._username, client._password, measurement)
logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW TAG KEYS FROM %s'",
client._database, client._username, client._password, measurement)
logger.info(" influx -database '%s' -username '%s' -password '%s' "
"-execute 'SHOW TAG VALUES FROM %s WITH KEY IN (...)'",
client._database, client._username, client._password, measurement)
else:
logger.info("Dumping measurement '%s' started...", measurement)
logger.info("Start query '%s' with chunk size %d...", query, chunk_size)
t0 = time.time()
res = client.query(query, chunked=True, chunk_size=chunk_size)
t1 = time.time()
tdiff = t1-t0
logger.info('End query. Time: %ds (%.2fm)', tdiff, tdiff/60)
# @todo finish function (actually dump data)
def csv2lp(csv_filepath, tags_keys=None, database=None, retention_policy=None):
"""Transform given csv file into file protocol file. Run example:
csv2lp('/root/bitfinex_ticker.csv', ['symbol'], 'alfadirect', 'alfadirect')"""
tags_keys = tags_keys if tags_keys else []
path, filename = os.path.split(csv_filepath)
filename_wo_extension = os.path.splitext(os.path.basename(filename))[0]
lp_filepath = path + '/' + filename_wo_extension + '.txt'
with open(csv_filepath) as csvfile:
reader = csv.DictReader(csvfile)
with open(lp_filepath, 'w') as lp:
if database and retention_policy:
lp.write('# DML\n')
lp.write('# CONTEXT-DATABASE: {}\n'.format(database))
lp.write('# CONTEXT-RETENTION-POLICY: {}\n\n'.format(retention_policy))
for row in reader:
tag_set = []
for tag_key in tags_keys:
tag_set.append('{}={}'.format(tag_key, row[tag_key]))
tag_set = ','.join(tag_set)
field_set = []
excludes = ['name', 'time'] + tags_keys
for field_key, field_value in row.items():
if field_key not in excludes:
field_set.append('{}={}'.format(field_key, field_value))
field_set = ','.join(field_set)
name = row['name']
time = row['time']
lp.write('{},{} {} {}\n'.format(name, tag_set, field_set, time))
def batch_write_points(client, points, time_precision=None):
batch_size = 10000
l = len(points)
for i in range(0, l, batch_size):
        # The slice end is exclusive; i + batch_size - 1 would silently drop
        # every batch_size-th point.
        end = min(i + batch_size, l)
        client.write_points(points[i:end], time_precision=time_precision)
def move_points(source, dest):
"""This function helps transfer points from one database (and/or measurement) to another one. Here is the demo
script using that function:
import pylibs
source = {
'client': pylibs.connect({
'host': 'influxdb_source',
'username': 'user1',
'password': 'super_secret_password',
'database': 'some_database'
}),
'measurement': 'dockerhub',
'fields': ['field_1', 'field_2', 'another_field'],
'tags': ['tag_1', 'tag_2']
}
dest = pylibs.connect({
'host': 'influxdb_dest',
'username': 'user2',
'password': 'another_super_secret_password',
'database': 'another_database'
})
pylibs.move_points(source, dest)
:param source: Dictionary with source measurement description.
:param dest: Destination client or dictionary with destination measurement description.
:return:
"""
if not isinstance(dest, dict):
dest = {'client': dest}
if 'client' not in source or 'client' not in dest:
print("Source and destinations clients must be passed in!")
exit(1)
if 'measurement' not in source:
print("Source measurement must be passed in!")
exit(2)
elif 'measurement' not in dest:
dest['measurement'] = source['measurement']
res = source['client'].query("SELECT * FROM {}".format(source['measurement']))
points = []
point_template = {
"time": None,
"measurement": dest['measurement'],
"tags": {},
"fields": {},
}
for point in res.get_points():
point_raw = copy.deepcopy(point_template)
point_raw['time'] = point['time']
for meta_key in ['fields', 'tags']:
for key in source[meta_key]:
point_raw[meta_key][key] = point[key]
points.append(point_raw)
batch_write_points(dest['client'], points)
def argparse_add_influxdb_options(parser: argparse.ArgumentParser):
"""Add InfluxDB connection parameters to given parser. Also read environment variables for defaults"""
parser.add_argument('--influxdb-host', metavar='HOST', default=os.environ.get('INFLUXDB_HOST', 'localhost'),
help='InfluxDB host name')
parser.add_argument('--influxdb-port', metavar='PORT', default=os.environ.get('INFLUXDB_PORT', 8086),
help='InfluxDB host port')
parser.add_argument('--influxdb-user', metavar='USER', default=os.environ.get('INFLUXDB_USER', None),
help='InfluxDB user')
parser.add_argument('--influxdb-password', metavar='PASSWORD', default=os.environ.get('INFLUXDB_PASSWORD', None),
help='InfluxDB user password')
parser.add_argument('--influxdb-password-file', metavar='FILE', default=os.environ.get(
'INFLUXDB_PASSWORD_FILE', None), help='Filename contains InfluxDB user password')
parser.add_argument('--influxdb-database', metavar='DATABASE', default=os.environ.get('INFLUXDB_DATABASE', None),
help='InfluxDB database to connect to')
def timestamp_to_influxdb_format(timestamp=None) -> int:
"""Convert given timestamp (number of seconds) to integer of InfluxDB format (number of nanoseconds).
@todo: see __main__ section test: fix them
:param timestamp: Datetime in timestamp format (number of seconds that elapsed since
00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970. Can be string, int or float
:return: Integer that ready to use in influxdb.client.write_points() function without precision parameter
"""
return round(float(timestamp) * 1_000_000_000)
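# A minimal sketch (not part of the original module; helper name is hypothetical): when the timestamp
# arrives as a *string* and the exact nanosecond integer matters, decimal.Decimal avoids the float64
# rounding discussed in the __main__ test below, because floats near 1.5e18 ns are spaced 256 apart.
#
#     from decimal import Decimal
#     def timestamp_str_to_influxdb_format(timestamp: str) -> int:
#         return int(Decimal(timestamp) * 1_000_000_000)
#
#     timestamp_str_to_influxdb_format('1526041380.9045842')  # -> 1526041380904584200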
def write_points_with_exception_handling(client, points, time_precision=None, logger=None):
if not logger:
logger = logging.getLogger()
try:
return client.write_points(points, time_precision=time_precision)
except InfluxDBClientError as e:
logger.warning('Nothing saved as InfluxDB client error happens: %s', getattr(e, 'message', repr(e)))
except InfluxDBServerError as e:
logger.warning('Nothing saved as InfluxDB server error happens: %s', getattr(e, 'message', repr(e)))
def get_measurements(client: InfluxDBClient, database='') -> list:
"""Return the list of measurements in given database"""
query = 'SHOW MEASUREMENTS'
query += ' ON "{}"'.format(database) if database else ''
return [_['name'] for _ in client.query(query).get_points()]
def get_series(client: InfluxDBClient, database='', measurement='') -> list:
"""Return the list of series in given database and measurement"""
query = 'SHOW SERIES'
query += ' ON "{}"'.format(database) if database else ''
query += ' FROM "{}"'.format(measurement) if measurement else ''
return [_['key'] for _ in client.query(query).get_points()]
def get_fields_keys(client: InfluxDBClient, database='', measurement='') -> dict:
"""Return the dictionary of field keys, where key is field name and value is field type, for given database and
measurement"""
query = 'SHOW FIELD KEYS'
query += ' ON "{}"'.format(database) if database else ''
query += ' FROM "{}"'.format(measurement) if measurement else ''
return {_['fieldKey']: _['fieldType'] for _ in client.query(query).get_points()}
def get_tag_keys(client: InfluxDBClient, database='', measurement='') -> list:
"""Return the list of tag keys in given database and measurement"""
query = 'SHOW TAG KEYS'
query += ' ON "{}"'.format(database) if database else ''
query += ' FROM "{}"'.format(measurement) if measurement else ''
return [_['tagKey'] for _ in client.query(query).get_points()]
def get_tags(client: InfluxDBClient, database='', measurement='') -> dict:
"""Return the dictionary of tag keys, where key is tag name and value is a list of tag values, for given database
and measurement"""
tags = {}
for tag in get_tag_keys(client, database, measurement):
query = 'SHOW TAG VALUES'
query += ' ON "{}"'.format(database) if database else ''
query += ' FROM "{}"'.format(measurement) if measurement else ''
query += ' WITH KEY = "{}"'.format(tag)
tags[tag] = [_['value'] for _ in client.query(query).get_points()]
return tags
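# A minimal sketch (assumed database name): the SHOW-query helpers above can be combined to dump the
# schema of a database.
#
#     for m in get_measurements(client, 'some_database'):
#         print(m, get_fields_keys(client, 'some_database', m), get_tag_keys(client, 'some_database', m))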
def compare_point_with_db(client: InfluxDBClient, measurement: str, tag_set: dict, ts: int, point: dict) -> dict:
"""Get the point from InfluxDB for given measurement, tag set and timestamp, and compare results from InfluxDB
with given point. Return comparison stats.
@see https://docs.influxdata.com/influxdb/v1.8/troubleshooting/frequently-asked-questions/#how-does-influxdb-handle-duplicate-points
"""
query = 'SELECT * FROM "{}" WHERE time = {}'.format(measurement, ts) # query part for measurement and timestamp
for tag_name, v in tag_set.items(): # query part for tag set
query += ' AND "{}" = '.format(tag_name) + ("{}".format(v) if isinstance(v, int) else "'{}'".format(v))
row = [v for v in client.query(query).get_points()]
# result dictionary blank
result = {
'query': query,
'query_results_count': len(row),
'fields_not_in_db': {},
'fields_not_equal': {},
'result': False
}
if result['query_results_count'] != 1:
return result
# Clean found point from time and tag set dictionary keys
row = row[0]
del row['time']
for tag_name, _ in tag_set.items():
del row[tag_name]
# Compare fields from found point and given point
fields_not_in_db = {}
fields_not_equal = {}
for field_name, v in point.items():
if field_name in row:
if row[field_name] == v:
del row[field_name]
else:
fields_not_equal[field_name] = v
else:
fields_not_in_db[field_name] = v
result['fields_not_in_db'] = fields_not_in_db
result['fields_not_equal'] = fields_not_equal
result['result'] = not fields_not_equal and not fields_not_in_db
return result
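# A minimal usage sketch (all values assumed): verify that a point written earlier is stored as expected.
#
#     stats = compare_point_with_db(client, measurement='dockerhub', tag_set={'repo': 'library/python'},
#                                   ts=1526041380904584200, point={'pull_count': 123456})
#     if not stats['result']:
#         print('Mismatch:', stats['fields_not_equal'], stats['fields_not_in_db'])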
if __name__ == '__main__':
test = [
{'input': '1526041380.9045842', 'output': 1526041380904584200}
]
for t in test:
        # The mismatch comes from float64 precision, not from round() vs int(): floats near 1.5e18 ns are
        # spaced 256 apart, so float('1526041380.9045842') * 1e9 can only produce ...192; getting the exact
        # ...200 requires decimal arithmetic on the original string (see the Decimal sketch above).
transformed = timestamp_to_influxdb_format(t['input'])
is_ok = 'OK' if transformed == t['output'] else 'FAIL'
is_eq = '==' if transformed == t['output'] else '!='
print("{}: {}({}) -> {}({}) {} {}".format(is_ok, t['input'], type(t['input']), transformed, type(transformed), is_eq, t['output']))
| [] | [] | [
"INFLUXDB_DATABASE",
"INFLUXDB_PASSWORD",
"INFLUXDB_HOST",
"INFLUXDB_PORT",
"INFLUXDB_USER",
"INFLUXDB_PASSWORD_FILE"
] | [] | ["INFLUXDB_DATABASE", "INFLUXDB_PASSWORD", "INFLUXDB_HOST", "INFLUXDB_PORT", "INFLUXDB_USER", "INFLUXDB_PASSWORD_FILE"] | python | 6 | 0 | |
netscaler/config_test.go | /*
Copyright 2016 Citrix Systems, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package netscaler
import (
"bytes"
"fmt"
"log"
"math/rand"
"testing"
"time"
"os"
"github.com/dreampuf/go-nitro/config/basic"
"github.com/dreampuf/go-nitro/config/lb"
"github.com/dreampuf/go-nitro/config/network"
"github.com/dreampuf/go-nitro/config/ns"
)
var client *NitroClient
//Used to generate random config object names
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randomString(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func randomIP() string {
return fmt.Sprintf("%d.%d.%d.%d", rand.Intn(125)+1, rand.Intn(252)+1, rand.Intn(252)+1, rand.Intn(252)+1)
}
//init random and client
func init() {
rand.Seed(time.Now().UnixNano())
var err error
client, err = NewNitroClientFromEnv()
if err != nil {
log.Fatal("Could not create a client: ", err)
}
_, ok := os.LookupEnv("NITRO_LOG") //if NITRO_LOG has been set then let the client get it from the environment
if !ok {
client.SetLogLevel("INFO")
}
}
func TestMain(m *testing.M) {
r := m.Run()
client.ClearConfig()
os.Exit(r)
}
// Functional tests
func TestClearConfig(t *testing.T) {
err := client.ClearConfig()
if err != nil {
t.Error("Could not clear config: ", err)
}
}
func TestAdd(t *testing.T) {
rndIP := randomIP()
lbName := "test_lb_" + randomString(5)
lb1 := lb.Lbvserver{
Name: lbName,
Ipv46: rndIP,
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not add Lbvserver: ", err)
t.Log("Not continuing test")
return
}
rsrc, err := client.FindResource(Lbvserver.Type(), lbName)
if err != nil {
t.Error("Did not find resource of type ", err, Lbvserver.Type(), ":", lbName)
}
val, ok := rsrc["ipv46"]
if ok {
if val != rndIP {
t.Error("Wrong ipv46 for lb ", lbName, ": ", val)
}
val, ok = rsrc["lbmethod"]
if val != "ROUNDROBIN" {
t.Error("Wrong lbmethod for lb ", lbName, ": ", val)
}
val, ok = rsrc["servicetype"]
if val != "HTTP" {
t.Error("Wrong servicetype for lb ", err, lbName, ": ", val)
}
}
if !ok {
t.Error("Non existent property in retrieved lb ", lbName)
}
svcName := randomString(5)
rndIP2 := randomIP()
service1 := basic.Service{
Name: svcName,
Ip: rndIP2,
Port: 80,
Servicetype: "HTTP",
}
client.AddResource(Service.Type(), svcName, &service1)
_, err = client.FindResource(Service.Type(), svcName)
if err != nil {
t.Error("Did not find resource of type ", err, Service.Type(), ":", svcName)
}
}
func TestApply(t *testing.T) {
aclName := "test_acl_" + randomString(5)
acl1 := ns.Nsacl{
Aclname: aclName,
Aclaction: "ALLOW",
Srcip: true,
Srcipval: "192.168.11.10",
Destip: true,
Destipval: "192.183.83.11",
Priority: 1100,
}
_, err := client.AddResource(Nsacl.Type(), aclName, &acl1)
if err != nil {
t.Error("Could not add resource Nsacl", err)
t.Log("Cannot continue")
return
}
acls := ns.Nsacls{}
client.ApplyResource(Nsacls.Type(), &acls)
readAcls, err := client.FindResourceArray(Nsacl.Type(), aclName)
if err != nil {
t.Error("Did not find resource of type ", Nsacl.Type(), err, ":", aclName)
}
if err == nil {
acl2 := readAcls[0]
t.Log("Found acl, kernelstate= ", acl2["kernelstate"])
if acl2["kernelstate"].(string) != "APPLIED" {
t.Error("ACL created but not APPLIED ", Nsacl.Type(), ":", aclName)
}
}
}
func TestUpdate(t *testing.T) {
rndIP := randomIP()
lbName := "test_lb_" + randomString(5)
lb1 := lb.Lbvserver{
Name: lbName,
Ipv46: rndIP,
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not create LB", err)
t.Log("Cannot continue")
return
}
lb1 = lb.Lbvserver{
Name: lbName,
Lbmethod: "LEASTCONNECTION",
}
_, err = client.UpdateResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not update LB")
t.Log("Cannot continue")
return
}
rsrc, err := client.FindResource(Lbvserver.Type(), lbName)
if err != nil {
t.Error("Did not find resource of type ", Lbvserver.Type(), ":", lbName, err)
t.Log("Cannot continue")
return
}
val, ok := rsrc["lbmethod"]
if ok {
if val != "LEASTCONNECTION" {
t.Error("Did not update lb method to LEASTCONNECTION")
}
}
if !ok {
t.Error("Failed to retrieve lb vserver object")
}
}
func TestBindUnBind(t *testing.T) {
rndIP := randomIP()
lbName := "test_lb_" + randomString(5)
rndIP2 := randomIP()
svcName := "test_svc_" + randomString(5)
lb1 := lb.Lbvserver{
Name: lbName,
Ipv46: rndIP,
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not create LB", err)
t.Log("Cannot continue")
return
}
service1 := basic.Service{
Name: svcName,
Ip: rndIP2,
Port: 80,
Servicetype: "HTTP",
}
_, err = client.AddResource(Service.Type(), svcName, &service1)
if err != nil {
t.Error("Could not create service", err)
t.Log("Cannot continue")
return
}
binding := lb.Lbvserverservicebinding{
Name: lbName,
Servicename: svcName,
}
err = client.BindResource(Lbvserver.Type(), lbName, Service.Type(), svcName, &binding)
if err != nil {
t.Error("Could not bind LB to svc", err)
t.Log("Cannot continue")
return
}
exists := client.ResourceBindingExists(Lbvserver.Type(), lbName, Service.Type(), "servicename", svcName)
if !exists {
t.Error("Failed to bind service to lb vserver")
t.Log("Cannot continue")
return
}
err = client.UnbindResource(Lbvserver.Type(), lbName, Service.Type(), svcName, "servicename")
if err != nil {
t.Error("Could not unbind LB to svc", err)
t.Log("Cannot continue")
return
}
exists = client.ResourceBindingExists(Lbvserver.Type(), lbName, Service.Type(), "servicename", svcName)
if exists {
t.Error("Failed to unbind service to lb vserver")
}
}
func TestFindBoundResource(t *testing.T) {
lbName := "test_lb_" + randomString(5)
lb1 := lb.Lbvserver{
Name: lbName,
Ipv46: randomIP(),
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Failed to add resource of type ", Lbvserver.Type(), ":", "sample_lb_1", err)
t.Log("Cannot continue")
return
}
svcName := "test_svc_" + randomString(5)
service1 := basic.Service{
Name: svcName,
Ip: randomIP(),
Port: 80,
Servicetype: "HTTP",
}
_, err = client.AddResource(Service.Type(), svcName, &service1)
if err != nil {
t.Error("Failed to add resource of type ", Service.Type(), ":", svcName, err)
t.Log("Cannot continue")
return
}
binding := lb.Lbvserverservicebinding{
Name: lbName,
Servicename: svcName,
}
err = client.BindResource(Lbvserver.Type(), lbName, Service.Type(), svcName, &binding)
if err != nil {
t.Error("Failed to bind resource of type ", Service.Type(), ":", svcName)
t.Log("Cannot continue")
return
}
result, err := client.FindBoundResource(Lbvserver.Type(), lbName, Service.Type(), "servicename", svcName)
if err != nil {
t.Error("Failed to find bound resource of type ", Service.Type(), ":", svcName)
t.Log("Cannot continue")
return
}
//t.Log("Found bound resource ", result)
if result["servicename"] != svcName {
t.Error("Failed to find bound resource of type ", Service.Type(), ":", svcName)
}
}
func TestDelete(t *testing.T) {
rndIP := randomIP()
lbName := "test_lb_" + randomString(5)
lb1 := lb.Lbvserver{
Name: lbName,
Ipv46: rndIP,
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not create LB", err)
t.Log("Cannot continue")
return
}
err = client.DeleteResource(Lbvserver.Type(), lbName)
if err != nil {
t.Error("Could not delete LB", lbName, err)
t.Log("Cannot continue")
return
}
if client.ResourceExists(Lbvserver.Type(), lbName) {
t.Error("Failed to delete ", lbName)
}
}
func TestDeleteWithArgs(t *testing.T) {
monitorName := "test_lb_monitor_" + randomString(5)
lbmonitor := lb.Lbmonitor{
Monitorname: monitorName,
Type: "http",
Retries: 20,
Failureretries: 10,
Downtime: 60,
}
_, err := client.AddResource(Lbmonitor.Type(), monitorName, &lbmonitor)
if err != nil {
t.Error("Could not create monitor", err)
t.Log("Cannot continue")
return
}
args := map[string]string{"type": "http"}
err = client.DeleteResourceWithArgsMap(Lbmonitor.Type(), monitorName, args)
if err != nil {
t.Error("Could not delete monitor", monitorName, err)
t.Log("Cannot continue")
return
}
}
func TestEnableFeatures(t *testing.T) {
features := []string{"SSL", "CS"}
err := client.EnableFeatures(features)
if err != nil {
t.Error("Failed to enable features", err)
t.Log("Cannot continue")
return
}
result, err := client.ListEnabledFeatures()
if err != nil {
t.Error("Failed to retrieve features", err)
t.Log("Cannot continue")
return
}
found := 0
for _, f := range features {
for _, r := range result {
if f == r {
found = found + 1
}
}
}
if found != len(features) {
t.Error("Requested features do not match enabled features=", features, "result=", result)
}
}
func TestEnableModes(t *testing.T) {
modes := []string{"ULFD", "MBF"}
err := client.EnableModes(modes)
if err != nil {
t.Error("Failed to enable modes", err)
t.Log("Cannot continue")
return
}
result, err := client.ListEnabledModes()
if err != nil {
t.Error("Failed to retrieve modes", err)
t.Log("Cannot continue")
return
}
found := 0
for _, m := range modes {
for _, r := range result {
if m == r {
found = found + 1
}
}
}
if found != len(modes) {
t.Error("Requested modes do not match enabled modes=", modes, "result=", result)
}
}
func TestSaveConfig(t *testing.T) {
err := client.SaveConfig()
if err != nil {
t.Error("Failed to save config", err)
}
}
func TestFindAllResources(t *testing.T) {
lbName1 := "test_lb_" + randomString(5)
lbName2 := "test_lb_" + randomString(5)
lb1 := lb.Lbvserver{
Name: lbName1,
Ipv46: randomIP(),
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
lb2 := lb.Lbvserver{
Name: lbName2,
Ipv46: randomIP(),
Lbmethod: "LEASTCONNECTION",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName1, &lb1)
if err != nil {
t.Error("Failed to add resource of type ", Lbvserver.Type(), ":", lbName1)
t.Log("Cannot continue")
return
}
_, err = client.AddResource(Lbvserver.Type(), lbName2, &lb2)
if err != nil {
t.Error("Failed to add resource of type ", Lbvserver.Type(), ":", lbName2)
t.Log("Cannot continue")
return
}
rsrcs, err := client.FindAllResources(Lbvserver.Type())
if err != nil {
t.Error("Did not find resources of type ", Lbvserver.Type(), err)
}
if len(rsrcs) < 2 {
t.Error("Found only ", len(rsrcs), " resources of type ", Lbvserver.Type(), " expected at least 2")
}
found := 0
for _, v := range rsrcs {
name := v["name"].(string)
if name == lbName1 || name == lbName2 {
found = found + 1
}
}
if found != 2 {
t.Error("Did not find all configured lbvservers")
}
}
func TestFindAllBoundResources(t *testing.T) {
lbName1 := "test_lb_" + randomString(5)
svcName1 := "test_svc_" + randomString(5)
svcName2 := "test_svc_" + randomString(5)
lb1 := lb.Lbvserver{
Name: lbName1,
Ipv46: randomIP(),
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err := client.AddResource(Lbvserver.Type(), lbName1, &lb1)
if err != nil {
t.Error("Could not create LB")
}
service1 := basic.Service{
Name: svcName1,
Ip: randomIP(),
Port: 80,
Servicetype: "HTTP",
}
service2 := basic.Service{
Name: svcName2,
Ip: randomIP(),
Port: 80,
Servicetype: "HTTP",
}
_, err = client.AddResource(Service.Type(), svcName1, &service1)
if err != nil {
t.Error("Could not create service service1", err)
t.Log("Cannot continue")
return
}
_, err = client.AddResource(Service.Type(), svcName2, &service2)
if err != nil {
t.Error("Could not create service service2", err)
t.Log("Cannot continue")
return
}
binding1 := lb.Lbvserverservicebinding{
Name: lbName1,
Servicename: svcName1,
}
binding2 := lb.Lbvserverservicebinding{
Name: lbName1,
Servicename: svcName2,
}
err = client.BindResource(Lbvserver.Type(), lbName1, Service.Type(), svcName1, &binding1)
if err != nil {
t.Error("Could not bind service service1")
t.Log("Cannot continue")
return
}
err = client.BindResource(Lbvserver.Type(), lbName1, Service.Type(), svcName2, &binding2)
if err != nil {
t.Error("Could not bind service service2")
t.Log("Cannot continue")
return
}
rsrcs, err := client.FindAllBoundResources(Lbvserver.Type(), lbName1, Service.Type())
if err != nil {
t.Error("Did not find bound resources of type ", Service.Type())
}
if len(rsrcs) < 2 {
t.Error("Found only ", len(rsrcs), " resources of type ", Service.Type(), " expected at least 2")
t.Log("Cannot continue")
return
}
found := 0
for _, v := range rsrcs {
name := v["servicename"].(string)
if name == svcName1 || name == svcName2 {
found = found + 1
}
}
if found != 2 {
t.Error("Did not find all bound services")
}
}
func TestAction(t *testing.T) {
svcGrpName := "test_sg_" + randomString(5)
sg1 := basic.Servicegroup{
Servicegroupname: svcGrpName,
Servicetype: "http",
}
_, err := client.AddResource(Servicegroup.Type(), svcGrpName, &sg1)
if err != nil {
t.Error("Could not add resource service group", err)
t.Log("Cannot continue")
return
}
createServer := basic.Server{
Ipaddress: "192.168.1.101",
Name: "test-srvr",
}
_, err = client.AddResource(Server.Type(), "test-server", &createServer)
if err != nil {
t.Error("Could not add resource server", err)
t.Log("Cannot continue")
return
}
bindSvcGrpToServer := basic.Servicegroupservicegroupmemberbinding{
Servicegroupname: svcGrpName,
Servername: "test-srvr",
Port: 22,
}
_, err = client.AddResource(Servicegroup_servicegroupmember_binding.Type(), "test-svcgroup", &bindSvcGrpToServer)
if err != nil {
t.Error("Could not bind resource server", err)
t.Log("Cannot continue")
return
}
bindSvcGrpToServer2 := basic.Servicegroupservicegroupmemberbinding{
Servicegroupname: svcGrpName,
Ip: "192.168.1.102",
Port: 22,
}
_, err = client.AddResource(Servicegroup_servicegroupmember_binding.Type(), "test-svcgroup", &bindSvcGrpToServer2)
if err != nil {
t.Error("Could not bind resource server", err)
t.Log("Cannot continue")
return
}
sg2 := basic.Servicegroup{
Servicegroupname: svcGrpName,
Servername: "test-srvr",
Port: 22,
Delay: 100,
Graceful: "YES",
}
err = client.ActOnResource(Servicegroup.Type(), &sg2, "disable")
if err != nil {
t.Error("Could not disable server", err)
t.Log("Cannot continue")
return
}
sg3 := basic.Servicegroup{
Servicegroupname: svcGrpName,
Servername: "test-srvr",
Port: 22,
}
err = client.ActOnResource(Servicegroup.Type(), &sg3, "enable")
if err != nil {
t.Error("Could not enable server", err)
t.Log("Cannot continue")
return
}
sg4 := basic.Servicegroup{
Servicegroupname: svcGrpName,
Newname: svcGrpName + "-NEW",
}
err = client.ActOnResource(Servicegroup.Type(), &sg4, "rename")
if err != nil {
t.Error("Could not rename servicegroup", err)
t.Log("Cannot continue")
return
}
}
func TestUpdateUnnamedResource(t *testing.T) {
if os.Getenv("ADC_PLATFORM") == "CPX" {
t.Skip("Skipping test not supported by CPX")
}
rnat := network.Rnat{
Natip: "172.17.0.2",
Netmask: "255.255.240.0",
Network: "192.168.16.0",
}
err := client.UpdateUnnamedResource(Rnat.Type(), &rnat)
if err != nil {
t.Error("Could not add Rnat", err)
//t.Log("Cannot continue")
return
}
}
func TestFindFilteredResource(t *testing.T) {
if os.Getenv("ADC_PLATFORM") == "CPX" {
t.Skip("Skipping test not supported by CPX")
}
rnat := network.Rnat{
Natip: "172.17.0.2",
Netmask: "255.255.240.0",
Network: "192.168.16.0",
}
err := client.UpdateUnnamedResource(Rnat.Type(), &rnat)
if err != nil {
t.Error("Could not add Rnat", err)
t.Log("Cannot continue")
return
}
d, err := client.FindFilteredResourceArray(Rnat.Type(), map[string]string{"network": "192.168.16.0", "netmask": "255.255.240.0", "natip": "172.17.0.2"})
if err != nil {
t.Error("Could not find Rnat", err)
t.Log("Cannot continue")
return
}
if len(d) != 1 {
t.Error("Error finding Rnat", fmt.Errorf("Wrong number of RNAT discovered: %d", len(d)))
return
}
rnat2 := d[0]
if rnat2["natip"].(string) == "172.17.0.2" && rnat2["netmask"].(string) == "255.255.240.0" && rnat2["network"].(string) == "192.168.16.0" {
return
} else {
t.Error("Error finding Rnat", fmt.Errorf("Discovered RNAT does not match"))
}
}
// TestDesiredStateServicegroupAPI tests the servicegroup_servicegroupmemberlist_binding API
// which is used to bind multiple IP-only members to servicegroup in single Nitro call
func TestDesiredStateServicegroupAPI(t *testing.T) {
svcGrpName := "test_sg_" + randomString(5)
sg1 := basic.Servicegroup{
Servicegroupname: svcGrpName,
Servicetype: "http",
Autoscale: "API",
}
_, err := client.AddResource(Servicegroup.Type(), svcGrpName, &sg1)
if err != nil {
t.Error("Could not add resource autoscale service group", err)
t.Log("Cannot continue")
return
}
ipmembers := []basic.Member{
{
Ip: "1.1.1.1",
Port: 80,
},
{
Ip: "2.2.2.2",
Port: 80,
},
{
Ip: "3.3.3.3",
Port: 80,
},
}
bindSvcGrpToServer := basic.Servicegroupservicegroupmemberlistbinding{
Servicegroupname: svcGrpName,
Members: ipmembers,
}
_, err = client.AddResource(Servicegroup_servicegroupmemberlist_binding.Type(), "test-svcgroup", &bindSvcGrpToServer)
if err != nil {
t.Error("Could not bind resource server", err)
t.Log("Cannot continue")
return
}
}
func TestNullAction(t *testing.T) {
if os.Getenv("ADC_PLATFORM") == "CPX" {
t.Skip("Skipping test not supported by CPX")
}
reboot := ns.Reboot{
Warm: true,
}
err := client.ActOnResource("reboot", &reboot, "")
if err != nil {
t.Error("Could not make null action reboot", err)
t.Log("Cannot continue")
return
}
// Add a timeout to wait for instance to be back online
time.Sleep(60 * time.Second)
}
// TestTokenBasedAuth tests token-based authentication and tests if session-is is cleared in case of session-expiry
func TestTokenBasedAuth(t *testing.T) {
var err error
err = client.Login()
if err != nil {
t.Error("Login Failed")
return
}
rndIP := randomIP()
lbName := "test_lb_" + randomString(5)
lb1 := lb.Lbvserver{
Name: lbName,
Ipv46: rndIP,
Lbmethod: "ROUNDROBIN",
Servicetype: "HTTP",
Port: 8000,
}
_, err = client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
t.Error("Could not add Lbvserver: ", err)
t.Log("Not continuing test")
return
}
rsrc, err := client.FindResource(Lbvserver.Type(), lbName)
if err != nil {
t.Error("Did not find resource of type ", err, Lbvserver.Type(), ":", lbName)
} else {
t.Log("LB-METHOD: ", rsrc["lbmethod"])
}
err = client.DeleteResource(Lbvserver.Type(), lbName)
if err != nil {
t.Error("Could not delete LB", lbName, err)
t.Log("Cannot continue")
return
}
err = client.Logout()
if err != nil {
t.Error("Logout Failed")
return
}
// Test if session-id is cleared in case of session-expiry
client.timeout = 10
client.Login()
time.Sleep(15 * time.Second)
_, err = client.AddResource(Lbvserver.Type(), lbName, &lb1)
if err != nil {
if client.IsLoggedIn() {
t.Error("Sessionid not cleared")
return
}
t.Log("sessionid cleared because of session-expiry")
} else {
t.Error("Adding lbvserver should have failed because of session-expiry")
}
}
func TestConstructQueryString(t *testing.T) {
var buf bytes.Buffer
log.SetOutput(&buf)
generateTestCase := func(findParams FindParams, expected string) func(t *testing.T) {
return func(t *testing.T) {
output := constructQueryString(&findParams)
if output != expected {
t.Log(buf.String())
t.Logf("Expected output \"%s\"", expected)
t.Logf("Actual output \"%s\"", output)
t.Fail()
}
}
}
var argsMap, filterMap map[string]string
var attrsAry []string
var findParams FindParams
argsMap = make(map[string]string)
argsMap["hello"] = "bye"
findParams = FindParams{
ArgsMap: argsMap,
}
t.Run("CASE=1", generateTestCase(findParams, "?args=hello:bye"))
argsMap["bye"] = "hello"
findParams = FindParams{
ArgsMap: argsMap,
}
t.Run("CASE=2", generateTestCase(findParams, "?args=bye:hello,hello:bye"))
attrsAry = []string{"hello"}
findParams = FindParams{
AttrsAry: attrsAry,
}
t.Run("CASE=3", generateTestCase(findParams, "?attrs=hello"))
attrsAry = append(attrsAry, "bye")
t.Run("CASE=4", generateTestCase(findParams, "?attrs=bye,hello"))
filterMap = make(map[string]string)
filterMap["bye"] = "hello"
findParams = FindParams{
FilterMap: filterMap,
}
t.Run("CASE=5", generateTestCase(findParams, "?filter=bye:hello"))
filterMap["hello"] = "bye"
t.Run("CASE=6", generateTestCase(findParams, "?filter=bye:hello,hello:bye"))
filterMap = make(map[string]string)
attrsAry = []string{}
argsMap = make(map[string]string)
filterMap["bye"] = "hello"
attrsAry = append(attrsAry, "hello")
argsMap["bye"] = "hello"
findParams = FindParams{
FilterMap: filterMap,
ArgsMap: argsMap,
AttrsAry: attrsAry,
}
t.Run("CASE=7", generateTestCase(findParams, "?args=bye:hello&filter=bye:hello&attrs=hello"))
filterMap["hello"] = "bye"
attrsAry = append(attrsAry, "bye")
argsMap["hello"] = "bye"
expected := "?args=bye:hello,hello:bye&filter=bye:hello,hello:bye&attrs=bye,hello"
t.Run("CASE=8", generateTestCase(findParams, expected))
}
func TestConstructUrlPathString(t *testing.T) {
var buf bytes.Buffer
log.SetOutput(&buf)
generateTestCase := func(findParams FindParams, expected string) func(t *testing.T) {
return func(t *testing.T) {
output := constructUrlPathString(&findParams)
if output != expected {
t.Log(buf.String())
t.Logf("Expected output \"%s\"", expected)
t.Logf("Actual output \"%s\"", output)
t.Fail()
}
}
}
var findParams FindParams
findParams = FindParams{
ResourceType: "resourcetype",
}
t.Run("CASE=1", generateTestCase(findParams, "resourcetype"))
findParams = FindParams{
ResourceName: "resourcename",
}
t.Run("CASE=2", generateTestCase(findParams, "resourcename"))
findParams = FindParams{
ResourceType: "resourcetype",
ResourceName: "resourcename",
}
t.Run("CASE=3", generateTestCase(findParams, "resourcetype/resourcename"))
}
func TestFindResourceArrayWithParams(t *testing.T) {
var buf bytes.Buffer
log.SetOutput(&buf)
testCase1 := func(t *testing.T) {
findParams := FindParams{
ResourceType: "lbvserver",
ResourceName: "definitelynotexists",
ResourceMissingErrorCode: 258,
}
resource, err := client.FindResourceArrayWithParams(findParams)
hasErrors := false
if err != nil {
hasErrors = true
t.Logf("Error from NITRO request: %s", err.Error())
}
if len(resource) > 0 {
hasErrors = true
t.Logf("Resource array not empty")
}
if hasErrors {
t.Log(buf.String())
t.Fail()
}
}
t.Run("CASE=1", testCase1)
testCase2 := func(t *testing.T) {
argsMap := make(map[string]string)
argsMap["filename"] = "ns.conf"
argsMap["filelocation"] = "%2Fnsconfig"
findParams := FindParams{
ResourceType: "systemfile",
ArgsMap: argsMap,
}
resource, err := client.FindResourceArrayWithParams(findParams)
hasErrors := false
if err != nil {
hasErrors = true
t.Logf("Error from NITRO request: %s", err.Error())
}
if len(resource) != 1 {
hasErrors = true
t.Logf("Resource array not exactly 1")
}
if hasErrors {
t.Log(buf.String())
t.Fail()
}
}
t.Run("CASE=2", testCase2)
testCase3 := func(t *testing.T) {
argsMap := make(map[string]string)
//argsMap["filename"] = "ns.conf"
argsMap["filelocation"] = "%2Fnsconfig"
findParams := FindParams{
ResourceType: "systemfile",
ArgsMap: argsMap,
}
resource, err := client.FindResourceArrayWithParams(findParams)
hasErrors := false
if err != nil {
hasErrors = true
t.Logf("Error from NITRO request: %s", err.Error())
}
if len(resource) <= 1 {
hasErrors = true
t.Logf("Resource array len not > 1")
}
if hasErrors {
t.Log(buf.String())
t.Fail()
}
}
t.Run("CASE=3", testCase3)
testCase4 := func(t *testing.T) {
t.Skipf("TODO: find a resource for which NITRO returns a map instead of an array")
}
t.Run("CASE=4", testCase4)
}
| [
"\"ADC_PLATFORM\"",
"\"ADC_PLATFORM\"",
"\"ADC_PLATFORM\""
] | [] | [
"ADC_PLATFORM"
] | [] | ["ADC_PLATFORM"] | go | 1 | 0 | |
db_tools/import_category_data.py | # encoding: utf-8
__author__ = 'mtianyan'
__date__ = '2018/2/14 0014 03:31'
# Use Django models standalone, outside of manage.py
import sys
import os
# Get the directory that contains this file
pwd = os.path.dirname(os.path.realpath(__file__))
# Add the project root directory to sys.path so the settings module can be imported
sys.path.append(os.path.join(pwd, ".."))
# Same settings module that manage.py uses
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MxShop_Back.settings")
import django
django.setup()
# These models must be imported after django.setup(); the import cannot go at the top of the file
from goods.models import GoodsCategory
from db_tools.data.category_data import row_data
# Level-1 categories
for lev1_cat in row_data:
lev1_intance = GoodsCategory()
lev1_intance.code = lev1_cat["code"]
lev1_intance.name = lev1_cat["name"]
lev1_intance.category_type = 1
lev1_intance.save()
    # Level-2 categories under this level-1 category
for lev2_cat in lev1_cat["sub_categorys"]:
lev2_intance = GoodsCategory()
lev2_intance.code = lev2_cat["code"]
lev2_intance.name = lev2_cat["name"]
lev2_intance.category_type = 2
lev2_intance.parent_category = lev1_intance
lev2_intance.save()
        # Level-3 categories under this level-2 category
for lev3_cat in lev2_cat["sub_categorys"]:
lev3_intance = GoodsCategory()
lev3_intance.code = lev3_cat["code"]
lev3_intance.name = lev3_cat["name"]
lev3_intance.category_type = 3
lev3_intance.parent_category = lev2_intance
lev3_intance.save()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
cmd/cataloguesvc/main.go | package main
import (
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"path/filepath"
"syscall"
"github.com/go-kit/kit/log"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"github.com/prometheus/client_golang/prometheus"
stdprometheus "github.com/prometheus/client_golang/prometheus"
"github.com/sls-microservices-demo/catalogue"
"github.com/weaveworks/common/middleware"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/stdout"
"go.opentelemetry.io/otel/propagation"
export "go.opentelemetry.io/otel/sdk/export/trace"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/semconv"
"golang.org/x/net/context"
)
const (
ServiceName = "catalogue"
)
var (
HTTPLatency = stdprometheus.NewHistogramVec(stdprometheus.HistogramOpts{
Name: "http_request_duration_seconds",
Help: "Time (in seconds) spent serving HTTP requests.",
Buckets: stdprometheus.DefBuckets,
}, []string{"method", "path", "status_code", "isWS"})
ResponseBodySize = stdprometheus.NewHistogramVec(stdprometheus.HistogramOpts{
Name: "http_response_size",
Buckets: stdprometheus.DefBuckets,
}, []string{"method", "path"})
RequestBodySize = stdprometheus.NewHistogramVec(stdprometheus.HistogramOpts{
Name: "http_request_size",
Buckets: stdprometheus.DefBuckets,
}, []string{"method", "path"})
InflightRequests = stdprometheus.NewGaugeVec(stdprometheus.GaugeOpts{
Name: "http_inflight_requests",
}, []string{"method", "path"})
)
// Log domain.
var logger log.Logger
var otlpEndpoint string
func init() {
flag.StringVar(&otlpEndpoint, "otlp-endpoint", os.Getenv("OTLP_ENDPOINT"), "otlp endpoint")
}
func initTracer() {
var traceExporter export.SpanExporter
if otlpEndpoint == "stdout" {
// Create stdout exporter to be able to retrieve
// the collected spans.
exporter, err := stdout.NewExporter()
if err != nil {
panic(err)
}
logger.Log("register stdout exporter", "")
traceExporter = exporter
} else if otlpEndpoint != "" {
// If the OpenTelemetry Collector is running on a local cluster (minikube or
// microk8s), it should be accessible through the NodePort service at the
// `localhost:30080` address. Otherwise, replace `localhost` with the
// address of your cluster. If you run the app inside k8s, then you can
// probably connect directly to the service through dns
exp, err := otlp.NewExporter(context.Background(),
otlp.WithInsecure(),
otlp.WithAddress(otlpEndpoint),
//otlp.WithGRPCDialOption(grpc.WithBlock()), // useful for testing
)
if err != nil {
panic(err)
}
logger.Log("register otlp exporter", otlpEndpoint)
traceExporter = exp
}
if traceExporter == nil {
logger.Log("no opentelemetry exporter", "")
return
}
hostname, _ := os.Hostname()
// For the demonstration, use sdktrace.AlwaysSample sampler to sample all traces.
// In a production application, use sdktrace.ProbabilitySampler with a desired probability.
tp := sdktrace.NewTracerProvider(sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),
sdktrace.WithSyncer(traceExporter),
//sdktrace.WithSyncer(&TTE{}),
sdktrace.WithResource(resource.NewWithAttributes(semconv.ServiceNameKey.String("catalogue"), semconv.HostNameKey.String(hostname))))
otel.SetTracerProvider(tp)
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))
}
func init() {
prometheus.MustRegister(HTTPLatency)
prometheus.MustRegister(ResponseBodySize)
prometheus.MustRegister(RequestBodySize)
prometheus.MustRegister(InflightRequests)
}
func main() {
var (
port = flag.String("port", "80", "Port to bind HTTP listener") // TODO(pb): should be -addr, default ":80"
images = flag.String("images", "./images/", "Image path")
dsn = flag.String("DSN", "catalogue_user:default_password@tcp(catalogue-db:3306)/socksdb", "Data Source Name: [username[:password]@][protocol[(address)]]/dbname")
)
flag.Parse()
fmt.Fprintf(os.Stderr, "images: %q\n", *images)
abs, err := filepath.Abs(*images)
fmt.Fprintf(os.Stderr, "Abs(images): %q (%v)\n", abs, err)
pwd, err := os.Getwd()
fmt.Fprintf(os.Stderr, "Getwd: %q (%v)\n", pwd, err)
files, _ := filepath.Glob(*images + "/*")
	fmt.Fprintf(os.Stderr, "ls: %q\n", files) // lists the image files found under the images path
// Mechanical stuff.
errc := make(chan error)
ctx := context.Background()
{
logger = log.NewLogfmtLogger(os.Stderr)
logger = log.With(logger, "ts", log.DefaultTimestampUTC)
logger = log.With(logger, "caller", log.DefaultCaller)
initTracer()
}
// Data domain.
db, err := sqlx.Open("mysql", *dsn)
if err != nil {
logger.Log("err", err)
os.Exit(1)
}
defer db.Close()
// Check if DB connection can be made, only for logging purposes, should not fail/exit
err = db.Ping()
if err != nil {
logger.Log("Error", "Unable to connect to Database", "DSN", dsn)
}
// Service domain.
var service catalogue.Service
{
service = catalogue.NewCatalogueService(db, logger)
service = catalogue.LoggingMiddleware(logger)(service)
}
// Endpoint domain.
endpoints := catalogue.MakeEndpoints(service)
// HTTP router
router := catalogue.MakeHTTPHandler(ctx, endpoints, *images, logger)
httpMiddleware := []middleware.Interface{
middleware.Instrument{
Duration: HTTPLatency,
RequestBodySize: RequestBodySize,
ResponseBodySize: ResponseBodySize,
InflightRequests: InflightRequests,
RouteMatcher: router,
},
}
// Handler
handler := middleware.Merge(httpMiddleware...).Wrap(router)
// Create and launch the HTTP server.
go func() {
logger.Log("transport", "HTTP", "port", *port)
errc <- http.ListenAndServe(":"+*port, handler)
}()
// Capture interrupts.
go func() {
		c := make(chan os.Signal, 1) // buffered so a signal arriving before the receive is not lost
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
errc <- fmt.Errorf("%s", <-c)
}()
logger.Log("exit", <-errc)
}
| [
"\"OTLP_ENDPOINT\""
] | [] | [
"OTLP_ENDPOINT"
] | [] | ["OTLP_ENDPOINT"] | go | 1 | 0 | |
testing/vcs/test_vcs_1D_datawc_missing.py |
import vcs, numpy, cdms2, MV2, os, sys, vcs.testing.regression as regression
x = regression.init()
yx = x.createyxvsx()
data = """
-999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999.
0.059503571833625334
0.059503571833625334 0.05664014775641405 0.05193557222118004
0.04777129850801233 0.0407139313814465 0.029382624830271705
0.018469399844287374 0.0162382275289592 0.02646680241827459
0.04792041732949079 0.0689138797030203 0.08167038620212037
0.09273558459066569 0.11266293431057901 0.13663018925347364
0.15229174546388072 0.15284435880966177 0.13423845476113883
0.09945904378274077 0.07032267160267985 0.05551039827020481
0.045537187647785464 0.040532491867244946 0.03577527125478327
-999. -999. -999.
-0.058062458673116 -0.08764922509099882 -0.11697036914487152
-0.14836133615864944 -0.17956528904564023 -0.21109198032585794
-0.23846429237248942 -0.2598536549218765 -0.27795672866320387
-0.2939939095159731 -0.30541031366330024 -0.307643559333884
-0.30078421139811795 -0.2841339526883441 -0.26485737397202497
-0.24287299694779327 -0.22379014890999907 -0.20121548204699846
-0.1746486732156772 -0.14585019344118372 -0.12070675757803526
-0.0997891159111037 -0.08229393660994214 -0.06779720501287469
-0.057213385470859794 -0.04875768191096844 -0.0402377347189964
-0.030169328367807245 -0.017560662894847895 -0.006968922654137132
0.0009773980274431048 0.007054306637034288 0.010472286514133042
0.010702384151997032 0.009231553701801242 0.007544033101056543
0.004639797857203645 -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999.
-999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999. -999.
-999. -999. -999.
""".split()
data = numpy.array(data,dtype=numpy.float)
data = MV2.masked_less(data,-900)
x.plot(data,yx,bg=1)
regression.run(x, "test_vcs_1D_datawc_missing.png")
| [] | [] | [] | [] | [] | python | null | null | null |
cmd/node.go | package cmd
import (
"encoding/hex"
"flag"
"fmt"
"net"
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/DataDog/datadog-go/statsd"
"github.com/btcsuite/btcd/addrmgr"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/deso-protocol/core/lib"
"github.com/deso-protocol/core/migrate"
"github.com/deso-protocol/go-deadlock"
"github.com/dgraph-io/badger/v3"
"github.com/go-pg/pg/v10"
"github.com/golang/glog"
migrations "github.com/robinjoseph08/go-pg-migrations/v3"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/profiler"
)
type Node struct {
Server *lib.Server
ChainDB *badger.DB
TXIndex *lib.TXIndex
Params *lib.DeSoParams
Config *Config
Postgres *lib.Postgres
// IsRunning is false when a NewNode is created, set to true on Start(), set to false
// after Stop() is called. Mainly used in testing.
IsRunning bool
// runningMutex is held whenever we call Start() or Stop() on the node.
runningMutex sync.Mutex
// internalExitChan is used internally to signal that a node should close.
internalExitChan chan struct{}
// nodeMessageChan is passed to the core engine and used to trigger node actions such as a restart or database reset.
nodeMessageChan chan lib.NodeMessage
// stopWaitGroup allows us to wait for the node to fully close.
stopWaitGroup sync.WaitGroup
}
func NewNode(config *Config) *Node {
result := Node{}
result.Config = config
result.Params = config.Params
result.internalExitChan = make(chan struct{})
result.nodeMessageChan = make(chan lib.NodeMessage)
return &result
}
// Start is the main function used to kick off the node. The exitChannels are optionally passed by the caller to receive
// signals from the node. In particular, exitChannels will be closed by the node when the node is shutting down for good.
func (node *Node) Start(exitChannels ...*chan struct{}) {
// TODO: Replace glog with logrus so we can also get rid of flag library
flag.Set("log_dir", node.Config.LogDirectory)
flag.Set("v", fmt.Sprintf("%d", node.Config.GlogV))
flag.Set("vmodule", node.Config.GlogVmodule)
flag.Set("alsologtostderr", "true")
flag.Parse()
glog.CopyStandardLogTo("INFO")
node.runningMutex.Lock()
defer node.runningMutex.Unlock()
node.internalExitChan = make(chan struct{})
node.nodeMessageChan = make(chan lib.NodeMessage)
// listenToNodeMessages handles the messages received from the engine through the nodeMessageChan.
go node.listenToNodeMessages()
// Print config
node.Config.Print()
// Check for regtest mode
if node.Config.Regtest {
node.Params.EnableRegtest()
}
// Validate params
validateParams(node.Params)
// This is a bit of a hack, and we should deprecate this. We rely on GlobalDeSoParams static variable in only one
// place in the core code, namely in encoder migrations. Encoder migrations allow us to update the core database
// schema without requiring a resync. GlobalDeSoParams is used so that encoders know if we're on mainnet or testnet.
lib.GlobalDeSoParams = *node.Params
// Setup Datadog span tracer and profiler
if node.Config.DatadogProfiler {
tracer.Start()
err := profiler.Start(profiler.WithProfileTypes(profiler.CPUProfile, profiler.BlockProfile, profiler.MutexProfile, profiler.GoroutineProfile, profiler.HeapProfile))
if err != nil {
glog.Fatal(err)
}
}
if node.Config.TimeEvents {
lib.Mode = lib.EnableTimer
}
// Setup statsd
statsdClient, err := statsd.New(fmt.Sprintf("%s:%d", os.Getenv("DD_AGENT_HOST"), 8125))
if err != nil {
glog.Fatal(err)
}
// Setup listeners and peers
desoAddrMgr := addrmgr.New(node.Config.DataDirectory, net.LookupIP)
desoAddrMgr.Start()
// This just gets localhost listening addresses on the protocol port.
// Such as [{127.0.0.1 18000 } {::1 18000 }], and associated listener structs.
listeningAddrs, listeners := GetAddrsToListenOn(node.Config.ProtocolPort)
_ = listeningAddrs
// If --connect-ips is not passed, we will connect the addresses from
// --add-ips, DNSSeeds, and DNSSeedGenerators.
if len(node.Config.ConnectIPs) == 0 {
glog.Infof("Looking for AddIPs: %v", len(node.Config.AddIPs))
for _, host := range node.Config.AddIPs {
addIPsForHost(desoAddrMgr, host, node.Params)
}
glog.Infof("Looking for DNSSeeds: %v", len(node.Params.DNSSeeds))
for _, host := range node.Params.DNSSeeds {
addIPsForHost(desoAddrMgr, host, node.Params)
}
// This is where we connect to addresses from DNSSeeds.
if !node.Config.PrivateMode {
go addSeedAddrsFromPrefixes(desoAddrMgr, node.Params)
}
}
// Setup chain database
dbDir := lib.GetBadgerDbPath(node.Config.DataDirectory)
opts := lib.PerformanceBadgerOptions(dbDir)
opts.ValueDir = dbDir
node.ChainDB, err = badger.Open(opts)
if err != nil {
panic(err)
}
// Setup snapshot logger
if node.Config.LogDBSummarySnapshots {
lib.StartDBSummarySnapshots(node.ChainDB)
}
// Validate that we weren't passed incompatible Hypersync flags
lib.ValidateHyperSyncFlags(node.Config.HyperSync, node.Config.SyncType)
// Setup postgres using a remote URI. Postgres is not currently supported when we're in hypersync mode.
if node.Config.HyperSync && node.Config.PostgresURI != "" {
glog.Fatal("--postgres-uri is not supported when --hypersync=true. We're " +
"working on Hypersync support for Postgres though!")
}
var db *pg.DB
if node.Config.PostgresURI != "" {
options, err := pg.ParseURL(node.Config.PostgresURI)
if err != nil {
panic(err)
}
db = pg.Connect(options)
node.Postgres = lib.NewPostgres(db)
// LoadMigrations registers all the migration files in the migrate package.
// See LoadMigrations for more info.
migrate.LoadMigrations()
// Migrate the database after loading all the migrations. This is equivalent
// to running "go run migrate.go migrate". See migrate.go for a migrations CLI tool
err = migrations.Run(db, "migrate", []string{"", "migrate"})
if err != nil {
panic(err)
}
}
// Setup eventManager
eventManager := lib.NewEventManager()
// Setup the server. ShouldRestart is used whenever we detect an issue and should restart the node after a recovery
// process, just in case. These issues usually arise when the node was shutdown unexpectedly mid-operation. The node
// performs regular health checks to detect whenever this occurs.
shouldRestart := false
node.Server, err, shouldRestart = lib.NewServer(
node.Params,
listeners,
desoAddrMgr,
node.Config.ConnectIPs,
node.ChainDB,
node.Postgres,
node.Config.TargetOutboundPeers,
node.Config.MaxInboundPeers,
node.Config.MinerPublicKeys,
node.Config.NumMiningThreads,
node.Config.OneInboundPerIp,
node.Config.HyperSync,
node.Config.SyncType,
node.Config.MaxSyncBlockHeight,
node.Config.DisableEncoderMigrations,
node.Config.RateLimitFeerate,
node.Config.MinFeerate,
node.Config.StallTimeoutSeconds,
node.Config.MaxBlockTemplatesCache,
node.Config.MinBlockUpdateInterval,
node.Config.BlockCypherAPIKey,
true,
node.Config.SnapshotBlockHeightPeriod,
node.Config.DataDirectory,
node.Config.MempoolDumpDirectory,
node.Config.DisableNetworking,
node.Config.ReadOnlyMode,
node.Config.IgnoreInboundInvs,
statsdClient,
node.Config.BlockProducerSeed,
node.Config.TrustedBlockProducerPublicKeys,
node.Config.TrustedBlockProducerStartHeight,
eventManager,
node.nodeMessageChan,
)
if err != nil {
if shouldRestart {
			glog.Infof(lib.CLog(lib.Red, fmt.Sprintf("Start: Got an error while starting server and shouldRestart "+
"is true. Node will be erased and resynced. Error: (%v)", err)))
node.nodeMessageChan <- lib.NodeErase
return
}
panic(err)
}
if !shouldRestart {
node.Server.Start()
// Setup TXIndex - not compatible with postgres
if node.Config.TXIndex && node.Postgres == nil {
node.TXIndex, err = lib.NewTXIndex(node.Server.GetBlockchain(), node.Params, node.Config.DataDirectory)
if err != nil {
glog.Fatal(err)
}
node.Server.TxIndex = node.TXIndex
if !shouldRestart {
node.TXIndex.Start()
}
}
}
node.IsRunning = true
if shouldRestart {
if node.nodeMessageChan != nil {
node.nodeMessageChan <- lib.NodeRestart
}
}
// Detect whenever an interrupt (Ctrl-c) or termination signals are sent.
	syscallChannel := make(chan os.Signal, 1) // buffered, as recommended for signal.Notify, so the signal is not missed
signal.Notify(syscallChannel, syscall.SIGINT, syscall.SIGTERM)
go func() {
// If an internalExitChan is triggered then we won't immediately signal a shutdown to the parent context through
// the exitChannels. When internal exit is called, we will just restart the node in the background.
select {
case _, open := <-node.internalExitChan:
if !open {
return
}
case <-syscallChannel:
}
node.Stop()
for _, channel := range exitChannels {
if *channel != nil {
close(*channel)
*channel = nil
}
}
glog.Info(lib.CLog(lib.Yellow, "Core node shutdown complete"))
}()
}
func (node *Node) Stop() {
node.runningMutex.Lock()
defer node.runningMutex.Unlock()
if !node.IsRunning {
return
}
node.IsRunning = false
glog.Infof(lib.CLog(lib.Yellow, "Node is shutting down. This might take a minute. Please don't "+
"close the node now or else you might corrupt the state."))
// Server
glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Stopping server..."))
node.Server.Stop()
glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Server successfully stopped."))
// Snapshot
snap := node.Server.GetBlockchain().Snapshot()
if snap != nil {
glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Stopping snapshot..."))
snap.Stop()
node.closeDb(snap.SnapshotDb, "snapshot")
glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Snapshot successfully stopped."))
}
// TXIndex
if node.TXIndex != nil {
glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Stopping TXIndex..."))
node.TXIndex.Stop()
node.closeDb(node.TXIndex.TXIndexChain.DB(), "txindex")
glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: TXIndex successfully stopped."))
}
// Databases
glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Closing all databases..."))
node.closeDb(node.ChainDB, "chain")
node.stopWaitGroup.Wait()
glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Databases successfully closed."))
if node.internalExitChan != nil {
close(node.internalExitChan)
node.internalExitChan = nil
}
}
// Close a database and handle the stopWaitGroup accordingly. We close databases in a go routine to speed up the process.
func (node *Node) closeDb(db *badger.DB, dbName string) {
node.stopWaitGroup.Add(1)
glog.Infof("Node.closeDb: Preparing to close %v db", dbName)
go func() {
defer node.stopWaitGroup.Done()
if err := db.Close(); err != nil {
glog.Fatalf(lib.CLog(lib.Red, fmt.Sprintf("Node.Stop: Problem closing %v db: err: (%v)", dbName, err)))
} else {
glog.Infof(lib.CLog(lib.Yellow, fmt.Sprintf("Node.closeDb: Closed %v Db", dbName)))
}
}()
}
// listenToNodeMessages listens to the communication from the engine through the nodeMessageChan. There are currently
// two main operations that the engine can request. These are a regular node restart, and a restart with a database
// erase. The latter may seem a little harsh, but it is only triggered when the node is really broken and there's
// no way we can recover.
func (node *Node) listenToNodeMessages(exitChannels ...*chan struct{}) {
select {
case <-node.internalExitChan:
break
case operation := <-node.nodeMessageChan:
if !node.IsRunning {
			panic("Node.listenToNodeMessages: Node is currently not running, nodeMessageChan should not have been called!")
}
glog.Infof("Node.listenToNodeMessages: Stopping node")
node.Stop()
glog.Infof("Node.listenToNodeMessages: Finished stopping node")
switch operation {
case lib.NodeErase:
if err := os.RemoveAll(node.Config.DataDirectory); err != nil {
glog.Fatal(lib.CLog(lib.Red, fmt.Sprintf("IMPORTANT: Problem removing the directory (%v), you "+
"should run `rm -rf %v` to delete it manually. Error: (%v)", node.Config.DataDirectory,
node.Config.DataDirectory, err)))
return
}
}
glog.Infof("Node.listenToNodeMessages: Restarting node")
// Wait a few seconds so that all peer messages we've sent while closing the node get propagated in the network.
go node.Start(exitChannels...)
break
}
}
func validateParams(params *lib.DeSoParams) {
if params.BitcoinBurnAddress == "" {
glog.Fatalf("The DeSoParams being used are missing the BitcoinBurnAddress field.")
}
// Check that TimeBetweenDifficultyRetargets is evenly divisible
// by TimeBetweenBlocks.
if params.TimeBetweenBlocks == 0 {
glog.Fatalf("The DeSoParams being used have TimeBetweenBlocks=0")
}
numBlocks := params.TimeBetweenDifficultyRetargets / params.TimeBetweenBlocks
truncatedTime := params.TimeBetweenBlocks * numBlocks
if truncatedTime != params.TimeBetweenDifficultyRetargets {
glog.Fatalf("TimeBetweenDifficultyRetargets (%v) should be evenly divisible by "+
"TimeBetweenBlocks (%v)", params.TimeBetweenDifficultyRetargets,
params.TimeBetweenBlocks)
}
if params.GenesisBlock == nil || params.GenesisBlockHashHex == "" {
glog.Fatalf("The DeSoParams are missing genesis block info.")
}
// Compute the merkle root for the genesis block and make sure it matches.
merkle, _, err := lib.ComputeMerkleRoot(params.GenesisBlock.Txns)
if err != nil {
glog.Fatalf("Could not compute a merkle root for the genesis block: %v", err)
}
if *merkle != *params.GenesisBlock.Header.TransactionMerkleRoot {
glog.Fatalf("Genesis block merkle root (%s) not equal to computed merkle root (%s)",
hex.EncodeToString(params.GenesisBlock.Header.TransactionMerkleRoot[:]),
hex.EncodeToString(merkle[:]))
}
genesisHash, err := params.GenesisBlock.Header.Hash()
if err != nil {
glog.Fatalf("Problem hashing header for the GenesisBlock in "+
"the DeSoParams (%+v): %v", params.GenesisBlock.Header, err)
}
genesisHashHex := hex.EncodeToString(genesisHash[:])
if genesisHashHex != params.GenesisBlockHashHex {
glog.Fatalf("GenesisBlockHash in DeSoParams (%s) does not match the block "+
"hash computed (%s) %d %d", params.GenesisBlockHashHex, genesisHashHex, len(params.GenesisBlockHashHex), len(genesisHashHex))
}
if params.MinDifficultyTargetHex == "" {
glog.Fatalf("The DeSoParams MinDifficultyTargetHex (%s) should be non-empty",
params.MinDifficultyTargetHex)
}
// Check to ensure the genesis block hash meets the initial difficulty target.
hexBytes, err := hex.DecodeString(params.MinDifficultyTargetHex)
if err != nil || len(hexBytes) != 32 {
glog.Fatalf("The DeSoParams MinDifficultyTargetHex (%s) with length (%d) is "+
"invalid: %v", params.MinDifficultyTargetHex, len(params.MinDifficultyTargetHex), err)
}
if params.MaxDifficultyRetargetFactor == 0 {
glog.Fatalf("The DeSoParams MaxDifficultyRetargetFactor is unset")
}
}
func GetAddrsToListenOn(protocolPort uint16) ([]net.TCPAddr, []net.Listener) {
listeningAddrs := []net.TCPAddr{}
listeners := []net.Listener{}
ifaceAddrs, err := net.InterfaceAddrs()
if err != nil {
return nil, nil
}
for _, iAddr := range ifaceAddrs {
ifaceIP, _, err := net.ParseCIDR(iAddr.String())
if err != nil {
continue
}
if ifaceIP.IsLinkLocalUnicast() {
continue
}
netAddr := net.TCPAddr{
IP: ifaceIP,
Port: int(protocolPort),
}
listener, err := net.Listen(netAddr.Network(), netAddr.String())
if err != nil {
continue
}
listeners = append(listeners, listener)
listeningAddrs = append(listeningAddrs, netAddr)
}
return listeningAddrs, listeners
}
func addIPsForHost(desoAddrMgr *addrmgr.AddrManager, host string, params *lib.DeSoParams) {
ipAddrs, err := net.LookupIP(host)
if err != nil {
glog.V(2).Infof("_addSeedAddrs: DNS discovery failed on seed host (continuing on): %s %v\n", host, err)
return
}
if len(ipAddrs) == 0 {
glog.V(2).Infof("_addSeedAddrs: No IPs found for host: %s\n", host)
return
}
// Don't take more than 5 IPs per host.
ipsPerHost := 5
if len(ipAddrs) > ipsPerHost {
glog.V(1).Infof("_addSeedAddrs: Truncating IPs found from %d to %d\n", len(ipAddrs), ipsPerHost)
ipAddrs = ipAddrs[:ipsPerHost]
}
glog.V(1).Infof("_addSeedAddrs: Adding seed IPs from seed %s: %v\n", host, ipAddrs)
// Convert addresses to NetAddress'es.
netAddrs := make([]*wire.NetAddress, len(ipAddrs))
for ii, ip := range ipAddrs {
netAddrs[ii] = wire.NewNetAddressTimestamp(
// We initialize addresses with a
// randomly selected "last seen time" between 3
// and 7 days ago similar to what bitcoind does.
time.Now().Add(-1*time.Second*time.Duration(lib.SecondsIn3Days+
lib.RandInt32(lib.SecondsIn4Days))),
0,
ip,
params.DefaultSocketPort)
}
glog.V(1).Infof("_addSeedAddrs: Computed the following wire.NetAddress'es: %s", spew.Sdump(netAddrs))
// Normally the second argument is the source who told us about the
// addresses we're adding. In this case since the source is a DNS seed
// just use the first address in the fetch as the source.
desoAddrMgr.AddAddresses(netAddrs, netAddrs[0])
}
// Must be run in a goroutine. This function continuously adds IPs from a DNS seed
// prefix+suffix by iterating up through all of the possible numeric values, which are typically
// [0, 10]
func addSeedAddrsFromPrefixes(desoAddrMgr *addrmgr.AddrManager, params *lib.DeSoParams) {
MaxIterations := 20
go func() {
for dnsNumber := 0; dnsNumber < MaxIterations; dnsNumber++ {
var wg deadlock.WaitGroup
for _, dnsGeneratorOuter := range params.DNSSeedGenerators {
wg.Add(1)
go func(dnsGenerator []string) {
dnsString := fmt.Sprintf("%s%d%s", dnsGenerator[0], dnsNumber, dnsGenerator[1])
glog.V(2).Infof("_addSeedAddrsFromPrefixes: Querying DNS seed: %s", dnsString)
addIPsForHost(desoAddrMgr, dnsString, params)
wg.Done()
}(dnsGeneratorOuter)
}
wg.Wait()
}
}()
}
| [
"\"DD_AGENT_HOST\""
] | [] | [
"DD_AGENT_HOST"
] | [] | ["DD_AGENT_HOST"] | go | 1 | 0 | |
go/parquet/encryption_read_config_test.go | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parquet_test
import (
"encoding/binary"
"fmt"
"os"
"path"
"testing"
"github.com/apache/arrow/go/v8/arrow/memory"
"github.com/apache/arrow/go/v8/parquet"
"github.com/apache/arrow/go/v8/parquet/file"
"github.com/apache/arrow/go/v8/parquet/internal/encryption"
"github.com/stretchr/testify/suite"
)
/*
* This file contains a unit-test for reading encrypted Parquet files with
* different decryption configurations.
*
* The unit-test is called multiple times, each time to decrypt parquet files using
* different decryption configuration as described below.
* In each call two encrypted files are read: one temporary file that was generated using
* encryption_write_config_test.go test and will be deleted upon
* reading it, while the second resides in
* parquet-testing/data repository. Those two encrypted files were encrypted using the
* same encryption configuration.
* The encrypted parquet file names are passed as parameter to the unit-test.
*
* A detailed description of the Parquet Modular Encryption specification can be found
* here:
* https://github.com/apache/parquet-format/blob/encryption/Encryption.md
*
* The following decryption configurations are used to decrypt each parquet file:
*
* - Decryption configuration 1: Decrypt using key retriever that holds the keys of
* two encrypted columns and the footer key.
* - Decryption configuration 2: Decrypt using key retriever that holds the keys of
* two encrypted columns and the footer key. Supplies
* aad_prefix to verify file identity.
* - Decryption configuration 3: Decrypt using explicit column and footer keys
* (instead of key retrieval callback).
* - Decryption Configuration 4: PlainText Footer mode - test legacy reads,
* read the footer + all non-encrypted columns.
* (pairs with encryption configuration 3)
*
* The encrypted parquet files that is read was encrypted using one of the configurations
* below:
*
* - Encryption configuration 1: Encrypt all columns and the footer with the same key.
* (uniform encryption)
* - Encryption configuration 2: Encrypt two columns and the footer, with different
* keys.
* - Encryption configuration 3: Encrypt two columns, with different keys.
* Don’t encrypt footer (to enable legacy readers)
* - plaintext footer mode.
* - Encryption configuration 4: Encrypt two columns and the footer, with different
* keys. Supply aad_prefix for file identity
* verification.
* - Encryption configuration 5: Encrypt two columns and the footer, with different
* keys. Supply aad_prefix, and call
* disable_aad_prefix_storage to prevent file
* identity storage in file metadata.
* - Encryption configuration 6: Encrypt two columns and the footer, with different
* keys. Use the alternative (AES_GCM_CTR_V1) algorithm.
*/
func getDataDir() string {
datadir := os.Getenv("PARQUET_TEST_DATA")
if datadir == "" {
panic("please point the PARQUET_TEST_DATA environment variable to the test data dir")
}
return datadir
}
type TestDecryptionSuite struct {
suite.Suite
pathToDouble string
pathToFloat string
decryptionConfigs []*parquet.FileDecryptionProperties
footerEncryptionKey string
colEncryptionKey1 string
colEncryptionKey2 string
fileName string
}
func (d *TestDecryptionSuite) TearDownSuite() {
os.Remove(tempdir)
}
func TestFileEncryptionDecryption(t *testing.T) {
suite.Run(t, new(EncryptionConfigTestSuite))
suite.Run(t, new(TestDecryptionSuite))
}
func (d *TestDecryptionSuite) SetupSuite() {
d.pathToDouble = "double_field"
d.pathToFloat = "float_field"
d.footerEncryptionKey = FooterEncryptionKey
d.colEncryptionKey1 = ColumnEncryptionKey1
d.colEncryptionKey2 = ColumnEncryptionKey2
d.fileName = FileName
d.createDecryptionConfigs()
}
func (d *TestDecryptionSuite) createDecryptionConfigs() {
// Decryption configuration 1: Decrypt using key retriever callback that holds the
// keys of two encrypted columns and the footer key.
stringKr1 := make(encryption.StringKeyIDRetriever)
stringKr1.PutKey("kf", d.footerEncryptionKey)
stringKr1.PutKey("kc1", d.colEncryptionKey1)
stringKr1.PutKey("kc2", d.colEncryptionKey2)
d.decryptionConfigs = append(d.decryptionConfigs,
parquet.NewFileDecryptionProperties(parquet.WithKeyRetriever(stringKr1)))
// Decryption configuration 2: Decrypt using key retriever callback that holds the
// keys of two encrypted columns and the footer key. Supply aad_prefix.
stringKr2 := make(encryption.StringKeyIDRetriever)
stringKr2.PutKey("kf", d.footerEncryptionKey)
stringKr2.PutKey("kc1", d.colEncryptionKey1)
stringKr2.PutKey("kc2", d.colEncryptionKey2)
d.decryptionConfigs = append(d.decryptionConfigs,
parquet.NewFileDecryptionProperties(parquet.WithKeyRetriever(stringKr2), parquet.WithDecryptAadPrefix(d.fileName)))
// Decryption configuration 3: Decrypt using explicit column and footer keys. Supply
// aad_prefix.
decryptCols := make(parquet.ColumnPathToDecryptionPropsMap)
decryptCols[d.pathToFloat] = parquet.NewColumnDecryptionProperties(d.pathToFloat, parquet.WithDecryptKey(d.colEncryptionKey2))
decryptCols[d.pathToDouble] = parquet.NewColumnDecryptionProperties(d.pathToDouble, parquet.WithDecryptKey(d.colEncryptionKey1))
d.decryptionConfigs = append(d.decryptionConfigs,
parquet.NewFileDecryptionProperties(parquet.WithFooterKey(d.footerEncryptionKey), parquet.WithColumnKeys(decryptCols)))
// Decryption Configuration 4: use plaintext footer mode, read only footer + plaintext
// columns.
d.decryptionConfigs = append(d.decryptionConfigs, nil)
}
func (d *TestDecryptionSuite) decryptFile(filename string, decryptConfigNum int) {
// decryptConfigNum is zero-based, so index 3 corresponds to decryption
// configuration 4 (plaintext footer mode, which uses no decryption properties)
props := parquet.NewReaderProperties(memory.DefaultAllocator)
if decryptConfigNum != 3 {
props.FileDecryptProps = d.decryptionConfigs[decryptConfigNum].Clone("")
}
fileReader, err := file.OpenParquetFile(filename, false, file.WithReadProps(props))
if err != nil {
panic(err)
}
defer fileReader.Close()
// get metadata
fileMetadata := fileReader.MetaData()
// get number of rowgroups
numRowGroups := len(fileMetadata.RowGroups)
// number of columns
numColumns := fileMetadata.Schema.NumColumns()
d.Equal(8, numColumns)
for r := 0; r < numRowGroups; r++ {
rowGroupReader := fileReader.RowGroup(r)
// get rowgroup meta
rgMeta := fileMetadata.RowGroup(r)
valuesRead := 0
rowsRead := int64(0)
// get col reader for boolean column
colReader := rowGroupReader.Column(0)
boolReader := colReader.(*file.BooleanColumnChunkReader)
// get column chunk metadata for boolean column
boolMd, _ := rgMeta.ColumnChunk(0)
// Read all rows in column
i := 0
for boolReader.HasNext() {
var val [1]bool
// read one value at a time. the number of rows read is returned. values
// read contains the number of non-null rows
rowsRead, valuesRead, _ = boolReader.ReadBatch(1, val[:], nil, nil)
// ensure only 1 value is read
d.EqualValues(1, rowsRead)
// there are no null values
d.EqualValues(1, valuesRead)
// verify the value
expected := i%2 == 0
d.Equal(expected, val[0], "i: ", i)
i++
}
d.EqualValues(i, boolMd.NumValues())
// Get column reader for int32 column
colReader = rowGroupReader.Column(1)
int32reader := colReader.(*file.Int32ColumnChunkReader)
int32md, _ := rgMeta.ColumnChunk(1)
// Read all rows in column
i = 0
for int32reader.HasNext() {
var val [1]int32
// read one value at a time. the number of rows read is returned. values
// read contains the number of non-null rows
rowsRead, valuesRead, _ = int32reader.ReadBatch(1, val[:], nil, nil)
// ensure only 1 value is read
d.EqualValues(1, rowsRead)
// there are no null values
d.EqualValues(1, valuesRead)
// verify the value
d.EqualValues(i, val[0])
i++
}
d.EqualValues(i, int32md.NumValues())
// Get column reader for int64 column
colReader = rowGroupReader.Column(2)
int64reader := colReader.(*file.Int64ColumnChunkReader)
int64md, _ := rgMeta.ColumnChunk(2)
// Read all rows in column
i = 0
for int64reader.HasNext() {
var (
val [1]int64
def [1]int16
rep [1]int16
)
// read one value at a time. the number of rows read is returned. values
// read contains the number of non-null rows
rowsRead, valuesRead, _ = int64reader.ReadBatch(1, val[:], def[:], rep[:])
// ensure only 1 value is read
d.EqualValues(1, rowsRead)
// there are no null values
d.EqualValues(1, valuesRead)
// verify the value
expectedValue := int64(i) * 1000 * 1000 * 1000 * 1000
d.Equal(expectedValue, val[0])
if i%2 == 0 {
d.EqualValues(1, rep[0])
} else {
d.Zero(rep[0])
}
i++
}
d.EqualValues(i, int64md.NumValues())
// Get column reader for int96 column
colReader = rowGroupReader.Column(3)
int96reader := colReader.(*file.Int96ColumnChunkReader)
int96md, _ := rgMeta.ColumnChunk(3)
// Read all rows in column
i = 0
for int96reader.HasNext() {
var (
val [1]parquet.Int96
)
// read one value at a time. the number of rows read is returned. values
// read contains the number of non-null rows
rowsRead, valuesRead, _ = int96reader.ReadBatch(1, val[:], nil, nil)
// ensure only 1 value is read
d.EqualValues(1, rowsRead)
// there are no null values
d.EqualValues(1, valuesRead)
// verify the value
var expectedValue parquet.Int96
binary.LittleEndian.PutUint32(expectedValue[:4], uint32(i))
binary.LittleEndian.PutUint32(expectedValue[4:], uint32(i+1))
binary.LittleEndian.PutUint32(expectedValue[8:], uint32(i+2))
d.Equal(expectedValue, val[0])
i++
}
d.EqualValues(i, int96md.NumValues())
// these two columns are always encrypted when we write them, so don't
// try to read them during the plaintext test.
if props.FileDecryptProps != nil {
// Get column reader for the float column
colReader = rowGroupReader.Column(4)
floatReader := colReader.(*file.Float32ColumnChunkReader)
floatmd, _ := rgMeta.ColumnChunk(4)
i = 0
for floatReader.HasNext() {
var value [1]float32
// read one value at a time. the number of rows read is returned. values
// read contains the number of non-null rows
rowsRead, valuesRead, _ = floatReader.ReadBatch(1, value[:], nil, nil)
// ensure only 1 value is read
d.EqualValues(1, rowsRead)
// there are no null values
d.EqualValues(1, valuesRead)
// verify the value
expectedValue := float32(i) * 1.1
d.Equal(expectedValue, value[0])
i++
}
d.EqualValues(i, floatmd.NumValues())
// Get column reader for the double column
colReader = rowGroupReader.Column(5)
dblReader := colReader.(*file.Float64ColumnChunkReader)
dblmd, _ := rgMeta.ColumnChunk(5)
i = 0
for dblReader.HasNext() {
var value [1]float64
// read one value at a time. the number of rows read is returned. values
// read contains the number of non-null rows
rowsRead, valuesRead, _ = dblReader.ReadBatch(1, value[:], nil, nil)
// ensure only 1 value is read
d.EqualValues(1, rowsRead)
// there are no null values
d.EqualValues(1, valuesRead)
// verify the value
expectedValue := float64(i) * 1.1111111
d.Equal(expectedValue, value[0])
i++
}
d.EqualValues(i, dblmd.NumValues())
}
colReader = rowGroupReader.Column(6)
bareader := colReader.(*file.ByteArrayColumnChunkReader)
bamd, _ := rgMeta.ColumnChunk(6)
i = 0
for bareader.HasNext() {
var value [1]parquet.ByteArray
var def [1]int16
rowsRead, valuesRead, _ := bareader.ReadBatch(1, value[:], def[:], nil)
d.EqualValues(1, rowsRead)
expected := [10]byte{'p', 'a', 'r', 'q', 'u', 'e', 't', 0, 0, 0}
expected[7] = byte('0') + byte(i/100)
expected[8] = byte('0') + byte(i/10)%10
expected[9] = byte('0') + byte(i%10)
if i%2 == 0 {
d.Equal(1, valuesRead)
d.Len(value[0], 10)
d.EqualValues(expected[:], value[0])
d.EqualValues(1, def[0])
} else {
d.Zero(valuesRead)
d.Zero(def[0])
}
i++
}
d.EqualValues(i, bamd.NumValues())
}
}
func (d *TestDecryptionSuite) checkResults(fileName string, decryptionConfig, encryptionConfig uint) {
decFn := func() { d.decryptFile(fileName, int(decryptionConfig-1)) }
// Encryption configuration number 5 contains aad_prefix and disable_aad_prefix_storage
// an exception is expected to be thrown if the file is not decrypted with aad_prefix
if encryptionConfig == 5 {
if decryptionConfig == 1 || decryptionConfig == 3 {
d.Panics(decFn)
return
}
}
// decryption config number two contains aad_prefix. an exception
// is expected to be thrown if the file was not encrypted with the same aad_prefix
if decryptionConfig == 2 {
if encryptionConfig != 5 && encryptionConfig != 4 {
d.Panics(decFn)
return
}
}
// decryption config 4 can only work when the encryption config is 3
if decryptionConfig == 4 && encryptionConfig != 3 {
return
}
d.NotPanics(decFn)
}
// Read encrypted parquet file.
// the test reads two parquet files that were encrypted using the same encryption config
// one was generated in encryption_write_configurations_test.go tests and is deleted
// once the file is read and the second exists in parquet-testing/data folder
func (d *TestDecryptionSuite) TestDecryption() {
tests := []struct {
file string
config uint
}{
{"uniform_encryption.parquet.encrypted", 1},
{"encrypt_columns_and_footer.parquet.encrypted", 2},
{"encrypt_columns_plaintext_footer.parquet.encrypted", 3},
{"encrypt_columns_and_footer_aad.parquet.encrypted", 4},
{"encrypt_columns_and_footer_disable_aad_storage.parquet.encrypted", 5},
{"encrypt_columns_and_footer_ctr.parquet.encrypted", 6},
}
for _, tt := range tests {
d.Run(tt.file, func() {
// decrypt file that was generated in encryption-write-tests
tmpFile := path.Join(tempdir, "tmp_"+tt.file)
d.Require().FileExists(tmpFile)
// iterate over decryption configs and use each one to read the encrypted file
for idx := range d.decryptionConfigs {
decConfig := idx + 1
d.checkResults(tmpFile, uint(decConfig), tt.config)
}
os.Remove(tmpFile)
file := path.Join(getDataDir(), tt.file)
d.Require().FileExists(file)
for idx := range d.decryptionConfigs {
decConfig := idx + 1
d.Run(fmt.Sprintf("config %d", decConfig), func() {
d.checkResults(file, uint(decConfig), tt.config)
})
}
})
}
}
| [
"\"PARQUET_TEST_DATA\""
] | [] | [
"PARQUET_TEST_DATA"
] | [] | ["PARQUET_TEST_DATA"] | go | 1 | 0 | |
context/context.go | package context
import (
"net/http"
_ "net/http/pprof"
"os"
"github.com/0xAX/notificator"
"github.com/erroneousboat/termui"
termbox "github.com/nsf/termbox-go"
"github.com/erroneousboat/slack-term/config"
"github.com/erroneousboat/slack-term/service"
"github.com/erroneousboat/slack-term/views"
)
const (
CommandMode = "command"
InsertMode = "insert"
SearchMode = "search"
)
type AppContext struct {
EventQueue chan termbox.Event
Service *service.SlackService
Body *termui.Grid
View *views.View
Config *config.Config
Debug bool
Mode string
Notify *notificator.Notificator
}
// CreateAppContext creates an application context which can be passed
// and referenced throughout the application
func CreateAppContext(flgConfig string, flgToken string, flgDebug bool) (*AppContext, error) {
if flgDebug {
go func() {
http.ListenAndServe(":6060", nil)
}()
}
// Loading screen
views.Loading()
// Load config
config, err := config.NewConfig(flgConfig)
if err != nil {
return nil, err
}
// When slack token isn't set in the config file, we'll check
// the command-line flag or the environment variable
if config.SlackToken == "" {
if flgToken != "" {
config.SlackToken = flgToken
} else {
config.SlackToken = os.Getenv("SLACK_TOKEN")
}
}
// Create Service
svc, err := service.NewSlackService(config)
if err != nil {
return nil, err
}
// Create the main view
view := views.CreateView(config, svc)
// Setup the interface
if flgDebug {
termui.Body.AddRows(
termui.NewRow(
termui.NewCol(config.SidebarWidth, 0, view.Channels),
termui.NewCol(config.MainWidth-5, 0, view.Chat),
termui.NewCol(config.MainWidth-6, 0, view.Debug),
),
termui.NewRow(
termui.NewCol(config.SidebarWidth, 0, view.Mode),
termui.NewCol(config.MainWidth, 0, view.Input),
),
)
} else {
termui.Body.AddRows(
termui.NewRow(
termui.NewCol(config.SidebarWidth, 0, view.Channels),
termui.NewCol(config.MainWidth, 0, view.Chat),
),
termui.NewRow(
termui.NewCol(config.SidebarWidth, 0, view.Mode),
termui.NewCol(config.MainWidth, 0, view.Input),
),
)
}
termui.Body.Align()
termui.Render(termui.Body)
return &AppContext{
EventQueue: make(chan termbox.Event, 20),
Service: svc,
Body: termui.Body,
View: view,
Config: config,
Debug: flgDebug,
Mode: CommandMode,
Notify: notificator.New(notificator.Options{AppName: "slack-term"}),
}, nil
}
| [
"\"SLACK_TOKEN\""
] | [] | [
"SLACK_TOKEN"
] | [] | ["SLACK_TOKEN"] | go | 1 | 0 | |
CTCI/FrequencyQueries.py | import math
import os
import random
import re
import sys
def freqQuery(queries):
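# query[0] selects the operation: 1 = add an occurrence of query[1],
# 2 = remove one occurrence of query[1] (if present), 3 = check whether
# any value occurs exactly query[1] times and record "1" or "0"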
datadict = {}
out = []
for query in queries:
if (query[0]==1):
if (query[1] in datadict):
datadict[query[1]] += 1
else:
datadict[query[1]] = 1
elif (query[0]==2):
if (query[1] in datadict):
if (datadict[query[1]] != 0):
datadict[query[1]] -= 1
else:
if (query[1] in datadict.values()):
out.append("1")
else:
out.append("0")
return out
if __name__ == '__main__':
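# the grader supplies the OUTPUT_PATH environment variable; results are
# written to that file instead of stdout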
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input().strip())
queries = []
for _ in range(q):
queries.append(list(map(int, input().rstrip().split())))
ans = freqQuery(queries)
fptr.write('\n'.join(map(str, ans)))
fptr.write('\n')
fptr.close()
| [] | [] | [
"OUTPUT_PATH"
] | [] | ["OUTPUT_PATH"] | python | 1 | 0 |